// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */

/*
 * Glue between the generic flow-steering core and mlx5dr software
 * steering: implements the mlx5_flow_cmds callbacks on top of mlx5dr
 * tables/matchers/rules/actions, falling back to the FW command set
 * for tables that must stay FW-owned.
 */

#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5dr.h"
#include "fs_dr.h"

/* Termination tables are handled via FW commands, not SW steering. */
static bool mlx5_dr_is_fw_table(u32 flags)
{
	if (flags & MLX5_FLOW_TABLE_TERMINATION)
		return true;

	return false;
}

/* Root table updates always go through the FW command interface. */
static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 underlay_qpn,
				      bool disconnect)
{
	return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
							 disconnect);
}

/*
 * Point @ft's miss path at @next_ft (or clear it when next_ft is NULL),
 * then destroy the previously installed miss action, if any.
 *
 * Note: if setting the miss action fails, the freshly created dest
 * action is destroyed and the destroy result overwrites err; the old
 * miss action pointer is dropped from the table either way.
 */
static int set_miss_action(struct mlx5_flow_root_namespace *ns,
			   struct mlx5_flow_table *ft,
			   struct mlx5_flow_table *next_ft)
{
	struct mlx5dr_action *old_miss_action;
	struct mlx5dr_action *action = NULL;
	struct mlx5dr_table *next_tbl;
	int err;

	next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
	if (next_tbl) {
		action = mlx5dr_action_create_dest_table(next_tbl);
		if (!action)
			return -EINVAL;
	}
	old_miss_action = ft->fs_dr_table.miss_action;
	err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
	if (err && action) {
		err = mlx5dr_action_destroy(action);
		if (err)
			mlx5_core_err(ns->dev,
				      "Failed to destroy action (%d)\n", err);
		action = NULL;
	}
	ft->fs_dr_table.miss_action = action;
	if (old_miss_action) {
		err = mlx5dr_action_destroy(old_miss_action);
		if (err)
			mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
				      err);
	}

	return err;
}

/*
 * Create a SW-steering table for @ft (FW-owned tables are delegated to
 * the FW commands).  On success the dr table handle and its HW id are
 * cached in ft, and a miss action towards @next_ft is installed when
 * one is given.  @size is ignored by SW steering; max_fte is reported
 * as unlimited (INT_MAX).
 */
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 unsigned int size,
					 struct mlx5_flow_table *next_ft)
{
	struct mlx5dr_table *tbl;
	u32 flags;
	int err;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
								    size,
								    next_ft);
	flags = ft->flags;
	/* turn off encap/decap if not supported for sw-steering by fw */
	if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported))
		flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
				      MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
	if (!tbl) {
		mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
		return -EINVAL;
	}

	ft->fs_dr_table.dr_table = tbl;
	ft->id = mlx5dr_table_get_id(tbl);

	if (next_ft) {
		err = set_miss_action(ns, ft, next_ft);
		if (err) {
			mlx5dr_table_destroy(tbl);
			ft->fs_dr_table.dr_table = NULL;
			return err;
		}
	}

	ft->max_fte = INT_MAX;

	return 0;
}

/* Destroy the dr table and its cached miss action (if one was set). */
static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft)
{
	struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
	int err;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);

	err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
	if (err) {
		mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
			      err);
		return err;
	}
	if (action) {
		err = mlx5dr_action_destroy(action);
		if (err) {
			mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
				      err);
			return err;
		}
	}

	return err;
}

/* "Modify" for SW steering means re-pointing the miss path at next_ft. */
static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 struct mlx5_flow_table *next_ft)
{
	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);

	return set_miss_action(ns, ft, next_ft);
}

/*
 * A flow group maps to a dr matcher: priority is taken from the group's
 * start_flow_index, and the match mask/criteria come straight from the
 * create_flow_group_in command layout in @in.
 */
static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 u32 *in,
					 struct mlx5_flow_group *fg)
{
	struct mlx5dr_matcher *matcher;
	u32 priority = MLX5_GET(create_flow_group_in, in,
				start_flow_index);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    in,
					    match_criteria_enable);
	struct mlx5dr_match_parameters mask;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
								    fg);

	mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
				      in, match_criteria);
	mask.match_sz = sizeof(fg->mask.match_criteria);

	matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
					priority,
					match_criteria_enable,
					&mask);
	if (!matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_dr_matcher.dr_matcher = matcher;
	return 0;
}

/* Tear down the dr matcher backing this flow group. */
static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_group *fg)
{
	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);

	return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
}

/* Forward-to-vport action; vhca_id is only valid when its flag is set. */
static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
						 struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;

	return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
					       dest_attr->vport.flags &
					       MLX5_FLOW_DEST_VPORT_VHCA_ID,
					       dest_attr->vport.vhca_id);
}

/* Forward-to-uplink: the uplink vport with vhca_id always considered valid. */
static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
						  struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;

	return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
					       dest_attr->vport.vhca_id);
}

/*
 * Forward-to-table action; FW-owned destination tables need the
 * dedicated fw-table variant of the dest action.
 */
static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
					      struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;

	if (mlx5_dr_is_fw_table(dest_ft->flags))
		return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
	return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}

/*
 * Build a push-vlan action from a vlan descriptor: the 32-bit header is
 * ethtype:16 | prio:3 (<<12) | vid:12, passed to dr in network order.
 */
static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
						     struct mlx5_fs_vlan *vlan)
{
	u16 n_ethtype = vlan->ethtype;
	u8 prio = vlan->prio;
	u16 vid = vlan->vid;
	u32 vlan_hdr;

	vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
	return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
}

/* True when a vport/uplink destination carries its own reformat id. */
static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
{
	return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}

/* We want to support a rule with 32 destinations, which means we need to
 * account for 32 destinations plus usually a counter plus one more action
 * for a multi-destination flow table.
 */
#define MLX5_FLOW_CONTEXT_ACTION_MAX  34

/*
 * Translate an FTE into a dr rule.
 *
 * Three arrays are built:
 *  - actions[]:       ordered action chain passed to mlx5dr_rule_create()
 *  - fs_dr_actions[]: actions created here, to be destroyed with the rule
 *  - term_actions[]:  terminating destinations; a single one is appended
 *                     to actions[] directly, several are folded into one
 *                     multi-destination table action.
 *
 * On success ownership of fs_dr_actions moves to fte->fs_dr_rule; on
 * failure everything created here is destroyed in reverse order.
 */
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *group,
				  struct fs_fte *fte)
{
	struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action_dest *term_actions;
	struct mlx5dr_match_parameters params;
	struct mlx5_core_dev *dev = ns->dev;
	struct mlx5dr_action **fs_dr_actions;
	struct mlx5dr_action *tmp_action;
	struct mlx5dr_action **actions;
	bool delay_encap_set = false;
	struct mlx5dr_rule *rule;
	struct mlx5_flow_rule *dst;
	int fs_dr_num_actions = 0;
	int num_term_actions = 0;
	int num_actions = 0;
	size_t match_sz;
	int err = 0;
	int i;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);

	actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
			  GFP_KERNEL);
	if (!actions) {
		err = -ENOMEM;
		goto out_err;
	}

	fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
				sizeof(*fs_dr_actions), GFP_KERNEL);
	if (!fs_dr_actions) {
		err = -ENOMEM;
		goto free_actions_alloc;
	}

	term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
			       sizeof(*term_actions), GFP_KERNEL);
	if (!term_actions) {
		err = -ENOMEM;
		goto free_fs_dr_actions_alloc;
	}

	match_sz = sizeof(fte->val);

	/* Drop reformat action bit if destination vport set with reformat */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (!contain_vport_reformat_action(dst))
				continue;

			fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			break;
		}
	}

	/* The order of the actions below must be kept; only the following
	 * order is supported by SW steering:
	 * TX: modify header -> push vlan -> encap
	 * RX: decap -> pop vlan -> modify header
	 */
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		enum mlx5dr_action_reformat_type decap_type =
			DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;

		tmp_action = mlx5dr_action_create_packet_reformat(domain,
								  decap_type,
								  0, 0, 0,
								  NULL);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		bool is_decap = fte->action.pkt_reformat->reformat_type ==
			MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;

		/* encap must come after modify header / push vlan (see
		 * ordering comment above), so it is deferred.
		 */
		if (is_decap)
			actions[num_actions++] =
				fte->action.pkt_reformat->action.dr_action;
		else
			delay_encap_set = true;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		tmp_action =
			mlx5dr_action_create_pop_vlan();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
		tmp_action =
			mlx5dr_action_create_pop_vlan();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		actions[num_actions++] =
			fte->action.modify_hdr->action.dr_action;

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
		tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (delay_encap_set)
		actions[num_actions++] =
			fte->action.pkt_reformat->action.dr_action;

	/* The order of the actions below is not important */

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		tmp_action = mlx5dr_action_create_drop();
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		term_actions[num_term_actions++].dest = tmp_action;
	}

	if (fte->flow_context.flow_tag) {
		tmp_action =
			mlx5dr_action_create_tag(fte->flow_context.flow_tag);
		if (!tmp_action) {
			err = -ENOMEM;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		list_for_each_entry(dst, &fte->node.children, node.list) {
			enum mlx5_flow_destination_type type = dst->dest_attr.type;
			u32 id;

			if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
			    num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
				err = -EOPNOTSUPP;
				goto free_actions;
			}

			/* counters are handled in a separate pass below */
			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				tmp_action = create_ft_action(domain, dst);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_actions[num_term_actions++].dest = tmp_action;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
					     create_vport_action(domain, dst) :
					     create_uplink_action(domain, dst);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_actions[num_term_actions].dest = tmp_action;

				if (dst->dest_attr.vport.flags &
				    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
					term_actions[num_term_actions].reformat =
						dst->dest_attr.vport.pkt_reformat->action.dr_action;

				num_term_actions++;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				tmp_action = mlx5dr_action_create_dest_table_num(domain,
										 id);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_actions[num_term_actions++].dest = tmp_action;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				tmp_action = mlx5dr_action_create_flow_sampler(domain,
									       id);
				if (!tmp_action) {
					err = -ENOMEM;
					goto free_actions;
				}
				fs_dr_actions[fs_dr_num_actions++] = tmp_action;
				term_actions[num_term_actions++].dest = tmp_action;
				break;
			default:
				err = -EOPNOTSUPP;
				goto free_actions;
			}
		}
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		list_for_each_entry(dst, &fte->node.children, node.list) {
			u32 id;

			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
			    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
				err = -EOPNOTSUPP;
				goto free_actions;
			}

			id = dst->dest_attr.counter_id;
			tmp_action =
				mlx5dr_action_create_flow_counter(id);
			if (!tmp_action) {
				err = -ENOMEM;
				goto free_actions;
			}

			fs_dr_actions[fs_dr_num_actions++] = tmp_action;
			actions[num_actions++] = tmp_action;
		}
	}

	params.match_sz = match_sz;
	params.match_buf = (u64 *)fte->val;
	if (num_term_actions == 1) {
		/* single terminating dest: append it (and its optional
		 * per-vport reformat) straight onto the action chain
		 */
		if (term_actions->reformat) {
			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
				err = -EOPNOTSUPP;
				goto free_actions;
			}
			actions[num_actions++] = term_actions->reformat;
		}

		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
			err = -EOPNOTSUPP;
			goto free_actions;
		}
		actions[num_actions++] = term_actions->dest;
	} else if (num_term_actions > 1) {
		/* several terminating dests: fold them into one
		 * multi-destination table action
		 */
		bool ignore_flow_level =
			!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
		u32 flow_source = fte->flow_context.flow_source;

		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
		    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
			err = -EOPNOTSUPP;
			goto free_actions;
		}
		tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
								term_actions,
								num_term_actions,
								ignore_flow_level,
								flow_source);
		if (!tmp_action) {
			err = -EOPNOTSUPP;
			goto free_actions;
		}
		fs_dr_actions[fs_dr_num_actions++] = tmp_action;
		actions[num_actions++] = tmp_action;
	}

	rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
				  &params,
				  num_actions,
				  actions,
				  fte->flow_context.flow_source);
	if (!rule) {
		err = -EINVAL;
		goto free_actions;
	}

	kfree(term_actions);
	kfree(actions);

	/* fs_dr_actions ownership moves to the fte for later teardown */
	fte->fs_dr_rule.dr_rule = rule;
	fte->fs_dr_rule.num_actions = fs_dr_num_actions;
	fte->fs_dr_rule.dr_actions = fs_dr_actions;

	return 0;

free_actions:
	/* Free in reverse order to handle action dependencies */
	for (i = fs_dr_num_actions - 1; i >= 0; i--)
		if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
			mlx5dr_action_destroy(fs_dr_actions[i]);

	kfree(term_actions);
free_fs_dr_actions_alloc:
	kfree(fs_dr_actions);
free_actions_alloc:
	kfree(actions);
out_err:
	mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
	return err;
}

/*
 * Map an fs_core reformat type to the dr reformat enum and create the
 * corresponding packet-reformat action.  @namespace is unused here.
 */
static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat_params *params,
					     enum mlx5_flow_namespace_type namespace,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action *action;
	int dr_reformat;

	switch (params->type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
		break;
	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
		break;
	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
		break;
	case MLX5_REFORMAT_TYPE_INSERT_HDR:
		dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
		break;
	case MLX5_REFORMAT_TYPE_REMOVE_HDR:
		dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR;
		break;
	default:
		mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
			      params->type);
		return -EOPNOTSUPP;
	}

	action = mlx5dr_action_create_packet_reformat(dr_domain,
						      dr_reformat,
						      params->param_0,
						      params->param_1,
						      params->size,
						      params->data);
	if (!action) {
		mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
		return -EINVAL;
	}

	pkt_reformat->action.dr_action = action;

	return 0;
}

/* Release a packet-reformat action created by the alloc above. */
static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_pkt_reformat *pkt_reformat)
{
	mlx5dr_action_destroy(pkt_reformat->action.dr_action);
}

/*
 * Create a modify-header action from @num_actions set/add/copy entries;
 * @namespace is unused by the dr backend.
 */
static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					   u8 namespace, u8 num_actions,
					   void *modify_actions,
					   struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action *action;
	size_t actions_sz;

	actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) *
		num_actions;
	action = mlx5dr_action_create_modify_header(dr_domain, 0,
						    actions_sz,
						    modify_actions);
	if (!action) {
		mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
		return -EINVAL;
	}

	modify_hdr->action.dr_action = action;

	return 0;
}

/* Release a modify-header action created by the alloc above. */
static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					      struct mlx5_modify_hdr *modify_hdr)
{
	mlx5dr_action_destroy(modify_hdr->action.dr_action);
}

/* Match definers are not supported by the SW-steering backend. */
static int
mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
				  int definer_id)
{
	return -EOPNOTSUPP;
}

/* Match definers are not supported by the SW-steering backend. */
static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
					    u16 format_id, u32 *match_mask)
{
	return -EOPNOTSUPP;
}

/*
 * Destroy the fte's dr rule, then the actions that were created for it
 * (in reverse order), and free the action array.
 */
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_table *ft,
				  struct fs_fte *fte)
{
	struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
	int err;
	int i;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);

	err = mlx5dr_rule_destroy(rule->dr_rule);
	if (err)
		return err;

	/* Free in reverse order to handle action dependencies */
	for (i = rule->num_actions - 1; i >= 0; i--)
		if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
			mlx5dr_action_destroy(rule->dr_actions[i]);

	kfree(rule->dr_actions);
	return 0;
}

/*
 * Update is implemented as create-new-then-delete-old: the current dr
 * rule is backed up, a fresh rule is created for the updated fte, and
 * only then the old rule is removed.  On create failure the backup is
 * restored so the fte still points at a valid rule.
 */
static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *group,
				  int modify_mask,
				  struct fs_fte *fte)
{
	struct fs_fte fte_tmp = {};
	int ret;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);

	/* Backup current dr rule details */
	fte_tmp.fs_dr_rule = fte->fs_dr_rule;
	memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule));

	/* First add the new updated rule, then delete the old rule */
	ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte);
	if (ret)
		goto restore_fte;

	ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp);
	WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n");
	return ret;

restore_fte:
	fte->fs_dr_rule = fte_tmp.fs_dr_rule;
	return ret;
}

/* Link (or unlink, when peer_ns is NULL) the peer dr domain. */
static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
				struct mlx5_flow_root_namespace *peer_ns)
{
	struct mlx5dr_domain *peer_domain = NULL;

	if (peer_ns)
		peer_domain = peer_ns->fs_dr_domain.dr_domain;
	mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
			       peer_domain);
	return 0;
}

/* Create the FDB dr domain backing this root namespace. */
static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
{
	ns->fs_dr_domain.dr_domain =
		mlx5dr_domain_create(ns->dev,
				     MLX5DR_DOMAIN_TYPE_FDB);
	if (!ns->fs_dr_domain.dr_domain) {
		mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Tear down the dr domain backing this root namespace. */
static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
}

/*
 * Extra steering capabilities: FDB tables on devices newer than
 * ConnectX-5 steering format support push-vlan on RX and pop-vlan on TX.
 */
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
					enum fs_flow_table_type ft_type)
{
	if (ft_type != FS_FT_FDB ||
	    MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
		return 0;

	return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
}

/* True when the device can use SW steering at all. */
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
	return mlx5dr_is_supported(dev);
}

/* SW-steering implementation of the flow-steering command interface. */
static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
	.create_flow_table = mlx5_cmd_dr_create_flow_table,
	.destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_dr_modify_flow_table,
	.create_flow_group = mlx5_cmd_dr_create_flow_group,
	.destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
	.create_fte = mlx5_cmd_dr_create_fte,
	.update_fte = mlx5_cmd_dr_update_fte,
	.delete_fte = mlx5_cmd_dr_delete_fte,
	.update_root_ft = mlx5_cmd_dr_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_dr_create_match_definer,
	.destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
	.set_peer = mlx5_cmd_dr_set_peer,
	.create_ns = mlx5_cmd_dr_create_ns,
	.destroy_ns = mlx5_cmd_dr_destroy_ns,
	.get_capabilities = mlx5_cmd_dr_get_capabilities,
};

/* Accessor for the SW-steering command table. */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
{
	return &mlx5_flow_cmds_dr;
}