/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
		return;

	if (attr->int_port) {
		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);

		return;
	}

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
/* Strictly speaking, only the upper 16 bits of reg c0 need to be cleared,
 * but the lower 16 bits are not used later in this flow either, so clear
 * them all for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because the vport is not represented by a
	 * single VHCA in dual-port RoCE mode, and matching on source vport
	 * may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (attr && !attr->chain && esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}
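/* Usage sketch (illustrative only, not part of the driver): a hypothetical
 * caller preparing a spec that matches traffic from a given vport before
 * adding an FDB rule. `esw`, `attr`, `fdb`, `flow_act`, `dest` and
 * `vf_vport` are assumed to come from the surrounding offload path.
 *
 *	struct mlx5_flow_spec *spec;
 *	struct mlx5_flow_handle *rule;
 *
 *	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	if (spec) {
 *		mlx5_eswitch_set_rule_source_port(esw, spec, attr, esw, vf_vport);
 *		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, 1);
 *		kvfree(spec);
 *	}
 */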
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       u32 sampler_id,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = sampler_id;

	return 0;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_fs_chains *chains, int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, int i)
{
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = esw->fdb_table.offloads.slow_fdb;
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return -EOPNOTSUPP;

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
	if (err)
		return err;

	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
	}
	(*i)++;

	return 0;
}
static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	bool result = false;
	int i;

	/* An indirect table is supported only for flows whose in_port is the
	 * uplink and whose destination is a vport on the same eswitch as the
	 * uplink; return false if at least one destination doesn't meet this
	 * criterion.
	 */
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
		if (esw_attr->dests[i].rep &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev)) {
			result = true;
		} else {
			result = false;
			break;
		}
	}
	return result;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}
static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
		    mlx5_lag_mpesw_is_activated(esw->dev))
			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}
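/* Worked example (illustrative): with out_count == 3 and split_count == 1,
 * dests[0] is handled by the split (extra forward) table, while dests[1]
 * and dests[2] are written above as dest[i] and dest[i + 1];
 * esw_setup_vport_dests() then returns the next free index, i + 2.
 */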
static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
	    !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
		esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, esw, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
		esw_setup_accept_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

		if (attr->dest_ft) {
			err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
			(*i)++;
		} else if (attr->dest_chain) {
			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
						   1, 0, *i);
			(*i)++;
		}
	}

	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
	struct mlx5e_flow_meter_handle *meter;

	meter = attr->meter_attr.meter;
	flow_act->exe_aso.type = attr->exe_aso_type;
	flow_act->exe_aso.object_id = meter->obj_id;
	flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
	flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
	/* use metadata reg 5 for packet color */
	flow_act->exe_aso.return_reg_id = 5;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;
	/* if per-flow vlan pop/push is emulated, don't program it into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	    attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
		esw_setup_meter(attr, &flow_act);

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	kfree(dest);
	return rule;
}
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	kfree(dest);
	return rule;
}
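/* Usage sketch (illustrative): the add and del calls are expected to pair
 * up with the same attr, so the chain and per-vport table references taken
 * on add are dropped again on delete:
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */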
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan == 0) together with
	 * rules that push vlans (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if vlan push/pop is not emulated */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
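/* Worked example (illustrative): two rules that push the same vlan on the
 * same in_rep share one __mlx5_eswitch_set_vport_vlan() call; the second
 * add only bumps vport->vlan_refcount, and the hardware setting is undone
 * only when the last such rule is deleted below.
 */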
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if vlan push/pop is not emulated */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(from_esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
	if (rule)
		mlx5_del_flow_rules(rule);
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
	dest.vport.num = vport_num;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
			 vport_num, PTR_ERR(flow_rule));

	kvfree(spec);
	return flow_rule;
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}
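/* Worked example (illustrative): with fdb_to_vport_reg_c_id currently 0 and
 * reg_c1 loopback supported, enable sets curr to
 * MLX5_FDB_TO_VPORT_REG_C_0 | MLX5_FDB_TO_VPORT_REG_C_1, while disable
 * clears exactly those two bits and leaves any other bits untouched.
 */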
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}
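/* Usage sketch (illustrative): callers that map a chain/tunnel context to a
 * reg_c_0 tag can install one restore rule per tag; the rule matches the
 * tag, sets it as the flow_tag reported on the CQE, applies the restore
 * copy modify-header and forwards to ft_offloads:
 *
 *	flow = esw_add_restore_rule(esw, tag);
 *	if (IS_ERR(flow))
 *		return PTR_ERR(flow);
 */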
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when the ttl workaround is needed, e.g. when
		 * ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft, the chain flow table that always exists */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios aren't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif
static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int count, err = 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment at table_size calculation */
	count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
	*ix += count;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

out:
	return err;
}

static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
				    u32 *flow_group_in,
				    int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!esw_src_port_rewrite_supported(esw))
		return 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev,
			 "Failed to create send-to-vport meta flow group err(%d)\n", err);
		goto send_vport_meta_err;
	}
	esw->fdb_table.offloads.send_to_vport_meta_grp = g;

	return 0;

send_vport_meta_err:
	return err;
}

static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return 0;

	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

out:
	return err;
}

static int
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
		      u32 *flow_group_in,
		      int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;
	u8 *dmac;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	return err;
}

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix = 0, err = 0;
	u32 flags = 0, *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but as the peer device might not be in switchdev mode it's not
	 * possible. We use the fact that by default FW sets max vfs and max
	 * sfs to the same value on both devices. If this needs to change in
	 * the future, note that the peer miss group should also be created
	 * based on the number of total vports of the peer (currently it also
	 * uses esw->total_vports).
	 */
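	/* Worked example (illustrative, assuming MLX5_MAX_PORTS == 2 and
	 * esw->total_vports == 10): table_size = 2 * (10 * 32 + 256) +
	 * 10 * 2 + 2 = 1174 FTEs for the slow path FDB.
	 */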
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in following
	 * priorities without directly exposing their level 0 table to
	 * eswitch_offloads and passing it as miss_fdb to following call to
	 * esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_err;

	err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_meta_err;

	err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto peer_miss_err;

	err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto miss_err;

	kvfree(flow_group_in);
	return 0;

miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
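
/* Slow path FDB layout, in flow-index order (this follows directly from
 * the *ix accounting in the group creation helpers above):
 *
 *	[0, send-to-vport count)	SQN-based send-to-vport group
 *	[+, esw->total_vports)		meta send-to-vport group (reg_c match)
 *	[+, esw->total_vports)		peer eswitch miss group
 *	[+, MLX5_ESW_MISS_FLOWS]	unicast/multicast miss group
 *
 * The error unwind above and the destroy path below release these objects
 * in reverse creation order.
 */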

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);
}

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
	int nvports;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	if (mlx5e_tc_int_port_supported(esw))
		nvports += MLX5E_TC_MAX_INT_PORT_NUM;

	return nvports;
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
			  MLX5_ESW_FT_OFFLOADS_DROP_RULE;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
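
/* ft_offloads layout: indexes [0, nvports) belong to the vport rx group
 * created above, where nvports counts every steering source port (vports,
 * MLX5_ESW_MISS_FLOWS slots and, when supported, internal ports); the
 * single index after them is reserved for the catch-all drop rule
 * installed below.
 */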

static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
	/* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
	 * for the drop rule, which is placed at the end of the table.
	 * So the rule index is the total number of vports and int_ports.
	 */
	return esw_get_nr_ft_offloads_steering_src_ports(esw);
}

static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int flow_index;
	int err = 0;

	flow_index = esw_create_vport_rx_drop_rule_index(esw);

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_drop_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_group)
		mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
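
/* Example use of mlx5_eswitch_create_vport_rx_rule() (illustrative
 * sketch; the TIR number is a placeholder, not taken from this file):
 *
 *	struct mlx5_flow_destination dest = {};
 *	struct mlx5_flow_handle *rule;
 *
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 *	dest.tir_num = tirn; (hypothetical TIR backing the rep's RQs)
 *	rule = mlx5_eswitch_create_vport_rx_rule(esw, vport_num, &dest);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *
 * Traffic whose source port (or source-port metadata) identifies
 * vport_num is then steered to the given destination.
 */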

static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
					&flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "fs offloads: Failed to add vport rx drop rule err %ld\n",
			 PTR_ERR(flow_rule));
		return PTR_ERR(flow_rule);
	}

	esw->offloads.vport_rx_drop_rule = flow_rule;

	return 0;
}

static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_rule)
		mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
}

static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (!mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
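
/* The restore table spans every possible value of the reg_c_0 user-data
 * bits (a single group covering ESW_REG_C0_USER_DATA_METADATA_MASK).
 * Rules added to it later are expected to match a specific user-data
 * value and apply the modify header allocated below, which copies
 * reg_c_1 into reg_b so the value remains visible after the reg_c1
 * loopback into the NIC RX domain (hence the capability check).
 */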

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	esw->mode = MLX5_ESWITCH_OFFLOADS;
	err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		esw->mode = MLX5_ESWITCH_LEGACY;
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
		mlx5_rescan_drivers(esw->dev);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
					   struct mlx5_eswitch_rep *rep,
					   xa_mark_t mark)
{
	bool mark_set;

	/* Copy the mark from vport to its rep */
	mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
	if (mark_set)
		xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
}

static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	rep->vport = vport->vport;
	rep->vport_index = vport->index;
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
	if (err)
		goto insert_err;

	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
	return 0;

insert_err:
	kfree(rep);
	return err;
}

static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{
	xa_erase(&esw->offloads.vport_reps, rep->vport);
	kfree(rep);
}
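
/* Representor lifecycle, as encoded by the rep_data[].state transitions
 * in this file:
 *
 *	REP_UNREGISTERED -> REP_REGISTERED	mlx5_eswitch_register_vport_reps()
 *	REP_REGISTERED   -> REP_LOADED		mlx5_esw_offloads_rep_load(), ops->load()
 *	REP_LOADED       -> REP_REGISTERED	__esw_offloads_unload_rep(), ops->unload()
 *
 * The transitions use atomic_cmpxchg() so concurrent load/unload paths
 * cannot invoke a rep_ops callback twice for the same state.
 */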

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		mlx5_esw_offloads_rep_cleanup(esw, rep);
	xa_destroy(&esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	xa_init(&esw->offloads.vport_reps);

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = mlx5_esw_offloads_rep_init(esw, vport);
		if (err)
			goto err;
	}
	return 0;

err:
	esw_offloads_cleanup_reps(esw);
	return err;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_sf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	__unload_reps_sf_vport(esw, rep_type);

	mlx5_esw_for_each_vf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	if (vport_num != MLX5_VPORT_UPLINK) {
		err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
		if (err)
			return err;
	}

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);

	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}

static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
				  struct mlx5_core_dev *slave)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;
	int err;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);

	if (master) {
		ns = mlx5_get_flow_namespace(master,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
	mutex_unlock(&root->chain_lock);

	return err;
}

static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
					struct mlx5_core_dev *slave,
					struct mlx5_vport *vport,
					struct mlx5_flow_table *acl)
{
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(slave, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = slave->priv.eswitch->manager_vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
					&dest, 1);
	if (IS_ERR(flow_rule))
		err = PTR_ERR(flow_rule);
	else
		vport->egress.offloads.bounce_rule = flow_rule;

	kvfree(spec);
	return err;
}

static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
				      struct mlx5_core_dev *slave)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_eswitch *esw = master->priv.eswitch;
	struct mlx5_flow_table_attr ft_attr = {
		.max_fte = 1, .prio = 0, .level = 0,
		.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
	};
	struct mlx5_flow_namespace *egress_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	struct mlx5_vport *vport;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	egress_ns = mlx5_get_flow_vport_acl_namespace(master,
						      MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						      vport->index);
	if (!egress_ns)
		return -EINVAL;

	if (vport->egress.acl)
		return -EINVAL;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		goto out;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		goto err_group;
	}

	err = __esw_set_master_egress_rule(master, slave, vport, acl);
	if (err)
		goto err_rule;

	vport->egress.acl = acl;
	vport->egress.offloads.bounce_grp = g;

	kvfree(flow_group_in);

	return 0;

err_rule:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(acl);
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
				       dev->priv.eswitch->manager_vport);

	esw_acl_egress_ofld_cleanup(vport);
}

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw)
{
	int err;

	err = esw_set_slave_root_fdb(master_esw->dev,
				     slave_esw->dev);
	if (err)
		return err;

	err = esw_set_master_egress_rule(master_esw->dev,
					 slave_esw->dev);
	if (err)
		goto err_acl;

	return err;

err_acl:
	esw_set_slave_root_fdb(NULL, slave_esw->dev);

	return err;
}

void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw)
{
	esw_unset_master_egress_rule(master_esw->dev);
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
}
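
/* Shared FDB (LAG) wiring, as implemented above: the slave's FDB root is
 * pointed at the master's root table via SET_FLOW_TABLE_ROOT, with the
 * master's vhca_id as table owner, so both uplinks are steered through a
 * single FDB. The master egress ACL "bounce" rule then returns traffic
 * sourced from the slave's uplink (matched by source port plus
 * source_eswitch_owner_vhca_id) to the slave's manager vport. Passing a
 * NULL master restores the slave's own root table.
 */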

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;

	mlx5_esw_for_each_rep(esw, i, rep) {
		rep_type = NUM_REP_TYPES;
		while (rep_type--) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event)
				ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL);
		}
	}
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	mlx5_esw_offloads_rep_event_unpair(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	mlx5_esw_for_each_rep(esw, i, rep) {
		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event) {
				err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
				if (err)
					goto err_out;
			}
		}
	}

	return 0;

err_out:
	mlx5_esw_offloads_unpair(esw);
	return err;
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
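
/* Pairing handshake driven by the devcom event above, in order:
 *	1. both FDB root namespaces are made peers (set_ns_peer(.., true)),
 *	2. this eswitch installs peer miss rules and delivers PAIR events
 *	   to its loaded representors,
 *	3. the peer eswitch does the same in the opposite direction,
 *	4. the devcom component is marked paired.
 * UNPAIR tears this down in reverse. Pairing is skipped when the two
 * eswitches disagree on vport match metadata support.
 */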

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if (!mlx5_is_lag_supported(esw->dev))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if (!mlx5_is_lag_supported(esw->dev))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	return true;
}

#define MLX5_ESW_METADATA_RSVD_UPLINK 1

/* Share the same metadata for uplinks. This is fine because:
 * (a) In shared FDB mode (LAG) both uplinks are treated the
 *     same and tagged with the same metadata.
 * (b) In non-shared FDB mode, packets from physical port0
 *     cannot hit the eswitch of PF1 and vice versa.
 */
static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
{
	return MLX5_ESW_METADATA_RSVD_UPLINK;
}

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	/* Reserve 0xf for internal port offload */
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = mlx5_get_dev_index(esw->dev);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (2-4095) for all PFs */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
			     MLX5_ESW_METADATA_RSVD_UPLINK + 1,
			     vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}

static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	if (vport->vport == MLX5_VPORT_UPLINK)
		vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
	else
		vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);

	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}
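
/* Worked example of the metadata layout (values are illustrative): on
 * the PF with device index 1, the first allocation may return ida id 2,
 * giving
 *
 *	metadata = (1 << ESW_VPORT_BITS) | 2 = 0x1002
 *
 * i.e. 4 bits of PFNUM and 12 bits of unique id. The uplink always uses
 * the reserved value MLX5_ESW_METADATA_RSVD_UPLINK (1), which both
 * uplinks of a LAG share.
 */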

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	if (vport->vport == MLX5_VPORT_UPLINK)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_each_vport(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}

int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
{
	int err = 0;

	down_write(&esw->mode_lock);
	if (mlx5_esw_is_fdb_created(esw)) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (enable)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return;

	esw_vport_destroy_offloads_acl_tables(esw, vport);
}

int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int ret;

	if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
		return 0;

	ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
	if (ret)
		return ret;

	mlx5_esw_for_each_rep(esw, i, rep) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
			mlx5_esw_offloads_rep_load(esw, rep->vport);
	}

	return 0;
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);
	atomic64_set(&esw->user_count, 0);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_create_vport_rx_drop_group(esw);
	if (err)
		goto create_rx_drop_fg_err;

	err = esw_create_vport_rx_drop_rule(esw);
	if (err)
		goto create_rx_drop_rule_err;

	return 0;

create_rx_drop_rule_err:
	esw_destroy_vport_rx_drop_group(esw);
create_rx_drop_fg_err:
	esw_destroy_vport_rx_group(esw);
create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_drop_rule(esw);
	esw_destroy_vport_rx_drop_group(esw);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}

static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	struct devlink *devlink;
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	devlink = priv_to_devlink(esw->dev);
	devl_lock(devlink);
	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err) {
			devl_unlock(devlink);
			return;
		}
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
	devl_unlock(devlink);
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non-local controller with non-zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{
	/* Local controller is always valid */
	if (controller == 0)
		return true;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	/* External host number starts with zero in device */
	return (controller == esw->offloads.host_number + 1);
}

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mapping_ctx *reg_c0_obj_pool;
	struct mlx5_vport *vport;
	unsigned long i;
	u64 mapping_id;
	int err;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
						sizeof(struct mlx5_mapped_obj),
						ESW_REG_C0_USER_DATA_METADATA_MASK,
						true);

	if (IS_ERR(reg_c0_obj_pool)) {
		err = PTR_ERR(reg_c0_obj_pool);
		goto err_pool;
	}
	esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mapping_destroy(reg_c0_obj_pool);
err_pool:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	esw->mode = MLX5_ESWITCH_LEGACY;
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		esw->mode = MLX5_ESWITCH_OFFLOADS;
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mapping_destroy(esw->offloads.reg_c0_obj_pool);
	esw_offloads_metadata_uninit(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mlx5_lag_disable_change(esw->dev);
	err = mlx5_esw_try_lock(esw);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
		goto enable_lag;
	}
	cur_mlx5_mode = err;
	err = 0;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	mlx5_eswitch_disable_locked(esw);
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		if (mlx5_devlink_trap_get_num_active(esw->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can't change mode while devlink traps are active");
			err = -EOPNOTSUPP;
			goto unlock;
		}
		err = esw_offloads_start(esw, extack);
	} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
		err = esw_offloads_stop(esw, extack);
		mlx5_rescan_drivers(esw->dev);
	} else {
		err = -EINVAL;
	}

unlock:
	mlx5_esw_unlock(esw);
enable_lag:
	mlx5_lag_enable_change(esw->dev);
	return err;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = esw_mode_to_devlink(esw->mode, mode);
	up_write(&esw->mode_lock);
	return err;
}
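
/* These callbacks back the devlink eswitch API; from userspace they are
 * typically exercised with iproute2's devlink tool, e.g. (the PCI
 * address is a placeholder):
 *
 *	devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *	devlink dev eswitch set pci/0000:06:00.0 inline-mode transport
 *	devlink dev eswitch set pci/0000:06:00.0 encap-mode basic
 */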

static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	u16 err_vport_num = 0;
	unsigned long i;
	int err = 0;

	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
		if (err) {
			err_vport_num = vport->vport;
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}
	return 0;

revert_inline_mode:
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		if (vport->vport == err_vport_num)
			break;
		mlx5_modify_nic_vport_min_inline(dev,
						 vport->vport,
						 esw->offloads.inline_mode);
	}
	return err;
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
			err = 0;
			goto out;
		}

		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
	if (err)
		goto out;

	esw->offloads.inline_mode = mlx5_mode;
	up_write(&esw->mode_lock);
	return 0;

out:
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	*encap = esw->offloads.encap;
	up_write(&esw->mode_lock);
	return 0;
}

static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_each_rep(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
			rep->esw = esw;
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_each_rep(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
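
/* With the 16-bit source-port metadata layout described earlier (4 PFNUM
 * bits plus 12 vport id bits), the match value is the metadata shifted
 * into the upper half of reg_c_0. For example, the reserved uplink
 * metadata 1 yields 1 << 16 = 0x00010000 (illustrative arithmetic,
 * assuming ESW_SOURCE_PORT_METADATA_BITS == 16).
 */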
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
	if (err)
		goto devlink_err;

	mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}

static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}

int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}

u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
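/* A minimal, hypothetical lookup sketch for the vhca_map populated above:
 * a caller holding only a function's vhca_id (e.g. parsed from an event
 * or packet metadata) can recover the vport and its representor with
 *
 *   u16 vport_num;
 *
 *   if (!mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, &vport_num))
 *           rep = mlx5_eswitch_vport_rep(esw, vport_num);
 *
 * The xarray stores a heap-allocated u16 per vhca_id, which is why the
 * set/clear helpers kfree() the previous entry instead of leaking it.
 */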
static bool
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_esw_is_sf_vport(esw, vport_num);
}

int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return -EOPNOTSUPP;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	ether_addr_copy(hw_addr, vport->info.mac);
	*hw_addr_len = ETH_ALEN;
	mutex_unlock(&esw->state_lock);
	return 0;
}

int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
		return PTR_ERR(esw);
	}

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
		return -EINVAL;
	}

	return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
}
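/* Usage sketch for the two port-function callbacks above (the PCI address
 * and port index are hypothetical; syntax per recent iproute2):
 *
 *   devlink port show pci/0000:06:00.0/1
 *   devlink port function set pci/0000:06:00.0/1 hw_addr 00:00:00:00:88:88
 *
 * The set path funnels into mlx5_eswitch_set_vport_mac(), which updates
 * the vport context under esw->state_lock.
 */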