/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/vport.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>

/* Pseudo vport number used in FDB rules to address the uplink (wire) port. */
#define UPLINK_VPORT 0xFFFF

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(dev, format, ...)				\
	printf("mlx5_core: INFO: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_warn(dev, format, ...)				\
	printf("mlx5_core: WARN: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* Pending action recorded on a vport address entry; consumed (and reset)
 * by esw_apply_vport_addr_list(). */
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD = 1,
	MLX5_ACTION_DEL = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;
	u32 table_index;	/* index of this MAC in the HW L2 (MPFS) table */
	u32 vport;		/* vport currently owning this UC MAC */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;		/* number of vports subscribed to this MC MAC */
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node node;
	u8 action;		/* MLX5_ACTION_* pending for this entry */
	u32 vport;
	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
};

/* Vport context event bits we can subscribe to. */
enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE)

/*
 * Arm the NIC vport context change event for @vport and select which
 * address-list changes (per @events_mask) should generate events.
 * Passing events_mask == 0 disables event generation for the vport.
 * Returns the mlx5_cmd_exec() status.
 */
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	/* NOTE(review): mailboxes declared as int[]; every other command in
	 * this file uses u32[] — same size/DW layout, but u32 would be
	 * consistent. */
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	/* vport 0 is "self"; any other vport needs the other_vport flag */
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* E-Switch vport context HW commands */

/*
 * Query the E-Switch vport context of @vport into @out (@outlen bytes).
 */
static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
				       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};

	/* NOTE(review): the opcode is written through the
	 * query_nic_vport_context_in layout; this presumably relies on both
	 * command layouts placing the opcode field at the same offset —
	 * using query_esw_vport_context_in would be clearer. Verify. */
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);

	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

/*
 * Read the current cvlan (VST) strip/insert configuration of @vport.
 * On success, *vlan/*qos hold the configured values, or 0 when neither
 * strip nor insert is enabled.  Returns -ENOTSUPP when the device lacks
 * cvlan strip/insert support.
 */
static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				 u16 *vlan, u8 *qos)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
	int err;
	bool cvlan_strip;
	bool cvlan_insert;

	*vlan = 0;
	*qos = 0;

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
	if (err)
		goto out;

	cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
			       esw_vport_context.vport_cvlan_strip);

	cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.vport_cvlan_insert);

	/* Only report vlan/qos when VST is actually active on the vport */
	if (cvlan_strip || cvlan_insert) {
		*vlan = MLX5_GET(query_esw_vport_context_out, out,
				 esw_vport_context.cvlan_id);
		*qos = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.cvlan_pcp);
	}

	esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
		  vport, *vlan, *qos);
out:
	return err;
}

/*
 * Execute MODIFY_ESW_VPORT_CONTEXT for @vport with the caller-prepared
 * input mailbox @in (@inlen bytes); fills in vport number/other_vport
 * and the opcode before executing.
 */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

/*
 * Configure (set=true) or clear (set=false) VST vlan/qos on @vport:
 * strip the cvlan on egress from the VF and insert vlan/qos on ingress
 * (insert only when the packet carries no vlan).
 * Returns -ENOTSUPP when the device lacks cvlan strip/insert support.
 */
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, bool set)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
		  vport, vlan, qos, set);

	if (set) {
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	/* Always select both fields so a "clear" actually clears them */
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

/* E-Switch FDB */

/*
 * Install an FDB rule forwarding packets whose outer DMAC equals @mac
 * to @vport.  Returns the rule handle, or NULL on failure (the error is
 * logged; callers treat a NULL rule as "no rule installed").
 */
static struct mlx5_flow_rule *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	int match_header = MLX5_MATCH_OUTER_HEADERS;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule *flow_rule = NULL;
	u32 *match_v;
	u32 *match_c;
	u8 *dmac_v;
	u8 *dmac_c;

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		printf("mlx5_core: WARN: ""FDB: Failed to alloc match parameters\n");
		goto out;
	}
	dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
			      outer_headers.dmac_47_16);

	ether_addr_copy(dmac_v, mac);
	/* Match criteria mask */
	memset(dmac_c, 0xff, 6);

	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	flow_rule =
		mlx5_add_flow_rule(esw->fdb_table.fdb,
				   match_header,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				   0, &dest);
	if (IS_ERR_OR_NULL(flow_rule)) {
		printf("mlx5_core: WARN: ""FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}
out:
	kfree(match_v);
	kfree(match_c);
	return flow_rule;
}

/*
 * Create the FDB flow table and its single DMAC-match flow group covering
 * the whole table.  On success stores the table/group handles in
 * esw->fdb_table; on failure any partially-created table is destroyed.
 */
static int esw_create_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -ENOMEM;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;
	/* NOTE(review): redundant — mlx5_vzalloc already zeroes the buffer */
	memset(flow_group_in, 0, inlen);

	/* (-2) Since MaorG said so .. */
	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)) - 2;

	fdb = mlx5_create_flow_table(root_ns, 0, "FDB", table_size);
	if (IS_ERR_OR_NULL(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}

	/* One group matching on full outer DMAC, spanning the whole table */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	eth_broadcast_addr(dmac);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}

	esw->fdb_table.addr_grp = g;
	esw->fdb_table.fdb = fdb;
out:
	kfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(fdb))
		mlx5_destroy_flow_table(fdb);
	return err;
}

/*
 * Destroy the FDB flow group and table created by esw_create_fdb_table().
 * Safe to call when the FDB was never created.
 */
static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.addr_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

/*
 * Claim a UC MAC for @vaddr->vport: record it in the UC hash, program it
 * into the HW L2 (MPFS) table and, when SRIOV is active, add an FDB
 * forwarding rule.  Fails with -EEXIST if another vport already owns the
 * MAC.
 */
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (esw_uc) {
		esw_warn(esw->dev,
			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
			 mac, vport, esw_uc->vport);
		return -EEXIST;
	}

	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
	if (!esw_uc)
		return -ENOMEM;
	esw_uc->vport = vport;

	err = mlx5_mpfs_add_mac(esw->dev, &esw_uc->table_index, mac, 0, 0);
	if (err)
		goto abort;

	if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
	return err;
abort:
	l2addr_hash_del(esw_uc);
	return err;
}

/*
 * Release a UC MAC owned by @vaddr->vport: remove the MPFS entry, the FDB
 * rule (if any) and the hash node.  -EINVAL if the MAC is not owned by
 * this vport.
 */
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (!esw_uc || esw_uc->vport != vport) {
		esw_debug(esw->dev,
			  "MAC(%pM) doesn't belong to vport (%d)\n",
			  mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);

	mlx5_mpfs_del_mac(esw->dev, esw_uc->table_index);

	mlx5_del_flow_rule(&vaddr->flow_rule);

	l2addr_hash_del(esw_uc);
	return 0;
}

/*
 * Subscribe @vaddr->vport to MC MAC @vaddr->node.addr (SRIOV only):
 * the first subscriber also installs the forward-to-uplink rule; every
 * subscriber gets its own forward-to-vport rule and bumps the refcount.
 */
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
add:
	esw_mc->refcnt++;
	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

/*
 * Unsubscribe @vaddr->vport from an MC MAC (SRIOV only): drop the vport's
 * rule; the last unsubscriber also drops the uplink rule and frees the
 * hash node.
 */
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	mlx5_del_flow_rule(&vaddr->flow_rule);

	if (--esw_mc->refcnt)
		return 0;

	mlx5_del_flow_rule(&esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ?
vport->uc_list : vport->mc_list; 489 for_each_l2hash_node(node, tmp, hash, hi) { 490 addr = container_of(node, struct vport_addr, node); 491 switch (addr->action) { 492 case MLX5_ACTION_ADD: 493 vport_addr_add(esw, addr); 494 addr->action = MLX5_ACTION_NONE; 495 break; 496 case MLX5_ACTION_DEL: 497 vport_addr_del(esw, addr); 498 l2addr_hash_del(addr); 499 break; 500 } 501 } 502 } 503 504 /* Sync vport UC/MC list from vport context */ 505 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, 506 u32 vport_num, int list_type) 507 { 508 struct mlx5_vport *vport = &esw->vports[vport_num]; 509 bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC; 510 u8 (*mac_list)[ETH_ALEN]; 511 struct l2addr_node *node; 512 struct vport_addr *addr; 513 struct hlist_head *hash; 514 struct hlist_node *tmp; 515 int size; 516 int err; 517 int hi; 518 int i; 519 520 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) : 521 MLX5_MAX_MC_PER_VPORT(esw->dev); 522 523 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL); 524 if (!mac_list) 525 return; 526 527 hash = is_uc ? vport->uc_list : vport->mc_list; 528 529 for_each_l2hash_node(node, tmp, hash, hi) { 530 addr = container_of(node, struct vport_addr, node); 531 addr->action = MLX5_ACTION_DEL; 532 } 533 534 err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type, 535 mac_list, &size); 536 if (err) 537 return; 538 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", 539 vport_num, is_uc ? 
"UC" : "MC", size); 540 541 for (i = 0; i < size; i++) { 542 if (is_uc && !is_valid_ether_addr(mac_list[i])) 543 continue; 544 545 if (!is_uc && !is_multicast_ether_addr(mac_list[i])) 546 continue; 547 548 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); 549 if (addr) { 550 addr->action = MLX5_ACTION_NONE; 551 continue; 552 } 553 554 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr, 555 GFP_KERNEL); 556 if (!addr) { 557 esw_warn(esw->dev, 558 "Failed to add MAC(%pM) to vport[%d] DB\n", 559 mac_list[i], vport_num); 560 continue; 561 } 562 addr->vport = vport_num; 563 addr->action = MLX5_ACTION_ADD; 564 } 565 kfree(mac_list); 566 } 567 568 static void esw_vport_change_handler(struct work_struct *work) 569 { 570 struct mlx5_vport *vport = 571 container_of(work, struct mlx5_vport, vport_change_handler); 572 struct mlx5_core_dev *dev = vport->dev; 573 struct mlx5_eswitch *esw = dev->priv.eswitch; 574 u8 mac[ETH_ALEN]; 575 576 mlx5_query_nic_vport_mac_address(dev, vport->vport, mac); 577 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", 578 vport->vport, mac); 579 580 if (vport->enabled_events & UC_ADDR_CHANGE) { 581 esw_update_vport_addr_list(esw, vport->vport, 582 MLX5_NIC_VPORT_LIST_TYPE_UC); 583 esw_apply_vport_addr_list(esw, vport->vport, 584 MLX5_NIC_VPORT_LIST_TYPE_UC); 585 } 586 587 if (vport->enabled_events & MC_ADDR_CHANGE) { 588 esw_update_vport_addr_list(esw, vport->vport, 589 MLX5_NIC_VPORT_LIST_TYPE_MC); 590 esw_apply_vport_addr_list(esw, vport->vport, 591 MLX5_NIC_VPORT_LIST_TYPE_MC); 592 } 593 594 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport); 595 if (vport->enabled) 596 arm_vport_context_events_cmd(dev, vport->vport, 597 vport->enabled_events); 598 } 599 600 static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, 601 struct mlx5_vport *vport) 602 { 603 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 604 struct mlx5_flow_group *vlan_grp = NULL; 605 struct mlx5_flow_group *drop_grp 
	= NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	char table_name[32];
	u32 *flow_group_in;
	int table_size = 2;	/* one allowed-vlan entry + one drop entry */
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	snprintf(table_name, 32, "egress_%d", vport->vport);
	acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Group 0 (index 0): match cvlan tag + first vid — allowed vlan */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Group 1 (index 1): no match criteria — catch-all for the drop rule */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kfree(flow_group_in);
	/* On error, unwind whatever was created (groups before table) */
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

/*
 * Remove the egress ACL rules (allowed-vlan + drop) installed by
 * esw_vport_egress_config(); the table and groups stay in place.
 */
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	mlx5_del_flow_rule(&vport->egress.allowed_vlan);
	mlx5_del_flow_rule(&vport->egress.drop_rule);
}

/*
 * Tear down the vport's egress ACL: rules, then groups, then the table.
 * Safe to call when the ACL was never created.
 */
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

/*
 * Create the per-vport ingress ACL table with a single group matching on
 * the cvlan tag, used by esw_vport_ingress_config() for the drop rule.
 * No-op when the device lacks ingress ACL support.
 */
static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	char table_name[32];
	u32 *flow_group_in;
	int table_size = 1;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	snprintf(table_name, 32, "ingress_%d", vport->vport);
	acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->ingress.acl = acl;
	vport->ingress.drop_grp = g;
out:
	kfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

/*
 * Remove the ingress drop rule installed by esw_vport_ingress_config();
 * the table and group stay in place.
 */
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	mlx5_del_flow_rule(&vport->ingress.drop_rule);
}

/*
 * Tear down the vport's ingress ACL: rule, then group, then the table.
 * Safe to call when the ACL was never created.
 */
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
}

/*
 * (Re)program the vport's ingress ACL for VST: when vlan/qos is set,
 * install a rule dropping any packet that already carries a cvlan tag.
 * When no vlan/qos is configured, only the old rules are removed.
 * Requires the ingress ACL to have been enabled; -EPERM otherwise.
 */
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_flow_destination dest;
	u32 *match_v;
	u32 *match_c;
	int err = 0;

	if (IS_ERR_OR_NULL(vport->ingress.acl)) {
		esw_warn(esw->dev,
			 "vport[%d] configure ingress rules failed, ingress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->vlan && !vport->qos)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);

	/* NOTE(review): dest is passed although the action is DROP; it does
	 * not appear to be needed for a drop action — verify against the
	 * flow-steering API. */
	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
	dest.vport_num = vport->vport;

	vport->ingress.drop_rule =
		mlx5_add_flow_rule(vport->ingress.acl,
				   MLX5_MATCH_OUTER_HEADERS,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, &dest);
	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		printf("mlx5_core: WARN: ""vport[%d] configure ingress rules, err(%d)\n", vport->vport, err);
		vport->ingress.drop_rule = NULL;
	}
out:
	kfree(match_v);
	kfree(match_c);
	return err;
}

/*
 * (Re)program the vport's egress ACL for VST: allow packets tagged with
 * the configured vlan, drop everything else.  When no vlan/qos is
 * configured, only the old rules are removed.
 */
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_flow_destination dest;
	u32 *match_v;
	u32 *match_c;
	int err = 0;
846 847 if (IS_ERR_OR_NULL(vport->egress.acl)) { 848 esw_warn(esw->dev, "vport[%d] configure rgress rules failed, egress acl is not initialized!\n", 849 vport->vport); 850 return -EPERM; 851 } 852 853 esw_vport_cleanup_egress_rules(esw, vport); 854 855 if (!vport->vlan && !vport->qos) 856 return 0; 857 858 esw_debug(esw->dev, 859 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", 860 vport->vport, vport->vlan, vport->qos); 861 862 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 863 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 864 if (!match_v || !match_c) { 865 err = -ENOMEM; 866 esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n", 867 vport->vport, err); 868 goto out; 869 } 870 871 /* Allowed vlan rule */ 872 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag); 873 MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag); 874 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid); 875 MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan); 876 877 dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT; 878 dest.vport_num = vport->vport; 879 880 vport->egress.allowed_vlan = 881 mlx5_add_flow_rule(vport->egress.acl, 882 MLX5_MATCH_OUTER_HEADERS, 883 match_c, 884 match_v, 885 MLX5_FLOW_CONTEXT_ACTION_ALLOW, 886 0, &dest); 887 if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { 888 err = PTR_ERR(vport->egress.allowed_vlan); 889 printf("mlx5_core: WARN: ""vport[%d] configure egress allowed vlan rule failed, err(%d)\n", vport->vport, err); 890 vport->egress.allowed_vlan = NULL; 891 goto out; 892 } 893 894 /* Drop others rule (star rule) */ 895 memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); 896 memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); 897 vport->egress.drop_rule = 898 mlx5_add_flow_rule(vport->egress.acl, 899 0, 900 match_c, 901 match_v, 902 MLX5_FLOW_CONTEXT_ACTION_DROP, 903 0, &dest); 904 if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { 905 
err = PTR_ERR(vport->egress.drop_rule); 906 printf("mlx5_core: WARN: ""vport[%d] configure egress drop rule failed, err(%d)\n", vport->vport, err); 907 vport->egress.drop_rule = NULL; 908 } 909 out: 910 kfree(match_v); 911 kfree(match_c); 912 return err; 913 } 914 915 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, 916 int enable_events) 917 { 918 struct mlx5_vport *vport = &esw->vports[vport_num]; 919 unsigned long flags; 920 921 mutex_lock(&vport->state_lock); 922 WARN_ON(vport->enabled); 923 924 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); 925 926 if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ 927 esw_vport_enable_ingress_acl(esw, vport); 928 esw_vport_enable_egress_acl(esw, vport); 929 esw_vport_ingress_config(esw, vport); 930 esw_vport_egress_config(esw, vport); 931 } 932 933 mlx5_modify_vport_admin_state(esw->dev, 934 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, 935 vport_num, 936 MLX5_ESW_VPORT_ADMIN_STATE_AUTO); 937 938 /* Sync with current vport context */ 939 vport->enabled_events = enable_events; 940 esw_vport_change_handler(&vport->vport_change_handler); 941 942 spin_lock_irqsave(&vport->lock, flags); 943 vport->enabled = true; 944 spin_unlock_irqrestore(&vport->lock, flags); 945 946 arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); 947 948 esw->enabled_vports++; 949 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); 950 mutex_unlock(&vport->state_lock); 951 } 952 953 static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num) 954 { 955 struct mlx5_vport *vport = &esw->vports[vport_num]; 956 struct l2addr_node *node; 957 struct vport_addr *addr; 958 struct hlist_node *tmp; 959 int hi; 960 961 for_each_l2hash_node(node, tmp, vport->uc_list, hi) { 962 addr = container_of(node, struct vport_addr, node); 963 addr->action = MLX5_ACTION_DEL; 964 } 965 esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_UC); 966 967 for_each_l2hash_node(node, tmp, 
vport->mc_list, hi) { 968 addr = container_of(node, struct vport_addr, node); 969 addr->action = MLX5_ACTION_DEL; 970 } 971 esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_MC); 972 } 973 974 static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) 975 { 976 struct mlx5_vport *vport = &esw->vports[vport_num]; 977 unsigned long flags; 978 979 mutex_lock(&vport->state_lock); 980 if (!vport->enabled) { 981 mutex_unlock(&vport->state_lock); 982 return; 983 } 984 985 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); 986 /* Mark this vport as disabled to discard new events */ 987 spin_lock_irqsave(&vport->lock, flags); 988 vport->enabled = false; 989 vport->enabled_events = 0; 990 spin_unlock_irqrestore(&vport->lock, flags); 991 992 mlx5_modify_vport_admin_state(esw->dev, 993 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, 994 vport_num, 995 MLX5_ESW_VPORT_ADMIN_STATE_DOWN); 996 /* Wait for current already scheduled events to complete */ 997 flush_workqueue(esw->work_queue); 998 /* Disable events from this vport */ 999 arm_vport_context_events_cmd(esw->dev, vport->vport, 0); 1000 /* We don't assume VFs will cleanup after themselves */ 1001 esw_cleanup_vport(esw, vport_num); 1002 if (vport_num) { 1003 esw_vport_disable_egress_acl(esw, vport); 1004 esw_vport_disable_ingress_acl(esw, vport); 1005 } 1006 esw->enabled_vports--; 1007 mutex_unlock(&vport->state_lock); 1008 } 1009 1010 /* Public E-Switch API */ 1011 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) 1012 { 1013 int err; 1014 int i; 1015 1016 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || 1017 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1018 return 0; 1019 1020 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || 1021 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1022 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); 1023 return -ENOTSUPP; 1024 } 1025 1026 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) 
1027 esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n"); 1028 1029 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) 1030 esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n"); 1031 1032 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs); 1033 1034 esw_disable_vport(esw, 0); 1035 1036 err = esw_create_fdb_table(esw); 1037 if (err) 1038 goto abort; 1039 1040 for (i = 0; i <= nvfs; i++) 1041 esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS); 1042 1043 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n", 1044 esw->enabled_vports); 1045 return 0; 1046 1047 abort: 1048 esw_enable_vport(esw, 0, UC_ADDR_CHANGE); 1049 return err; 1050 } 1051 1052 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) 1053 { 1054 int i; 1055 1056 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || 1057 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1058 return; 1059 1060 esw_info(esw->dev, "disable SRIOV: active vports(%d)\n", 1061 esw->enabled_vports); 1062 1063 for (i = 0; i < esw->total_vports; i++) 1064 esw_disable_vport(esw, i); 1065 1066 esw_destroy_fdb_table(esw); 1067 1068 /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ 1069 esw_enable_vport(esw, 0, UC_ADDR_CHANGE); 1070 } 1071 1072 int mlx5_eswitch_init(struct mlx5_core_dev *dev, int total_vports) 1073 { 1074 int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); 1075 struct mlx5_eswitch *esw; 1076 int vport_num; 1077 int err; 1078 1079 if (!MLX5_CAP_GEN(dev, vport_group_manager) || 1080 MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1081 return 0; 1082 1083 esw_info(dev, 1084 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n", 1085 total_vports, l2_table_size, 1086 MLX5_MAX_UC_PER_VPORT(dev), 1087 MLX5_MAX_MC_PER_VPORT(dev)); 1088 1089 esw = kzalloc(sizeof(*esw), GFP_KERNEL); 1090 if (!esw) 1091 return -ENOMEM; 1092 1093 esw->dev = dev; 1094 1095 esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size), 
1096 sizeof(uintptr_t), GFP_KERNEL); 1097 if (!esw->l2_table.bitmap) { 1098 err = -ENOMEM; 1099 goto abort; 1100 } 1101 esw->l2_table.size = l2_table_size; 1102 1103 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); 1104 if (!esw->work_queue) { 1105 err = -ENOMEM; 1106 goto abort; 1107 } 1108 1109 esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport), 1110 GFP_KERNEL); 1111 if (!esw->vports) { 1112 err = -ENOMEM; 1113 goto abort; 1114 } 1115 1116 for (vport_num = 0; vport_num < total_vports; vport_num++) { 1117 struct mlx5_vport *vport = &esw->vports[vport_num]; 1118 1119 vport->vport = vport_num; 1120 vport->dev = dev; 1121 INIT_WORK(&vport->vport_change_handler, 1122 esw_vport_change_handler); 1123 spin_lock_init(&vport->lock); 1124 mutex_init(&vport->state_lock); 1125 } 1126 1127 esw->total_vports = total_vports; 1128 esw->enabled_vports = 0; 1129 1130 dev->priv.eswitch = esw; 1131 esw_enable_vport(esw, 0, UC_ADDR_CHANGE); 1132 /* VF Vports will be enabled when SRIOV is enabled */ 1133 return 0; 1134 abort: 1135 if (esw->work_queue) 1136 destroy_workqueue(esw->work_queue); 1137 kfree(esw->l2_table.bitmap); 1138 kfree(esw->vports); 1139 kfree(esw); 1140 return err; 1141 } 1142 1143 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1144 { 1145 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || 1146 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1147 return; 1148 1149 esw_info(esw->dev, "cleanup\n"); 1150 esw_disable_vport(esw, 0); 1151 1152 esw->dev->priv.eswitch = NULL; 1153 destroy_workqueue(esw->work_queue); 1154 kfree(esw->l2_table.bitmap); 1155 kfree(esw->vports); 1156 kfree(esw); 1157 } 1158 1159 void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) 1160 { 1161 struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change; 1162 u16 vport_num = be16_to_cpu(vc_eqe->vport_num); 1163 struct mlx5_vport *vport; 1164 1165 if (!esw) { 1166 printf("mlx5_core: WARN: ""MLX5 E-Switch: vport %d got 
an event while eswitch is not initialized\n", vport_num); 1167 return; 1168 } 1169 1170 vport = &esw->vports[vport_num]; 1171 spin_lock(&vport->lock); 1172 if (vport->enabled) 1173 queue_work(esw->work_queue, &vport->vport_change_handler); 1174 spin_unlock(&vport->lock); 1175 } 1176 1177 /* Vport Administration */ 1178 #define ESW_ALLOWED(esw) \ 1179 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) 1180 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) 1181 1182 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) 1183 { 1184 ((u8 *)node_guid)[7] = mac[0]; 1185 ((u8 *)node_guid)[6] = mac[1]; 1186 ((u8 *)node_guid)[5] = mac[2]; 1187 ((u8 *)node_guid)[4] = 0xff; 1188 ((u8 *)node_guid)[3] = 0xfe; 1189 ((u8 *)node_guid)[2] = mac[3]; 1190 ((u8 *)node_guid)[1] = mac[4]; 1191 ((u8 *)node_guid)[0] = mac[5]; 1192 } 1193 1194 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 1195 int vport, u8 mac[ETH_ALEN]) 1196 { 1197 int err = 0; 1198 u64 node_guid; 1199 1200 if (!ESW_ALLOWED(esw)) 1201 return -EPERM; 1202 if (!LEGAL_VPORT(esw, vport)) 1203 return -EINVAL; 1204 1205 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); 1206 if (err) { 1207 mlx5_core_warn(esw->dev, 1208 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n", 1209 vport, err); 1210 return err; 1211 } 1212 1213 node_guid_gen_from_mac(&node_guid, mac); 1214 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid); 1215 if (err) { 1216 mlx5_core_warn(esw->dev, 1217 "Failed to mlx5_modify_nic_vport_node_guid vport(%d) err=(%d)\n", 1218 vport, err); 1219 return err; 1220 } 1221 1222 return err; 1223 } 1224 1225 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, 1226 int vport, int link_state) 1227 { 1228 if (!ESW_ALLOWED(esw)) 1229 return -EPERM; 1230 if (!LEGAL_VPORT(esw, vport)) 1231 return -EINVAL; 1232 1233 return mlx5_modify_vport_admin_state(esw->dev, 1234 
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, 1235 vport, link_state); 1236 } 1237 1238 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, 1239 int vport, struct mlx5_esw_vport_info *ivi) 1240 { 1241 u16 vlan; 1242 u8 qos; 1243 1244 if (!ESW_ALLOWED(esw)) 1245 return -EPERM; 1246 if (!LEGAL_VPORT(esw, vport)) 1247 return -EINVAL; 1248 1249 memset(ivi, 0, sizeof(*ivi)); 1250 ivi->vf = vport - 1; 1251 1252 mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac); 1253 ivi->linkstate = mlx5_query_vport_admin_state(esw->dev, 1254 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, 1255 vport); 1256 query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos); 1257 ivi->vlan = vlan; 1258 ivi->qos = qos; 1259 ivi->spoofchk = 0; 1260 1261 return 0; 1262 } 1263 1264 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 1265 int vport, u16 vlan, u8 qos) 1266 { 1267 struct mlx5_vport *evport; 1268 int err = 0; 1269 int set = 0; 1270 1271 if (!ESW_ALLOWED(esw)) 1272 return -EPERM; 1273 if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7)) 1274 return -EINVAL; 1275 1276 if (vlan || qos) 1277 set = 1; 1278 1279 evport = &esw->vports[vport]; 1280 1281 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set); 1282 if (err) 1283 return err; 1284 1285 mutex_lock(&evport->state_lock); 1286 evport->vlan = vlan; 1287 evport->qos = qos; 1288 if (evport->enabled) { 1289 esw_vport_ingress_config(esw, evport); 1290 esw_vport_egress_config(esw, evport); 1291 } 1292 mutex_unlock(&evport->state_lock); 1293 return err; 1294 } 1295 1296