1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/list.h> 34 #include <linux/ip.h> 35 #include <linux/ipv6.h> 36 #include <linux/tcp.h> 37 #include <linux/mlx5/fs.h> 38 #include "en.h" 39 #include "lib/mpfs.h" 40 41 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, 42 struct mlx5e_l2_rule *ai, int type); 43 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, 44 struct mlx5e_l2_rule *ai); 45 46 enum { 47 MLX5E_FULLMATCH = 0, 48 MLX5E_ALLMULTI = 1, 49 MLX5E_PROMISC = 2, 50 }; 51 52 enum { 53 MLX5E_UC = 0, 54 MLX5E_MC_IPV4 = 1, 55 MLX5E_MC_IPV6 = 2, 56 MLX5E_MC_OTHER = 3, 57 }; 58 59 enum { 60 MLX5E_ACTION_NONE = 0, 61 MLX5E_ACTION_ADD = 1, 62 MLX5E_ACTION_DEL = 2, 63 }; 64 65 struct mlx5e_l2_hash_node { 66 struct hlist_node hlist; 67 u8 action; 68 struct mlx5e_l2_rule ai; 69 bool mpfs; 70 }; 71 72 static inline int mlx5e_hash_l2(u8 *addr) 73 { 74 return addr[5]; 75 } 76 77 static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr) 78 { 79 struct mlx5e_l2_hash_node *hn; 80 int ix = mlx5e_hash_l2(addr); 81 int found = 0; 82 83 hlist_for_each_entry(hn, &hash[ix], hlist) 84 if (ether_addr_equal_64bits(hn->ai.addr, addr)) { 85 found = 1; 86 break; 87 } 88 89 if (found) { 90 hn->action = MLX5E_ACTION_NONE; 91 return; 92 } 93 94 hn = kzalloc(sizeof(*hn), GFP_ATOMIC); 95 if (!hn) 96 return; 97 98 ether_addr_copy(hn->ai.addr, addr); 99 hn->action = MLX5E_ACTION_ADD; 100 101 hlist_add_head(&hn->hlist, &hash[ix]); 102 } 103 104 static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn) 105 { 106 hlist_del(&hn->hlist); 107 kfree(hn); 108 } 109 110 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) 111 { 112 struct net_device *ndev = priv->netdev; 113 int max_list_size; 114 int list_size; 115 u16 *vlans; 116 int vlan; 117 int err; 118 int i; 119 120 list_size = 0; 121 for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) 122 list_size++; 123 124 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); 125 126 if (list_size > max_list_size) { 127 netdev_warn(ndev, 128 "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", 129 list_size, max_list_size); 130 list_size = max_list_size; 131 } 132 133 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL); 134 if (!vlans) 135 return -ENOMEM; 136 137 i = 0; 138 for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) { 139 if (i >= list_size) 140 break; 141 vlans[i++] = vlan; 142 } 143 144 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size); 145 if (err) 146 netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n", 147 err); 148 149 kfree(vlans); 150 return err; 151 } 152 153 enum mlx5e_vlan_rule_type { 154 MLX5E_VLAN_RULE_TYPE_UNTAGGED, 155 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 156 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 157 MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, 158 MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, 159 }; 160 161 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, 162 enum mlx5e_vlan_rule_type rule_type, 163 u16 vid, struct mlx5_flow_spec *spec) 164 { 165 struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; 166 struct mlx5_flow_destination dest = {}; 167 struct mlx5_flow_handle **rule_p; 168 MLX5_DECLARE_FLOW_ACT(flow_act); 169 int err = 0; 170 171 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 172 dest.ft = priv->fs.l2.ft.t; 173 174 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 175 176 switch (rule_type) { 177 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 178 /* cvlan_tag enabled in match criteria and 179 * disabled in match value means both S & C tags 180 * don't exist (untagged of both) 181 */ 182 rule_p = &priv->fs.vlan.untagged_rule; 183 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 184 outer_headers.cvlan_tag); 185 break; 186 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: 187 rule_p = &priv->fs.vlan.any_cvlan_rule; 188 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 189 outer_headers.cvlan_tag); 190 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); 191 break; 192 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: 193 rule_p = &priv->fs.vlan.any_svlan_rule; 194 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 195 outer_headers.svlan_tag); 196 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); 197 break; 198 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: 199 rule_p = &priv->fs.vlan.active_svlans_rule[vid]; 200 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 201 outer_headers.svlan_tag); 202 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); 203 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 204 outer_headers.first_vid); 205 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 206 vid); 207 break; 208 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */ 209 rule_p = &priv->fs.vlan.active_cvlans_rule[vid]; 210 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 211 outer_headers.cvlan_tag); 212 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); 213 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 214 outer_headers.first_vid); 215 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 216 vid); 217 break; 218 } 219 220 *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 221 222 if (IS_ERR(*rule_p)) { 223 err = PTR_ERR(*rule_p); 224 *rule_p = NULL; 225 netdev_err(priv->netdev, "%s: add rule failed\n", __func__); 226 } 227 228 return err; 229 } 230 231 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, 232 enum mlx5e_vlan_rule_type rule_type, u16 vid) 233 { 234 struct mlx5_flow_spec *spec; 235 int err = 0; 236 237 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 238 if (!spec) 239 return -ENOMEM; 240 241 if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID) 242 mlx5e_vport_context_update_vlans(priv); 243 244 err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec); 245 246 kvfree(spec); 247 248 return err; 249 } 250 251 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, 252 enum mlx5e_vlan_rule_type rule_type, u16 vid) 253 { 254 switch (rule_type) { 255 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 256 if (priv->fs.vlan.untagged_rule) { 257 mlx5_del_flow_rules(priv->fs.vlan.untagged_rule); 258 priv->fs.vlan.untagged_rule = NULL; 259 } 260 break; 261 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: 262 if (priv->fs.vlan.any_cvlan_rule) { 263 mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule); 264 priv->fs.vlan.any_cvlan_rule = NULL; 265 } 266 break; 267 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: 268 if (priv->fs.vlan.any_svlan_rule) { 269 mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule); 270 priv->fs.vlan.any_svlan_rule = NULL; 271 } 272 break; 273 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: 274 if (priv->fs.vlan.active_svlans_rule[vid]) { 275 mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]); 276 priv->fs.vlan.active_svlans_rule[vid] = NULL; 277 } 278 break; 279 case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: 280 if (priv->fs.vlan.active_cvlans_rule[vid]) { 281 mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]); 282 priv->fs.vlan.active_cvlans_rule[vid] = NULL; 283 } 284 mlx5e_vport_context_update_vlans(priv); 285 break; 286 } 287 } 288 289 static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) 290 { 291 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 292 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); 293 } 294 295 static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) 296 { 297 int err; 298 299 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 300 if (err) 301 return err; 302 303 return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); 304 } 305 306 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv) 307 { 308 if (!priv->fs.vlan.cvlan_filter_disabled) 309 return; 310 311 priv->fs.vlan.cvlan_filter_disabled = false; 312 if (priv->netdev->flags & IFF_PROMISC) 313 return; 314 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 315 } 316 317 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv) 318 { 319 if (priv->fs.vlan.cvlan_filter_disabled) 320 return; 321 322 priv->fs.vlan.cvlan_filter_disabled = true; 323 if (priv->netdev->flags & IFF_PROMISC) 324 return; 325 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 326 } 327 328 static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid) 329 { 330 int err; 331 332 set_bit(vid, priv->fs.vlan.active_cvlans); 333 334 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); 335 if (err) 336 clear_bit(vid, priv->fs.vlan.active_cvlans); 337 338 return err; 339 } 340 341 static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid) 342 { 343 struct net_device *netdev = priv->netdev; 344 int err; 345 346 set_bit(vid, priv->fs.vlan.active_svlans); 347 348 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); 349 if (err) { 350 clear_bit(vid, priv->fs.vlan.active_svlans); 351 return err; 352 } 353 354 /* Need to fix some features.. */ 355 netdev_update_features(netdev); 356 return err; 357 } 358 359 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 360 { 361 struct mlx5e_priv *priv = netdev_priv(dev); 362 363 if (be16_to_cpu(proto) == ETH_P_8021Q) 364 return mlx5e_vlan_rx_add_cvid(priv, vid); 365 else if (be16_to_cpu(proto) == ETH_P_8021AD) 366 return mlx5e_vlan_rx_add_svid(priv, vid); 367 368 return -EOPNOTSUPP; 369 } 370 371 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 372 { 373 struct mlx5e_priv *priv = netdev_priv(dev); 374 375 if (be16_to_cpu(proto) == ETH_P_8021Q) { 376 clear_bit(vid, priv->fs.vlan.active_cvlans); 377 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); 378 } else if (be16_to_cpu(proto) == ETH_P_8021AD) { 379 clear_bit(vid, priv->fs.vlan.active_svlans); 380 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); 381 netdev_update_features(dev); 382 } 383 384 return 0; 385 } 386 387 static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) 388 { 389 int i; 390 391 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); 392 393 for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { 394 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); 395 } 396 397 for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) 398 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); 399 400 if (priv->fs.vlan.cvlan_filter_disabled && 401 !(priv->netdev->flags & IFF_PROMISC)) 402 mlx5e_add_any_vid_rules(priv); 403 } 404 405 static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) 406 { 407 int i; 408 409 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); 410 411 for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { 412 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); 413 } 414 415 for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) 416 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); 417 418 if (priv->fs.vlan.cvlan_filter_disabled && 419 !(priv->netdev->flags & IFF_PROMISC)) 420 mlx5e_del_any_vid_rules(priv); 421 } 422 423 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ 424 for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \ 425 hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) 426 427 static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, 428 struct mlx5e_l2_hash_node *hn) 429 { 430 u8 action = hn->action; 431 u8 mac_addr[ETH_ALEN]; 432 int l2_err = 0; 433 434 ether_addr_copy(mac_addr, hn->ai.addr); 435 436 switch (action) { 437 case MLX5E_ACTION_ADD: 438 mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); 439 if (!is_multicast_ether_addr(mac_addr)) { 440 l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr); 441 hn->mpfs = !l2_err; 442 } 443 hn->action = MLX5E_ACTION_NONE; 444 break; 445 446 case MLX5E_ACTION_DEL: 447 if (!is_multicast_ether_addr(mac_addr) && hn->mpfs) 448 l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr); 449 mlx5e_del_l2_flow_rule(priv, &hn->ai); 450 mlx5e_del_l2_from_hash(hn); 451 break; 452 } 453 454 if (l2_err) 455 netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", 456 action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err); 457 } 458 459 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) 460 { 461 struct net_device *netdev = priv->netdev; 462 struct netdev_hw_addr *ha; 463 464 netif_addr_lock_bh(netdev); 465 466 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, 467 priv->netdev->dev_addr); 468 469 netdev_for_each_uc_addr(ha, netdev) 470 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr); 471 472 netdev_for_each_mc_addr(ha, netdev) 473 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr); 474 475 netif_addr_unlock_bh(netdev); 476 } 477 478 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, 479 u8 addr_array[][ETH_ALEN], int size) 480 { 481 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); 482 struct net_device *ndev = priv->netdev; 483 struct mlx5e_l2_hash_node *hn; 484 struct hlist_head *addr_list; 485 struct hlist_node *tmp; 486 int i = 0; 487 int hi; 488 489 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; 490 491 if (is_uc) /* Make sure our own address is pushed first */ 492 ether_addr_copy(addr_array[i++], ndev->dev_addr); 493 else if (priv->fs.l2.broadcast_enabled) 494 ether_addr_copy(addr_array[i++], ndev->broadcast); 495 496 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { 497 if (ether_addr_equal(ndev->dev_addr, hn->ai.addr)) 498 continue; 499 if (i >= size) 500 break; 501 ether_addr_copy(addr_array[i++], hn->ai.addr); 502 } 503 } 504 505 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, 506 int list_type) 507 { 508 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); 509 struct mlx5e_l2_hash_node *hn; 510 u8 (*addr_array)[ETH_ALEN] = NULL; 511 struct hlist_head *addr_list; 512 struct hlist_node *tmp; 513 int max_size; 514 int size; 515 int err; 516 int hi; 517 518 size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0); 519 max_size = is_uc ? 520 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : 521 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); 522 523 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; 524 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) 525 size++; 526 527 if (size > max_size) { 528 netdev_warn(priv->netdev, 529 "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n", 530 is_uc ? "UC" : "MC", size, max_size); 531 size = max_size; 532 } 533 534 if (size) { 535 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL); 536 if (!addr_array) { 537 err = -ENOMEM; 538 goto out; 539 } 540 mlx5e_fill_addr_array(priv, list_type, addr_array, size); 541 } 542 543 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size); 544 out: 545 if (err) 546 netdev_err(priv->netdev, 547 "Failed to modify vport %s list err(%d)\n", 548 is_uc ? "UC" : "MC", err); 549 kfree(addr_array); 550 } 551 552 static void mlx5e_vport_context_update(struct mlx5e_priv *priv) 553 { 554 struct mlx5e_l2_table *ea = &priv->fs.l2; 555 556 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); 557 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); 558 mlx5_modify_nic_vport_promisc(priv->mdev, 0, 559 ea->allmulti_enabled, 560 ea->promisc_enabled); 561 } 562 563 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) 564 { 565 struct mlx5e_l2_hash_node *hn; 566 struct hlist_node *tmp; 567 int i; 568 569 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) 570 mlx5e_execute_l2_action(priv, hn); 571 572 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) 573 mlx5e_execute_l2_action(priv, hn); 574 } 575 576 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) 577 { 578 struct mlx5e_l2_hash_node *hn; 579 struct hlist_node *tmp; 580 int i; 581 582 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) 583 hn->action = MLX5E_ACTION_DEL; 584 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) 585 hn->action = MLX5E_ACTION_DEL; 586 587 if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) 588 mlx5e_sync_netdev_addr(priv); 589 590 mlx5e_apply_netdev_addr(priv); 591 } 592 593 void mlx5e_set_rx_mode_work(struct work_struct *work) 594 { 595 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, 596 set_rx_mode_work); 597 598 struct mlx5e_l2_table *ea = &priv->fs.l2; 599 struct net_device *ndev = priv->netdev; 600 601 bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); 602 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); 603 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); 604 bool broadcast_enabled = rx_mode_enable; 605 606 bool enable_promisc = !ea->promisc_enabled && promisc_enabled; 607 bool disable_promisc = ea->promisc_enabled && !promisc_enabled; 608 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; 609 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; 610 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; 611 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; 612 613 if (enable_promisc) { 614 if (!priv->channels.params.vlan_strip_disable) 615 netdev_warn_once(ndev, 616 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n"); 617 mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); 618 if (!priv->fs.vlan.cvlan_filter_disabled) 619 mlx5e_add_any_vid_rules(priv); 620 } 621 if (enable_allmulti) 622 mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); 623 if (enable_broadcast) 624 mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); 625 626 mlx5e_handle_netdev_addr(priv); 627 628 if (disable_broadcast) 629 mlx5e_del_l2_flow_rule(priv, &ea->broadcast); 630 if (disable_allmulti) 631 mlx5e_del_l2_flow_rule(priv, &ea->allmulti); 632 if (disable_promisc) { 633 if (!priv->fs.vlan.cvlan_filter_disabled) 634 mlx5e_del_any_vid_rules(priv); 635 mlx5e_del_l2_flow_rule(priv, &ea->promisc); 636 } 637 638 ea->promisc_enabled = promisc_enabled; 639 ea->allmulti_enabled = allmulti_enabled; 640 ea->broadcast_enabled = broadcast_enabled; 641 642 mlx5e_vport_context_update(priv); 643 } 644 645 static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) 646 { 647 int i; 648 649 for (i = ft->num_groups - 1; i >= 0; i--) { 650 if (!IS_ERR_OR_NULL(ft->g[i])) 651 mlx5_destroy_flow_group(ft->g[i]); 652 ft->g[i] = NULL; 653 } 654 ft->num_groups = 0; 655 } 656 657 void mlx5e_init_l2_addr(struct mlx5e_priv *priv) 658 { 659 ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast); 660 } 661 662 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) 663 { 664 mlx5e_destroy_groups(ft); 665 kfree(ft->g); 666 mlx5_destroy_flow_table(ft->t); 667 ft->t = NULL; 668 } 669 670 static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) 671 { 672 int i; 673 674 for (i = 0; i < MLX5E_NUM_TT; i++) { 675 if (!IS_ERR_OR_NULL(ttc->rules[i])) { 676 mlx5_del_flow_rules(ttc->rules[i]); 677 ttc->rules[i] = NULL; 678 } 679 } 680 681 for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) { 682 if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) { 683 mlx5_del_flow_rules(ttc->tunnel_rules[i]); 684 ttc->tunnel_rules[i] = NULL; 685 } 686 } 687 } 688 689 struct mlx5e_etype_proto { 690 u16 etype; 691 u8 proto; 692 }; 693 694 static struct mlx5e_etype_proto ttc_rules[] = { 695 [MLX5E_TT_IPV4_TCP] = { 696 .etype = ETH_P_IP, 697 .proto = IPPROTO_TCP, 698 }, 699 [MLX5E_TT_IPV6_TCP] = { 700 .etype = ETH_P_IPV6, 701 .proto = IPPROTO_TCP, 702 }, 703 [MLX5E_TT_IPV4_UDP] = { 704 .etype = ETH_P_IP, 705 .proto = IPPROTO_UDP, 706 }, 707 [MLX5E_TT_IPV6_UDP] = { 708 .etype = ETH_P_IPV6, 709 .proto = IPPROTO_UDP, 710 }, 711 [MLX5E_TT_IPV4_IPSEC_AH] = { 712 .etype = ETH_P_IP, 713 .proto = IPPROTO_AH, 714 }, 715 [MLX5E_TT_IPV6_IPSEC_AH] = { 716 .etype = ETH_P_IPV6, 717 .proto = IPPROTO_AH, 718 }, 719 [MLX5E_TT_IPV4_IPSEC_ESP] = { 720 .etype = ETH_P_IP, 721 .proto = IPPROTO_ESP, 722 }, 723 [MLX5E_TT_IPV6_IPSEC_ESP] = { 724 .etype = ETH_P_IPV6, 725 .proto = IPPROTO_ESP, 726 }, 727 [MLX5E_TT_IPV4] = { 728 .etype = ETH_P_IP, 729 .proto = 0, 730 }, 731 [MLX5E_TT_IPV6] = { 732 .etype = ETH_P_IPV6, 733 .proto = 0, 734 }, 735 [MLX5E_TT_ANY] = { 736 .etype = 0, 737 .proto = 0, 738 }, 739 }; 740 741 static struct mlx5e_etype_proto ttc_tunnel_rules[] = { 742 [MLX5E_TT_IPV4_GRE] = { 743 .etype = ETH_P_IP, 744 .proto = IPPROTO_GRE, 745 }, 746 [MLX5E_TT_IPV6_GRE] = { 747 .etype = ETH_P_IPV6, 748 .proto = IPPROTO_GRE, 749 }, 750 [MLX5E_TT_IPV4_IPIP] = { 751 .etype = ETH_P_IP, 752 .proto = IPPROTO_IPIP, 753 }, 754 [MLX5E_TT_IPV6_IPIP] = { 755 .etype = ETH_P_IPV6, 756 .proto = IPPROTO_IPIP, 757 }, 758 [MLX5E_TT_IPV4_IPV6] = { 759 .etype = ETH_P_IP, 760 .proto = IPPROTO_IPV6, 761 }, 762 [MLX5E_TT_IPV6_IPV6] = { 763 .etype = ETH_P_IPV6, 764 .proto = IPPROTO_IPV6, 765 }, 766 767 }; 768 769 bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) 770 { 771 switch (proto_type) { 772 case IPPROTO_GRE: 773 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); 774 case IPPROTO_IPIP: 775 case IPPROTO_IPV6: 776 return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip); 777 default: 778 return false; 779 } 780 } 781 782 bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) 783 { 784 int tt; 785 786 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { 787 if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto)) 788 return true; 789 } 790 return false; 791 } 792 793 bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) 794 { 795 return (mlx5e_any_tunnel_proto_supported(mdev) && 796 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); 797 } 798 799 static u8 mlx5e_etype_to_ipv(u16 ethertype) 800 { 801 if (ethertype == ETH_P_IP) 802 return 4; 803 804 if (ethertype == ETH_P_IPV6) 805 return 6; 806 807 return 0; 808 } 809 810 static struct mlx5_flow_handle * 811 mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, 812 struct mlx5_flow_table *ft, 813 struct mlx5_flow_destination *dest, 814 u16 etype, 815 u8 proto) 816 { 817 int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); 818 MLX5_DECLARE_FLOW_ACT(flow_act); 819 struct mlx5_flow_handle *rule; 820 struct mlx5_flow_spec *spec; 821 int err = 0; 822 u8 ipv; 823 824 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 825 if (!spec) 826 return ERR_PTR(-ENOMEM); 827 828 if (proto) { 829 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 830 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); 831 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); 832 } 833 834 ipv = mlx5e_etype_to_ipv(etype); 835 if (match_ipv_outer && ipv) { 836 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 837 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); 838 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv); 839 } else if (etype) { 840 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 841 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); 842 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); 843 } 844 845 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); 846 if (IS_ERR(rule)) { 847 err = PTR_ERR(rule); 848 netdev_err(priv->netdev, "%s: add rule failed\n", __func__); 849 } 850 851 kvfree(spec); 852 return err ? ERR_PTR(err) : rule; 853 } 854 855 static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, 856 struct ttc_params *params, 857 struct mlx5e_ttc_table *ttc) 858 { 859 struct mlx5_flow_destination dest = {}; 860 struct mlx5_flow_handle **rules; 861 struct mlx5_flow_table *ft; 862 int tt; 863 int err; 864 865 ft = ttc->ft.t; 866 rules = ttc->rules; 867 868 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 869 for (tt = 0; tt < MLX5E_NUM_TT; tt++) { 870 if (tt == MLX5E_TT_ANY) 871 dest.tir_num = params->any_tt_tirn; 872 else 873 dest.tir_num = params->indir_tirn[tt]; 874 rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, 875 ttc_rules[tt].etype, 876 ttc_rules[tt].proto); 877 if (IS_ERR(rules[tt])) 878 goto del_rules; 879 } 880 881 if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) 882 return 0; 883 884 rules = ttc->tunnel_rules; 885 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 886 dest.ft = params->inner_ttc->ft.t; 887 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { 888 if (!mlx5e_tunnel_proto_supported(priv->mdev, 889 ttc_tunnel_rules[tt].proto)) 890 continue; 891 rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, 892 ttc_tunnel_rules[tt].etype, 893 ttc_tunnel_rules[tt].proto); 894 if (IS_ERR(rules[tt])) 895 goto del_rules; 896 } 897 898 return 0; 899 900 del_rules: 901 err = PTR_ERR(rules[tt]); 902 rules[tt] = NULL; 903 mlx5e_cleanup_ttc_rules(ttc); 904 return err; 905 } 906 907 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, 908 bool use_ipv) 909 { 910 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 911 struct mlx5e_flow_table *ft = &ttc->ft; 912 int ix = 0; 913 u32 *in; 914 int err; 915 u8 *mc; 916 917 ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS, 918 sizeof(*ft->g), GFP_KERNEL); 919 if (!ft->g) 920 return -ENOMEM; 921 in = kvzalloc(inlen, GFP_KERNEL); 922 if (!in) { 923 kfree(ft->g); 924 return -ENOMEM; 925 } 926 927 /* L4 Group */ 928 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 929 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 930 if (use_ipv) 931 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); 932 else 933 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 934 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 935 MLX5_SET_CFG(in, start_flow_index, ix); 936 ix += MLX5E_TTC_GROUP1_SIZE; 937 MLX5_SET_CFG(in, end_flow_index, ix - 1); 938 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 939 if (IS_ERR(ft->g[ft->num_groups])) 940 goto err; 941 ft->num_groups++; 942 943 /* L3 Group */ 944 MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); 945 MLX5_SET_CFG(in, start_flow_index, ix); 946 ix += MLX5E_TTC_GROUP2_SIZE; 947 MLX5_SET_CFG(in, end_flow_index, ix - 1); 948 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 949 if (IS_ERR(ft->g[ft->num_groups])) 950 goto err; 951 ft->num_groups++; 952 953 /* Any Group */ 954 memset(in, 0, inlen); 955 MLX5_SET_CFG(in, start_flow_index, ix); 956 ix += MLX5E_TTC_GROUP3_SIZE; 957 MLX5_SET_CFG(in, end_flow_index, ix - 1); 958 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 959 if (IS_ERR(ft->g[ft->num_groups])) 960 goto err; 961 ft->num_groups++; 962 963 kvfree(in); 964 return 0; 965 966 err: 967 err = PTR_ERR(ft->g[ft->num_groups]); 968 ft->g[ft->num_groups] = NULL; 969 kvfree(in); 970 971 return err; 972 } 973 974 static struct mlx5_flow_handle * 975 mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv, 976 struct mlx5_flow_table *ft, 977 struct mlx5_flow_destination *dest, 978 u16 etype, u8 proto) 979 { 980 MLX5_DECLARE_FLOW_ACT(flow_act); 981 struct mlx5_flow_handle *rule; 982 struct mlx5_flow_spec *spec; 983 int err = 0; 984 u8 ipv; 985 986 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 987 if (!spec) 988 return ERR_PTR(-ENOMEM); 989 990 ipv = mlx5e_etype_to_ipv(etype); 991 if (etype && ipv) { 992 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; 993 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version); 994 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv); 995 } 996 997 if (proto) { 998 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; 999 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); 1000 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto); 1001 } 1002 1003 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); 1004 if (IS_ERR(rule)) { 1005 err = PTR_ERR(rule); 1006 netdev_err(priv->netdev, "%s: add rule failed\n", __func__); 1007 } 1008 1009 kvfree(spec); 1010 return err ? ERR_PTR(err) : rule; 1011 } 1012 1013 static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv, 1014 struct ttc_params *params, 1015 struct mlx5e_ttc_table *ttc) 1016 { 1017 struct mlx5_flow_destination dest = {}; 1018 struct mlx5_flow_handle **rules; 1019 struct mlx5_flow_table *ft; 1020 int err; 1021 int tt; 1022 1023 ft = ttc->ft.t; 1024 rules = ttc->rules; 1025 1026 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1027 for (tt = 0; tt < MLX5E_NUM_TT; tt++) { 1028 if (tt == MLX5E_TT_ANY) 1029 dest.tir_num = params->any_tt_tirn; 1030 else 1031 dest.tir_num = params->indir_tirn[tt]; 1032 1033 rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest, 1034 ttc_rules[tt].etype, 1035 ttc_rules[tt].proto); 1036 if (IS_ERR(rules[tt])) 1037 goto del_rules; 1038 } 1039 1040 return 0; 1041 1042 del_rules: 1043 err = PTR_ERR(rules[tt]); 1044 rules[tt] = NULL; 1045 mlx5e_cleanup_ttc_rules(ttc); 1046 return err; 1047 } 1048 1049 static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) 1050 { 1051 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1052 struct mlx5e_flow_table *ft = &ttc->ft; 1053 int ix = 0; 1054 u32 *in; 1055 int err; 1056 u8 *mc; 1057 1058 ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1059 if (!ft->g) 1060 return -ENOMEM; 1061 in = kvzalloc(inlen, GFP_KERNEL); 1062 if (!in) { 1063 kfree(ft->g); 1064 return -ENOMEM; 1065 } 1066 1067 /* L4 Group */ 1068 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1069 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); 1070 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); 1071 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); 1072 MLX5_SET_CFG(in, start_flow_index, ix); 1073 ix += MLX5E_INNER_TTC_GROUP1_SIZE; 1074 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1075 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1076 if (IS_ERR(ft->g[ft->num_groups])) 1077 goto err; 1078 ft->num_groups++; 1079 1080 /* L3 Group */ 1081 MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); 1082 MLX5_SET_CFG(in, start_flow_index, ix); 1083 ix += MLX5E_INNER_TTC_GROUP2_SIZE; 1084 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1085 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1086 if (IS_ERR(ft->g[ft->num_groups])) 1087 goto err; 1088 ft->num_groups++; 1089 1090 /* Any Group */ 1091 memset(in, 0, inlen); 1092 MLX5_SET_CFG(in, start_flow_index, ix); 1093 ix += MLX5E_INNER_TTC_GROUP3_SIZE; 1094 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1095 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1096 if (IS_ERR(ft->g[ft->num_groups])) 1097 goto err; 1098 ft->num_groups++; 1099 1100 kvfree(in); 1101 return 0; 1102 1103 err: 1104 err = PTR_ERR(ft->g[ft->num_groups]); 1105 ft->g[ft->num_groups] = NULL; 1106 kvfree(in); 1107 1108 return err; 1109 } 1110 1111 void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, 1112 struct ttc_params *ttc_params) 1113 { 1114 ttc_params->any_tt_tirn = priv->direct_tir[0].tirn; 1115 ttc_params->inner_ttc = &priv->fs.inner_ttc; 1116 } 1117 1118 void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params) 1119 { 1120 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; 1121 1122 ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE; 1123 ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL; 1124 ft_attr->prio = MLX5E_NIC_PRIO; 1125 } 1126 1127 void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params) 1128 1129 { 1130 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; 1131 1132 ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; 1133 ft_attr->level = MLX5E_TTC_FT_LEVEL; 1134 ft_attr->prio = MLX5E_NIC_PRIO; 1135 } 1136 1137 int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, 1138 struct mlx5e_ttc_table *ttc) 1139 { 1140 struct mlx5e_flow_table *ft = &ttc->ft; 1141 int err; 1142 1143 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) 1144 return 0; 1145 1146 ft->t = mlx5_create_flow_table(priv->fs.ns, ¶ms->ft_attr); 1147 if (IS_ERR(ft->t)) { 1148 err = PTR_ERR(ft->t); 1149 ft->t = NULL; 1150 return err; 1151 } 1152 1153 err = mlx5e_create_inner_ttc_table_groups(ttc); 1154 if (err) 1155 goto err; 1156 1157 err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc); 1158 if (err) 1159 goto err; 1160 1161 return 0; 1162 1163 err: 1164 mlx5e_destroy_flow_table(ft); 1165 return err; 1166 } 1167 1168 void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, 1169 struct mlx5e_ttc_table *ttc) 1170 { 1171 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) 1172 return; 1173 1174 mlx5e_cleanup_ttc_rules(ttc); 1175 mlx5e_destroy_flow_table(&ttc->ft); 1176 } 1177 1178 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, 1179 struct mlx5e_ttc_table *ttc) 1180 { 1181 mlx5e_cleanup_ttc_rules(ttc); 1182 mlx5e_destroy_flow_table(&ttc->ft); 1183 } 1184 1185 int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, 1186 struct mlx5e_ttc_table *ttc) 1187 { 1188 bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); 1189 struct mlx5e_flow_table *ft = &ttc->ft; 1190 int err; 1191 1192 ft->t = mlx5_create_flow_table(priv->fs.ns, ¶ms->ft_attr); 1193 if (IS_ERR(ft->t)) { 1194 err = PTR_ERR(ft->t); 1195 ft->t = NULL; 1196 return err; 1197 } 1198 1199 err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer); 1200 if (err) 1201 goto err; 1202 1203 err = mlx5e_generate_ttc_table_rules(priv, params, ttc); 1204 if (err) 1205 goto err; 1206 1207 return 0; 1208 err: 1209 mlx5e_destroy_flow_table(ft); 1210 return err; 1211 } 1212 1213 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, 1214 struct mlx5e_l2_rule *ai) 1215 { 1216 if (!IS_ERR_OR_NULL(ai->rule)) { 1217 mlx5_del_flow_rules(ai->rule); 1218 ai->rule = NULL; 1219 } 1220 } 1221 1222 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, 1223 struct mlx5e_l2_rule *ai, int type) 1224 { 1225 struct mlx5_flow_table *ft = priv->fs.l2.ft.t; 1226 struct mlx5_flow_destination dest = {}; 1227 MLX5_DECLARE_FLOW_ACT(flow_act); 1228 struct mlx5_flow_spec *spec; 1229 int err = 0; 1230 u8 *mc_dmac; 1231 u8 *mv_dmac; 1232 1233 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 1234 if (!spec) 1235 return -ENOMEM; 1236 1237 mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1238 outer_headers.dmac_47_16); 1239 mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1240 outer_headers.dmac_47_16); 1241 1242 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1243 dest.ft = priv->fs.ttc.ft.t; 1244 1245 switch (type) { 1246 case MLX5E_FULLMATCH: 1247 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1248 eth_broadcast_addr(mc_dmac); 1249 ether_addr_copy(mv_dmac, ai->addr); 1250 break; 1251 1252 case MLX5E_ALLMULTI: 1253 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1254 mc_dmac[0] = 0x01; 1255 mv_dmac[0] = 0x01; 1256 break; 1257 1258 case MLX5E_PROMISC: 1259 break; 1260 } 1261 1262 ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 1263 if (IS_ERR(ai->rule)) { 1264 netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n", 1265 __func__, mv_dmac); 1266 err = PTR_ERR(ai->rule); 1267 ai->rule = NULL; 1268 } 1269 1270 kvfree(spec); 1271 1272 return err; 1273 } 1274 1275 #define MLX5E_NUM_L2_GROUPS 3 1276 #define MLX5E_L2_GROUP1_SIZE BIT(0) 1277 #define MLX5E_L2_GROUP2_SIZE BIT(15) 1278 #define MLX5E_L2_GROUP3_SIZE BIT(0) 1279 #define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\ 1280 MLX5E_L2_GROUP2_SIZE +\ 1281 MLX5E_L2_GROUP3_SIZE) 1282 static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) 1283 { 1284 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1285 struct mlx5e_flow_table *ft = &l2_table->ft; 1286 int ix = 0; 1287 u8 *mc_dmac; 1288 u32 *in; 1289 int err; 1290 u8 *mc; 1291 1292 ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1293 if (!ft->g) 1294 return -ENOMEM; 1295 in = kvzalloc(inlen, GFP_KERNEL); 1296 if (!in) { 1297 kfree(ft->g); 1298 return -ENOMEM; 1299 } 1300 1301 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1302 mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, 1303 outer_headers.dmac_47_16); 1304 /* Flow Group for promiscuous */ 1305 MLX5_SET_CFG(in, start_flow_index, ix); 1306 ix += MLX5E_L2_GROUP1_SIZE; 1307 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1308 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1309 if (IS_ERR(ft->g[ft->num_groups])) 1310 goto err_destroy_groups; 1311 ft->num_groups++; 1312 1313 /* Flow Group for full match */ 1314 eth_broadcast_addr(mc_dmac); 1315 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1316 MLX5_SET_CFG(in, start_flow_index, ix); 1317 ix += MLX5E_L2_GROUP2_SIZE; 1318 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1319 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1320 if (IS_ERR(ft->g[ft->num_groups])) 1321 goto err_destroy_groups; 1322 ft->num_groups++; 1323 1324 /* Flow Group for allmulti */ 1325 eth_zero_addr(mc_dmac); 1326 mc_dmac[0] = 0x01; 1327 MLX5_SET_CFG(in, start_flow_index, ix); 1328 ix += MLX5E_L2_GROUP3_SIZE; 1329 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1330 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1331 if (IS_ERR(ft->g[ft->num_groups])) 1332 goto err_destroy_groups; 1333 ft->num_groups++; 1334 1335 kvfree(in); 1336 return 0; 1337 1338 err_destroy_groups: 1339 err = PTR_ERR(ft->g[ft->num_groups]); 1340 ft->g[ft->num_groups] = NULL; 1341 mlx5e_destroy_groups(ft); 1342 kvfree(in); 1343 1344 return err; 1345 } 1346 1347 static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv) 1348 { 1349 mlx5e_destroy_flow_table(&priv->fs.l2.ft); 1350 } 1351 1352 static int mlx5e_create_l2_table(struct mlx5e_priv *priv) 1353 { 1354 struct mlx5e_l2_table *l2_table = &priv->fs.l2; 1355 struct mlx5e_flow_table *ft = &l2_table->ft; 1356 struct mlx5_flow_table_attr ft_attr = {}; 1357 int err; 1358 1359 ft->num_groups = 0; 1360 1361 ft_attr.max_fte = MLX5E_L2_TABLE_SIZE; 1362 ft_attr.level = MLX5E_L2_FT_LEVEL; 1363 ft_attr.prio = MLX5E_NIC_PRIO; 1364 1365 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 1366 if (IS_ERR(ft->t)) { 1367 err = PTR_ERR(ft->t); 1368 ft->t = NULL; 1369 return err; 1370 } 1371 1372 err = mlx5e_create_l2_table_groups(l2_table); 1373 if (err) 1374 goto err_destroy_flow_table; 1375 1376 return 0; 1377 1378 err_destroy_flow_table: 1379 mlx5_destroy_flow_table(ft->t); 1380 ft->t = NULL; 1381 1382 return err; 1383 } 1384 1385 #define MLX5E_NUM_VLAN_GROUPS 4 1386 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) 1387 #define MLX5E_VLAN_GROUP1_SIZE BIT(12) 1388 #define MLX5E_VLAN_GROUP2_SIZE BIT(1) 1389 #define MLX5E_VLAN_GROUP3_SIZE BIT(0) 1390 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ 1391 MLX5E_VLAN_GROUP1_SIZE +\ 1392 MLX5E_VLAN_GROUP2_SIZE +\ 1393 MLX5E_VLAN_GROUP3_SIZE) 1394 1395 static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, 1396 int inlen) 1397 { 1398 int err; 1399 int ix = 0; 1400 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1401 1402 memset(in, 0, inlen); 1403 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1404 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1405 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 1406 MLX5_SET_CFG(in, start_flow_index, ix); 1407 ix += MLX5E_VLAN_GROUP0_SIZE; 1408 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1409 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1410 if (IS_ERR(ft->g[ft->num_groups])) 1411 goto err_destroy_groups; 1412 ft->num_groups++; 1413 1414 memset(in, 0, inlen); 1415 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1416 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 1417 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 1418 MLX5_SET_CFG(in, start_flow_index, ix); 1419 ix += MLX5E_VLAN_GROUP1_SIZE; 1420 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1421 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1422 if (IS_ERR(ft->g[ft->num_groups])) 1423 goto err_destroy_groups; 1424 ft->num_groups++; 1425 1426 memset(in, 0, inlen); 1427 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1428 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1429 MLX5_SET_CFG(in, start_flow_index, ix); 1430 ix += MLX5E_VLAN_GROUP2_SIZE; 1431 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1432 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1433 if (IS_ERR(ft->g[ft->num_groups])) 1434 goto err_destroy_groups; 1435 ft->num_groups++; 1436 1437 memset(in, 0, inlen); 1438 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1439 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 1440 MLX5_SET_CFG(in, start_flow_index, ix); 1441 ix += MLX5E_VLAN_GROUP3_SIZE; 1442 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1443 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1444 if (IS_ERR(ft->g[ft->num_groups])) 1445 goto err_destroy_groups; 1446 ft->num_groups++; 1447 1448 return 0; 1449 1450 err_destroy_groups: 1451 err = PTR_ERR(ft->g[ft->num_groups]); 1452 ft->g[ft->num_groups] = NULL; 1453 mlx5e_destroy_groups(ft); 1454 1455 return err; 1456 } 1457 1458 static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft) 1459 { 1460 u32 *in; 1461 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1462 int err; 1463 1464 in = kvzalloc(inlen, GFP_KERNEL); 1465 if (!in) 1466 return -ENOMEM; 1467 1468 err = __mlx5e_create_vlan_table_groups(ft, in, inlen); 1469 1470 kvfree(in); 1471 return err; 1472 } 1473 1474 static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) 1475 { 1476 struct mlx5e_flow_table *ft = &priv->fs.vlan.ft; 1477 struct mlx5_flow_table_attr ft_attr = {}; 1478 int err; 1479 1480 ft->num_groups = 0; 1481 1482 ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE; 1483 ft_attr.level = MLX5E_VLAN_FT_LEVEL; 1484 ft_attr.prio = MLX5E_NIC_PRIO; 1485 1486 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 1487 1488 if (IS_ERR(ft->t)) { 1489 err = PTR_ERR(ft->t); 1490 ft->t = NULL; 1491 return err; 1492 } 1493 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1494 if (!ft->g) { 1495 err = -ENOMEM; 1496 goto err_destroy_vlan_table; 1497 } 1498 1499 err = mlx5e_create_vlan_table_groups(ft); 1500 if (err) 1501 goto err_free_g; 1502 1503 mlx5e_add_vlan_rules(priv); 1504 1505 return 0; 1506 1507 err_free_g: 1508 kfree(ft->g); 1509 err_destroy_vlan_table: 1510 mlx5_destroy_flow_table(ft->t); 1511 ft->t = NULL; 1512 1513 return err; 1514 } 1515 1516 static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) 1517 { 1518 mlx5e_del_vlan_rules(priv); 1519 mlx5e_destroy_flow_table(&priv->fs.vlan.ft); 1520 } 1521 1522 int mlx5e_create_flow_steering(struct mlx5e_priv *priv) 1523 { 1524 struct ttc_params ttc_params = {}; 1525 int tt, err; 1526 1527 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, 1528 MLX5_FLOW_NAMESPACE_KERNEL); 1529 1530 if (!priv->fs.ns) 1531 return -EOPNOTSUPP; 1532 1533 err = mlx5e_arfs_create_tables(priv); 1534 if (err) { 1535 netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", 1536 err); 1537 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 1538 } 1539 1540 mlx5e_set_ttc_basic_params(priv, &ttc_params); 1541 mlx5e_set_inner_ttc_ft_params(&ttc_params); 1542 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 1543 ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; 1544 1545 err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); 1546 if (err) { 1547 netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", 1548 err); 1549 goto err_destroy_arfs_tables; 1550 } 1551 1552 mlx5e_set_ttc_ft_params(&ttc_params); 1553 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) 1554 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; 1555 1556 err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); 1557 if (err) { 1558 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 1559 err); 1560 goto err_destroy_inner_ttc_table; 1561 } 1562 1563 err = mlx5e_create_l2_table(priv); 1564 if (err) { 1565 netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n", 1566 err); 1567 goto err_destroy_ttc_table; 1568 } 1569 1570 err = mlx5e_create_vlan_table(priv); 1571 if (err) { 1572 netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n", 1573 err); 1574 goto err_destroy_l2_table; 1575 } 1576 1577 mlx5e_ethtool_init_steering(priv); 1578 1579 return 0; 1580 1581 err_destroy_l2_table: 1582 mlx5e_destroy_l2_table(priv); 1583 err_destroy_ttc_table: 1584 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); 1585 err_destroy_inner_ttc_table: 1586 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); 1587 err_destroy_arfs_tables: 1588 mlx5e_arfs_destroy_tables(priv); 1589 1590 return err; 1591 } 1592 1593 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) 1594 { 1595 mlx5e_destroy_vlan_table(priv); 1596 mlx5e_destroy_l2_table(priv); 1597 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); 1598 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); 1599 mlx5e_arfs_destroy_tables(priv); 1600 mlx5e_ethtool_cleanup_steering(priv); 1601 } 1602