/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"

#ifdef CONFIG_MLX5_ESWITCH

#define MLX5_MAX_UC_PER_VPORT(dev) \
        (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
        (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define MLX5_MIN_BW_SHARE 1

#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
        min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
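/* Illustrative use of the conversion above: a caller turning a vport's
 * min_rate into a TSAR bw_share value clamps rate / divider to the range
 * [MLX5_MIN_BW_SHARE, limit], e.g.
 *
 *      bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, divider, fw_max);
 *
 * where divider and fw_max would typically be derived from the device QoS
 * capabilities (the names in this example are illustrative only).
 */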
#define mlx5_esw_has_fwd_fdb(dev) \
        MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define FDB_MAX_CHAIN 3
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
#define FDB_MAX_PRIO 16

struct vport_ingress {
        struct mlx5_flow_table *acl;
        struct mlx5_flow_group *allow_untagged_spoofchk_grp;
        struct mlx5_flow_group *allow_spoofchk_only_grp;
        struct mlx5_flow_group *allow_untagged_only_grp;
        struct mlx5_flow_group *drop_grp;
        struct mlx5_modify_hdr *modify_metadata;
        struct mlx5_flow_handle *modify_metadata_rule;
        struct mlx5_flow_handle *allow_rule;
        struct mlx5_flow_handle *drop_rule;
        struct mlx5_fc *drop_counter;
};

struct vport_egress {
        struct mlx5_flow_table *acl;
        struct mlx5_flow_group *allowed_vlans_grp;
        struct mlx5_flow_group *drop_grp;
        struct mlx5_flow_handle *allowed_vlan;
        struct mlx5_flow_handle *drop_rule;
        struct mlx5_fc *drop_counter;
};

struct mlx5_vport_drop_stats {
        u64 rx_dropped;
        u64 tx_dropped;
};

struct mlx5_vport_info {
        u8 mac[ETH_ALEN];
        u16 vlan;
        u8 qos;
        u64 node_guid;
        int link_state;
        u32 min_rate;
        u32 max_rate;
        bool spoofchk;
        bool trusted;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
        MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
        MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
        MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport {
        struct mlx5_core_dev *dev;
        int vport;
        struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
        struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
        struct mlx5_flow_handle *promisc_rule;
        struct mlx5_flow_handle *allmulti_rule;
        struct work_struct vport_change_handler;

        struct vport_ingress ingress;
        struct vport_egress egress;

        struct mlx5_vport_info info;

        struct {
                bool enabled;
                u32 esw_tsar_ix;
                u32 bw_share;
        } qos;

        bool enabled;
        enum mlx5_eswitch_vport_event enabled_events;
};

enum offloads_fdb_flags {
        ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};

extern const unsigned int ESW_POOLS[4];

#define PRIO_LEVELS 2
struct mlx5_eswitch_fdb {
        union {
                struct legacy_fdb {
                        struct mlx5_flow_table *fdb;
                        struct mlx5_flow_group *addr_grp;
                        struct mlx5_flow_group *allmulti_grp;
                        struct mlx5_flow_group *promisc_grp;
                        struct mlx5_flow_table *vepa_fdb;
                        struct mlx5_flow_handle *vepa_uplink_rule;
                        struct mlx5_flow_handle *vepa_star_rule;
                } legacy;

                struct offloads_fdb {
                        struct mlx5_flow_namespace *ns;
                        struct mlx5_flow_table *slow_fdb;
                        struct mlx5_flow_group *send_to_vport_grp;
                        struct mlx5_flow_group *peer_miss_grp;
                        struct mlx5_flow_handle **peer_miss_rules;
                        struct mlx5_flow_group *miss_grp;
                        struct mlx5_flow_handle *miss_rule_uni;
                        struct mlx5_flow_handle *miss_rule_multi;
                        int vlan_push_pop_refcount;

                        struct {
                                struct mlx5_flow_table *fdb;
                                u32 num_rules;
                        } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
                        /* Protects fdb_prio table */
                        struct mutex fdb_prio_lock;

                        int fdb_left[ARRAY_SIZE(ESW_POOLS)];
                } offloads;
        };
        u32 flags;
};
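/* Illustrative lookup into the offloads FDB priority table declared above;
 * a flow table created for a given (chain, prio, level) can be cached at
 *
 *      esw->fdb_table.offloads.fdb_prio[chain][prio][level].fdb
 *
 * with accesses serialized by fdb_prio_lock.
 */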
struct mlx5_esw_offload {
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_flow_group *vport_rx_group;
        struct mlx5_eswitch_rep *vport_reps;
        struct list_head peer_flows;
        struct mutex peer_mutex;
        struct mutex encap_tbl_lock; /* protects encap_tbl */
        DECLARE_HASHTABLE(encap_tbl, 8);
        struct mod_hdr_tbl mod_hdr;
        DECLARE_HASHTABLE(termtbl_tbl, 8);
        struct mutex termtbl_mutex; /* protects termtbl hash */
        const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
        u8 inline_mode;
        atomic64_t num_flows;
        enum devlink_eswitch_encap_mode encap;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
        struct l2addr_node node;
        struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
        u32 refcnt;
};

struct mlx5_host_work {
        struct work_struct work;
        struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
        struct mlx5_nb nb;
        u16 num_vfs;
};

enum {
        MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
};

struct mlx5_eswitch {
        struct mlx5_core_dev *dev;
        struct mlx5_nb nb;
        /* legacy data structures */
        struct mlx5_eswitch_fdb fdb_table;
        struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
        struct esw_mc_addr mc_promisc;
        /* end of legacy */
        struct workqueue_struct *work_queue;
        struct mlx5_vport *vports;
        u32 flags;
        int total_vports;
        int enabled_vports;
        /* Synchronize between vport change events
         * and async SRIOV admin state changes
         */
        struct mutex state_lock;

        struct {
                bool enabled;
                u32 root_tsar_id;
        } qos;

        struct mlx5_esw_offload offloads;
        int mode;
        int nvports;
        u16 manager_vport;
        u16 first_host_vport;
        struct mlx5_esw_functions esw_funcs;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport);
int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
                                 struct mlx5_vport *vport);
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport);
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
                                struct mlx5_vport *vport);
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
                                  struct mlx5_vport *vport);
void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
                                   struct mlx5_vport *vport);
void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
                                               struct mlx5_vport *vport);
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
                               u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               u16 vport, u8 mac[ETH_ALEN]);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
                                 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
                                    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
                                 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
                                u32 max_rate, u32 min_rate);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
                                  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 u16 vport,
                                 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
                                          void *in, int inlen);
int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
                                         void *out, int outlen);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
                              struct mlx5_flow_act *flow_act,
                              struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
                              struct mlx5_flow_table *ft,
                              struct mlx5_flow_spec *spec,
                              struct mlx5_esw_flow_attr *attr,
                              struct mlx5_flow_act *flow_act,
                              struct mlx5_flow_destination *dest,
                              int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
                         struct mlx5_termtbl_handle *tt);
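/* Note (informational): a termination table attached to a rule's destinations
 * by mlx5_eswitch_add_termtbl_rule() is reference counted (see
 * offloads.termtbl_tbl and termtbl_mutex); the matching release is
 * mlx5_eswitch_termtbl_put(), invoked when the offloaded rule is torn down.
 */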
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_spec *spec,
                          struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,
                                struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_handle *rule,
                          struct mlx5_esw_flow_attr *attr);

bool
mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);

u16
mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);

u32
mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
                                  struct mlx5_flow_destination *dest);

enum {
        SET_VLAN_STRIP  = BIT(0),
        SET_VLAN_INSERT = BIT(1)
};

enum mlx5_flow_match_level {
        MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
        MLX5_MATCH_L2   = MLX5_INLINE_MODE_L2,
        MLX5_MATCH_L3   = MLX5_INLINE_MODE_IP,
        MLX5_MATCH_L4   = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
        MLX5_ESW_DEST_ENCAP       = BIT(0),
        MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
};

struct mlx5_esw_flow_attr {
        struct mlx5_eswitch_rep *in_rep;
        struct mlx5_core_dev *in_mdev;
        struct mlx5_core_dev *counter_dev;

        int split_count;
        int out_count;

        int action;
        __be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
        u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
        u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
        u8 total_vlan;
        bool vlan_handled;
        struct {
                u32 flags;
                struct mlx5_eswitch_rep *rep;
                struct mlx5_pkt_reformat *pkt_reformat;
                struct mlx5_core_dev *mdev;
                struct mlx5_termtbl_handle *termtbl;
        } dests[MLX5_MAX_FLOW_FWD_VPORTS];
        struct mlx5_modify_hdr *modify_hdr;
        u8 inner_match_level;
        u8 outer_match_level;
        struct mlx5_fc *counter;
        u32 chain;
        u16 prio;
        u32 dest_chain;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
                                         struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
                                        enum devlink_eswitch_encap_mode encap,
                                        struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
                                        enum devlink_eswitch_encap_mode *encap);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
                                                       u8 vlan_depth)
{
        bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
                   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

        if (vlan_depth == 1)
                return ret;

        return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
               MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
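/* For example, offloading a single VLAN push/pop action only requires
 * mlx5_eswitch_vlan_actions_supported(dev, 1) (the pop_vlan/push_vlan caps),
 * while double (QinQ) VLAN actions additionally need the pop_vlan_2 and
 * push_vlan_2 capabilities checked by mlx5_eswitch_vlan_actions_supported(dev, 2).
 */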
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
                         struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
                               struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
        dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
        dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
        mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
        return mlx5_core_is_ecpf_esw_manager(dev) ?
                MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
        return mlx5_core_is_ecpf_esw_manager(dev) ?
                MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
{
        /* Ideally the device would expose the functions-changed capability
         * whenever such events must be processed on it (i.e. when it is the
         * eswitch manager), regardless of whether it is the ECPF or the PF.
         * However, some ECPF-based devices do not have this capability set,
         * so OR in the ECPF check to cover those devices as well.
         */
        return MLX5_CAP_ESW(dev, esw_functions_changed) ||
               mlx5_core_is_ecpf_esw_manager(dev);
}

static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
        /* The uplink vport always occupies the last element of the array. */
        return esw->total_vports - 1;
}

static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
        return esw->total_vports - 2;
}

static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
                                                  u16 vport_num)
{
        if (vport_num == MLX5_VPORT_ECPF) {
                if (!mlx5_ecpf_vport_exists(esw->dev))
                        esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
                return mlx5_eswitch_ecpf_idx(esw);
        }

        if (vport_num == MLX5_VPORT_UPLINK)
                return mlx5_eswitch_uplink_idx(esw);

        return vport_num;
}

static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
                                                  int index)
{
        if (index == mlx5_eswitch_ecpf_idx(esw) &&
            mlx5_ecpf_vport_exists(esw->dev))
                return MLX5_VPORT_ECPF;

        if (index == mlx5_eswitch_uplink_idx(esw))
                return MLX5_VPORT_UPLINK;

        return index;
}
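/* Vport array layout implied by the helpers above: the PF and VF vports are
 * indexed directly by their vport number, the ECPF (when it exists) sits at
 * index total_vports - 2, and the uplink vport always occupies the last slot,
 * total_vports - 1.
 */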
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_vports(esw, i, vport)          \
        for ((i) = MLX5_VPORT_PF;                       \
             (vport) = &(esw)->vports[i],               \
             (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_all_vports_reverse(esw, i, vport)  \
        for ((i) = (esw)->total_vports - 1;             \
             (vport) = &(esw)->vports[i],               \
             (i) >= MLX5_VPORT_PF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
        for ((i) = MLX5_VPORT_FIRST_VF;                 \
             (vport) = &(esw)->vports[(i)],             \
             (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
        for ((i) = (nvfs);                                      \
             (vport) = &(esw)->vports[(i)],                     \
             (i) >= MLX5_VPORT_FIRST_VF; (i)--)

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep)                      \
        for ((i) = MLX5_VPORT_PF;                               \
             (rep) = &(esw)->offloads.vport_reps[i],            \
             (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)             \
        for ((i) = MLX5_VPORT_FIRST_VF;                         \
             (rep) = &(esw)->offloads.vport_reps[i],            \
             (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)     \
        for ((i) = (nvfs);                                      \
             (rep) = &(esw)->offloads.vport_reps[i],            \
             (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs)        \
        for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)        \
        for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)      \
        for ((i) = (esw)->first_host_vport;                     \
             (rep) = &(esw)->offloads.vport_reps[i],            \
             (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)      \
        for ((i) = (nvfs);                                              \
             (rep) = &(esw)->offloads.vport_reps[i],                    \
             (i) >= (esw)->first_host_vport; (i)--)

#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)     \
        for ((vport) = (esw)->first_host_vport;                 \
             (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)     \
        for ((vport) = (nvfs);                                          \
             (vport) >= (esw)->first_host_vport; (vport)--)
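/* Typical (illustrative) iterator usage, walking the VF vports:
 *
 *      mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
 *              if (!vport->enabled)
 *                      continue;
 *              ...
 *      }
 */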
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);

void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

void
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}

#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
#define FDB_MAX_PRIO 1

#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */