/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add a default match-all FTE to tag ingress
		 * packets with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop-all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle  *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u64                     node_guid;
	int                     link_state;
	u8                      qos;
	u8                      spoofchk: 1;
	u8                      trusted: 1;
	u8                      roce_enabled: 1;
	u8                      mig_enabled: 1;
	u8                      ipsec_crypto_enabled: 1;
	u8                      ipsec_packet_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};

static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}

struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;

	struct mlx5_vport_info  info;

	struct {
		bool            enabled;
		u32             esw_tsar_ix;
		u32             bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;
	bool                    enabled;
	bool max_eqs_set;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	u16			num_vfs;
	u16			num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;

struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		u32             root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */

		/* Protected by esw->state_lock.
		 * Initially 0, meaning no QoS users and QoS is disabled.
		 */
		refcount_t refcnt;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	u8			num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	}  params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP         = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE  = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		bool vport_valid;
		u16 vport;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
					u32 *max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
					u32 max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
						   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return  ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
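
/* Illustrative sketch, not part of this header: the devlink port index packs
 * the function's vhca_id into the upper 16 bits and the vport number into the
 * lower 16 bits, so the two helpers above round-trip:
 *
 *	unsigned int idx = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
 *	u16 num = mlx5_esw_devlink_port_index_to_vport_num(idx);
 *	// num == vport_num; idx >> 16 == MLX5_CAP_GEN(dev, vhca_id)
 */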

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators are valid only after the vports are initialized in mlx5_eswitch_init.
 * Borrowed the idea from xa_for_each_marked() but with support for a desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)

/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)		\
	xa_for_each_range(&((esw)->vports),				\
			  index,					\
			  vport,					\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base),	\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)
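
/* Illustrative usage sketch (an assumption, not taken from this file): the
 * EC VF iterator is range based, so the caller passes the number of EC VFs
 * as "last" and must only use it when EC SRIOV is enabled.
 *
 *	unsigned long i;
 *	struct mlx5_vport *vport;
 *
 *	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
 *		// per-EC-VF-vport work goes here
 *	}
 */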

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
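
/* Illustrative sketch of a hypothetical consumer (names below are made up,
 * not part of this API): register a blocking notifier to learn about eswitch
 * mode changes; the callback data is a struct mlx5_esw_event_info.
 *
 *	static int my_esw_mode_cb(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		// react to info->new_mode, e.g. MLX5_ESWITCH_OFFLOADS
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_esw_nb = {
 *		.notifier_call = my_esw_mode_cb,
 *	};
 *
 *	mlx5_esw_event_notifier_register(esw, &my_esw_nb);
 *	...
 *	mlx5_esw_event_notifier_unregister(esw, &my_esw_nb);
 */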

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */