1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021 Mellanox Technologies. */
3 
4 #include <linux/build_bug.h>
5 #include <linux/list.h>
6 #include <linux/notifier.h>
7 #include <net/netevent.h>
8 #include <net/switchdev.h>
9 #include "lib/devcom.h"
10 #include "bridge.h"
11 #include "eswitch.h"
12 #include "bridge_priv.h"
13 #define CREATE_TRACE_POINTS
14 #include "diag/bridge_tracepoint.h"
15 
16 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE 12000
17 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE 16000
18 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
19 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO		\
20 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
21 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM	\
22 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
23 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO		\
24 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM +	\
25 	 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
26 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM			\
27 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO + 1)
28 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO			\
29 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM +		\
30 	 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
31 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM	\
32 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
33 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO		\
34 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM +	\
35 	 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
36 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM			\
37 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO + 1)
38 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO			\
39 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM +		\
40 	 MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE - 1)
41 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE			\
42 	(MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO + 1)
43 static_assert(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE == 64000);
44 
45 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE 16000
46 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE (32000 - 1)
47 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
48 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO		\
49 	(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
50 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM		\
51 	(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
52 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO			\
53 	(MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM +		\
54 	 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
55 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
56 	(MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
57 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO			\
58 	(MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM +		\
59 	 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE - 1)
60 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
61 	(MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
62 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO	\
63 	MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM
64 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE			\
65 	(MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO + 1)
66 static_assert(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE == 64000);
67 
68 #define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
69 
/* Relative levels of the bridge flow tables within the FDB_BR_OFFLOAD prio. */
enum {
	MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
};
75 
/* Hashtable of offloaded FDB entries, keyed by struct mlx5_esw_bridge_fdb_key. */
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
82 
/* Bits for struct mlx5_esw_bridge->flags. */
enum {
	MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
};
86 
/* Per-bridge offload state. One instance exists per offloaded Linux bridge;
 * it owns a private egress flow table while the ingress/skip tables are
 * shared through br_offloads.
 */
struct mlx5_esw_bridge {
	int ifindex;		/* presumably the bridge netdev ifindex — confirm against callers */
	int refcnt;		/* number of attached ports holding this bridge */
	struct list_head list;	/* membership in br_offloads bridge list */
	struct mlx5_esw_bridge_offloads *br_offloads;

	/* Offloaded FDB entries: ordered list plus lookup hashtable. */
	struct list_head fdb_list;
	struct rhashtable fdb_ht;

	/* Per-bridge egress table with its flow groups. The miss objects are
	 * optional (NULL when REMOVE_HEADER reformat is unsupported).
	 */
	struct mlx5_flow_table *egress_ft;
	struct mlx5_flow_group *egress_vlan_fg;
	struct mlx5_flow_group *egress_qinq_fg;
	struct mlx5_flow_group *egress_mac_fg;
	struct mlx5_flow_group *egress_miss_fg;
	struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
	struct mlx5_flow_handle *egress_miss_handle;
	unsigned long ageing_time;	/* FDB entry ageing timeout */
	u32 flags;			/* MLX5_ESW_BRIDGE_*_FLAG bits */
	u16 vlan_proto;			/* ETH_P_8021Q or ETH_P_8021AD */
};
107 
108 static void
109 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
110 				   unsigned long val)
111 {
112 	struct switchdev_notifier_fdb_info send_info = {};
113 
114 	send_info.addr = addr;
115 	send_info.vid = vid;
116 	send_info.offloaded = true;
117 	call_switchdev_notifiers(val, dev, &send_info.info, NULL);
118 }
119 
120 static void
121 mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
122 {
123 	if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
124 		mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
125 						   entry->key.vid,
126 						   SWITCHDEV_FDB_DEL_TO_BRIDGE);
127 }
128 
129 static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
130 {
131 	return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
132 		MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
133 		MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
134 		offsetof(struct vlan_ethhdr, h_vlan_proto);
135 }
136 
137 static struct mlx5_pkt_reformat *
138 mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
139 {
140 	struct mlx5_pkt_reformat_params reformat_params = {};
141 
142 	reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
143 	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
144 	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
145 	reformat_params.size = sizeof(struct vlan_hdr);
146 	return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
147 }
148 
149 static struct mlx5_flow_table *
150 mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
151 {
152 	struct mlx5_flow_table_attr ft_attr = {};
153 	struct mlx5_core_dev *dev = esw->dev;
154 	struct mlx5_flow_namespace *ns;
155 	struct mlx5_flow_table *fdb;
156 
157 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
158 	if (!ns) {
159 		esw_warn(dev, "Failed to get FDB namespace\n");
160 		return ERR_PTR(-ENOENT);
161 	}
162 
163 	ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
164 	ft_attr.max_fte = max_fte;
165 	ft_attr.level = level;
166 	ft_attr.prio = FDB_BR_OFFLOAD;
167 	fdb = mlx5_create_flow_table(ns, &ft_attr);
168 	if (IS_ERR(fdb))
169 		esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
170 
171 	return fdb;
172 }
173 
/* Create an ingress flow group covering flow indices [@from, @to] that
 * matches on source MAC, a VLAN tag of @vlan_proto (802.1Q or 802.1ad),
 * the first VID and the source vport metadata in reg_c_0.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					     struct mlx5_eswitch *esw,
					     struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full source MAC plus tag type/VID of the requested VLAN protocol. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	/* Source vport is carried in metadata register C0. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
			 vlan_proto, PTR_ERR(fg));

	return fg;
}
214 
215 static struct mlx5_flow_group *
216 mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
217 				       struct mlx5_flow_table *ingress_ft)
218 {
219 	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
220 	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
221 
222 	return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
223 }
224 
225 static struct mlx5_flow_group *
226 mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
227 				       struct mlx5_flow_table *ingress_ft)
228 {
229 	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
230 	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
231 
232 	return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
233 							    ingress_ft);
234 }
235 
/* Create an ingress "filter" flow group covering indices [@from, @to] that
 * matches on source MAC, presence of a @vlan_proto tag (no VID match) and
 * the source vport metadata in reg_c_0.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
						    u16 vlan_proto, struct mlx5_eswitch *esw,
						    struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Unlike the VLAN group, only tag presence is matched — not the VID. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
273 
274 static struct mlx5_flow_group *
275 mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
276 					      struct mlx5_flow_table *ingress_ft)
277 {
278 	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
279 	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
280 
281 	return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
282 								   ingress_ft);
283 }
284 
285 static struct mlx5_flow_group *
286 mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
287 					      struct mlx5_flow_table *ingress_ft)
288 {
289 	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
290 	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
291 
292 	return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
293 								   ingress_ft);
294 }
295 
/* Create the untagged/MAC-only ingress flow group: matches on source MAC
 * and the source vport metadata in reg_c_0 (no VLAN fields).
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
			 PTR_ERR(fg));

	kvfree(in);
	return fg;
}
331 
/* Create an egress flow group covering indices [@from, @to] that matches on
 * destination MAC, a VLAN tag of @vlan_proto and the first VID.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					    struct mlx5_eswitch *esw,
					    struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Egress lookup keys on destination MAC rather than source MAC. */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
367 
368 static struct mlx5_flow_group *
369 mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
370 {
371 	unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
372 	unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
373 
374 	return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
375 }
376 
377 static struct mlx5_flow_group *
378 mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
379 				      struct mlx5_flow_table *egress_ft)
380 {
381 	unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
382 	unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
383 
384 	return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
385 }
386 
/* Create the untagged/MAC-only egress flow group: matches on destination
 * MAC only.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
417 
/* Create the single-entry egress miss flow group, matching only on the
 * tunnel bits of metadata register C1 (ESW_TUN_MASK).
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	/* FROM == TO: the group holds exactly one miss rule. */
	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table miss flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
447 
/* Create the shared bridge ingress and skip flow tables plus all ingress
 * flow groups, and publish them in @br_offloads on success.
 *
 * Returns 0 or a negative errno; on failure everything created so far is
 * torn down in reverse order via the goto chain.
 */
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
	struct mlx5_flow_table *ingress_ft, *skip_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	int err;

	/* Ingress matching relies on source vport metadata in reg_c_0. */
	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return -EOPNOTSUPP;

	ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
						  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
						  esw);
	if (IS_ERR(ingress_ft))
		return PTR_ERR(ingress_ft);

	skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
					       MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
					       esw);
	if (IS_ERR(skip_ft)) {
		err = PTR_ERR(skip_ft);
		goto err_skip_tbl;
	}

	vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_filter_fg)) {
		err = PTR_ERR(vlan_filter_fg);
		goto err_vlan_filter_fg;
	}

	qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_filter_fg)) {
		err = PTR_ERR(qinq_filter_fg);
		goto err_qinq_filter_fg;
	}

	mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* Publish only after everything was created successfully. */
	br_offloads->ingress_ft = ingress_ft;
	br_offloads->skip_ft = skip_ft;
	br_offloads->ingress_vlan_fg = vlan_fg;
	br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
	br_offloads->ingress_qinq_fg = qinq_fg;
	br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
	br_offloads->ingress_mac_fg = mac_fg;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(qinq_filter_fg);
err_qinq_filter_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_filter_fg);
err_vlan_filter_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(skip_ft);
err_skip_tbl:
	mlx5_destroy_flow_table(ingress_ft);
	return err;
}
526 
/* Destroy the shared ingress/skip tables and their flow groups in reverse
 * creation order, resetting the pointers in @br_offloads to NULL.
 */
static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	/* Groups are destroyed before the tables that contain them. */
	mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
	br_offloads->ingress_mac_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
	br_offloads->ingress_qinq_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
	br_offloads->ingress_qinq_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
	br_offloads->ingress_vlan_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
	br_offloads->ingress_vlan_fg = NULL;
	mlx5_destroy_flow_table(br_offloads->skip_ft);
	br_offloads->skip_ft = NULL;
	mlx5_destroy_flow_table(br_offloads->ingress_ft);
	br_offloads->ingress_ft = NULL;
}
545 
546 static struct mlx5_flow_handle *
547 mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
548 					struct mlx5_flow_table *skip_ft,
549 					struct mlx5_pkt_reformat *pkt_reformat);
550 
/* Create the per-bridge egress table with its VLAN/QinQ/MAC flow groups and,
 * when REMOVE_HEADER reformat is supported, the optional miss flow group,
 * packet reformat and miss rule.
 *
 * The miss-flow objects are best-effort: any failure while creating them is
 * only logged, already-created miss objects are released, and the bridge is
 * initialized without them (the corresponding fields stay NULL).
 */
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
				  struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
	struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
	struct mlx5_flow_handle *miss_handle = NULL;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_table *egress_ft;
	int err;

	egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
						 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
						 esw);
	if (IS_ERR(egress_ft))
		return PTR_ERR(egress_ft);

	vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
		miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
		if (IS_ERR(miss_fg)) {
			esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
				 PTR_ERR(miss_fg));
			/* Non-fatal: continue without miss flow support. */
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
		if (IS_ERR(miss_pkt_reformat)) {
			esw_warn(esw->dev,
				 "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
				 PTR_ERR(miss_pkt_reformat));
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
								      br_offloads->skip_ft,
								      miss_pkt_reformat);
		if (IS_ERR(miss_handle)) {
			esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
				 PTR_ERR(miss_handle));
			miss_handle = NULL;
			mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}
	}
skip_miss_flow:

	bridge->egress_ft = egress_ft;
	bridge->egress_vlan_fg = vlan_fg;
	bridge->egress_qinq_fg = qinq_fg;
	bridge->egress_mac_fg = mac_fg;
	bridge->egress_miss_fg = miss_fg;
	bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
	bridge->egress_miss_handle = miss_handle;
	return 0;

err_mac_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(egress_ft);
	return err;
}
639 
/* Release the per-bridge egress table and its flow groups. The miss-flow
 * objects may legitimately be NULL (see mlx5_esw_bridge_egress_table_init()),
 * hence the guards.
 */
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
	if (bridge->egress_miss_handle)
		mlx5_del_flow_rules(bridge->egress_miss_handle);
	if (bridge->egress_miss_pkt_reformat)
		mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
					     bridge->egress_miss_pkt_reformat);
	if (bridge->egress_miss_fg)
		mlx5_destroy_flow_group(bridge->egress_miss_fg);
	mlx5_destroy_flow_group(bridge->egress_mac_fg);
	mlx5_destroy_flow_group(bridge->egress_qinq_fg);
	mlx5_destroy_flow_group(bridge->egress_vlan_fg);
	mlx5_destroy_flow_table(bridge->egress_ft);
}
655 
/* Install an ingress FDB rule matching {source MAC @addr, source vport
 * metadata from @esw, optional VLAN} that forwards to the bridge egress
 * table and counts hits in @counter_id.
 *
 * When @vlan has a push reformat configured, the rule pushes the tag (and
 * applies the mark modify header) instead of matching on an existing tag;
 * otherwise a non-NULL @vlan adds a tag-type + VID match according to the
 * bridge's VLAN protocol.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
					     struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
					     struct mlx5_esw_bridge *bridge,
					     struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dests[2] = {};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact-match the full 48-bit source MAC. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport via metadata register C0 of @esw (may be
	 * the peer eswitch for cross-eswitch rules).
	 */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

	if (vlan && vlan->pkt_reformat_push) {
		/* Push VLAN tag onto untagged traffic and mark it. */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.pkt_reformat = vlan->pkt_reformat_push;
		flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
	} else if (vlan) {
		/* Match the already-present tag of the bridge's protocol. */
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	/* Forward to the bridge egress table and count the hit. */
	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = bridge->egress_ft;
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dests[1].counter_id = counter_id;

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
				     ARRAY_SIZE(dests));

	kvfree(rule_spec);
	return handle;
}
724 
725 static struct mlx5_flow_handle *
726 mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
727 				    struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
728 				    struct mlx5_esw_bridge *bridge)
729 {
730 	return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
731 							    bridge, bridge->br_offloads->esw);
732 }
733 
734 static struct mlx5_flow_handle *
735 mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
736 					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
737 					 struct mlx5_esw_bridge *bridge)
738 {
739 	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
740 	static struct mlx5_flow_handle *handle;
741 	struct mlx5_eswitch *peer_esw;
742 
743 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
744 	if (!peer_esw)
745 		return ERR_PTR(-ENODEV);
746 
747 	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
748 							      bridge, peer_esw);
749 
750 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
751 	return handle;
752 }
753 
/* Install an ingress filter rule: traffic from @vport_num with source MAC
 * @addr that carries a tag of the bridge's VLAN protocol (any VID) is
 * forwarded to the skip table instead of the bridge egress table.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
					   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = br_offloads->skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact-match the full 48-bit source MAC. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	/* Only tag presence is matched — no VID. */
	if (bridge->vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (bridge->vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
806 
/* Install an egress FDB rule forwarding traffic with destination MAC @addr
 * (and optional VLAN tag/VID match, with optional VLAN pop) to @vport_num.
 * With merged_eswitch support the destination also carries the owner's
 * vhca_id so cross-eswitch forwarding works.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
				   struct mlx5_esw_bridge_vlan *vlan,
				   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = vport_num,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	/* Rules towards the uplink declare a local-vport flow source when the
	 * device supports flow_source hints.
	 */
	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Exact-match the full 48-bit destination MAC. */
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (vlan) {
		/* Optionally pop the tag before forwarding to the vport. */
		if (vlan->pkt_reformat_pop) {
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act.pkt_reformat = vlan->pkt_reformat_pop;
		}

		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
873 
/* Create the egress miss rule: packets marked on ingress with the
 * "push VLAN" tunnel mark in metadata reg_c_1 are reformatted with
 * @pkt_reformat and forwarded to @skip_ft instead of being dropped.
 * Returns the flow handle or ERR_PTR; caller owns the handle.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
					struct mlx5_flow_table *skip_ft,
					struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
		.flags = FLOW_ACT_NO_APPEND,
		.pkt_reformat = pkt_reformat,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	/* Match reg_c_1 (masked by ESW_TUN_MASK) against the ingress-set
	 * push-VLAN mark.
	 */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);

	handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
908 
/* Allocate and initialize a bridge instance for @ifindex: per-bridge egress
 * table, FDB hashtable, default ageing time and 802.1Q protocol. The bridge
 * starts with refcount 1 and is linked into br_offloads->bridges.
 * Returns the bridge or ERR_PTR; on failure everything is unwound.
 */
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
						      struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	bridge->br_offloads = br_offloads;
	err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
	if (err)
		goto err_egress_tbl;

	err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
	if (err)
		goto err_fdb_ht;

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->ifindex = ifindex;
	bridge->refcnt = 1;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	bridge->vlan_proto = ETH_P_8021Q;
	list_add(&bridge->list, &br_offloads->bridges);

	return bridge;

err_fdb_ht:
	mlx5_esw_bridge_egress_table_cleanup(bridge);
err_egress_tbl:
	kvfree(bridge);
	return ERR_PTR(err);
}
943 
944 static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
945 {
946 	bridge->refcnt++;
947 }
948 
/* Drop a reference on @bridge and destroy it when the count hits zero.
 * Teardown order matters: egress table first, then unlink from the global
 * bridge list, then the FDB hashtable, then the memory. The shared ingress
 * table is released only once no bridges remain.
 */
static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
				struct mlx5_esw_bridge *bridge)
{
	if (--bridge->refcnt)
		return;

	mlx5_esw_bridge_egress_table_cleanup(bridge);
	list_del(&bridge->list);
	rhashtable_destroy(&bridge->fdb_ht);
	kvfree(bridge);

	if (list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
}
963 
/* Find the bridge for @ifindex, taking a reference, or create it.
 * Creating the first bridge lazily initializes the ingress table that is
 * shared by all bridges; if creation then fails and no other bridge exists,
 * the ingress table is torn down again. Must run under RTNL.
 * Returns the bridge or ERR_PTR.
 */
static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	ASSERT_RTNL();

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		if (bridge->ifindex == ifindex) {
			mlx5_esw_bridge_get(bridge);
			return bridge;
		}
	}

	if (!br_offloads->ingress_ft) {
		int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

		if (err)
			return ERR_PTR(err);
	}

	bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
	if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
	return bridge;
}
990 
991 static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
992 {
993 	return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
994 }
995 
/* Build the ports-xarray key from the identifiers stored in @port. */
static unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
}
1000 
/* Register @port in the offloads port xarray keyed by (vport, vhca id).
 * xa_insert() fails with -EBUSY if the key is already present.
 */
static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_offloads *br_offloads)
{
	return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
}
1006 
1007 static struct mlx5_esw_bridge_port *
1008 mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
1009 			    struct mlx5_esw_bridge_offloads *br_offloads)
1010 {
1011 	return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
1012 									       esw_owner_vhca_id));
1013 }
1014 
/* Remove @port from the offloads port xarray. Does not free the port. */
static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_offloads *br_offloads)
{
	xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
}
1020 
/* Re-notify the kernel bridge that this offloaded FDB entry is still in use
 * (SWITCHDEV_FDB_ADD_TO_BRIDGE), so the software bridge does not age it out.
 */
static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
{
	trace_mlx5_esw_bridge_fdb_entry_refresh(entry);

	mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
					   entry->key.vid,
					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1029 
/* Tear down one FDB entry. Order is significant: remove it from the
 * hashtable first so no new lookups can find it, then delete the egress,
 * optional VLAN-filter and ingress flow rules, destroy the ingress counter,
 * unlink from both the per-vlan and per-bridge lists, and free.
 */
static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
				  struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);

	rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	mlx5_del_flow_rules(entry->egress_handle);
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
	mlx5_del_flow_rules(entry->ingress_handle);
	mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
	list_del(&entry->vlan_list);
	list_del(&entry->list);
	kvfree(entry);
}
1046 
/* Notify switchdev that the entry is going away, then tear it down. */
static void
mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
					     struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_fdb_del_notify(entry);
	mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
1054 
/* Remove (with notification) every FDB entry of @bridge. Safe iteration
 * because cleanup unlinks entries from the list.
 */
static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
1062 
/* Look up a VLAN entry on @port by VLAN id; NULL if not configured. */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
{
	return xa_load(&port->vlans, vid);
}
1068 
1069 static int
1070 mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
1071 				 struct mlx5_eswitch *esw)
1072 {
1073 	struct {
1074 		__be16	h_vlan_proto;
1075 		__be16	h_vlan_TCI;
1076 	} vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
1077 	struct mlx5_pkt_reformat_params reformat_params = {};
1078 	struct mlx5_pkt_reformat *pkt_reformat;
1079 
1080 	if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
1081 	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
1082 	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
1083 	    offsetof(struct vlan_ethhdr, h_vlan_proto)) {
1084 		esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
1085 		return -EOPNOTSUPP;
1086 	}
1087 
1088 	reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
1089 	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
1090 	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
1091 	reformat_params.size = sizeof(vlan_hdr);
1092 	reformat_params.data = &vlan_hdr;
1093 	pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
1094 						  &reformat_params,
1095 						  MLX5_FLOW_NAMESPACE_FDB);
1096 	if (IS_ERR(pkt_reformat)) {
1097 		esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
1098 			 PTR_ERR(pkt_reformat));
1099 		return PTR_ERR(pkt_reformat);
1100 	}
1101 
1102 	vlan->pkt_reformat_push = pkt_reformat;
1103 	return 0;
1104 }
1105 
/* Release the VLAN-push reformat and clear the pointer so later cleanup
 * paths (which test pkt_reformat_push) do not double-free it.
 */
static void
mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
	vlan->pkt_reformat_push = NULL;
}
1112 
1113 static int
1114 mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1115 {
1116 	struct mlx5_pkt_reformat *pkt_reformat;
1117 
1118 	if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
1119 		esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
1120 		return -EOPNOTSUPP;
1121 	}
1122 
1123 	pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
1124 	if (IS_ERR(pkt_reformat)) {
1125 		esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
1126 			 PTR_ERR(pkt_reformat));
1127 		return PTR_ERR(pkt_reformat);
1128 	}
1129 
1130 	vlan->pkt_reformat_pop = pkt_reformat;
1131 	return 0;
1132 }
1133 
/* Release the VLAN-pop reformat and clear the pointer to keep the
 * conditional cleanup paths idempotent.
 */
static void
mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
	vlan->pkt_reformat_pop = NULL;
}
1140 
/* Allocate the modify-header action that writes the "VLAN was pushed on
 * ingress" mark into metadata reg_c_1 (at bit offset 8, covering the tunnel
 * opts/id field), so the egress miss rule can recognize such packets.
 * Stores the action in vlan->pkt_mod_hdr_push_mark on success.
 */
static int
mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *pkt_mod_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(set_action_in, action, offset, 8);
	MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
	MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);

	pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
	if (IS_ERR(pkt_mod_hdr))
		return PTR_ERR(pkt_mod_hdr);

	vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
	return 0;
}
1160 
/* Release the push-mark modify-header action and clear the pointer. */
static void
mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
	vlan->pkt_mod_hdr_push_mark = NULL;
}
1167 
/* Create the HW actions a VLAN entry needs according to its bridge flags:
 * PVID -> push reformat plus reg_c_1 push-mark; UNTAGGED -> pop reformat.
 * On failure, already-created actions are unwound; the cleanup calls are
 * guarded by pointer checks since either stage may not have run.
 */
static int
mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_vlan *vlan,
				     struct mlx5_eswitch *esw)
{
	int err;

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
		if (err)
			return err;

		err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
		if (err)
			goto err_vlan_push_mark;
	}

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
		if (err)
			goto err_vlan_pop;
	}

	return 0;

err_vlan_pop:
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
err_vlan_push_mark:
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
	return err;
}
1200 
/* Allocate a VLAN entry for @port, create its push/pop HW actions and
 * register it in the port's vlans xarray keyed by vid.
 * Returns the entry or ERR_PTR; on failure the actions and allocation are
 * unwound (cleanup calls are pointer-guarded as not all may exist).
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
			    struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_vlan *vlan;
	int err;

	vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	INIT_LIST_HEAD(&vlan->fdb_list);

	err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, vlan, esw);
	if (err)
		goto err_vlan_push_pop;

	err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
	if (err)
		goto err_xa_insert;

	trace_mlx5_esw_bridge_vlan_create(vlan);
	return vlan;

err_xa_insert:
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push_pop:
	kvfree(vlan);
	return ERR_PTR(err);
}
1238 
/* Unregister @vlan from the port's vlans xarray. Does not free it. */
static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	xa_erase(&port->vlans, vlan->vid);
}
1244 
/* Drop all FDB entries tied to @vlan (with notification) and release its
 * push/pop/push-mark HW actions. The VLAN object itself stays allocated so
 * the caller may either free it (vlan_cleanup) or recreate its actions
 * (port_vlans_recreate).
 */
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
				       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
1261 
/* Fully destroy a VLAN entry: flush its FDB entries and HW actions, remove
 * it from the port's xarray, then free it.
 */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
1271 
/* Destroy every VLAN entry configured on @port. */
static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
					     struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
1281 
/* Re-create the HW push/pop actions of every VLAN on @port with the
 * bridge's current VLAN protocol (used after a protocol change). Each VLAN
 * is flushed first; the VLAN objects themselves are kept. Stops and returns
 * the error on first failure, leaving that VLAN without actions.
 */
static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long i;
	int err;

	xa_for_each(&port->vlans, i, vlan) {
		mlx5_esw_bridge_vlan_flush(vlan, bridge);
		err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, vlan,
							   br_offloads->esw);
		if (err) {
			esw_warn(br_offloads->esw->dev,
				 "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
				 vlan->vid, bridge->vlan_proto, port->vport_num,
				 err);
			return err;
		}
	}

	return 0;
}
1305 
/* Re-create the VLAN HW actions on every port attached to @bridge
 * (after a VLAN protocol change). Returns the first error, if any.
 */
static int
mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port;
	unsigned long i;
	int err;

	xa_for_each(&br_offloads->ports, i, port) {
		/* The ports xarray spans all bridges; skip foreign ports. */
		if (port->bridge != bridge)
			continue;

		err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
		if (err)
			return err;
	}

	return 0;
}
1325 
/* Resolve the VLAN entry for (@vid, @vport_num, @esw_owner_vhca_id).
 * FDB work items run asynchronously on a workqueue, so the port or VLAN may
 * already be gone; that is expected and only logged at 'info' level.
 * Returns the VLAN or ERR_PTR(-EINVAL).
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
	if (!port) {
		/* FDB is added asynchronously on wq while port might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan) {
		/* FDB is added asynchronously on wq while vlan might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
			 vport_num);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}
1354 
1355 static struct mlx5_esw_bridge_fdb_entry *
1356 mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
1357 			   const unsigned char *addr, u16 vid)
1358 {
1359 	struct mlx5_esw_bridge_fdb_key key = {};
1360 
1361 	ether_addr_copy(key.addr, addr);
1362 	key.vid = vid;
1363 	return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
1364 }
1365 
/* Create a fully-offloaded FDB entry for (@addr, @vid) on @vport_num:
 * ingress flow counter, ingress rule (peer variant for remote eswitches),
 * optional VLAN-filter rule, egress rule, hashtable and list registration.
 * A stale entry with the same key is replaced. Returns the new entry or
 * ERR_PTR; on failure all acquired resources are unwound in reverse order.
 */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
			       const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
			       struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan = NULL;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_flow_handle *handle;
	struct mlx5_fc *counter;
	int err;

	/* With VLAN filtering on, a tagged entry must belong to a configured
	 * port VLAN; otherwise the offload is skipped.
	 */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
							esw);
		if (IS_ERR(vlan))
			return ERR_CAST(vlan);
	}

	/* Replace any existing entry with the same (MAC, vid) key. */
	entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
	if (entry)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	entry->dev = dev;
	entry->vport_num = vport_num;
	entry->esw_owner_vhca_id = esw_owner_vhca_id;
	entry->lastuse = jiffies;
	if (added_by_user)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
	if (peer)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;

	/* Counter on the ingress rule; its lastuse drives ageing/refresh in
	 * mlx5_esw_bridge_update().
	 */
	counter = mlx5_fc_create(esw->dev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_ingress_fc_create;
	}
	entry->ingress_counter = counter;

	handle = peer ?
		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
							 mlx5_fc_id(counter), bridge) :
		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
						    mlx5_fc_id(counter), bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;

	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
		handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
				 vport_num, err);
			goto err_ingress_filter_flow_create;
		}
		entry->filter_handle = handle;
	}

	handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
						    bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_egress_flow_create;
	}
	entry->egress_handle = handle;

	err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	if (err) {
		esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
		goto err_ht_init;
	}

	/* Link into the per-vlan list (so VLAN flush finds it) or self-init
	 * the node for untagged entries.
	 */
	if (vlan)
		list_add(&entry->vlan_list, &vlan->fdb_list);
	else
		INIT_LIST_HEAD(&entry->vlan_list);
	list_add(&entry->list, &bridge->fdb_list);

	trace_mlx5_esw_bridge_fdb_entry_init(entry);
	return entry;

err_ht_init:
	mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
err_ingress_filter_flow_create:
	mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
	mlx5_fc_destroy(esw->dev, entry->ingress_counter);
err_ingress_fc_create:
	kvfree(entry);
	return ERR_PTR(err);
}
1472 
1473 int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
1474 				    struct mlx5_esw_bridge_offloads *br_offloads)
1475 {
1476 	struct mlx5_esw_bridge_port *port;
1477 
1478 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1479 	if (!port)
1480 		return -EINVAL;
1481 
1482 	port->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
1483 	return 0;
1484 }
1485 
1486 int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
1487 				       struct mlx5_esw_bridge_offloads *br_offloads)
1488 {
1489 	struct mlx5_esw_bridge_port *port;
1490 	struct mlx5_esw_bridge *bridge;
1491 	bool filtering;
1492 
1493 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1494 	if (!port)
1495 		return -EINVAL;
1496 
1497 	bridge = port->bridge;
1498 	filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1499 	if (filtering == enable)
1500 		return 0;
1501 
1502 	mlx5_esw_bridge_fdb_flush(bridge);
1503 	if (enable)
1504 		bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1505 	else
1506 		bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
1507 
1508 	return 0;
1509 }
1510 
/* Change the bridge VLAN protocol (802.1Q or 802.1ad). All offloaded FDB
 * entries are flushed and VLAN HW actions recreated for the new protocol;
 * the recreate return value is intentionally ignored (best-effort — ports
 * that fail are left without actions and warned about).
 * Returns 0, -EINVAL if the vport has no bridge, -EOPNOTSUPP for an
 * unsupported protocol.
 */
int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
				   struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id,
					   br_offloads);
	if (!port)
		return -EINVAL;

	bridge = port->bridge;
	if (bridge->vlan_proto == proto)
		return 0;
	if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
		esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
		return -EOPNOTSUPP;
	}

	mlx5_esw_bridge_fdb_flush(bridge);
	bridge->vlan_proto = proto;
	mlx5_esw_bridge_vlans_recreate(bridge);

	return 0;
}
1536 
/* Allocate a bridge port object for (@vport_num, @esw_owner_vhca_id),
 * attach it to @bridge and register it in the offloads port xarray.
 * Returns 0 or a negative errno; on failure the allocation is freed.
 */
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_port *port;
	int err;

	port = kvzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->vport_num = vport_num;
	port->esw_owner_vhca_id = esw_owner_vhca_id;
	port->bridge = bridge;
	port->flags |= flags;
	xa_init(&port->vlans);
	err = mlx5_esw_bridge_port_insert(port, br_offloads);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
			 port->vport_num, port->esw_owner_vhca_id, err);
		goto err_port_insert;
	}
	trace_mlx5_esw_bridge_vport_init(port);

	return 0;

err_port_insert:
	kvfree(port);
	return err;
}
1569 
/* Detach @port from its bridge: drop the port's FDB entries (without
 * switchdev notification — see fdb_entry_cleanup vs notify_and_cleanup),
 * flush its VLANs, unregister and free the port, and release the port's
 * bridge reference (possibly destroying the bridge). Always returns 0.
 */
static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
					 struct mlx5_esw_bridge_port *port)
{
	u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
			mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);

	trace_mlx5_esw_bridge_vport_cleanup(port);
	mlx5_esw_bridge_port_vlans_flush(port, bridge);
	mlx5_esw_bridge_port_erase(port, br_offloads);
	kvfree(port);
	mlx5_esw_bridge_put(br_offloads, bridge);
	return 0;
}
1588 
/* Attach a vport to the bridge identified by @ifindex, creating the bridge
 * if needed. The bridge reference taken by lookup is handed to the port; it
 * is dropped again if port init fails. Returns 0 or a negative errno with
 * @extack describing the failure.
 */
static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
						 u16 flags,
						 struct mlx5_esw_bridge_offloads *br_offloads,
						 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
	if (IS_ERR(bridge)) {
		NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
		return PTR_ERR(bridge);
	}

	err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
		goto err_vport;
	}
	return 0;

err_vport:
	mlx5_esw_bridge_put(br_offloads, bridge);
	return err;
}
1614 
/* Public entry point: link a local (non-peer) vport to a bridge. */
int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
			       struct mlx5_esw_bridge_offloads *br_offloads,
			       struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0,
						     br_offloads, extack);
}
1622 
/* Public entry point: detach a vport from the bridge @ifindex. Validates
 * that the port exists and actually belongs to that bridge before cleanup.
 * Returns 0 or a negative errno with @extack set.
 */
int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge_offloads *br_offloads,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	int err;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port) {
		NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
		return -EINVAL;
	}
	if (port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
		return -EINVAL;
	}

	err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
	return err;
}
1645 
/* Link a vport that belongs to a peer eswitch. Peer ports are only
 * meaningful with the merged_eswitch capability; silently succeed otherwise.
 */
int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				    struct mlx5_esw_bridge_offloads *br_offloads,
				    struct netlink_ext_ack *extack)
{
	if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
		return 0;

	return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id,
						     MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
						     br_offloads, extack);
}
1657 
/* Unlink a peer-eswitch vport; identical to the regular unlink path. */
int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads,
					    extack);
}
1665 
/* Add (or update) a VLAN on a bridge port. If the vid already exists with
 * identical flags this is a no-op; if the flags differ, the old entry is
 * destroyed and recreated with the new flags.
 * Returns 0 or a negative errno with @extack set on creation failure.
 */
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
				  struct mlx5_esw_bridge_offloads *br_offloads,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;
		mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
	}

	vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
					   br_offloads->esw);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
		return PTR_ERR(vlan);
	}
	return 0;
}
1692 
/* Remove a VLAN from a bridge port. Silently ignores unknown ports or
 * vids — deletion of something that does not exist is not an error.
 */
void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
				   struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan)
		return;
	mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
1708 
/* Mark an offloaded FDB entry as recently used (resets its ageing clock)
 * in response to a switchdev notification. Peer ports are skipped: their
 * entries are refreshed on their owning eswitch. A missing entry is only
 * debug-logged since notifications race with entry removal.
 */
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				     struct mlx5_esw_bridge_offloads *br_offloads,
				     struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_debug(br_offloads->esw->dev,
			  "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			  fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	entry->lastuse = jiffies;
}
1732 
/* Offload an FDB entry announced via switchdev. User-added entries are
 * acknowledged with SWITCHDEV_FDB_OFFLOADED; dynamically-learned local
 * entries are taken over with SWITCHDEV_FDB_ADD_TO_BRIDGE so the software
 * bridge does not age them out. Peer entries get neither notification.
 * Failures are silent here (entry init already warned).
 */
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr,
					       fdb_info->vid, fdb_info->added_by_user,
					       port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
					       br_offloads->esw, bridge);
	if (IS_ERR(entry))
		return;

	if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_OFFLOADED);
	else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER))
		/* Take over dynamic entries to prevent kernel bridge from aging them out. */
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1761 
1762 void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1763 				struct mlx5_esw_bridge_offloads *br_offloads,
1764 				struct switchdev_notifier_fdb_info *fdb_info)
1765 {
1766 	struct mlx5_eswitch *esw = br_offloads->esw;
1767 	struct mlx5_esw_bridge_fdb_entry *entry;
1768 	struct mlx5_esw_bridge_port *port;
1769 	struct mlx5_esw_bridge *bridge;
1770 
1771 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1772 	if (!port)
1773 		return;
1774 
1775 	bridge = port->bridge;
1776 	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
1777 	if (!entry) {
1778 		esw_warn(esw->dev,
1779 			 "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
1780 			 fdb_info->addr, fdb_info->vid, vport_num);
1781 		return;
1782 	}
1783 
1784 	mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
1785 }
1786 
1787 void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
1788 {
1789 	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1790 	struct mlx5_esw_bridge *bridge;
1791 
1792 	list_for_each_entry(bridge, &br_offloads->bridges, list) {
1793 		list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
1794 			unsigned long lastuse =
1795 				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
1796 
1797 			if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
1798 				continue;
1799 
1800 			if (time_after(lastuse, entry->lastuse))
1801 				mlx5_esw_bridge_fdb_entry_refresh(entry);
1802 			else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
1803 				 time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
1804 				mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
1805 		}
1806 	}
1807 }
1808 
1809 static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
1810 {
1811 	struct mlx5_esw_bridge_port *port;
1812 	unsigned long i;
1813 
1814 	xa_for_each(&br_offloads->ports, i, port)
1815 		mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1816 
1817 	WARN_ONCE(!list_empty(&br_offloads->bridges),
1818 		  "Cleaning up bridge offloads while still having bridges attached\n");
1819 }
1820 
1821 struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
1822 {
1823 	struct mlx5_esw_bridge_offloads *br_offloads;
1824 
1825 	ASSERT_RTNL();
1826 
1827 	br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
1828 	if (!br_offloads)
1829 		return ERR_PTR(-ENOMEM);
1830 
1831 	INIT_LIST_HEAD(&br_offloads->bridges);
1832 	xa_init(&br_offloads->ports);
1833 	br_offloads->esw = esw;
1834 	esw->br_offloads = br_offloads;
1835 
1836 	return br_offloads;
1837 }
1838 
1839 void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
1840 {
1841 	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
1842 
1843 	ASSERT_RTNL();
1844 
1845 	if (!br_offloads)
1846 		return;
1847 
1848 	mlx5_esw_bridge_flush(br_offloads);
1849 	WARN_ON(!xa_empty(&br_offloads->ports));
1850 
1851 	esw->br_offloads = NULL;
1852 	kvfree(br_offloads);
1853 }
1854