// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "lib/devcom.h"
#include "bridge.h"
#include "eswitch.h"
#include "bridge_priv.h"
#include "diag/bridge_tracepoint.h"

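/* MDB entries are keyed by {MAC address, VID} and track the set of attached
 * bridge ports in a per-entry xarray.
 */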
static const struct rhashtable_params mdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_mdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, ht_node),
	.automatic_shrinking = true,
};

int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->mdb_list);
	return rhashtable_init(&bridge->mdb_ht, &mdb_ht_params);
}

void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge)
{
	rhashtable_destroy(&bridge->mdb_ht);
}

static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_mdb_port_lookup(struct mlx5_esw_bridge_port *port,
				struct mlx5_esw_bridge_mdb_entry *entry)
{
	return xa_load(&entry->ports, mlx5_esw_bridge_port_key(port));
}

static int mlx5_esw_bridge_mdb_port_insert(struct mlx5_esw_bridge_port *port,
					   struct mlx5_esw_bridge_mdb_entry *entry)
{
	int err = xa_insert(&entry->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);

	if (!err)
		entry->num_ports++;
	return err;
}

static void mlx5_esw_bridge_mdb_port_remove(struct mlx5_esw_bridge_port *port,
					    struct mlx5_esw_bridge_mdb_entry *entry)
{
	xa_erase(&entry->ports, mlx5_esw_bridge_port_key(port));
	entry->num_ports--;
}

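/* Replicate matching multicast traffic by forwarding it to the multicast
 * flow table of every port attached to the entry. FLOW_ACT_IGNORE_FLOW_LEVEL
 * lets the rule target the per-port tables irrespective of the usual
 * flow-table level ordering.
 */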
static struct mlx5_flow_handle *
mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_mdb_entry *entry,
				struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL,
	};
	int num_dests = entry->num_ports, i = 0;
	struct mlx5_flow_destination *dests;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;
	unsigned long idx;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	dests = kvcalloc(num_dests, sizeof(*dests), GFP_KERNEL);
	if (!dests) {
		kvfree(rule_spec);
		return ERR_PTR(-ENOMEM);
	}

	xa_for_each(&entry->ports, idx, port) {
		dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dests[i].ft = port->mcast.ft;
		if (port->vport_num == MLX5_VPORT_UPLINK)
			dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT;
		i++;
	}

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, entry->key.addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (entry->key.vid) {
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 entry->key.vid);
	}

	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, dests, num_dests);

	kvfree(dests);
	kvfree(rule_spec);
	return handle;
}

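/* Recreate the egress replication rule from the current set of attached
 * ports. The stale rule is deleted even when creating the replacement
 * fails, which leaves the entry without an offloaded rule until the next
 * port update.
 */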
static int
mlx5_esw_bridge_port_mdb_offload(struct mlx5_esw_bridge_port *port,
				 struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_flow_handle *handle;

	handle = mlx5_esw_bridge_mdb_flow_create(port->esw_owner_vhca_id, entry, port->bridge);
	if (entry->egress_handle) {
		mlx5_del_flow_rules(entry->egress_handle);
		entry->egress_handle = NULL;
	}
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	entry->egress_handle = handle;
	return 0;
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_mdb_lookup(struct mlx5_esw_bridge *bridge,
			   const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge_mdb_key key = {};

	ether_addr_copy(key.addr, addr);
	key.vid = vid;
	return rhashtable_lookup_fast(&bridge->mdb_ht, &key, mdb_ht_params);
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_port_mdb_entry_init(struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	xa_init(&entry->ports);
	err = rhashtable_insert_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	if (err)
		goto err_ht_insert;

	list_add(&entry->list, &bridge->mdb_list);

	return entry;

err_ht_insert:
	xa_destroy(&entry->ports);
	kvfree(entry);
	return ERR_PTR(err);
}

static void mlx5_esw_bridge_port_mdb_entry_cleanup(struct mlx5_esw_bridge *bridge,
						   struct mlx5_esw_bridge_mdb_entry *entry)
{
	if (entry->egress_handle)
		mlx5_del_flow_rules(entry->egress_handle);
	list_del(&entry->list);
	rhashtable_remove_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	xa_destroy(&entry->ports);
	kvfree(entry);
}

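/* Attach a port to the MDB entry with the given address and VID, creating
 * the entry on first use. Offload errors are only logged since the entry
 * remains valid for any other ports using it.
 */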
int mlx5_esw_bridge_port_mdb_attach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return -EOPNOTSUPP;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (entry) {
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid=%u,vport=%u)\n",
				 addr, vid, port->vport_num);
			return 0;
		}
	} else {
		entry = mlx5_esw_bridge_port_mdb_entry_init(port, addr, vid);
		if (IS_ERR(entry)) {
			err = PTR_ERR(entry);
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
				 addr, vid, port->vport_num, err);
			return err;
		}
	}

	err = mlx5_esw_bridge_mdb_port_insert(port, entry);
	if (err) {
		if (!entry->num_ports)
			mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry); /* new mdb entry */
		esw_warn(bridge->br_offloads->esw->dev,
			 "MDB attach failed to insert port (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);
		return err;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);

	trace_mlx5_esw_bridge_port_mdb_attach(dev, entry);
	return 0;
}

static void mlx5_esw_bridge_port_mdb_entry_detach(struct mlx5_esw_bridge_port *port,
						  struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	mlx5_esw_bridge_mdb_port_remove(port, entry);
	if (!entry->num_ports) {
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
		return;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 entry->key.addr, entry->key.vid, port->vport_num, err);
}

void mlx5_esw_bridge_port_mdb_detach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				     const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (!entry) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not found (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	if (!mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not attached to the port (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	trace_mlx5_esw_bridge_port_mdb_detach(dev, entry);
	mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (entry->key.vid == vlan->vid && mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

static void mlx5_esw_bridge_port_mdb_flush(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
}

static int mlx5_esw_bridge_port_mcast_fts_init(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft;

	mcast_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE,
						MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
						esw);
	if (IS_ERR(mcast_ft))
		return PTR_ERR(mcast_ft);

	port->mcast.ft = mcast_ft;
	return 0;
}

static void mlx5_esw_bridge_port_mcast_fts_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.ft)
		mlx5_destroy_flow_table(port->mcast.ft);
	port->mcast.ft = NULL;
}

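/* Each per-port multicast table is populated with four flow groups: a
 * filter group matching on source port metadata, VLAN and QinQ groups
 * matching on the outer VLAN tag, and a catch-all forward group.
 */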
static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create filter flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					   struct mlx5_eswitch *esw,
					   struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge mcast table (err=%pe)\n",
			 vlan_proto, fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_qinq_fg_create(struct mlx5_eswitch *esw,
				     struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_fwd_fg_create(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create forward flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static int mlx5_esw_bridge_port_mcast_fgs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_group *fwd_fg, *qinq_fg, *vlan_fg, *filter_fg;
	struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft = port->mcast.ft;
	int err;

	filter_fg = mlx5_esw_bridge_mcast_filter_fg_create(esw, mcast_ft);
	if (IS_ERR(filter_fg))
		return PTR_ERR(filter_fg);

	vlan_fg = mlx5_esw_bridge_mcast_vlan_fg_create(esw, mcast_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_mcast_qinq_fg_create(esw, mcast_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	fwd_fg = mlx5_esw_bridge_mcast_fwd_fg_create(esw, mcast_ft);
	if (IS_ERR(fwd_fg)) {
		err = PTR_ERR(fwd_fg);
		goto err_fwd_fg;
	}

	port->mcast.filter_fg = filter_fg;
	port->mcast.vlan_fg = vlan_fg;
	port->mcast.qinq_fg = qinq_fg;
	port->mcast.fwd_fg = fwd_fg;

	return 0;

err_fwd_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_group(filter_fg);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fgs_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.fwd_fg)
		mlx5_destroy_flow_group(port->mcast.fwd_fg);
	port->mcast.fwd_fg = NULL;
	if (port->mcast.qinq_fg)
		mlx5_destroy_flow_group(port->mcast.qinq_fg);
	port->mcast.qinq_fg = NULL;
	if (port->mcast.vlan_fg)
		mlx5_destroy_flow_group(port->mcast.vlan_fg);
	port->mcast.vlan_fg = NULL;
	if (port->mcast.filter_fg)
		mlx5_destroy_flow_group(port->mcast.filter_fg);
	port->mcast.filter_fg = NULL;
}

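/* Drop packets whose reg_c_0 metadata matches the vport of this port so
 * that multicast traffic is never replicated back to its ingress port.
 */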
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_flow_with_esw_create(struct mlx5_esw_bridge_port *port,
					   struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_DROP,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, port->vport_num));

	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, NULL, 0);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_mcast_flow_with_esw_create(port, port->bridge->br_offloads->esw);
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
	struct mlx5_eswitch *tmp, *peer_esw = NULL;
	struct mlx5_flow_handle *handle;

	if (!mlx5_devcom_for_each_peer_begin(devcom))
		return ERR_PTR(-ENODEV);

	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
			peer_esw = tmp;
			break;
		}
	}

	if (!peer_esw) {
		handle = ERR_PTR(-ENODEV);
		goto out;
	}

	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);

out:
	mlx5_devcom_for_each_peer_end(devcom);
	return handle;
}

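/* Forward multicast traffic tagged with the given VLAN to the port's vport,
 * popping the outer VLAN header on egress.
 */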
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	flow_act.pkt_reformat = vlan->pkt_reformat_pop;

	if (vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid, vlan->vid);

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				    struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_handle *handle;

	if (!(port->bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	handle = mlx5_esw_bridge_mcast_vlan_flow_create(vlan_proto, port, vlan);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	vlan->mcast_handle = handle;
	return 0;
}

void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	if (vlan->mcast_handle)
		mlx5_del_flow_rules(vlan->mcast_handle);
	vlan->mcast_handle = NULL;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static int mlx5_esw_bridge_port_mcast_fhs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_handle *filter_handle, *fwd_handle;
	struct mlx5_esw_bridge_vlan *vlan, *failed;
	unsigned long index;
	int err;

	filter_handle = (port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) ?
		mlx5_esw_bridge_mcast_filter_flow_peer_create(port) :
		mlx5_esw_bridge_mcast_filter_flow_create(port);
	if (IS_ERR(filter_handle))
		return PTR_ERR(filter_handle);

	fwd_handle = mlx5_esw_bridge_mcast_fwd_flow_create(port);
	if (IS_ERR(fwd_handle)) {
		err = PTR_ERR(fwd_handle);
		goto err_fwd;
	}

	xa_for_each(&port->vlans, index, vlan) {
		err = mlx5_esw_bridge_vlan_mcast_init(port->bridge->vlan_proto, port, vlan);
		if (err) {
			failed = vlan;
			goto err_vlan;
		}
	}

	port->mcast.filter_handle = filter_handle;
	port->mcast.fwd_handle = fwd_handle;

	return 0;

err_vlan:
	xa_for_each(&port->vlans, index, vlan) {
		if (vlan == failed)
			break;

		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
	}
	mlx5_del_flow_rules(fwd_handle);
err_fwd:
	mlx5_del_flow_rules(filter_handle);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fhs_cleanup(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);

	if (port->mcast.fwd_handle)
		mlx5_del_flow_rules(port->mcast.fwd_handle);
	port->mcast.fwd_handle = NULL;
	if (port->mcast.filter_handle)
		mlx5_del_flow_rules(port->mcast.filter_handle);
	port->mcast.filter_handle = NULL;
}

int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	err = mlx5_esw_bridge_port_mcast_fts_init(port, bridge);
	if (err)
		return err;

	err = mlx5_esw_bridge_port_mcast_fgs_init(port);
	if (err)
		goto err_fgs;

	err = mlx5_esw_bridge_port_mcast_fhs_init(port);
	if (err)
		goto err_fhs;
	return err;

err_fhs:
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
err_fgs:
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
	return err;
}

void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port)
{
	mlx5_esw_bridge_port_mdb_flush(port);
	mlx5_esw_bridge_port_mcast_fhs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
}

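/* IGMP and MLD control packets must not be forwarded by the offloaded
 * bridge. Flow handles in the shared ingress table redirect them to the
 * skip table, which bypasses bridge processing.
 */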
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_igmp_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_protocol);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create IGMP flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mld_fg_create(struct mlx5_eswitch *esw,
				      struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	if (!(MLX5_CAP_GEN(esw->dev, flex_parser_protocols) & MLX5_FLEX_PROTO_ICMPV6)) {
		esw_warn(esw->dev,
			 "Can't create MLD flow group due to missing hardware ICMPv6 parsing support\n");
		return NULL;
	}

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters_3.icmpv6_type);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MLD flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static int
mlx5_esw_bridge_ingress_mcast_fgs_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_group *igmp_fg, *mld_fg;

	igmp_fg = mlx5_esw_bridge_ingress_igmp_fg_create(esw, ingress_ft);
	if (IS_ERR(igmp_fg))
		return PTR_ERR(igmp_fg);

	mld_fg = mlx5_esw_bridge_ingress_mld_fg_create(esw, ingress_ft);
	if (IS_ERR(mld_fg)) {
		mlx5_destroy_flow_group(igmp_fg);
		return PTR_ERR(mld_fg);
	}

	br_offloads->ingress_igmp_fg = igmp_fg;
	br_offloads->ingress_mld_fg = mld_fg;
	return 0;
}

static void
mlx5_esw_bridge_ingress_mcast_fgs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->ingress_mld_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_mld_fg);
	br_offloads->ingress_mld_fg = NULL;
	if (br_offloads->ingress_igmp_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_igmp_fg);
	br_offloads->ingress_igmp_fg = NULL;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_igmp_fh_create(struct mlx5_flow_table *ingress_ft,
				       struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 4);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_protocol, IPPROTO_IGMP);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_mld_fh_create(u8 type, struct mlx5_flow_table *ingress_ft,
				      struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 6);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, misc_parameters_3.icmpv6_type);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_3.icmpv6_type, type);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static int
mlx5_esw_bridge_ingress_mcast_fhs_create(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_handle *igmp_handle, *mld_query_handle, *mld_report_handle,
		*mld_done_handle;
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft,
		*skip_ft = br_offloads->skip_ft;
	int err;

	igmp_handle = mlx5_esw_bridge_ingress_igmp_fh_create(ingress_ft, skip_ft);
	if (IS_ERR(igmp_handle))
		return PTR_ERR(igmp_handle);

	if (br_offloads->ingress_mld_fg) {
		mld_query_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_QUERY,
									 ingress_ft,
									 skip_ft);
		if (IS_ERR(mld_query_handle)) {
			err = PTR_ERR(mld_query_handle);
			goto err_mld_query;
		}

		mld_report_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REPORT,
									  ingress_ft,
									  skip_ft);
		if (IS_ERR(mld_report_handle)) {
			err = PTR_ERR(mld_report_handle);
			goto err_mld_report;
		}

		mld_done_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REDUCTION,
									ingress_ft,
									skip_ft);
		if (IS_ERR(mld_done_handle)) {
			err = PTR_ERR(mld_done_handle);
			goto err_mld_done;
		}
	} else {
		mld_query_handle = NULL;
		mld_report_handle = NULL;
		mld_done_handle = NULL;
	}

	br_offloads->igmp_handle = igmp_handle;
	br_offloads->mld_query_handle = mld_query_handle;
	br_offloads->mld_report_handle = mld_report_handle;
	br_offloads->mld_done_handle = mld_done_handle;

	return 0;

err_mld_done:
	mlx5_del_flow_rules(mld_report_handle);
err_mld_report:
	mlx5_del_flow_rules(mld_query_handle);
err_mld_query:
	mlx5_del_flow_rules(igmp_handle);
	return err;
}

static void
mlx5_esw_bridge_ingress_mcast_fhs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->mld_done_handle)
		mlx5_del_flow_rules(br_offloads->mld_done_handle);
	br_offloads->mld_done_handle = NULL;
	if (br_offloads->mld_report_handle)
		mlx5_del_flow_rules(br_offloads->mld_report_handle);
	br_offloads->mld_report_handle = NULL;
	if (br_offloads->mld_query_handle)
		mlx5_del_flow_rules(br_offloads->mld_query_handle);
	br_offloads->mld_query_handle = NULL;
	if (br_offloads->igmp_handle)
		mlx5_del_flow_rules(br_offloads->igmp_handle);
	br_offloads->igmp_handle = NULL;
}

static int mlx5_esw_bridge_mcast_init(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port, *failed;
	unsigned long i;
	int err;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		err = mlx5_esw_bridge_port_mcast_init(port);
		if (err) {
			failed = port;
			goto err_port;
		}
	}
	return 0;

err_port:
	xa_for_each(&br_offloads->ports, i, port) {
		if (port == failed)
			break;
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
	return err;
}

static void mlx5_esw_bridge_mcast_cleanup(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port;
	unsigned long i;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
}

static int mlx5_esw_bridge_mcast_global_enable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	int err;

	if (br_offloads->ingress_igmp_fg)
		return 0; /* already enabled by another bridge */

	err = mlx5_esw_bridge_ingress_mcast_fgs_init(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flow groups (err=%d)\n",
			 err);
		return err;
	}

	err = mlx5_esw_bridge_ingress_mcast_fhs_create(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flows (err=%d)\n",
			 err);
		goto err_fhs;
	}

	return 0;

err_fhs:
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
	return err;
}

static void mlx5_esw_bridge_mcast_global_disable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *br;

	list_for_each_entry(br, &br_offloads->bridges, list) {
		/* Ingress table is global, so only disable snooping when all
		 * bridges on esw have multicast disabled.
		 */
		if (br->flags & MLX5_ESW_BRIDGE_MCAST_FLAG)
			return;
	}

	mlx5_esw_bridge_ingress_mcast_fhs_cleanup(br_offloads);
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
}

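/* Enable multicast offload for the bridge: the shared ingress IGMP/MLD
 * groups and flows are created once per eswitch (reused when another bridge
 * already enabled them), then each of the bridge's ports gets its own
 * multicast table, groups and flows.
 */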
int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge)
{
	int err;

	err = mlx5_esw_bridge_mcast_global_enable(bridge->br_offloads);
	if (err)
		return err;

	bridge->flags |= MLX5_ESW_BRIDGE_MCAST_FLAG;

	err = mlx5_esw_bridge_mcast_init(bridge);
	if (err) {
		esw_warn(bridge->br_offloads->esw->dev, "Failed to enable multicast (err=%d)\n",
			 err);
		bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
		mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
	}
	return err;
}

void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_mcast_cleanup(bridge);
	bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
	mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
}