1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
struct mlxsw_sp_bridge_ops;

/* Per-ASIC bridge state: all offloaded bridge devices, the FDB notification
 * polling work and the shared FDB ageing time.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Only a single VLAN-aware bridge is supported at a time. */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	/* Per bridge-model operations: 802.1Q, 802.1D and 802.1ad. */
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};
46 
/* One offloaded bridge netdevice and the state tracked for it. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;		/* Member of bridge->bridges_list. */
	struct list_head ports_list;	/* Enslaved bridge ports. */
	struct list_head mdb_list;	/* MDB entries, also hashed below. */
	struct rhashtable mdb_ht;
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	/* Model-specific callbacks, chosen at creation time. */
	const struct mlxsw_sp_bridge_ops *ops;
};
58 
/* One port (or LAG) enslaved to an offloaded bridge. Reference counted,
 * since several port VLANs of the same netdevice can share it.
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* Member of bridge_device->ports_list. */
	struct list_head vlans_list;	/* Bridge VLANs of this port. */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* BR_* bridge port flags. */
	bool mrouter;
	bool lagged;
	/* Identity depends on whether the port is a LAG member. */
	union {
		u16 lag_id;
		u16 system_port;
	};
};
74 
/* A VLAN configured on a bridge port; groups the port VLANs mapped to it. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* Member of bridge_port->vlans_list. */
	struct list_head port_vlan_list; /* Port VLANs, via bridge_vlan_node. */
	u16 vid;
};
80 
/* Bridge-model specific operations (802.1Q / 802.1D / 802.1ad). */
struct mlxsw_sp_bridge_ops {
	/* Enslave / release a port to / from the bridge. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Offload a VXLAN device enslaved to the bridge. */
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	/* FID management: get (create if needed), lookup, and VID mapping. */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
101 
/* ASIC-generation specific switchdev initialization hook. */
struct mlxsw_sp_switchdev_ops {
	void (*init)(struct mlxsw_sp *mlxsw_sp);
};
105 
/* Hash key of an MDB entry: multicast MAC within a filtering identifier. */
struct mlxsw_sp_mdb_entry_key {
	unsigned char addr[ETH_ALEN];
	u16 fid;
};
110 
/* One multicast group entry, kept both in a per-bridge list and hash. */
struct mlxsw_sp_mdb_entry {
	struct list_head list;		/* Member of bridge_device->mdb_list. */
	struct rhash_head ht_node;	/* Keyed by 'key' in mdb_ht. */
	struct mlxsw_sp_mdb_entry_key key;
	u16 mid;			/* Hardware multicast identifier. */
	struct list_head ports_list;	/* Member ports (mdb_entry_port). */
	u16 ports_count;
	bool in_hw;
	/* NOTE(review): 'ports_in_mid' is not touched by any code visible
	 * here — possibly a leftover; confirm against the rest of the file.
	 */
	unsigned long *ports_in_mid; /* bits array */
};
121 
/* Reference-counted membership of a local port in an MDB entry. */
struct mlxsw_sp_mdb_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
	refcount_t refcount;
	bool mrouter;
};
128 
/* rhashtable layout for MDB entries: full {MAC, FID} tuple as the key. */
static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
};
134 
/* Forward declarations for FDB/MDB helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device
				   *bridge_device, bool mc_enabled);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
153 
154 static struct mlxsw_sp_bridge_device *
155 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
156 			    const struct net_device *br_dev)
157 {
158 	struct mlxsw_sp_bridge_device *bridge_device;
159 
160 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
161 		if (bridge_device->dev == br_dev)
162 			return bridge_device;
163 
164 	return NULL;
165 }
166 
167 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
168 					 const struct net_device *br_dev)
169 {
170 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
171 }
172 
/* Walk callback: destroy the RIF of one upper device. Always returns 0 so
 * netdev_walk_all_upper_dev_rcu() keeps walking.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    struct netdev_nested_priv *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv->data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
181 
/* Destroy the RIF of the bridge device itself and of every device stacked
 * on top of it (e.g. VLAN uppers of the bridge).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)mlxsw_sp,
	};

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      &priv);
}
194 
/* Offload every running VXLAN device already enslaved to 'br_dev'. On
 * failure, unroll the devices joined so far: re-walk the lower devices in
 * the same order and stop at the one that failed ('stop_dev').
 */
static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
					     struct net_device *br_dev,
					     struct netlink_ext_ack *extack)
{
	struct net_device *dev, *stop_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
							 br_dev, dev, 0,
							 extack);
			if (err) {
				stop_dev = dev;
				goto err_vxlan_join;
			}
		}
	}

	return 0;

err_vxlan_join:
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			if (stop_dev == dev)
				break;
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
		}
	}
	return err;
}
227 
228 static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
229 					      struct net_device *br_dev)
230 {
231 	struct net_device *dev;
232 	struct list_head *iter;
233 
234 	netdev_for_each_lower_dev(br_dev, dev, iter) {
235 		if (netif_is_vxlan(dev) && netif_running(dev))
236 			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
237 	}
238 }
239 
240 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
241 					      bool no_delay)
242 {
243 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
244 	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
245 
246 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
247 			       msecs_to_jiffies(interval));
248 }
249 
/* Create a device record for a newly offloaded bridge.
 *
 * Chooses the model-specific ops (802.1Q / 802.1ad / 802.1D), starts the
 * FDB polling work when this is the first bridge, and replays the
 * configuration of any VXLAN devices that were enslaved before the bridge
 * became offloaded. Returns the new record or ERR_PTR().
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);
	int err;

	/* The device supports a single VLAN-aware bridge. */
	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_mdb_rhashtable_init;

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		u16 proto;

		bridge->vlan_enabled_exists = true;
		/* The VLAN protocol selects between 802.1Q and 802.1ad ops. */
		br_vlan_get_proto(br_dev, &proto);
		if (proto == ETH_P_8021AD)
			bridge_device->ops = bridge->bridge_8021ad_ops;
		else
			bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mdb_list);

	/* First offloaded bridge: start polling the device for FDB
	 * notifications.
	 */
	if (list_empty(&bridge->bridges_list))
		mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
	list_add(&bridge_device->list, &bridge->bridges_list);

	/* It is possible we already have VXLAN devices enslaved to the bridge.
	 * In which case, we need to replay their configuration as if they were
	 * just now enslaved to the bridge.
	 */
	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
	if (err)
		goto err_vxlan_init;

	return bridge_device;

err_vxlan_init:
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	rhashtable_destroy(&bridge_device->mdb_ht);
err_mdb_rhashtable_init:
	kfree(bridge_device);
	return ERR_PTR(err);
}
316 
/* Tear down an offloaded bridge record: unoffload its VXLAN devices,
 * destroy the RIFs of the bridge and its uppers, and stop the FDB polling
 * work when this was the last bridge. Ports and MDB entries must already
 * be gone.
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (list_empty(&bridge->bridges_list))
		cancel_delayed_work(&bridge->fdb_notify.dw);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mdb_list));
	rhashtable_destroy(&bridge_device->mdb_ht);
	kfree(bridge_device);
}
334 
/* Return the record tracking 'br_dev', creating it on first use. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev,
			   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		bridge_device = mlxsw_sp_bridge_device_create(bridge, br_dev,
							      extack);

	return bridge_device;
}
348 
349 static void
350 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
351 			   struct mlxsw_sp_bridge_device *bridge_device)
352 {
353 	if (list_empty(&bridge_device->ports_list))
354 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
355 }
356 
357 static struct mlxsw_sp_bridge_port *
358 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
359 			    const struct net_device *brport_dev)
360 {
361 	struct mlxsw_sp_bridge_port *bridge_port;
362 
363 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
364 		if (bridge_port->dev == brport_dev)
365 			return bridge_port;
366 	}
367 
368 	return NULL;
369 }
370 
371 struct mlxsw_sp_bridge_port *
372 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
373 			  struct net_device *brport_dev)
374 {
375 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
376 	struct mlxsw_sp_bridge_device *bridge_device;
377 
378 	if (!br_dev)
379 		return NULL;
380 
381 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
382 	if (!bridge_device)
383 		return NULL;
384 
385 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
386 }
387 
/* Allocate and initialize a bridge port record for 'brport_dev' and mark
 * the port as offloaded towards the bridge driver. Returns the new record
 * with a reference count of one, or ERR_PTR().
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return ERR_PTR(-ENOMEM);

	/* NOTE(review): the result is dereferenced without a NULL check —
	 * assumes callers only create bridge ports for devices with a lower
	 * mlxsw port; confirm against the call sites.
	 */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	/* Mirror the bridge's defaults for a newly enslaved port. */
	bridge_port->stp_state = BR_STATE_DISABLED;
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
					    NULL, NULL, NULL, false, extack);
	if (err)
		goto err_switchdev_offload;

	return bridge_port;

err_switchdev_offload:
	list_del(&bridge_port->list);
	kfree(bridge_port);
	return ERR_PTR(err);
}
428 
/* Unoffload and free a bridge port record; its VLANs must be gone. */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
437 
/* Take a reference on the bridge port for 'brport_dev', creating both the
 * bridge device record and the port record on first use. On failure the
 * bridge device reference taken here is dropped again.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* Fast path: the port is already tracked; just bump the count. */
	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
						  extack);
	if (IS_ERR(bridge_port)) {
		err = PTR_ERR(bridge_port);
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}
471 
472 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
473 				     struct mlxsw_sp_bridge_port *bridge_port)
474 {
475 	struct mlxsw_sp_bridge_device *bridge_device;
476 
477 	if (--bridge_port->ref_count != 0)
478 		return;
479 	bridge_device = bridge_port->bridge_device;
480 	mlxsw_sp_bridge_port_destroy(bridge_port);
481 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
482 }
483 
484 static struct mlxsw_sp_port_vlan *
485 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
486 				  const struct mlxsw_sp_bridge_device *
487 				  bridge_device,
488 				  u16 vid)
489 {
490 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
491 
492 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
493 			    list) {
494 		if (!mlxsw_sp_port_vlan->bridge_port)
495 			continue;
496 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
497 		    bridge_device)
498 			continue;
499 		if (bridge_device->vlan_enabled &&
500 		    mlxsw_sp_port_vlan->vid != vid)
501 			continue;
502 		return mlxsw_sp_port_vlan;
503 	}
504 
505 	return NULL;
506 }
507 
508 static struct mlxsw_sp_port_vlan*
509 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
510 			       u16 fid_index)
511 {
512 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
513 
514 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
515 			    list) {
516 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
517 
518 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
519 			return mlxsw_sp_port_vlan;
520 	}
521 
522 	return NULL;
523 }
524 
525 static struct mlxsw_sp_bridge_vlan *
526 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
527 			  u16 vid)
528 {
529 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
530 
531 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
532 		if (bridge_vlan->vid == vid)
533 			return bridge_vlan;
534 	}
535 
536 	return NULL;
537 }
538 
539 static struct mlxsw_sp_bridge_vlan *
540 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
541 {
542 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
543 
544 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
545 	if (!bridge_vlan)
546 		return NULL;
547 
548 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
549 	bridge_vlan->vid = vid;
550 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
551 
552 	return bridge_vlan;
553 }
554 
555 static void
556 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
557 {
558 	list_del(&bridge_vlan->list);
559 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
560 	kfree(bridge_vlan);
561 }
562 
563 static struct mlxsw_sp_bridge_vlan *
564 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
565 {
566 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
567 
568 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
569 	if (bridge_vlan)
570 		return bridge_vlan;
571 
572 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
573 }
574 
575 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
576 {
577 	if (list_empty(&bridge_vlan->port_vlan_list))
578 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
579 }
580 
/* Apply an STP state to this port's member of the bridge VLAN. At most one
 * entry in the list belongs to the port; a port that is not a member is a
 * successful no-op.
 */
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}
598 
/* Set the STP state of the port on every VLAN of its bridge port. On
 * failure, roll back the VLANs already updated to the previous state.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the old state on the VLANs processed so far. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
633 
/* Set the flood membership of this port in the FID of its member of the
 * bridge VLAN, for the given packet type. A port that is not a member of
 * the bridge VLAN is a successful no-op.
 */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}
654 
/* Set the flood membership of the port on every VLAN of its bridge port.
 * On failure, revert the VLANs already updated to the opposite membership.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
682 
683 static int
684 mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
685 				enum mlxsw_sp_flood_type packet_type,
686 				bool member)
687 {
688 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
689 	int err;
690 
691 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
692 			    bridge_vlan_node) {
693 		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
694 
695 		err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
696 					     packet_type, local_port, member);
697 		if (err)
698 			goto err_fid_flood_set;
699 	}
700 
701 	return 0;
702 
703 err_fid_flood_set:
704 	list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
705 					     &bridge_vlan->port_vlan_list,
706 					     list) {
707 		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
708 
709 		mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
710 				       local_port, !member);
711 	}
712 
713 	return err;
714 }
715 
/* Set the flood membership of all members of every VLAN of the bridge
 * port. On failure, revert the VLANs already updated.
 */
static int
mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
				      enum mlxsw_sp_flood_type packet_type,
				      bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						      member);
		if (err)
			goto err_bridge_vlans_flood_set;
	}

	return 0;

err_bridge_vlans_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						!member);
	return err;
}
740 
741 static int
742 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
743 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
744 				       bool set)
745 {
746 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
747 	u16 vid = bridge_vlan->vid;
748 
749 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
750 			    bridge_vlan_node) {
751 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
752 			continue;
753 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
754 	}
755 
756 	return 0;
757 }
758 
/* Enable or disable learning on every VLAN of the bridge port. On failure,
 * revert the VLANs already updated to the opposite setting.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
783 
784 static int
785 mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
786 				    struct switchdev_brport_flags flags)
787 {
788 	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
789 		return -EINVAL;
790 
791 	return 0;
792 }
793 
/* Apply changed bridge port flags (learning, unicast / multicast flood) to
 * the device and cache the new flag values on the bridge port.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *orig_dev,
					   struct switchdev_brport_flags flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* The port may have never been enslaved; deferred operation. */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (flags.mask & BR_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_UC,
							   flags.val & BR_FLOOD);
		if (err)
			return err;
	}

	if (flags.mask & BR_LEARNING) {
		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
							bridge_port,
							flags.val & BR_LEARNING);
		if (err)
			return err;
	}

	/* With multicast enabled, MC flooding is governed by mrouter state
	 * (see mlxsw_sp_mc_flood()), so only cache the flag here.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	if (flags.mask & BR_MCAST_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_MC,
							   flags.val & BR_MCAST_FLOOD);
		if (err)
			return err;
	}

out:
	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
	return 0;
}
839 
840 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
841 {
842 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
843 	int err;
844 
845 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
846 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
847 	if (err)
848 		return err;
849 	mlxsw_sp->bridge->ageing_time = ageing_time;
850 	return 0;
851 }
852 
853 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
854 					    unsigned long ageing_clock_t)
855 {
856 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
857 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
858 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
859 
860 	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
861 	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
862 		return -ERANGE;
863 
864 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
865 }
866 
867 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
868 					  struct net_device *orig_dev,
869 					  bool vlan_enabled)
870 {
871 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
872 	struct mlxsw_sp_bridge_device *bridge_device;
873 
874 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
875 	if (WARN_ON(!bridge_device))
876 		return -EINVAL;
877 
878 	if (bridge_device->vlan_enabled == vlan_enabled)
879 		return 0;
880 
881 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
882 	return -EINVAL;
883 }
884 
885 static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
886 						struct net_device *orig_dev,
887 						u16 vlan_proto)
888 {
889 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
890 	struct mlxsw_sp_bridge_device *bridge_device;
891 
892 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
893 	if (WARN_ON(!bridge_device))
894 		return -EINVAL;
895 
896 	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
897 	return -EINVAL;
898 }
899 
/* Update the multicast router state of a bridge port. When multicast
 * (snooping) is enabled on the bridge, this drives both the MC flood
 * tables and the port's membership in existing MDB entries; otherwise only
 * the cached state is updated.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* The port may have never been enslaved; deferred operation. */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
927 
928 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
929 {
930 	const struct mlxsw_sp_bridge_device *bridge_device;
931 
932 	bridge_device = bridge_port->bridge_device;
933 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
934 					bridge_port->flags & BR_MCAST_FLOOD;
935 }
936 
/* Toggle multicast snooping on the bridge: synchronize the MDB with the
 * new state and recompute the MC flood membership of every port. On
 * failure, restore the previous flood membership and MDB state.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->multicast_enabled == !mc_disabled)
		return 0;

	/* Flip the state first; mlxsw_sp_mc_flood() below depends on it. */
	bridge_device->multicast_enabled = !mc_disabled;
	err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
						 !mc_disabled);
	if (err)
		goto err_mc_enable_sync;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
							    packet_type,
							    member);
		if (err)
			goto err_flood_table_set;
	}

	return 0;

err_flood_table_set:
	list_for_each_entry_continue_reverse(bridge_port,
					     &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
						      !member);
	}
	mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
					   mc_disabled);
err_mc_enable_sync:
	bridge_device->multicast_enabled = mc_disabled;
	return err;
}
989 
990 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
991 					 u16 mid_idx, bool add)
992 {
993 	char *smid2_pl;
994 	int err;
995 
996 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
997 	if (!smid2_pl)
998 		return -ENOMEM;
999 
1000 	mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
1001 			     mlxsw_sp_router_port(mlxsw_sp), add, false, 0);
1002 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
1003 	kfree(smid2_pl);
1004 	return err;
1005 }
1006 
1007 static struct mlxsw_sp_mdb_entry_port *
1008 mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
1009 			       u16 local_port)
1010 {
1011 	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
1012 
1013 	list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
1014 		if (mdb_entry_port->local_port == local_port)
1015 			return mdb_entry_port;
1016 	}
1017 
1018 	return NULL;
1019 }
1020 
/* Take a reference on a local port's membership in an MDB entry, creating
 * the membership (and programming the PGT) on first use.
 *
 * 'ports_count' counts memberships that exist for a non-mrouter reason: a
 * port whose only reference so far was as mrouter starts being counted
 * once a regular reference is added.
 */
static __always_unused struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		/* The port was previously held only as mrouter; it now also
		 * has a regular reference, so start counting it.
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count++;

		refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* Program the hardware before exposing the new membership. */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
	mdb_entry->ports_count++;

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1062 
/* Drop a regular reference on @local_port's membership in @mdb_entry. When
 * the last reference is dropped (or @force is set), the port is removed from
 * the entry's list and from the PGT entry in hardware. No-op if the port is
 * not a member.
 */
static __always_unused void
mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port, bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
		/* If only the mrouter reference remains, stop accounting the
		 * member in ports_count (mirror of the _port_get() logic).
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count--;
		return;
	}

	mdb_entry->ports_count--;
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1087 
1088 static __always_unused struct mlxsw_sp_mdb_entry_port *
1089 mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
1090 				    struct mlxsw_sp_mdb_entry *mdb_entry,
1091 				    u16 local_port)
1092 {
1093 	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
1094 	int err;
1095 
1096 	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
1097 	if (mdb_entry_port) {
1098 		if (!mdb_entry_port->mrouter)
1099 			refcount_inc(&mdb_entry_port->refcount);
1100 		return mdb_entry_port;
1101 	}
1102 
1103 	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
1104 					  mdb_entry->key.fid, local_port, true);
1105 	if (err)
1106 		return ERR_PTR(err);
1107 
1108 	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
1109 	if (!mdb_entry_port) {
1110 		err = -ENOMEM;
1111 		goto err_mdb_entry_port_alloc;
1112 	}
1113 
1114 	mdb_entry_port->local_port = local_port;
1115 	refcount_set(&mdb_entry_port->refcount, 1);
1116 	mdb_entry_port->mrouter = true;
1117 	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
1118 
1119 	return mdb_entry_port;
1120 
1121 err_mdb_entry_port_alloc:
1122 	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
1123 				    mdb_entry->key.fid, local_port, false);
1124 	return ERR_PTR(err);
1125 }
1126 
/* Release the mrouter reference on @local_port's membership in @mdb_entry.
 * No-op when the port is not a member or does not hold the mrouter role.
 * When this was the last reference, the member is removed from the list and
 * from the PGT entry in hardware. ports_count is untouched because
 * mrouter-only members are not accounted in it.
 */
static __always_unused void
mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!mdb_entry_port->mrouter)
		return;

	mdb_entry_port->mrouter = false;
	if (!refcount_dec_and_test(&mdb_entry_port->refcount))
		return;

	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1150 
1151 static void
1152 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
1153 				   struct mlxsw_sp_bridge_device *bridge_device,
1154 				   bool add)
1155 {
1156 	struct mlxsw_sp_mdb_entry *mdb_entry;
1157 
1158 	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list)
1159 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mdb_entry->mid, add);
1160 }
1161 
1162 static int
1163 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
1164 				  struct net_device *orig_dev,
1165 				  bool is_mrouter)
1166 {
1167 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1168 	struct mlxsw_sp_bridge_device *bridge_device;
1169 
1170 	/* It's possible we failed to enslave the port, yet this
1171 	 * operation is executed due to it being deferred.
1172 	 */
1173 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
1174 	if (!bridge_device)
1175 		return 0;
1176 
1177 	if (bridge_device->mrouter != is_mrouter)
1178 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
1179 						   is_mrouter);
1180 	bridge_device->mrouter = is_mrouter;
1181 	return 0;
1182 }
1183 
/* switchdev attribute handler for port netdevs. Dispatches each supported
 * SWITCHDEV_ATTR_ID_* to its setter; unknown attributes are rejected with
 * -EOPNOTSUPP. SPAN sessions are re-resolved afterwards, regardless of
 * whether the attribute was handled successfully.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		/* Veto unsupported bridge port flags before they are set. */
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
							   attr->orig_dev,
							   attr->u.vlan_protocol);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Attribute changes may affect mirroring; refresh SPAN agents. */
	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1244 
/* Join the {Port, VID} to the FID backing the bridge port's bridge. The FID
 * is looked up (or created) via the bridge-variant fid_get() callback, the
 * port is enabled in the FID's flood tables and the {Port, VID} is mapped to
 * the FID in hardware. On success the FID reference is kept in
 * mlxsw_sp_port_vlan->fid. Unwinds in reverse order on failure.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* Unknown-unicast flooding follows the bridge port's BR_FLOOD flag. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* Multicast flood membership is computed by mlxsw_sp_mc_flood(). */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
1295 
/* Reverse of mlxsw_sp_port_vlan_fid_join(): unmap the {Port, VID} from the
 * FID, disable the port in all of the FID's flood tables and drop the FID
 * reference. The cached pointer is cleared up front.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
1311 
1312 static u16
1313 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1314 			     u16 vid, bool is_pvid)
1315 {
1316 	if (is_pvid)
1317 		return vid;
1318 	else if (mlxsw_sp_port->pvid == vid)
1319 		return 0;	/* Dis-allow untagged packets */
1320 	else
1321 		return mlxsw_sp_port->pvid;
1322 }
1323 
/* Bind the {Port, VID} to @bridge_port: join the backing FID, apply the
 * bridge port's learning and STP state to the VID and link the port-VLAN
 * into the corresponding bridge VLAN. Takes a reference on the bridge port.
 * Idempotent when the port-VLAN is already bound (flag-only changes).
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev, extack);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1376 
/* Undo mlxsw_sp_port_vlan_bridge_join(). last_port / last_vlan are sampled
 * before unlinking: when the last {Port, VID} using this bridge VLAN leaves,
 * the FDB entries pointing at the FID are flushed; when the last VLAN on the
 * bridge port leaves, the port's MDB entries are flushed as well.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only bridged FID types are expected here. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1412 
/* Add @vid to @bridge_port on @mlxsw_sp_port: create the port-VLAN if
 * needed, program VLAN membership / untagged state and the PVID, and join
 * the VLAN to the bridge. Unwinds in reverse order on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	u16 proto;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	/* The PVID is set with the bridge's current VLAN protocol. */
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}
1466 
1467 static int
1468 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1469 				const struct net_device *br_dev,
1470 				const struct switchdev_obj_port_vlan *vlan)
1471 {
1472 	u16 pvid;
1473 
1474 	pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
1475 	if (!pvid)
1476 		return 0;
1477 
1478 	if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1479 		if (vlan->vid != pvid) {
1480 			netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1481 			return -EBUSY;
1482 		}
1483 	} else {
1484 		if (vlan->vid == pvid) {
1485 			netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1486 			return -EBUSY;
1487 		}
1488 	}
1489 
1490 	return 0;
1491 }
1492 
/* switchdev VLAN object add handler. VLANs configured on the bridge device
 * itself are not offloaded, but on a VLAN-aware bridge a PVID change that
 * would break a router interface is vetoed first.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if (br_vlan_enabled(orig_dev))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		/* If the ban check passed, still report the object as
		 * unsupported; bridge-device VLANs are not offloaded.
		 */
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLAN-unaware bridges ignore per-VLAN configuration. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
					     vlan->vid, flag_untagged,
					     flag_pvid, extack);
}
1525 
1526 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1527 {
1528 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1529 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1530 }
1531 
1532 static int
1533 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1534 			       struct mlxsw_sp_bridge_port *bridge_port,
1535 			       u16 fid_index)
1536 {
1537 	bool lagged = bridge_port->lagged;
1538 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1539 	u16 system_port;
1540 
1541 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1542 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1543 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1544 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1545 
1546 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1547 }
1548 
1549 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1550 {
1551 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1552 			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1553 }
1554 
1555 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1556 {
1557 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1558 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1559 }
1560 
/* Add or remove a unicast FDB record pointing at an IPv4 underlay tunnel.
 * The device reports the number of processed records back in the register;
 * a mismatch with the requested count is returned as -EBUSY.
 */
static int
mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
			     const char *mac, u16 fid, __be32 addr, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	/* SFD is too large for the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	uip = be32_to_cpu(addr);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
				      mlxsw_sp_sfd_rec_policy(dynamic), mac,
				      fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1591 
1592 static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
1593 						  const char *mac, u16 fid,
1594 						  u32 kvdl_index, bool adding)
1595 {
1596 	char *sfd_pl;
1597 	u8 num_rec;
1598 	int err;
1599 
1600 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1601 	if (!sfd_pl)
1602 		return -ENOMEM;
1603 
1604 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1605 	mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
1606 				      MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
1607 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1608 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1609 	if (err)
1610 		goto out;
1611 
1612 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1613 		err = -EBUSY;
1614 
1615 out:
1616 	kfree(sfd_pl);
1617 	return err;
1618 }
1619 
/* Add a unicast FDB record pointing at an IPv6 underlay tunnel: store the
 * address in KVDL, write the FDB record referencing its KVDL index and
 * record the {MAC, FID} -> address mapping so the address can be resolved
 * again on removal.
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
					    const char *mac, u16 fid,
					    const struct in6_addr *addr)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
	if (err)
		return err;

	err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
						     kvdl_index, true);
	if (err)
		goto err_sfd_write;

	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
	if (err)
		/* Replace can fail only for creating new mapping, so removing
		 * the FDB entry in the error path is OK.
		 */
		goto err_addr_replace;

	return 0;

err_addr_replace:
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
					       false);
err_sfd_write:
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
	return err;
}
1652 
/* Tear down an IPv6 tunnel FDB entry: drop the {MAC, FID} -> address
 * mapping, remove the FDB record (0 is passed for the KVDL index on
 * removal) and release the KVDL entry holding the IPv6 address.
 */
static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
					     const char *mac, u16 fid,
					     const struct in6_addr *addr)
{
	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
}
1661 
1662 static int
1663 mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
1664 			     u16 fid, const struct in6_addr *addr, bool adding)
1665 {
1666 	if (adding)
1667 		return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
1668 							addr);
1669 
1670 	mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
1671 	return 0;
1672 }
1673 
1674 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1675 					  const char *mac, u16 fid,
1676 					  enum mlxsw_sp_l3proto proto,
1677 					  const union mlxsw_sp_l3addr *addr,
1678 					  bool adding, bool dynamic)
1679 {
1680 	switch (proto) {
1681 	case MLXSW_SP_L3_PROTO_IPV4:
1682 		return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
1683 						    addr->addr4, adding);
1684 	case MLXSW_SP_L3_PROTO_IPV6:
1685 		return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
1686 						    &addr->addr6, adding);
1687 	default:
1688 		WARN_ON(1);
1689 		return -EOPNOTSUPP;
1690 	}
1691 }
1692 
/* Common helper to write/remove a single unicast FDB record keyed by
 * {MAC, FID} and pointing at @local_port, with caller-chosen action and
 * policy. -EBUSY is returned when the device processes a different number
 * of records than requested.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     enum mlxsw_reg_sfd_rec_policy policy)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD is too large for the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1720 
/* Add/remove a unicast FDB record for a non-LAG port, with the NOP
 * (forward) action and the policy selected by @dynamic.
 */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP,
					 mlxsw_sp_sfd_rec_policy(dynamic));
}
1729 
/* Add/remove a static FDB record for a router interface MAC. The
 * FORWARD_IP_ROUTER action directs matching packets to the router rather
 * than to a front-panel port (local_port is 0).
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
1737 
1738 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1739 				       const char *mac, u16 fid, u16 lag_vid,
1740 				       bool adding, bool dynamic)
1741 {
1742 	char *sfd_pl;
1743 	u8 num_rec;
1744 	int err;
1745 
1746 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1747 	if (!sfd_pl)
1748 		return -ENOMEM;
1749 
1750 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1751 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1752 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1753 				  lag_vid, lag_id);
1754 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1755 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1756 	if (err)
1757 		goto out;
1758 
1759 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1760 		err = -EBUSY;
1761 
1762 out:
1763 	kfree(sfd_pl);
1764 	return err;
1765 }
1766 
/* Reflect a bridge FDB notification into hardware. Entries for {port, VLAN}
 * pairs that are not offloaded (no port-VLAN) are silently ignored; an
 * unknown bridge port is an error. LAG ports use the LAG record format with
 * the port-VLAN's VID as lag_vid.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}
1803 
1804 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1805 				u16 fid, u16 mid_idx, bool adding)
1806 {
1807 	char *sfd_pl;
1808 	u8 num_rec;
1809 	int err;
1810 
1811 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1812 	if (!sfd_pl)
1813 		return -ENOMEM;
1814 
1815 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1816 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1817 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1818 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1819 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1820 	if (err)
1821 		goto out;
1822 
1823 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1824 		err = -EBUSY;
1825 
1826 out:
1827 	kfree(sfd_pl);
1828 	return err;
1829 }
1830 
/* Write a full SMID (MID -> port list) entry. The port mask is set for
 * every existing port and for the router port, so bits of unlisted ports
 * are explicitly written as cleared; the member bits themselves come from
 * @ports_bm, plus the router port when @set_router_port is true.
 */
static int
mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
			      const struct mlxsw_sp_ports_bitmap *ports_bm,
			      bool set_router_port)
{
	char *smid2_pl;
	int err, i;

	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
	if (!smid2_pl)
		return -ENOMEM;

	mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false, false, 0);
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
	}

	mlxsw_reg_smid2_port_mask_set(smid2_pl,
				      mlxsw_sp_router_port(mlxsw_sp), 1);

	for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
		mlxsw_reg_smid2_port_set(smid2_pl, i, 1);

	mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
				 set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
	kfree(smid2_pl);
	return err;
}
1862 
1863 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1864 				  u16 mid_idx, bool add)
1865 {
1866 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1867 	char *smid2_pl;
1868 	int err;
1869 
1870 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
1871 	if (!smid2_pl)
1872 		return -ENOMEM;
1873 
1874 	mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add,
1875 			     false, 0);
1876 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
1877 	kfree(smid2_pl);
1878 	return err;
1879 }
1880 
1881 static struct mlxsw_sp_mdb_entry *
1882 __mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1883 		  const unsigned char *addr, u16 fid)
1884 {
1885 	struct mlxsw_sp_mdb_entry *mdb_entry;
1886 
1887 	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
1888 		if (ether_addr_equal(mdb_entry->key.addr, addr) &&
1889 		    mdb_entry->key.fid == fid)
1890 			return mdb_entry;
1891 	}
1892 	return NULL;
1893 }
1894 
1895 static void
1896 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1897 				      struct mlxsw_sp_bridge_port *bridge_port,
1898 				      struct mlxsw_sp_ports_bitmap *ports_bm)
1899 {
1900 	struct mlxsw_sp_port *mlxsw_sp_port;
1901 	u64 max_lag_members, i;
1902 	int lag_id;
1903 
1904 	if (!bridge_port->lagged) {
1905 		set_bit(bridge_port->system_port, ports_bm->bitmap);
1906 	} else {
1907 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1908 						     MAX_LAG_MEMBERS);
1909 		lag_id = bridge_port->lag_id;
1910 		for (i = 0; i < max_lag_members; i++) {
1911 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1912 								 lag_id, i);
1913 			if (mlxsw_sp_port)
1914 				set_bit(mlxsw_sp_port->local_port,
1915 					ports_bm->bitmap);
1916 		}
1917 	}
1918 }
1919 
1920 static void
1921 mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
1922 				struct mlxsw_sp_bridge_device *bridge_device,
1923 				struct mlxsw_sp *mlxsw_sp)
1924 {
1925 	struct mlxsw_sp_bridge_port *bridge_port;
1926 
1927 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1928 		if (bridge_port->mrouter) {
1929 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1930 							      bridge_port,
1931 							      flood_bm);
1932 		}
1933 	}
1934 }
1935 
/* Allocate a free MID index and program @mdb_entry into hardware: the SMID
 * entry is written with the entry's ports plus all mrouter ports (and the
 * router port if the bridge is an mrouter), then an SFD MC record binds
 * {MAC, FID} to the MID. The MID index is only reserved in mids_bitmap once
 * both writes succeed.
 */
static int
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	struct mlxsw_sp_ports_bitmap flood_bitmap;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return -ENOBUFS;

	err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &flood_bitmap);
	if (err)
		return err;

	/* Flood to the entry's member ports plus all mrouter ports. */
	bitmap_copy(flood_bitmap.bitmap, mdb_entry->ports_in_mid,
		    flood_bitmap.nbits);
	mlxsw_sp_mc_get_mrouters_bitmap(&flood_bitmap, bridge_device, mlxsw_sp);

	mdb_entry->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, &flood_bitmap,
					    bridge_device->mrouter);
	mlxsw_sp_port_bitmap_fini(&flood_bitmap);
	if (err)
		return err;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb_entry->key.addr,
				   mdb_entry->key.fid, mid_idx, true);
	if (err)
		return err;

	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mdb_entry->in_hw = true;
	return 0;
}
1974 
/* Remove @mdb_entry from hardware: free its MID index and delete the SFD MC
 * record. No-op for entries that were never written (in_hw is false, e.g.
 * multicast disabled on the bridge).
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mdb_entry *mdb_entry)
{
	if (!mdb_entry->in_hw)
		return 0;

	clear_bit(mdb_entry->mid, mlxsw_sp->bridge->mids_bitmap);
	mdb_entry->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mdb_entry->key.addr,
				    mdb_entry->key.fid, mdb_entry->mid, false);
}
1986 
/* Allocate a new MDB entry for {addr, fid} and link it into the bridge's
 * list. The entry is written to hardware only when multicast is currently
 * enabled on the bridge; otherwise it stays software-only (in_hw = false).
 * Returns NULL on any failure.
 */
static struct mlxsw_sp_mdb_entry *
__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
		    struct mlxsw_sp_bridge_device *bridge_device,
		    const unsigned char *addr, u16 fid)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
	if (!mdb_entry)
		return NULL;

	mdb_entry->ports_in_mid = bitmap_zalloc(max_ports, GFP_KERNEL);
	if (!mdb_entry->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mdb_entry->key.addr, addr);
	mdb_entry->key.fid = fid;
	mdb_entry->in_hw = false;

	/* With multicast disabled, defer the hardware write until it is
	 * enabled; the entry is still tracked in software.
	 */
	if (!bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mdb_entry, bridge_device);
	if (err)
		goto err_write_mdb_entry;

out:
	list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);
	return mdb_entry;

err_write_mdb_entry:
	bitmap_free(mdb_entry->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mdb_entry);
	return NULL;
}
2025 
2026 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
2027 					 struct mlxsw_sp_mdb_entry *mdb_entry)
2028 {
2029 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2030 	int err = 0;
2031 
2032 	clear_bit(mlxsw_sp_port->local_port, mdb_entry->ports_in_mid);
2033 	if (bitmap_empty(mdb_entry->ports_in_mid,
2034 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
2035 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mdb_entry);
2036 		list_del(&mdb_entry->list);
2037 		bitmap_free(mdb_entry->ports_in_mid);
2038 		kfree(mdb_entry);
2039 	}
2040 	return err;
2041 }
2042 
/* switchdev PORT_MDB add handler: join the port to the {MAC, FID}
 * multicast group, creating the group on first join.
 *
 * Returns 0 on success (including silently ignored objects for ports or
 * VLANs this driver does not track), or a negative errno.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	u16 fid_index;
	int err = 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	/* First join of this {MAC, FID} creates the MDB entry. */
	mdb_entry = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mdb_entry) {
		mdb_entry = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device,
						mdb->addr, fid_index);
		if (!mdb_entry) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mdb_entry->ports_in_mid);

	/* With multicast snooping disabled the membership is only tracked
	 * in software; HW programming happens on mc_enable_sync.
	 */
	if (!bridge_device->multicast_enabled)
		return 0;

	/* mrouter ports are added to all MIDs separately (see
	 * mlxsw_sp_port_mrouter_update_mdb()), so skip the SMID update.
	 */
	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mdb_entry->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mdb_entry);
	return err;
}
2098 
/* Sync all MDB entries of a bridge with the multicast-enabled state:
 * commit every entry to hardware when enabling, remove every entry when
 * disabling. On failure, already-processed entries are rolled back to
 * their previous state (rollback errors are intentionally ignored).
 */
static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool mc_enabled)
{
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
		if (mc_enabled)
			err = mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mdb_entry,
							  bridge_device);
		else
			err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mdb_entry);

		if (err)
			goto err_mdb_entry_update;
	}

	return 0;

err_mdb_entry_update:
	/* Undo the opposite operation on the entries handled so far. */
	list_for_each_entry_continue_reverse(mdb_entry,
					     &bridge_device->mdb_list, list) {
		if (mc_enabled)
			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mdb_entry);
		else
			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mdb_entry,
						    bridge_device);
	}
	return err;
}
2131 
2132 static void
2133 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
2134 				 struct mlxsw_sp_bridge_port *bridge_port,
2135 				 bool add)
2136 {
2137 	struct mlxsw_sp_bridge_device *bridge_device;
2138 	struct mlxsw_sp_mdb_entry *mdb_entry;
2139 
2140 	bridge_device = bridge_port->bridge_device;
2141 
2142 	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
2143 		if (!test_bit(mlxsw_sp_port->local_port,
2144 			      mdb_entry->ports_in_mid))
2145 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mdb_entry->mid,
2146 					       add);
2147 	}
2148 }
2149 
2150 static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
2151 				 const struct switchdev_obj *obj,
2152 				 struct netlink_ext_ack *extack)
2153 {
2154 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2155 	const struct switchdev_obj_port_vlan *vlan;
2156 	int err = 0;
2157 
2158 	switch (obj->id) {
2159 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
2160 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
2161 
2162 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
2163 
2164 		/* The event is emitted before the changes are actually
2165 		 * applied to the bridge. Therefore schedule the respin
2166 		 * call for later, so that the respin logic sees the
2167 		 * updated bridge state.
2168 		 */
2169 		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2170 		break;
2171 	case SWITCHDEV_OBJ_ID_PORT_MDB:
2172 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
2173 					    SWITCHDEV_OBJ_PORT_MDB(obj));
2174 		break;
2175 	default:
2176 		err = -EOPNOTSUPP;
2177 		break;
2178 	}
2179 
2180 	return err;
2181 }
2182 
/* Remove a VLAN from a bridge port: detach the {Port, VID} from the
 * bridge, fix up the PVID, drop the VLAN from the port's filter and
 * destroy the port-VLAN object. The order of operations matters:
 * bridge-leave must precede PVID/VLAN teardown.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* If the deleted VLAN is the current PVID, clear the PVID (0);
	 * otherwise keep the configured one.
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 proto;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
2201 
/* switchdev PORT_VLAN delete handler. VLAN objects on the bridge device
 * itself are not offloaded; deletions on VLAN-unaware bridges are a
 * no-op.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);

	return 0;
}
2223 
2224 static int
2225 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
2226 			struct mlxsw_sp_bridge_port *bridge_port,
2227 			struct mlxsw_sp_mdb_entry *mdb_entry)
2228 {
2229 	struct net_device *dev = mlxsw_sp_port->dev;
2230 	int err;
2231 
2232 	if (bridge_port->bridge_device->multicast_enabled &&
2233 	    !bridge_port->mrouter) {
2234 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mdb_entry->mid,
2235 					     false);
2236 		if (err)
2237 			netdev_err(dev, "Unable to remove port from SMID\n");
2238 	}
2239 
2240 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mdb_entry);
2241 	if (err)
2242 		netdev_err(dev, "Unable to remove MC SFD\n");
2243 
2244 	return err;
2245 }
2246 
2247 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
2248 				 const struct switchdev_obj_port_mdb *mdb)
2249 {
2250 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2251 	struct net_device *orig_dev = mdb->obj.orig_dev;
2252 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2253 	struct mlxsw_sp_bridge_device *bridge_device;
2254 	struct net_device *dev = mlxsw_sp_port->dev;
2255 	struct mlxsw_sp_bridge_port *bridge_port;
2256 	struct mlxsw_sp_mdb_entry *mdb_entry;
2257 	u16 fid_index;
2258 
2259 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
2260 	if (!bridge_port)
2261 		return 0;
2262 
2263 	bridge_device = bridge_port->bridge_device;
2264 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
2265 							       bridge_device,
2266 							       mdb->vid);
2267 	if (!mlxsw_sp_port_vlan)
2268 		return 0;
2269 
2270 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
2271 
2272 	mdb_entry = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
2273 	if (!mdb_entry) {
2274 		netdev_err(dev, "Unable to remove port from MC DB\n");
2275 		return -EINVAL;
2276 	}
2277 
2278 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mdb_entry);
2279 }
2280 
2281 static void
2282 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
2283 			       struct mlxsw_sp_bridge_port *bridge_port)
2284 {
2285 	struct mlxsw_sp_bridge_device *bridge_device;
2286 	struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
2287 	u16 local_port = mlxsw_sp_port->local_port;
2288 
2289 	bridge_device = bridge_port->bridge_device;
2290 
2291 	list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
2292 				 list) {
2293 		if (test_bit(local_port, mdb_entry->ports_in_mid)) {
2294 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
2295 						mdb_entry);
2296 		} else if (bridge_device->multicast_enabled &&
2297 			   bridge_port->mrouter) {
2298 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mdb_entry->mid,
2299 					       false);
2300 		}
2301 	}
2302 }
2303 
/* switchdev object-delete dispatcher for a Spectrum port. The SPAN
 * respin runs on every path, including the unsupported-object one.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
2328 
2329 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
2330 						   u16 lag_id)
2331 {
2332 	struct mlxsw_sp_port *mlxsw_sp_port;
2333 	u64 max_lag_members;
2334 	int i;
2335 
2336 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
2337 					     MAX_LAG_MEMBERS);
2338 	for (i = 0; i < max_lag_members; i++) {
2339 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2340 		if (mlxsw_sp_port)
2341 			return mlxsw_sp_port;
2342 	}
2343 	return NULL;
2344 }
2345 
2346 static int
2347 mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
2348 				     struct mlxsw_sp_port *mlxsw_sp_port,
2349 				     struct netlink_ext_ack *extack)
2350 {
2351 	if (is_vlan_dev(bridge_port->dev)) {
2352 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
2353 		return -EINVAL;
2354 	}
2355 
2356 	/* Port is no longer usable as a router interface */
2357 	if (mlxsw_sp_port->default_vlan->fid)
2358 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
2359 
2360 	return 0;
2361 }
2362 
/* 802.1Q bridge ops: port join is the common VLAN-aware join. */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						    extack);
}
2372 
/* Common leave path for VLAN-aware bridges: restore the default PVID. */
static void
mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
2380 
/* 802.1Q bridge ops: port leave is the common VLAN-aware leave. */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
}
2388 
/* Enable VXLAN tunneling (NVE) on the 802.1Q FID of 'vid' for a
 * VLAN-aware bridge. Returns 0 on success (including the benign case
 * where no mapped VLAN exists) or a negative errno; on failure the FID
 * reference taken here is released.
 */
static int
mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				      const struct net_device *vxlan_dev,
				      u16 vid, u16 ethertype,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
		.ethertype = ethertype,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
		return PTR_ERR(fid);
	}

	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2439 
/* 802.1Q bridge ops: VXLAN join with 802.1Q EtherType. */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021Q, extack);
}
2448 
2449 static struct net_device *
2450 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2451 {
2452 	struct net_device *dev;
2453 	struct list_head *iter;
2454 
2455 	netdev_for_each_lower_dev(br_dev, dev, iter) {
2456 		u16 pvid;
2457 		int err;
2458 
2459 		if (!netif_is_vxlan(dev))
2460 			continue;
2461 
2462 		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2463 		if (err || pvid != vid)
2464 			continue;
2465 
2466 		return dev;
2467 	}
2468 
2469 	return NULL;
2470 }
2471 
/* 802.1Q bridge ops: get (and take a reference on) the FID of 'vid'. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
2480 
/* 802.1Q bridge ops: look up an existing FID by VID, without creating. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}
2489 
/* 802.1Q bridge ops: map a FID back to its VID. */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2496 
/* Bridge ops for VLAN-aware (802.1Q) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2505 
2506 static bool
2507 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2508 			   const struct net_device *br_dev)
2509 {
2510 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2511 
2512 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2513 			    list) {
2514 		if (mlxsw_sp_port_vlan->bridge_port &&
2515 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2516 		    br_dev)
2517 			return true;
2518 	}
2519 
2520 	return false;
2521 }
2522 
/* 802.1D (VLAN-unaware) bridge ops: join. The bridged entity is either
 * a VLAN upper of the port or the port itself (default VID). Rejects
 * bridging two VLAN uppers of the same port to the same bridge.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					      extack);
}
2550 
2551 static void
2552 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2553 				 struct mlxsw_sp_bridge_port *bridge_port,
2554 				 struct mlxsw_sp_port *mlxsw_sp_port)
2555 {
2556 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2557 	struct net_device *dev = bridge_port->dev;
2558 	u16 vid;
2559 
2560 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2561 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2562 	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2563 		return;
2564 
2565 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2566 }
2567 
2568 static int
2569 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2570 				 const struct net_device *vxlan_dev, u16 vid,
2571 				 struct netlink_ext_ack *extack)
2572 {
2573 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2574 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2575 	struct mlxsw_sp_nve_params params = {
2576 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2577 		.vni = vxlan->cfg.vni,
2578 		.dev = vxlan_dev,
2579 		.ethertype = ETH_P_8021Q,
2580 	};
2581 	struct mlxsw_sp_fid *fid;
2582 	int err;
2583 
2584 	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2585 	if (IS_ERR(fid)) {
2586 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2587 		return -EINVAL;
2588 	}
2589 
2590 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2591 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2592 		err = -EINVAL;
2593 		goto err_vni_exists;
2594 	}
2595 
2596 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2597 	if (err)
2598 		goto err_nve_fid_enable;
2599 
2600 	return 0;
2601 
2602 err_nve_fid_enable:
2603 err_vni_exists:
2604 	mlxsw_sp_fid_put(fid);
2605 	return err;
2606 }
2607 
/* 802.1D bridge ops: get the bridge's FID, keyed by bridge ifindex
 * (the 'vid' argument is unused for VLAN-unaware bridges).
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}
2616 
/* 802.1D bridge ops: look up the bridge's FID, without creating. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
	if (vid)
		return NULL;

	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
}
2629 
/* 802.1D bridge ops: VLAN-unaware bridges always map to VID 0. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2636 
/* Bridge ops for VLAN-unaware (802.1D) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2645 
/* 802.1ad bridge ops (Spectrum-1): join. First switch the port's VLAN
 * classification to S-tags, then do the common VLAN-aware join; on
 * failure restore C-tag classification (hence the inverted arguments in
 * the rollback).
 */
static int
mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
	if (err)
		return err;

	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						   extack);
	if (err)
		goto err_bridge_vlan_aware_port_join;

	return 0;

err_bridge_vlan_aware_port_join:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	return err;
}
2669 
/* 802.1ad bridge ops (Spectrum-1): leave; undoes the join in reverse
 * order and restores C-tag VLAN classification.
 */
static void
mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
}
2678 
/* 802.1ad bridge ops: VXLAN join with 802.1ad EtherType. */
static int
mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021AD, extack);
}
2687 
/* Bridge ops for 802.1ad bridges on Spectrum-1; FID handling is shared
 * with the 802.1Q ops.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2696 
/* 802.1ad bridge ops (Spectrum-2 and later): join. Additionally sets
 * the egress EtherType to 802.1ad before the common 802.1ad join.
 */
static int
mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct netlink_ext_ack *extack)
{
	int err;

	/* The EtherType of decapsulated packets is determined at the egress
	 * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
	 * co-exist.
	 */
	err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
					       mlxsw_sp_port, extack);
	if (err)
		goto err_bridge_8021ad_port_join;

	return 0;

err_bridge_8021ad_port_join:
	/* Restore the default 802.1Q egress EtherType. */
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
	return err;
}
2724 
/* 802.1ad bridge ops (Spectrum-2 and later): leave; undoes the join and
 * restores the default 802.1Q egress EtherType.
 */
static void
mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				   struct mlxsw_sp_bridge_port *bridge_port,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
					  mlxsw_sp_port);
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
}
2734 
/* Bridge ops for 802.1ad bridges on Spectrum-2 and later ASICs. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp2_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp2_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2743 
2744 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2745 			      struct net_device *brport_dev,
2746 			      struct net_device *br_dev,
2747 			      struct netlink_ext_ack *extack)
2748 {
2749 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2750 	struct mlxsw_sp_bridge_device *bridge_device;
2751 	struct mlxsw_sp_bridge_port *bridge_port;
2752 	int err;
2753 
2754 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
2755 					       extack);
2756 	if (IS_ERR(bridge_port))
2757 		return PTR_ERR(bridge_port);
2758 	bridge_device = bridge_port->bridge_device;
2759 
2760 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2761 					    mlxsw_sp_port, extack);
2762 	if (err)
2763 		goto err_port_join;
2764 
2765 	return 0;
2766 
2767 err_port_join:
2768 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2769 	return err;
2770 }
2771 
2772 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2773 				struct net_device *brport_dev,
2774 				struct net_device *br_dev)
2775 {
2776 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2777 	struct mlxsw_sp_bridge_device *bridge_device;
2778 	struct mlxsw_sp_bridge_port *bridge_port;
2779 
2780 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2781 	if (!bridge_device)
2782 		return;
2783 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2784 	if (!bridge_port)
2785 		return;
2786 
2787 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2788 				       mlxsw_sp_port);
2789 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2790 }
2791 
/* Dispatch a VXLAN join to the bridge-type specific handler. The bridge
 * device is expected to be known to the driver (hence the WARN_ON).
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}
2806 
/* Disable NVE on the FID associated with the VXLAN device's VNI, if
 * any, and release the FID references.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	/* Drop both the reference we just took during lookup and the reference
	 * the VXLAN device took.
	 */
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);
}
2825 
2826 static void
2827 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2828 				      enum mlxsw_sp_l3proto *proto,
2829 				      union mlxsw_sp_l3addr *addr)
2830 {
2831 	if (vxlan_addr->sa.sa_family == AF_INET) {
2832 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2833 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2834 	} else {
2835 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2836 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2837 	}
2838 }
2839 
2840 static void
2841 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2842 				      const union mlxsw_sp_l3addr *addr,
2843 				      union vxlan_addr *vxlan_addr)
2844 {
2845 	switch (proto) {
2846 	case MLXSW_SP_L3_PROTO_IPV4:
2847 		vxlan_addr->sa.sa_family = AF_INET;
2848 		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2849 		break;
2850 	case MLXSW_SP_L3_PROTO_IPV6:
2851 		vxlan_addr->sa.sa_family = AF_INET6;
2852 		vxlan_addr->sin6.sin6_addr = addr->addr6;
2853 		break;
2854 	}
2855 }
2856 
/* Notify the bridge about a learned/aged VXLAN FDB entry so that
 * software state tracks the hardware.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
					      const char *mac,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	enum switchdev_notifier_type type;

	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
	info.remote_port = vxlan->cfg.dst_port;
	info.remote_vni = vni;
	info.remote_ifindex = 0;
	ether_addr_copy(info.eth_addr, mac);
	info.vni = vni;
	info.offloaded = adding;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}
2878 
2879 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2880 					    const char *mac,
2881 					    enum mlxsw_sp_l3proto proto,
2882 					    union mlxsw_sp_l3addr *addr,
2883 					    __be32 vni,
2884 					    bool adding)
2885 {
2886 	if (netif_is_vxlan(dev))
2887 		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2888 						  adding);
2889 }
2890 
2891 static void
2892 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2893 			    const char *mac, u16 vid,
2894 			    struct net_device *dev, bool offloaded)
2895 {
2896 	struct switchdev_notifier_fdb_info info = {};
2897 
2898 	info.addr = mac;
2899 	info.vid = vid;
2900 	info.offloaded = offloaded;
2901 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2902 }
2903 
/* Process one learned/aged-out unicast MAC record from an SFN (switch
 * FDB notification) register payload: refresh/remove the entry in the
 * SFD table and notify the bridge.
 *
 * Records that cannot be matched to a tracked {Port, VID} are removed
 * from hardware without notifying the bridge - see the backward
 * "just_remove" goto, which clears 'adding' and suppresses the
 * notification before jumping to the common FDB operation.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 to the bridge layer. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2967 
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the record identifies
 * the entry by LAG ID rather than by local port.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;	/* Stays 0 on the just_remove path */
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0, while the device is
	 * still programmed with the real VLAN of the {Port, VID}.
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Remove unprocessable entries from the device without notifying
	 * the bridge.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3030 
/* Resolve the NVE device, VNI and bridge VLAN for a tunnel FDB record.
 *
 * Returns 0 on success. -EINVAL means the record should not be processed
 * (NVE device gone or not running, learning disabled, no offloaded
 * bridge). Note that *nve_dev may already be set when an error is
 * returned; callers must only rely on the outputs on success.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Do not learn when learning is disabled on the bridge port. */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	/* Likewise when learning is disabled on the VxLAN device itself. */
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
3081 
3082 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
3083 						      char *sfn_pl,
3084 						      int rec_index,
3085 						      bool adding)
3086 {
3087 	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
3088 	enum switchdev_notifier_type type;
3089 	struct net_device *nve_dev;
3090 	union mlxsw_sp_l3addr addr;
3091 	struct mlxsw_sp_fid *fid;
3092 	char mac[ETH_ALEN];
3093 	u16 fid_index, vid;
3094 	__be32 vni;
3095 	u32 uip;
3096 	int err;
3097 
3098 	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
3099 				       &uip, &sfn_proto);
3100 
3101 	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
3102 	if (!fid)
3103 		goto err_fid_lookup;
3104 
3105 	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
3106 					      (enum mlxsw_sp_l3proto) sfn_proto,
3107 					      &addr);
3108 	if (err)
3109 		goto err_ip_resolve;
3110 
3111 	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
3112 							  &nve_dev, &vid, &vni);
3113 	if (err)
3114 		goto err_fdb_process;
3115 
3116 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
3117 					     (enum mlxsw_sp_l3proto) sfn_proto,
3118 					     &addr, adding, true);
3119 	if (err)
3120 		goto err_fdb_op;
3121 
3122 	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
3123 					(enum mlxsw_sp_l3proto) sfn_proto,
3124 					&addr, vni, adding);
3125 
3126 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
3127 			SWITCHDEV_FDB_DEL_TO_BRIDGE;
3128 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
3129 
3130 	mlxsw_sp_fid_put(fid);
3131 
3132 	return;
3133 
3134 err_fdb_op:
3135 err_fdb_process:
3136 err_ip_resolve:
3137 	mlxsw_sp_fid_put(fid);
3138 err_fid_lookup:
3139 	/* Remove an FDB entry in case we cannot process it. Otherwise the
3140 	 * device will keep sending the same notification over and over again.
3141 	 */
3142 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
3143 				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
3144 				       false, true);
3145 }
3146 
3147 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
3148 					    char *sfn_pl, int rec_index)
3149 {
3150 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
3151 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
3152 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
3153 						rec_index, true);
3154 		break;
3155 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
3156 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
3157 						rec_index, false);
3158 		break;
3159 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
3160 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
3161 						    rec_index, true);
3162 		break;
3163 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
3164 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
3165 						    rec_index, false);
3166 		break;
3167 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
3168 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
3169 							  rec_index, true);
3170 		break;
3171 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
3172 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
3173 							  rec_index, false);
3174 		break;
3175 	}
3176 }
3177 
3178 #define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
3179 
/* Delayed work that polls the device for FDB notifications (SFN register)
 * and processes the returned records. It keeps rescheduling itself for as
 * long as at least one bridge is offloaded.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	bool reschedule = false;
	char *sfn_pl;
	int queries;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	/* No offloaded bridges - nothing to learn and no need to reschedule. */
	if (list_empty(&bridge->bridges_list))
		goto out;
	reschedule = true;
	/* Bound the work done in one session; the whole loop runs under
	 * RTNL.
	 */
	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
	while (queries > 0) {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			goto out;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
		/* A partially filled response means the device has drained
		 * its pending notifications.
		 */
		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
			goto out;
		queries--;
	}

out:
	rtnl_unlock();
	kfree(sfn_pl);
	if (!reschedule)
		return;
	/* '!queries' signals the session budget was exhausted with records
	 * still pending - NOTE(review): presumably this makes the next poll
	 * fire sooner; confirm against mlxsw_sp_fdb_notify_work_schedule().
	 */
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
3225 
/* Deferred-work context for one switchdev FDB event. */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	/* Snapshot of the event-specific notifier info, so it remains
	 * valid after the atomic notifier call returns.
	 */
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	/* Target device; a reference is held until the work completes. */
	struct net_device *dev;
	unsigned long event;
};
3235 
/* Reflect a bridge FDB event on a VxLAN port into the device and emit the
 * matching offload notifications toward both the VxLAN and bridge drivers.
 * The entry is only handled if the VxLAN driver knows the MAC as a unicast
 * remote (vxlan_fdb_find_uc()).
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Best-effort removal; the error is deliberately ignored. */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		break;
	}
}
3284 
/* Handle a bridge FDB event whose target device is an NVE (VxLAN) device
 * enslaved to an offloaded bridge.
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only offload entries the user explicitly added, and never local
	 * (host) entries.
	 */
	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    (!switchdev_work->fdb_info.added_by_user ||
	     switchdev_work->fdb_info.is_local))
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	/* Only FIDs that carry a VNI are relevant for NVE processing. */
	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}
3335 
/* Process-context handler for deferred bridge FDB events; see
 * mlxsw_sp_switchdev_event(). Programs the device under RTNL.
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only offload user-added, non-local entries. */
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Best-effort removal; the error is ignored. */
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	/* Release the resources taken by the notifier: the deep-copied MAC
	 * and the device reference.
	 */
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
3387 
/* Offload an FDB entry added in the VxLAN driver's own FDB. An all-zeros
 * MAC denotes the default destination ("flood") entry.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* All-zeros MAC: program the flood destination instead of a
	 * unicast entry.
	 */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info, NULL);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info, NULL);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
3459 
3460 static void
3461 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
3462 				 struct mlxsw_sp_switchdev_event_work *
3463 				 switchdev_work)
3464 {
3465 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3466 	struct mlxsw_sp_bridge_device *bridge_device;
3467 	struct net_device *dev = switchdev_work->dev;
3468 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3469 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
3470 	enum mlxsw_sp_l3proto proto;
3471 	union mlxsw_sp_l3addr addr;
3472 	struct mlxsw_sp_fid *fid;
3473 	u16 vid;
3474 
3475 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3476 
3477 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3478 	if (!bridge_device)
3479 		return;
3480 
3481 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3482 	if (!fid)
3483 		return;
3484 
3485 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3486 					      &proto, &addr);
3487 
3488 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3489 		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3490 		mlxsw_sp_fid_put(fid);
3491 		return;
3492 	}
3493 
3494 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3495 				       mlxsw_sp_fid_index(fid), proto, &addr,
3496 				       false, false);
3497 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3498 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3499 				    vxlan_fdb_info->eth_addr, vid, dev, false);
3500 
3501 	mlxsw_sp_fid_put(fid);
3502 }
3503 
/* Process-context handler for deferred VxLAN FDB events; see
 * mlxsw_sp_switchdev_event(). Runs under RTNL.
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	/* Events are only relevant while the VxLAN device is running and
	 * enslaved to an offloaded bridge.
	 */
	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	/* Release the work item and the device reference taken by the
	 * notifier.
	 */
	kfree(switchdev_work);
	dev_put(dev);
}
3539 
/* Validate that a VxLAN FDB entry can be offloaded by the device and
 * snapshot it into the work item. Entries with a non-default remote port
 * or VNI, a local interface, a multicast MAC or a multicast destination IP
 * are rejected with -EOPNOTSUPP (reported via extack).
 */
static int
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
				      switchdev_work,
				      struct switchdev_notifier_info *info)
{
	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct vxlan_config *cfg = &vxlan->cfg;
	struct netlink_ext_ack *extack;

	extack = switchdev_notifier_info_to_extack(info);
	vxlan_fdb_info = container_of(info,
				      struct switchdev_notifier_vxlan_fdb_info,
				      info);

	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_vni != cfg->vni ||
	    vxlan_fdb_info->vni != cfg->vni) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
		return -EOPNOTSUPP;
	}
	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
		return -EOPNOTSUPP;
	}

	/* Copy by value so the info outlives the atomic notifier call. */
	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;

	return 0;
}
3581 
/* Called under rcu_read_lock() - atomic context, so the actual processing
 * of FDB events is deferred to a work item.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* Deep-copy the MAC so the deferred work does not reference
		 * the notifier's transient info; freed by the work function.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containing mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3663 
/* Atomic switchdev notifier; FDB events are deferred to process context
 * via mlxsw_core_schedule_work().
 */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3667 
/* Handle the addition of a VLAN on a VxLAN device enslaved to a VLAN-aware
 * bridge. Depending on the {PVID, egress untagged} flags and the current
 * VNI mapping, the VLAN may need to be mapped to or unmapped from the VNI.
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
		return -EINVAL;
	}

	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
						      vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Roll back to the previous VLAN <-> VNI mapping. */
	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
	return err;
}
3753 
3754 static void
3755 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3756 				  struct mlxsw_sp_bridge_device *bridge_device,
3757 				  const struct net_device *vxlan_dev, u16 vid)
3758 {
3759 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3760 	__be32 vni = vxlan->cfg.vni;
3761 	struct mlxsw_sp_fid *fid;
3762 
3763 	if (!netif_running(vxlan_dev))
3764 		return;
3765 
3766 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3767 	if (!fid)
3768 		return;
3769 
3770 	/* A different VLAN than the one mapped to the VNI is deleted */
3771 	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3772 		goto out;
3773 
3774 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3775 
3776 out:
3777 	mlxsw_sp_fid_put(fid);
3778 }
3779 
/* Handle a SWITCHDEV_OBJ_ID_PORT_VLAN addition on a VxLAN device. Returns
 * without marking the event handled when the device is unrelated to us.
 */
static int
mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
				   struct switchdev_notifier_port_obj_info *
				   port_obj_info)
{
	struct switchdev_obj_port_vlan *vlan =
		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct netlink_ext_ack *extack;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
	br_dev = netdev_master_upper_dev_get(vxlan_dev);
	if (!br_dev)
		return 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	/* From this point on the event is ours to handle. */
	port_obj_info->handled = true;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	/* VLAN objects only matter for VLAN-aware bridges. */
	if (!bridge_device->vlan_enabled)
		return 0;

	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
						 vxlan_dev, vlan->vid,
						 flag_untagged,
						 flag_pvid, extack);
}
3817 
/* Handle a SWITCHDEV_OBJ_ID_PORT_VLAN deletion on a VxLAN device. Returns
 * without marking the event handled when the device is unrelated to us.
 */
static void
mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
				   struct switchdev_notifier_port_obj_info *
				   port_obj_info)
{
	struct switchdev_obj_port_vlan *vlan =
		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	br_dev = netdev_master_upper_dev_get(vxlan_dev);
	if (!br_dev)
		return;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;

	/* From this point on the event is ours to handle. */
	port_obj_info->handled = true;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* VLAN objects only matter for VLAN-aware bridges. */
	if (!bridge_device->vlan_enabled)
		return;

	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
					  vlan->vid);
}
3849 
3850 static int
3851 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3852 					struct switchdev_notifier_port_obj_info *
3853 					port_obj_info)
3854 {
3855 	int err = 0;
3856 
3857 	switch (port_obj_info->obj->id) {
3858 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3859 		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3860 							 port_obj_info);
3861 		break;
3862 	default:
3863 		break;
3864 	}
3865 
3866 	return err;
3867 }
3868 
3869 static void
3870 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3871 					struct switchdev_notifier_port_obj_info *
3872 					port_obj_info)
3873 {
3874 	switch (port_obj_info->obj->id) {
3875 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3876 		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3877 		break;
3878 	default:
3879 		break;
3880 	}
3881 }
3882 
/* Blocking switchdev notifier: port object add/del and attribute set.
 * VxLAN devices are not uppers of our ports, so they are dispatched
 * explicitly instead of through the switchdev_handle_* helpers.
 */
static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err = 0;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		if (netif_is_vxlan(dev))
			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
		else
			err = switchdev_handle_port_obj_add(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		if (netif_is_vxlan(dev))
			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
		else
			err = switchdev_handle_port_obj_del(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}
3915 
/* Blocking (process-context) switchdev notifier. */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
3919 
/* Return the cached STP state of a bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
3925 
/* Set the default ageing time, register both switchdev notifiers and
 * prepare the FDB polling work. The work is only initialized here; it is
 * scheduled elsewhere (see mlxsw_sp_fdb_notify_work_schedule()).
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	struct notifier_block *nb;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	nb = &mlxsw_sp_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
		goto err_register_switchdev_blocking_notifier;
	}

	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	return 0;

err_register_switchdev_blocking_notifier:
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	return err;
}
3959 
3960 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3961 {
3962 	struct notifier_block *nb;
3963 
3964 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3965 
3966 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3967 	unregister_switchdev_blocking_notifier(nb);
3968 
3969 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3970 }
3971 
/* Spectrum-1: select the ASIC-specific 802.1ad bridge ops. */
static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
}
3976 
/* Switchdev ops for Spectrum-1 ASICs. */
const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
	.init	= mlxsw_sp1_switchdev_init,
};
3980 
/* Spectrum-2 and later: select the ASIC-specific 802.1ad bridge ops. */
static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
}
3985 
/* Switchdev ops for Spectrum-2 and later ASICs. */
const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
	.init	= mlxsw_sp2_switchdev_init,
};
3989 
3990 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3991 {
3992 	struct mlxsw_sp_bridge *bridge;
3993 
3994 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3995 	if (!bridge)
3996 		return -ENOMEM;
3997 	mlxsw_sp->bridge = bridge;
3998 	bridge->mlxsw_sp = mlxsw_sp;
3999 
4000 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
4001 
4002 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
4003 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
4004 
4005 	mlxsw_sp->switchdev_ops->init(mlxsw_sp);
4006 
4007 	return mlxsw_sp_fdb_init(mlxsw_sp);
4008 }
4009 
/* Tear down switchdev support. All offloaded bridges are expected to be
 * gone by this point.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
4016 
4017