// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <linux/xarray.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

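/* A CRIF ("candidate RIF") keeps book on a netdevice that a router
 * interface either is, or could later be, configured on top of: the hash
 * table below keys CRIFs by netdevice, each CRIF tracks the next hops
 * that resolve through its device, and crif->rif points at the RIF
 * itself once one exists.
 */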
struct mlxsw_sp_crif_key {
	struct net_device *dev;
};

struct mlxsw_sp_crif {
	struct mlxsw_sp_crif_key key;
	struct rhash_head ht_node;
	bool can_destroy;
	struct list_head nexthop_list;
	struct mlxsw_sp_rif *rif;
};

static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_crif, key),
	.key_len = sizeof_field(struct mlxsw_sp_crif, key),
	.head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
};

struct mlxsw_sp_rif {
	struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
	netdevice_tracker dev_tracker;
	struct list_head neigh_list;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u8 mac_profile_id;
	u8 rif_entries;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	if (!rif->crif)
		return NULL;
	return rif->crif->key.dev;
}

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
	bool double_entry;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Spectrum-1. */
	u16 ul_rif_id;	/* Spectrum-2+. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 const struct mlxsw_sp_rif_params *params,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_rif_mac_profile {
	unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

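/* Bind or unbind a counter on a RIF. The RITR register is queried first
 * so that only the counter fields of the otherwise unchanged interface
 * record are rewritten.
 */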
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

struct mlxsw_sp_rif_counter_set_basic {
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};

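/* Fetch a RIF counter set and clear it in hardware: the RICNT query uses
 * the CLEAR opcode, so each call returns the delta since the previous
 * fetch. Passing a NULL @set discards the read values.
 */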
static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (!set)
		return 0;

#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME)				\
		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))

	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);

#undef MLXSW_SP_RIF_COUNTER_EXTRACT

	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

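/* Allocate a counter from the RIF sub-pool, zero it and bind it to the
 * RIF in the given direction. If a valid counter is already bound in
 * that direction, the call is a no-op and returns 0.
 */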
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

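/* Unbind the counter from the RIF and return it to the counter pool.
 * A no-op if no valid counter is bound in the given direction.
 */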
void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

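/* One bit per possible prefix length. IPv6 prefixes run from /0 to /128,
 * so 16 * BITS_PER_BYTE + 1 = 129 bits, which also covers IPv4.
 */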
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	dscp_t dscp;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	refcount_t ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (refcount_read(&lpm_tree->ref_count) == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

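/* Program the tree layout via RALST. The longest used prefix length
 * becomes the root bin, and each remaining used length hangs off the
 * next longer one as its left child, forming a chain from the most
 * specific prefix down to the least specific one.
 */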
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	refcount_set(&lpm_tree->ref_count, 1);
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

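/* Find an existing tree that matches the protocol and prefix usage and
 * take a reference on it; create a fresh tree if none matches.
 */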
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (refcount_read(&lpm_tree->ref_count) &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	refcount_inc(&lpm_tree->ref_count);
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (!refcount_dec_and_test(&lpm_tree->ref_count))
		return;
	mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0, which is the default. */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

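/* Claim a free virtual router for the given kernel table ID. The IPv4
 * and IPv6 unicast FIBs and multicast tables are created up front and
 * only assigned to the VR once all of them exist, because assigning any
 * of them already marks the VR as used (see mlxsw_sp_vr_is_used()).
 */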
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

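/* Rebind every virtual router that uses the current default tree of the
 * given protocol to @new_tree. On failure, the routers that were already
 * rebound are moved back to the old tree.
 */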
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static void
mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
{
	crif->key.dev = dev;
	INIT_LIST_HEAD(&crif->nexthop_list);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_alloc(struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	crif = kzalloc(sizeof(*crif), GFP_KERNEL);
	if (!crif)
		return NULL;

	mlxsw_sp_crif_init(crif, dev);
	return crif;
}

static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
{
	if (WARN_ON(crif->rif))
		return;

	WARN_ON(!list_empty(&crif->nexthop_list));
	kfree(crif);
}

static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
				struct mlxsw_sp_crif *crif)
{
	return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
				      mlxsw_sp_crif_ht_params);
}

static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
				 struct mlxsw_sp_crif *crif)
{
	rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
			       mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
		     const struct net_device *dev)
{
	struct mlxsw_sp_crif_key key = {
		.dev = (struct net_device *)dev,
	};

	return rhashtable_lookup_fast(&router->crif_ht, &key,
				      mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.common.double_entry = ipip_ops->double_rif_entry,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnels require increasing the default parsing depth
	 * (96 bytes).
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}

static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}

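/* Set up a FIB entry as the decap entry of an IPIP tunnel: reserve an
 * adjacency (KVDL) entry for the tunnel, bump the parsing depth if the
 * tunnel type needs it, and cross-link the FIB entry with the IPIP
 * entry.
 */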
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   tunnel_index);
	return err;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

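/* Turn @decap_fib_entry into the decap entry of @ipip_entry. If the
 * hardware update fails, the entry is demoted back to a trap entry.
 */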
static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

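/* Look up the host route (/32 or /128) for @addr in the given table and
 * return its FIB entry only if the entry is of the expected @type.
 */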
static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addrp = &addr->addr6;
		addr_len = 16;
		addr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
1392 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1393 	unsigned char saddr_prefix_len;
1394 	union mlxsw_sp_l3addr saddr;
1395 	struct mlxsw_sp_fib *ul_fib;
1396 	struct mlxsw_sp_vr *ul_vr;
1397 	const void *saddrp;
1398 	size_t saddr_len;
1399 	u32 ul_tb_id;
1400 	u32 saddr4;
1401 
1402 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1403 
1404 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1405 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1406 	if (!ul_vr)
1407 		return NULL;
1408 
1409 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1410 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1411 					   ipip_entry->ol_dev);
1412 
1413 	switch (ipip_ops->ul_proto) {
1414 	case MLXSW_SP_L3_PROTO_IPV4:
1415 		saddr4 = be32_to_cpu(saddr.addr4);
1416 		saddrp = &saddr4;
1417 		saddr_len = 4;
1418 		saddr_prefix_len = 32;
1419 		break;
1420 	case MLXSW_SP_L3_PROTO_IPV6:
1421 		saddrp = &saddr.addr6;
1422 		saddr_len = 16;
1423 		saddr_prefix_len = 128;
1424 		break;
1425 	default:
1426 		WARN_ON(1);
1427 		return NULL;
1428 	}
1429 
1430 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1431 					    saddr_prefix_len);
1432 	if (!fib_node ||
1433 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1434 		return NULL;
1435 
1436 	return fib_node->fib_entry;
1437 }
1438 
1439 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp * mlxsw_sp,enum mlxsw_sp_ipip_type ipipt,struct net_device * ol_dev)1440 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1441 			   enum mlxsw_sp_ipip_type ipipt,
1442 			   struct net_device *ol_dev)
1443 {
1444 	struct mlxsw_sp_ipip_entry *ipip_entry;
1445 
1446 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1447 	if (IS_ERR(ipip_entry))
1448 		return ipip_entry;
1449 
1450 	list_add_tail(&ipip_entry->ipip_list_node,
1451 		      &mlxsw_sp->router->ipip_list);
1452 
1453 	return ipip_entry;
1454 }
1455 
1456 static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry)1457 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1458 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1459 {
1460 	list_del(&ipip_entry->ipip_list_node);
1461 	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
1462 }
1463 
1464 static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp * mlxsw_sp,const struct net_device * ul_dev,enum mlxsw_sp_l3proto ul_proto,union mlxsw_sp_l3addr ul_dip,struct mlxsw_sp_ipip_entry * ipip_entry)1465 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1466 				  const struct net_device *ul_dev,
1467 				  enum mlxsw_sp_l3proto ul_proto,
1468 				  union mlxsw_sp_l3addr ul_dip,
1469 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1470 {
1471 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1472 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1473 
1474 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1475 		return false;
1476 
1477 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1478 						 ul_tb_id, ipip_entry);
1479 }
1480 
1481 /* Given decap parameters, find the corresponding IPIP entry. */
1482 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp * mlxsw_sp,int ul_dev_ifindex,enum mlxsw_sp_l3proto ul_proto,union mlxsw_sp_l3addr ul_dip)1483 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1484 				  enum mlxsw_sp_l3proto ul_proto,
1485 				  union mlxsw_sp_l3addr ul_dip)
1486 {
1487 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1488 	struct net_device *ul_dev;
1489 
1490 	rcu_read_lock();
1491 
1492 	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1493 	if (!ul_dev)
1494 		goto out_unlock;
1495 
1496 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1497 			    ipip_list_node)
1498 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1499 						      ul_proto, ul_dip,
1500 						      ipip_entry))
1501 			goto out_unlock;
1502 
1503 	rcu_read_unlock();
1504 
1505 	return NULL;
1506 
1507 out_unlock:
1508 	rcu_read_unlock();
1509 	return ipip_entry;
1510 }
1511 
mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp * mlxsw_sp,const struct net_device * dev,enum mlxsw_sp_ipip_type * p_type)1512 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1513 				      const struct net_device *dev,
1514 				      enum mlxsw_sp_ipip_type *p_type)
1515 {
1516 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1517 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1518 	enum mlxsw_sp_ipip_type ipipt;
1519 
1520 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1521 		ipip_ops = router->ipip_ops_arr[ipipt];
1522 		if (dev->type == ipip_ops->dev_type) {
1523 			if (p_type)
1524 				*p_type = ipipt;
1525 			return true;
1526 		}
1527 	}
1528 	return false;
1529 }
1530 
mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)1531 static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1532 				       const struct net_device *dev)
1533 {
1534 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1535 }
1536 
1537 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp * mlxsw_sp,const struct net_device * ol_dev)1538 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1539 				   const struct net_device *ol_dev)
1540 {
1541 	struct mlxsw_sp_ipip_entry *ipip_entry;
1542 
1543 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1544 			    ipip_list_node)
1545 		if (ipip_entry->ol_dev == ol_dev)
1546 			return ipip_entry;
1547 
1548 	return NULL;
1549 }
1550 
1551 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp * mlxsw_sp,const struct net_device * ul_dev,struct mlxsw_sp_ipip_entry * start)1552 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1553 				   const struct net_device *ul_dev,
1554 				   struct mlxsw_sp_ipip_entry *start)
1555 {
1556 	struct mlxsw_sp_ipip_entry *ipip_entry;
1557 
1558 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1559 					ipip_list_node);
1560 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1561 				     ipip_list_node) {
1562 		struct net_device *ol_dev = ipip_entry->ol_dev;
1563 		struct net_device *ipip_ul_dev;
1564 
1565 		rcu_read_lock();
1566 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1567 		rcu_read_unlock();
1568 
1569 		if (ipip_ul_dev == ul_dev)
1570 			return ipip_entry;
1571 	}
1572 
1573 	return NULL;
1574 }
1575 
mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)1576 static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1577 				       const struct net_device *dev)
1578 {
1579 	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1580 }
1581 
mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp * mlxsw_sp,const struct net_device * ol_dev,enum mlxsw_sp_ipip_type ipipt)1582 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1583 						const struct net_device *ol_dev,
1584 						enum mlxsw_sp_ipip_type ipipt)
1585 {
1586 	const struct mlxsw_sp_ipip_ops *ops
1587 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1588 
1589 	return ops->can_offload(mlxsw_sp, ol_dev);
1590 }
1591 
mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp * mlxsw_sp,struct net_device * ol_dev)1592 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1593 						struct net_device *ol_dev)
1594 {
1595 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1596 	struct mlxsw_sp_ipip_entry *ipip_entry;
1597 	enum mlxsw_sp_l3proto ul_proto;
1598 	union mlxsw_sp_l3addr saddr;
1599 	u32 ul_tb_id;
1600 
1601 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1602 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1603 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1604 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1605 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1606 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1607 							  saddr, ul_tb_id,
1608 							  NULL)) {
1609 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1610 								ol_dev);
1611 			if (IS_ERR(ipip_entry))
1612 				return PTR_ERR(ipip_entry);
1613 		}
1614 	}
1615 
1616 	return 0;
1617 }
1618 
1619 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1620 						   struct net_device *ol_dev)
1621 {
1622 	struct mlxsw_sp_ipip_entry *ipip_entry;
1623 
1624 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1625 	if (ipip_entry)
1626 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1627 }
1628 
1629 static void
1630 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1631 				struct mlxsw_sp_ipip_entry *ipip_entry)
1632 {
1633 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1634 
1635 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1636 	if (decap_fib_entry)
1637 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1638 						  decap_fib_entry);
1639 }
1640 
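
/* Write the RITR register for an IPIP loopback RIF: the common loopback
 * fields plus the protocol-specific underlay configuration (underlay VR/RIF,
 * local address and GRE key), selected by the tunnel's underlay protocol.
 */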
1641 static int
1642 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1643 			u16 ul_rif_id, bool enable)
1644 {
1645 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1646 	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
1647 	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1648 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1649 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1650 	char ritr_pl[MLXSW_REG_RITR_LEN];
1651 	struct in6_addr *saddr6;
1652 	u32 saddr4;
1653 
1654 	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1655 	switch (lb_cf.ul_protocol) {
1656 	case MLXSW_SP_L3_PROTO_IPV4:
1657 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1658 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1659 				    rif->rif_index, rif->vr_id, dev->mtu);
1660 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1661 						   ipip_options, ul_vr_id,
1662 						   ul_rif_id, saddr4,
1663 						   lb_cf.okey);
1664 		break;
1665 
1666 	case MLXSW_SP_L3_PROTO_IPV6:
1667 		saddr6 = &lb_cf.saddr.addr6;
1668 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1669 				    rif->rif_index, rif->vr_id, dev->mtu);
1670 		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1671 						   ipip_options, ul_vr_id,
1672 						   ul_rif_id, saddr6,
1673 						   lb_cf.okey);
1674 		break;
1675 	}
1676 
1677 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1678 }
1679 
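
/* An MTU change on the overlay device only requires re-writing the loopback
 * RIF's RITR register in place and refreshing the cached MTU; the RIF itself
 * does not have to be recreated.
 */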
1680 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1681 						 struct net_device *ol_dev)
1682 {
1683 	struct mlxsw_sp_ipip_entry *ipip_entry;
1684 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1685 	int err = 0;
1686 
1687 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1688 	if (ipip_entry) {
1689 		lb_rif = ipip_entry->ol_lb;
1690 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1691 					      lb_rif->ul_rif_id, true);
1692 		if (err)
1693 			goto out;
1694 		lb_rif->common.mtu = ol_dev->mtu;
1695 	}
1696 
1697 out:
1698 	return err;
1699 }
1700 
1701 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1702 						struct net_device *ol_dev)
1703 {
1704 	struct mlxsw_sp_ipip_entry *ipip_entry;
1705 
1706 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1707 	if (ipip_entry)
1708 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1709 }
1710 
1711 static void
1712 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1713 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1714 {
1715 	if (ipip_entry->decap_fib_entry)
1716 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1717 }
1718 
1719 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1720 						  struct net_device *ol_dev)
1721 {
1722 	struct mlxsw_sp_ipip_entry *ipip_entry;
1723 
1724 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1725 	if (ipip_entry)
1726 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1727 }
1728 
1729 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1730 					struct mlxsw_sp_rif *rif);
1731 
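/* Destroy @old_rif, optionally first migrating the nexthops that used it
 * over to @new_rif. See the mock-CRIF comment below for why the CRIF is
 * swapped out before the destruction.
 */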
1732 static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
1733 					 struct mlxsw_sp_rif *old_rif,
1734 					 struct mlxsw_sp_rif *new_rif,
1735 					 bool migrate_nhs)
1736 {
1737 	struct mlxsw_sp_crif *crif = old_rif->crif;
1738 	struct mlxsw_sp_crif mock_crif = {};
1739 
1740 	if (migrate_nhs)
1741 		mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
1742 
1743 	/* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
1744 	 * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
1745 	 */
1746 	mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
1747 	old_rif->crif = &mock_crif;
1748 	mock_crif.rif = old_rif;
1749 	mlxsw_sp_rif_destroy(old_rif);
1750 }
1751 
1752 static int
1753 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1754 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1755 				 bool keep_encap,
1756 				 struct netlink_ext_ack *extack)
1757 {
1758 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1759 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1760 
1761 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1762 						     ipip_entry->ipipt,
1763 						     ipip_entry->ol_dev,
1764 						     extack);
1765 	if (IS_ERR(new_lb_rif))
1766 		return PTR_ERR(new_lb_rif);
1767 	ipip_entry->ol_lb = new_lb_rif;
1768 
1769 	mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
1770 				     &new_lb_rif->common, keep_encap);
1771 	return 0;
1772 }
1773 
1774 /**
1775  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1776  * @mlxsw_sp: mlxsw_sp.
1777  * @ipip_entry: IPIP entry.
1778  * @recreate_loopback: Recreates the associated loopback RIF.
1779  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1780  *              relevant when recreate_loopback is true.
1781  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1782  *                   is only relevant when recreate_loopback is false.
1783  * @extack: extack.
1784  *
1785  * Return: Non-zero value on failure.
1786  */
1787 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1788 					struct mlxsw_sp_ipip_entry *ipip_entry,
1789 					bool recreate_loopback,
1790 					bool keep_encap,
1791 					bool update_nexthops,
1792 					struct netlink_ext_ack *extack)
1793 {
1794 	int err;
1795 
1796 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1797 	 * recreate it. That creates a window of opportunity where RALUE and
1798 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1799 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1800 	 * of RALUE, demote the decap route back.
1801 	 */
1802 	if (ipip_entry->decap_fib_entry)
1803 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1804 
1805 	if (recreate_loopback) {
1806 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1807 						       keep_encap, extack);
1808 		if (err)
1809 			return err;
1810 	} else if (update_nexthops) {
1811 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1812 					    &ipip_entry->ol_lb->common);
1813 	}
1814 
1815 	if (ipip_entry->ol_dev->flags & IFF_UP)
1816 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1817 
1818 	return 0;
1819 }
1820 
1821 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1822 						struct net_device *ol_dev,
1823 						struct netlink_ext_ack *extack)
1824 {
1825 	struct mlxsw_sp_ipip_entry *ipip_entry =
1826 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1827 
1828 	if (!ipip_entry)
1829 		return 0;
1830 
1831 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1832 						   true, false, false, extack);
1833 }
1834 
1835 static int
1836 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1837 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1838 				     struct net_device *ul_dev,
1839 				     bool *demote_this,
1840 				     struct netlink_ext_ack *extack)
1841 {
1842 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1843 	enum mlxsw_sp_l3proto ul_proto;
1844 	union mlxsw_sp_l3addr saddr;
1845 
1846 	/* Moving the underlay to a different VRF might cause a local address
1847 	 * conflict, and the conflicting tunnels need to be demoted.
1848 	 */
1849 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1850 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1851 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1852 						 saddr, ul_tb_id,
1853 						 ipip_entry)) {
1854 		*demote_this = true;
1855 		return 0;
1856 	}
1857 
1858 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1859 						   true, true, false, extack);
1860 }
1861 
1862 static int
1863 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1864 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1865 				    struct net_device *ul_dev)
1866 {
1867 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1868 						   false, false, true, NULL);
1869 }
1870 
1871 static int
1872 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1873 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1874 				      struct net_device *ul_dev)
1875 {
1876 	/* A down underlay device causes encapsulated packets to not be
1877 	 * forwarded, but decap still works. So refresh next hops without
1878 	 * touching anything else.
1879 	 */
1880 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1881 						   false, false, true, NULL);
1882 }
1883 
1884 static int
1885 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1886 					struct net_device *ol_dev,
1887 					struct netlink_ext_ack *extack)
1888 {
1889 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1890 	struct mlxsw_sp_ipip_entry *ipip_entry;
1891 	int err;
1892 
1893 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1894 	if (!ipip_entry)
1895 		/* A change might make a tunnel eligible for offloading, but
1896 		 * that is currently not implemented. What falls to the slow
1897 		 * path stays there.
1898 		 */
1899 		return 0;
1900 
1901 	/* A change might make a tunnel not eligible for offloading. */
1902 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1903 						 ipip_entry->ipipt)) {
1904 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1905 		return 0;
1906 	}
1907 
1908 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1909 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1910 	return err;
1911 }
1912 
1913 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1914 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1915 {
1916 	struct net_device *ol_dev = ipip_entry->ol_dev;
1917 
1918 	if (ol_dev->flags & IFF_UP)
1919 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1920 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1921 }
1922 
1923 /* The configuration where several tunnels have the same local address in the
1924  * same underlay table needs special treatment in the HW. That is currently not
1925  * implemented in the driver. This function finds and demotes the first tunnel
1926  * with a given source address, except the one passed in the argument
1927  * `except'.
1928  */
1929 bool
1930 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1931 				     enum mlxsw_sp_l3proto ul_proto,
1932 				     union mlxsw_sp_l3addr saddr,
1933 				     u32 ul_tb_id,
1934 				     const struct mlxsw_sp_ipip_entry *except)
1935 {
1936 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1937 
1938 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1939 				 ipip_list_node) {
1940 		if (ipip_entry != except &&
1941 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1942 						      ul_tb_id, ipip_entry)) {
1943 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1944 			return true;
1945 		}
1946 	}
1947 
1948 	return false;
1949 }
1950 
1951 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1952 						     struct net_device *ul_dev)
1953 {
1954 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1955 
1956 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1957 				 ipip_list_node) {
1958 		struct net_device *ol_dev = ipip_entry->ol_dev;
1959 		struct net_device *ipip_ul_dev;
1960 
1961 		rcu_read_lock();
1962 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1963 		rcu_read_unlock();
1964 		if (ipip_ul_dev == ul_dev)
1965 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1966 	}
1967 }
1968 
1969 static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1970 					    struct net_device *ol_dev,
1971 					    unsigned long event,
1972 					    struct netdev_notifier_info *info)
1973 {
1974 	struct netdev_notifier_changeupper_info *chup;
1975 	struct netlink_ext_ack *extack;
1976 	int err = 0;
1977 
1978 	switch (event) {
1979 	case NETDEV_REGISTER:
1980 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1981 		break;
1982 	case NETDEV_UNREGISTER:
1983 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1984 		break;
1985 	case NETDEV_UP:
1986 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1987 		break;
1988 	case NETDEV_DOWN:
1989 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1990 		break;
1991 	case NETDEV_CHANGEUPPER:
1992 		chup = container_of(info, typeof(*chup), info);
1993 		extack = info->extack;
1994 		if (netif_is_l3_master(chup->upper_dev))
1995 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1996 								   ol_dev,
1997 								   extack);
1998 		break;
1999 	case NETDEV_CHANGE:
2000 		extack = info->extack;
2001 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
2002 							      ol_dev, extack);
2003 		break;
2004 	case NETDEV_CHANGEMTU:
2005 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
2006 		break;
2007 	}
2008 	return err;
2009 }
2010 
2011 static int
2012 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2013 				   struct mlxsw_sp_ipip_entry *ipip_entry,
2014 				   struct net_device *ul_dev,
2015 				   bool *demote_this,
2016 				   unsigned long event,
2017 				   struct netdev_notifier_info *info)
2018 {
2019 	struct netdev_notifier_changeupper_info *chup;
2020 	struct netlink_ext_ack *extack;
2021 
2022 	switch (event) {
2023 	case NETDEV_CHANGEUPPER:
2024 		chup = container_of(info, typeof(*chup), info);
2025 		extack = info->extack;
2026 		if (netif_is_l3_master(chup->upper_dev))
2027 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
2028 								    ipip_entry,
2029 								    ul_dev,
2030 								    demote_this,
2031 								    extack);
2032 		break;
2033 
2034 	case NETDEV_UP:
2035 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
2036 							   ul_dev);
2037 	case NETDEV_DOWN:
2038 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
2039 							     ipip_entry,
2040 							     ul_dev);
2041 	}
2042 	return 0;
2043 }
2044 
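
/* Replay a ul_dev event on every IPIP entry tunnelling over @ul_dev. If
 * handling fails, all tunnels over this underlay device are demoted. If
 * handling requests demotion of the current entry, the walk steps back to
 * the previous entry (re-read, as noted below) and resumes from there.
 */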
2045 static int
2046 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2047 				 struct net_device *ul_dev,
2048 				 unsigned long event,
2049 				 struct netdev_notifier_info *info)
2050 {
2051 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
2052 	int err;
2053 
2054 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
2055 								ul_dev,
2056 								ipip_entry))) {
2057 		struct mlxsw_sp_ipip_entry *prev;
2058 		bool demote_this = false;
2059 
2060 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
2061 							 ul_dev, &demote_this,
2062 							 event, info);
2063 		if (err) {
2064 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
2065 								 ul_dev);
2066 			return err;
2067 		}
2068 
2069 		if (demote_this) {
2070 			if (list_is_first(&ipip_entry->ipip_list_node,
2071 					  &mlxsw_sp->router->ipip_list))
2072 				prev = NULL;
2073 			else
2074 				/* This can't be cached from previous iteration,
2075 				 * because that entry could be gone now.
2076 				 */
2077 				prev = list_prev_entry(ipip_entry,
2078 						       ipip_list_node);
2079 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2080 			ipip_entry = prev;
2081 		}
2082 	}
2083 
2084 	return 0;
2085 }
2086 
2087 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2088 				      enum mlxsw_sp_l3proto ul_proto,
2089 				      const union mlxsw_sp_l3addr *ul_sip,
2090 				      u32 tunnel_index)
2091 {
2092 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2093 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2094 	struct mlxsw_sp_fib_entry *fib_entry;
2095 	int err = 0;
2096 
2097 	mutex_lock(&mlxsw_sp->router->lock);
2098 
2099 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2100 		err = -EINVAL;
2101 		goto out;
2102 	}
2103 
2104 	router->nve_decap_config.ul_tb_id = ul_tb_id;
2105 	router->nve_decap_config.tunnel_index = tunnel_index;
2106 	router->nve_decap_config.ul_proto = ul_proto;
2107 	router->nve_decap_config.ul_sip = *ul_sip;
2108 	router->nve_decap_config.valid = true;
2109 
2110 	/* It is valid to create a tunnel with a local IP and only later
2111 	 * assign this IP address to a local interface.
2112 	 */
2113 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2114 							 ul_proto, ul_sip,
2115 							 type);
2116 	if (!fib_entry)
2117 		goto out;
2118 
2119 	fib_entry->decap.tunnel_index = tunnel_index;
2120 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2121 
2122 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2123 	if (err)
2124 		goto err_fib_entry_update;
2125 
2126 	goto out;
2127 
2128 err_fib_entry_update:
2129 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2130 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2131 out:
2132 	mutex_unlock(&mlxsw_sp->router->lock);
2133 	return err;
2134 }
2135 
2136 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2137 				      enum mlxsw_sp_l3proto ul_proto,
2138 				      const union mlxsw_sp_l3addr *ul_sip)
2139 {
2140 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2141 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2142 	struct mlxsw_sp_fib_entry *fib_entry;
2143 
2144 	mutex_lock(&mlxsw_sp->router->lock);
2145 
2146 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2147 		goto out;
2148 
2149 	router->nve_decap_config.valid = false;
2150 
2151 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2152 							 ul_proto, ul_sip,
2153 							 type);
2154 	if (!fib_entry)
2155 		goto out;
2156 
2157 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2158 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2159 out:
2160 	mutex_unlock(&mlxsw_sp->router->lock);
2161 }
2162 
2163 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2164 					 u32 ul_tb_id,
2165 					 enum mlxsw_sp_l3proto ul_proto,
2166 					 const union mlxsw_sp_l3addr *ul_sip)
2167 {
2168 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2169 
2170 	return router->nve_decap_config.valid &&
2171 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2172 	       router->nve_decap_config.ul_proto == ul_proto &&
2173 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2174 		       sizeof(*ul_sip));
2175 }
2176 
2177 struct mlxsw_sp_neigh_key {
2178 	struct neighbour *n;
2179 };
2180 
2181 struct mlxsw_sp_neigh_entry {
2182 	struct list_head rif_list_node;
2183 	struct rhash_head ht_node;
2184 	struct mlxsw_sp_neigh_key key;
2185 	u16 rif;
2186 	bool connected;
2187 	unsigned char ha[ETH_ALEN];
2188 	struct list_head nexthop_list; /* list of nexthops using
2189 					* this neigh entry
2190 					*/
2191 	struct list_head nexthop_neighs_list_node;
2192 	unsigned int counter_index;
2193 	bool counter_valid;
2194 };
2195 
2196 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2197 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2198 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2199 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2200 };
2201 
2202 struct mlxsw_sp_neigh_entry *
2203 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2204 			struct mlxsw_sp_neigh_entry *neigh_entry)
2205 {
2206 	if (!neigh_entry) {
2207 		if (list_empty(&rif->neigh_list))
2208 			return NULL;
2209 		else
2210 			return list_first_entry(&rif->neigh_list,
2211 						typeof(*neigh_entry),
2212 						rif_list_node);
2213 	}
2214 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2215 		return NULL;
2216 	return list_next_entry(neigh_entry, rif_list_node);
2217 }
2218 
2219 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2220 {
2221 	return neigh_entry->key.n->tbl->family;
2222 }
2223 
2224 unsigned char *
2225 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2226 {
2227 	return neigh_entry->ha;
2228 }
2229 
2230 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2231 {
2232 	struct neighbour *n;
2233 
2234 	n = neigh_entry->key.n;
2235 	return ntohl(*((__be32 *) n->primary_key));
2236 }
2237 
2238 struct in6_addr *
2239 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2240 {
2241 	struct neighbour *n;
2242 
2243 	n = neigh_entry->key.n;
2244 	return (struct in6_addr *) &n->primary_key;
2245 }
2246 
2247 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2248 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2249 			       u64 *p_counter)
2250 {
2251 	if (!neigh_entry->counter_valid)
2252 		return -EINVAL;
2253 
2254 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2255 					 false, p_counter, NULL);
2256 }
2257 
2258 static struct mlxsw_sp_neigh_entry *
2259 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2260 			   u16 rif)
2261 {
2262 	struct mlxsw_sp_neigh_entry *neigh_entry;
2263 
2264 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2265 	if (!neigh_entry)
2266 		return NULL;
2267 
2268 	neigh_entry->key.n = n;
2269 	neigh_entry->rif = rif;
2270 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2271 
2272 	return neigh_entry;
2273 }
2274 
2275 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2276 {
2277 	kfree(neigh_entry);
2278 }
2279 
2280 static int
2281 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2282 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2283 {
2284 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2285 				      &neigh_entry->ht_node,
2286 				      mlxsw_sp_neigh_ht_params);
2287 }
2288 
2289 static void
2290 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2291 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2292 {
2293 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2294 			       &neigh_entry->ht_node,
2295 			       mlxsw_sp_neigh_ht_params);
2296 }
2297 
2298 static bool
2299 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2300 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2301 {
2302 	struct devlink *devlink;
2303 	const char *table_name;
2304 
2305 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2306 	case AF_INET:
2307 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2308 		break;
2309 	case AF_INET6:
2310 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2311 		break;
2312 	default:
2313 		WARN_ON(1);
2314 		return false;
2315 	}
2316 
2317 	devlink = priv_to_devlink(mlxsw_sp->core);
2318 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2319 }
2320 
2321 static void
2322 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2323 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2324 {
2325 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2326 		return;
2327 
2328 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2329 		return;
2330 
2331 	neigh_entry->counter_valid = true;
2332 }
2333 
2334 static void
2335 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2336 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2337 {
2338 	if (!neigh_entry->counter_valid)
2339 		return;
2340 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2341 				   neigh_entry->counter_index);
2342 	neigh_entry->counter_valid = false;
2343 }
2344 
2345 static struct mlxsw_sp_neigh_entry *
2346 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2347 {
2348 	struct mlxsw_sp_neigh_entry *neigh_entry;
2349 	struct mlxsw_sp_rif *rif;
2350 	int err;
2351 
2352 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2353 	if (!rif)
2354 		return ERR_PTR(-EINVAL);
2355 
2356 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2357 	if (!neigh_entry)
2358 		return ERR_PTR(-ENOMEM);
2359 
2360 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2361 	if (err)
2362 		goto err_neigh_entry_insert;
2363 
2364 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2365 	atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2366 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2367 
2368 	return neigh_entry;
2369 
2370 err_neigh_entry_insert:
2371 	mlxsw_sp_neigh_entry_free(neigh_entry);
2372 	return ERR_PTR(err);
2373 }
2374 
2375 static void
2376 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2377 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2378 {
2379 	list_del(&neigh_entry->rif_list_node);
2380 	atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2381 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2382 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2383 	mlxsw_sp_neigh_entry_free(neigh_entry);
2384 }
2385 
2386 static struct mlxsw_sp_neigh_entry *
2387 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2388 {
2389 	struct mlxsw_sp_neigh_key key;
2390 
2391 	key.n = n;
2392 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2393 				      &key, mlxsw_sp_neigh_ht_params);
2394 }
2395 
2396 static void
2397 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2398 {
2399 	unsigned long interval;
2400 
2401 #if IS_ENABLED(CONFIG_IPV6)
2402 	interval = min_t(unsigned long,
2403 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2404 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2405 #else
2406 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2407 #endif
2408 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2409 }
2410 
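
/* Process one IPv4 entry from a RAUHTD activity dump: map the RIF back to
 * its netdevice, look the destination IP up in the ARP table and poke the
 * kernel, so the neighbour is kept alive as long as HW reports activity.
 */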
2411 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2412 						   char *rauhtd_pl,
2413 						   int ent_index)
2414 {
2415 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2416 	struct net_device *dev;
2417 	struct neighbour *n;
2418 	__be32 dipn;
2419 	u32 dip;
2420 	u16 rif;
2421 
2422 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2423 
2424 	if (WARN_ON_ONCE(rif >= max_rifs))
2425 		return;
2426 	if (!mlxsw_sp->router->rifs[rif]) {
2427 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2428 		return;
2429 	}
2430 
2431 	dipn = htonl(dip);
2432 	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2433 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2434 	if (!n)
2435 		return;
2436 
2437 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2438 	neigh_event_send(n, NULL);
2439 	neigh_release(n);
2440 }
2441 
2442 #if IS_ENABLED(CONFIG_IPV6)
2443 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2444 						   char *rauhtd_pl,
2445 						   int rec_index)
2446 {
2447 	struct net_device *dev;
2448 	struct neighbour *n;
2449 	struct in6_addr dip;
2450 	u16 rif;
2451 
2452 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2453 					 (char *) &dip);
2454 
2455 	if (!mlxsw_sp->router->rifs[rif]) {
2456 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2457 		return;
2458 	}
2459 
2460 	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2461 	n = neigh_lookup(&nd_tbl, &dip, dev);
2462 	if (!n)
2463 		return;
2464 
2465 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2466 	neigh_event_send(n, NULL);
2467 	neigh_release(n);
2468 }
2469 #else
2470 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2471 						   char *rauhtd_pl,
2472 						   int rec_index)
2473 {
2474 }
2475 #endif
2476 
2477 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2478 						   char *rauhtd_pl,
2479 						   int rec_index)
2480 {
2481 	u8 num_entries;
2482 	int i;
2483 
2484 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2485 								rec_index);
2486 	/* Hardware starts counting at 0, so add 1. */
2487 	num_entries++;
2488 
2489 	/* Each record consists of several neighbour entries. */
2490 	for (i = 0; i < num_entries; i++) {
2491 		int ent_index;
2492 
2493 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2494 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2495 						       ent_index);
2496 	}
2498 }
2499 
2500 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2501 						   char *rauhtd_pl,
2502 						   int rec_index)
2503 {
2504 	/* One record contains one entry. */
2505 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2506 					       rec_index);
2507 }
2508 
2509 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2510 					      char *rauhtd_pl, int rec_index)
2511 {
2512 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2513 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2514 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2515 						       rec_index);
2516 		break;
2517 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2518 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2519 						       rec_index);
2520 		break;
2521 	}
2522 }
2523 
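
/* Returns true if the RAUHTD response may have been truncated and another
 * query is needed: the buffer holds the maximum number of records and the
 * last record is completely full. An IPv6 record always is; an IPv4 record
 * is full when it carries the per-record maximum of entries.
 */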
2524 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2525 {
2526 	u8 num_rec, last_rec_index, num_entries;
2527 
2528 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2529 	last_rec_index = num_rec - 1;
2530 
2531 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2532 		return false;
2533 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2534 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2535 		return true;
2536 
2537 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2538 								last_rec_index);
2539 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2540 		return true;
2541 	return false;
2542 }
2543 
2544 static int
2545 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2546 				       char *rauhtd_pl,
2547 				       enum mlxsw_reg_rauhtd_type type)
2548 {
2549 	int i, num_rec;
2550 	int err;
2551 
2552 	/* Ensure the RIF we read from the device does not change mid-dump. */
2553 	mutex_lock(&mlxsw_sp->router->lock);
2554 	do {
2555 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2556 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2557 				      rauhtd_pl);
2558 		if (err) {
2559 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2560 			break;
2561 		}
2562 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2563 		for (i = 0; i < num_rec; i++)
2564 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2565 							  i);
2566 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2567 	mutex_unlock(&mlxsw_sp->router->lock);
2568 
2569 	return err;
2570 }
2571 
2572 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2573 {
2574 	enum mlxsw_reg_rauhtd_type type;
2575 	char *rauhtd_pl;
2576 	int err;
2577 
2578 	if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2579 		return 0;
2580 
2581 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2582 	if (!rauhtd_pl)
2583 		return -ENOMEM;
2584 
2585 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2586 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2587 	if (err)
2588 		goto out;
2589 
2590 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2591 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2592 out:
2593 	kfree(rauhtd_pl);
2594 	return err;
2595 }
2596 
2597 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2598 {
2599 	struct mlxsw_sp_neigh_entry *neigh_entry;
2600 
2601 	mutex_lock(&mlxsw_sp->router->lock);
2602 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2603 			    nexthop_neighs_list_node)
2604 		/* If this neigh has nexthops, make the kernel think it is
2605 		 * active, regardless of traffic.
2606 		 */
2607 		neigh_event_send(neigh_entry->key.n, NULL);
2608 	mutex_unlock(&mlxsw_sp->router->lock);
2609 }
2610 
2611 static void
2612 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2613 {
2614 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2615 
2616 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2617 			       msecs_to_jiffies(interval));
2618 }
2619 
2620 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2621 {
2622 	struct mlxsw_sp_router *router;
2623 	int err;
2624 
2625 	router = container_of(work, struct mlxsw_sp_router,
2626 			      neighs_update.dw.work);
2627 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2628 	if (err)
2629 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2630 
2631 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2632 
2633 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2634 }
2635 
2636 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2637 {
2638 	struct mlxsw_sp_neigh_entry *neigh_entry;
2639 	struct mlxsw_sp_router *router;
2640 
2641 	router = container_of(work, struct mlxsw_sp_router,
2642 			      nexthop_probe_dw.work);
2643 	/* Iterate over nexthop neighbours, find those which are unresolved
2644 	 * and send ARP on them. This solves the chicken-and-egg problem where
2645 	 * a nexthop wouldn't get offloaded until its neighbour is resolved,
2646 	 * but the neighbour would never get resolved as long as traffic flows
2647 	 * in HW via a different nexthop.
2648 	 */
2649 	mutex_lock(&router->lock);
2650 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2651 			    nexthop_neighs_list_node)
2652 		if (!neigh_entry->connected)
2653 			neigh_event_send(neigh_entry->key.n, NULL);
2654 	mutex_unlock(&router->lock);
2655 
2656 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2657 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2658 }
2659 
2660 static void
2661 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2662 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2663 			      bool removing, bool dead);
2664 
2665 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2666 {
2667 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2668 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2669 }
2670 
2671 static int
2672 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2673 				struct mlxsw_sp_neigh_entry *neigh_entry,
2674 				enum mlxsw_reg_rauht_op op)
2675 {
2676 	struct neighbour *n = neigh_entry->key.n;
2677 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2678 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2679 
2680 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2681 			      dip);
2682 	if (neigh_entry->counter_valid)
2683 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2684 					     neigh_entry->counter_index);
2685 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2686 }
2687 
2688 static int
2689 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2690 				struct mlxsw_sp_neigh_entry *neigh_entry,
2691 				enum mlxsw_reg_rauht_op op)
2692 {
2693 	struct neighbour *n = neigh_entry->key.n;
2694 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2695 	const char *dip = n->primary_key;
2696 
2697 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2698 			      dip);
2699 	if (neigh_entry->counter_valid)
2700 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2701 					     neigh_entry->counter_index);
2702 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2703 }
2704 
2705 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2706 {
2707 	struct neighbour *n = neigh_entry->key.n;
2708 
2709 	/* Packets with a link-local destination address are trapped
2710 	 * after LPM lookup and never reach the neighbour table, so
2711 	 * there is no need to program such neighbours to the device.
2712 	 */
2713 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2714 	    IPV6_ADDR_LINKLOCAL)
2715 		return true;
2716 	return false;
2717 }
2718 
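
/* Reflect the neighbour entry to the device: program (or remove) the RAUHT
 * record matching its address family, and mirror the result back to the
 * kernel via the NTF_OFFLOADED flag. IPv6 link-local neighbours are never
 * programmed (see mlxsw_sp_neigh_ipv6_ignore()).
 */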
2719 static void
2720 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2721 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2722 			    bool adding)
2723 {
2724 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2725 	int err;
2726 
2727 	if (!adding && !neigh_entry->connected)
2728 		return;
2729 	neigh_entry->connected = adding;
2730 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2731 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2732 						      op);
2733 		if (err)
2734 			return;
2735 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2736 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2737 			return;
2738 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2739 						      op);
2740 		if (err)
2741 			return;
2742 	} else {
2743 		WARN_ON_ONCE(1);
2744 		return;
2745 	}
2746 
2747 	if (adding)
2748 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2749 	else
2750 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2751 }
2752 
2753 void
2754 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2755 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2756 				    bool adding)
2757 {
2758 	if (adding)
2759 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2760 	else
2761 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2762 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2763 }
2764 
2765 struct mlxsw_sp_netevent_work {
2766 	struct work_struct work;
2767 	struct mlxsw_sp *mlxsw_sp;
2768 	struct neighbour *n;
2769 };
2770 
2771 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2772 {
2773 	struct mlxsw_sp_netevent_work *net_work =
2774 		container_of(work, struct mlxsw_sp_netevent_work, work);
2775 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2776 	struct mlxsw_sp_neigh_entry *neigh_entry;
2777 	struct neighbour *n = net_work->n;
2778 	unsigned char ha[ETH_ALEN];
2779 	bool entry_connected;
2780 	u8 nud_state, dead;
2781 
2782 	/* If these parameters are changed after we release the lock,
2783 	 * then we are guaranteed to receive another event letting us
2784 	 * know about it.
2785 	 */
2786 	read_lock_bh(&n->lock);
2787 	memcpy(ha, n->ha, ETH_ALEN);
2788 	nud_state = n->nud_state;
2789 	dead = n->dead;
2790 	read_unlock_bh(&n->lock);
2791 
2792 	mutex_lock(&mlxsw_sp->router->lock);
2793 	mlxsw_sp_span_respin(mlxsw_sp);
2794 
2795 	entry_connected = nud_state & NUD_VALID && !dead;
2796 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2797 	if (!entry_connected && !neigh_entry)
2798 		goto out;
2799 	if (!neigh_entry) {
2800 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2801 		if (IS_ERR(neigh_entry))
2802 			goto out;
2803 	}
2804 
2805 	if (neigh_entry->connected && entry_connected &&
2806 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2807 		goto out;
2808 
2809 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2810 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2811 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2812 				      dead);
2813 
2814 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2815 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2816 
2817 out:
2818 	mutex_unlock(&mlxsw_sp->router->lock);
2819 	neigh_release(n);
2820 	kfree(net_work);
2821 }
2822 
2823 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2824 
2825 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2826 {
2827 	struct mlxsw_sp_netevent_work *net_work =
2828 		container_of(work, struct mlxsw_sp_netevent_work, work);
2829 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2830 
2831 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2832 	kfree(net_work);
2833 }
2834 
2835 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2836 
2837 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2838 {
2839 	struct mlxsw_sp_netevent_work *net_work =
2840 		container_of(work, struct mlxsw_sp_netevent_work, work);
2841 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2842 
2843 	__mlxsw_sp_router_init(mlxsw_sp);
2844 	kfree(net_work);
2845 }
2846 
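
/* Netevent notifiers run in atomic context, so the handler only allocates
 * (GFP_ATOMIC) and schedules a work item; the actual processing happens
 * later in process context, where the router lock can be taken.
 */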
2847 static int mlxsw_sp_router_schedule_work(struct net *net,
2848 					 struct mlxsw_sp_router *router,
2849 					 struct neighbour *n,
2850 					 void (*cb)(struct work_struct *))
2851 {
2852 	struct mlxsw_sp_netevent_work *net_work;
2853 
2854 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2855 		return NOTIFY_DONE;
2856 
2857 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2858 	if (!net_work)
2859 		return NOTIFY_BAD;
2860 
2861 	INIT_WORK(&net_work->work, cb);
2862 	net_work->mlxsw_sp = router->mlxsw_sp;
2863 	net_work->n = n;
2864 	mlxsw_core_schedule_work(&net_work->work);
2865 	return NOTIFY_DONE;
2866 }
2867 
2868 static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
2869 {
2870 	struct mlxsw_sp_port *mlxsw_sp_port;
2871 
2872 	rcu_read_lock();
2873 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2874 	rcu_read_unlock();
2875 	return !!mlxsw_sp_port;
2876 }
2877 
2878 static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
2879 					       struct neighbour *n)
2880 {
2881 	struct net *net;
2882 
2883 	net = neigh_parms_net(n->parms);
2884 
2885 	/* Take a reference to ensure the neighbour won't be destroyed until
2886 	 * we drop the reference in the delayed work.
2887 	 */
2888 	neigh_clone(n);
2889 	return mlxsw_sp_router_schedule_work(net, router, n,
2890 					     mlxsw_sp_router_neigh_event_work);
2891 }
2892 
2893 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2894 					  unsigned long event, void *ptr)
2895 {
2896 	struct mlxsw_sp_router *router;
2897 	unsigned long interval;
2898 	struct neigh_parms *p;
2899 	struct neighbour *n;
2900 
2901 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2902 
2903 	switch (event) {
2904 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2905 		p = ptr;
2906 
2907 		/* We don't care about changes in the default table. */
2908 		if (!p->dev || (p->tbl->family != AF_INET &&
2909 				p->tbl->family != AF_INET6))
2910 			return NOTIFY_DONE;
2911 
2912 		/* We are in atomic context and can't take RTNL mutex,
2913 		 * so use RCU variant to walk the device chain.
2914 		 */
2915 		if (!mlxsw_sp_dev_lower_is_port(p->dev))
2916 			return NOTIFY_DONE;
2917 
2918 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2919 		router->neighs_update.interval = interval;
2920 		break;
2921 	case NETEVENT_NEIGH_UPDATE:
2922 		n = ptr;
2923 
2924 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2925 			return NOTIFY_DONE;
2926 
2927 		if (!mlxsw_sp_dev_lower_is_port(n->dev))
2928 			return NOTIFY_DONE;
2929 
2930 		return mlxsw_sp_router_schedule_neigh_work(router, n);
2931 
2932 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2933 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2934 		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2935 				mlxsw_sp_router_mp_hash_event_work);
2936 
2937 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2938 		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2939 				mlxsw_sp_router_update_priority_work);
2940 	}
2941 
2942 	return NOTIFY_DONE;
2943 }
2944 
2945 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2946 {
2947 	int err;
2948 
2949 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2950 			      &mlxsw_sp_neigh_ht_params);
2951 	if (err)
2952 		return err;
2953 
2954 	/* Initialize the polling interval according to the default
2955 	 * tables.
2956 	 */
2957 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2958 
2959 	/* Create the delayed work items for the activity update */
2960 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2961 			  mlxsw_sp_router_neighs_update_work);
2962 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2963 			  mlxsw_sp_router_probe_unresolved_nexthops);
2964 	atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2965 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2966 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2967 	return 0;
2968 }
2969 
2970 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2971 {
2972 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2973 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2974 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2975 }
2976 
2977 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2978 					 struct mlxsw_sp_rif *rif)
2979 {
2980 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2981 
2982 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2983 				 rif_list_node) {
2984 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2985 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2986 	}
2987 }
2988 
2989 struct mlxsw_sp_neigh_rif_made_sync {
2990 	struct mlxsw_sp *mlxsw_sp;
2991 	struct mlxsw_sp_rif *rif;
2992 	int err;
2993 };
2994 
2995 static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data)
2996 {
2997 	struct mlxsw_sp_neigh_rif_made_sync *rms = data;
2998 	int rc;
2999 
3000 	if (rms->err)
3001 		return;
3002 	if (n->dev != mlxsw_sp_rif_dev(rms->rif))
3003 		return;
3004 	rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n);
3005 	if (rc != NOTIFY_DONE)
3006 		rms->err = -ENOMEM;
3007 }
3008 
3009 static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
3010 					struct mlxsw_sp_rif *rif)
3011 {
3012 	struct mlxsw_sp_neigh_rif_made_sync rms = {
3013 		.mlxsw_sp = mlxsw_sp,
3014 		.rif = rif,
3015 	};
3016 
3017 	neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3018 	if (rms.err)
3019 		goto err_arp;
3020 
3021 #if IS_ENABLED(CONFIG_IPV6)
3022 	neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3023 #endif
3024 	if (rms.err)
3025 		goto err_nd;
3026 
3027 	return 0;
3028 
3029 err_nd:
3030 err_arp:
3031 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
3032 	return rms.err;
3033 }
3034 
3035 enum mlxsw_sp_nexthop_type {
3036 	MLXSW_SP_NEXTHOP_TYPE_ETH,
3037 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
3038 };
3039 
3040 enum mlxsw_sp_nexthop_action {
3041 	/* Nexthop forwards packets to an egress RIF */
3042 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
3043 	/* Nexthop discards packets */
3044 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
3045 	/* Nexthop traps packets */
3046 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
3047 };
3048 
3049 struct mlxsw_sp_nexthop_key {
3050 	struct fib_nh *fib_nh;
3051 };
3052 
3053 struct mlxsw_sp_nexthop_counter;
3054 
3055 struct mlxsw_sp_nexthop {
3056 	struct list_head neigh_list_node; /* member of neigh entry list */
3057 	struct list_head crif_list_node;
3058 	struct list_head router_list_node;
3059 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
3060 						   * this nexthop belongs to
3061 						   */
3062 	struct rhash_head ht_node;
3063 	struct neigh_table *neigh_tbl;
3064 	struct mlxsw_sp_nexthop_key key;
3065 	unsigned char gw_addr[sizeof(struct in6_addr)];
3066 	int ifindex;
3067 	int nh_weight;
3068 	int norm_nh_weight;
3069 	int num_adj_entries;
3070 	struct mlxsw_sp_crif *crif;
3071 	u8 should_offload:1, /* set indicates this nexthop should be written
3072 			      * to the adjacency table.
3073 			      */
3074 	   offloaded:1, /* set indicates this nexthop was written to the
3075 			 * adjacency table.
3076 			 */
3077 	   update:1; /* set indicates this nexthop should be updated in the
3078 		      * adjacency table (f.e., its MAC changed).
3079 		      */
3080 	enum mlxsw_sp_nexthop_action action;
3081 	enum mlxsw_sp_nexthop_type type;
3082 	union {
3083 		struct mlxsw_sp_neigh_entry *neigh_entry;
3084 		struct mlxsw_sp_ipip_entry *ipip_entry;
3085 	};
3086 	struct mlxsw_sp_nexthop_counter *counter;
3087 	u32 id;		/* NH ID for members of a NH object group. */
3088 };
3089 
3090 static struct net_device *
3091 mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
3092 {
3093 	if (!nh->crif)
3094 		return NULL;
3095 	return nh->crif->key.dev;
3096 }
3097 
3098 enum mlxsw_sp_nexthop_group_type {
3099 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3100 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3101 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3102 };
3103 
3104 struct mlxsw_sp_nexthop_group_info {
3105 	struct mlxsw_sp_nexthop_group *nh_grp;
3106 	u32 adj_index;
3107 	u16 ecmp_size;
3108 	u16 count;
3109 	int sum_norm_weight;
3110 	u8 adj_index_valid:1,
3111 	   gateway:1, /* routes using the group use a gateway */
3112 	   is_resilient:1,
3113 	   hw_stats:1;
3114 	struct list_head list; /* member in nh_res_grp_list */
3115 	struct xarray nexthop_counters;
3116 	struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
3117 };
3118 
3119 static struct mlxsw_sp_rif *
3120 mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
3121 {
3122 	struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;
3123 
3124 	if (!crif)
3125 		return NULL;
3126 	return crif->rif;
3127 }
3128 
3129 struct mlxsw_sp_nexthop_group_vr_key {
3130 	u16 vr_id;
3131 	enum mlxsw_sp_l3proto proto;
3132 };
3133 
3134 struct mlxsw_sp_nexthop_group_vr_entry {
3135 	struct list_head list; /* member in vr_list */
3136 	struct rhash_head ht_node; /* member in vr_ht */
3137 	refcount_t ref_count;
3138 	struct mlxsw_sp_nexthop_group_vr_key key;
3139 };
3140 
3141 struct mlxsw_sp_nexthop_group {
3142 	struct rhash_head ht_node;
3143 	struct list_head fib_list; /* list of fib entries that use this group */
3144 	union {
3145 		struct {
3146 			struct fib_info *fi;
3147 		} ipv4;
3148 		struct {
3149 			u32 id;
3150 		} obj;
3151 	};
3152 	struct mlxsw_sp_nexthop_group_info *nhgi;
3153 	struct list_head vr_list;
3154 	struct rhashtable vr_ht;
3155 	enum mlxsw_sp_nexthop_group_type type;
3156 	bool can_destroy;
3157 };
3158 
3159 struct mlxsw_sp_nexthop_counter {
3160 	unsigned int counter_index;
3161 	refcount_t ref_count;
3162 };
3163 
3164 static struct mlxsw_sp_nexthop_counter *
3165 mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp)
3166 {
3167 	struct mlxsw_sp_nexthop_counter *nhct;
3168 	int err;
3169 
3170 	nhct = kzalloc(sizeof(*nhct), GFP_KERNEL);
3171 	if (!nhct)
3172 		return ERR_PTR(-ENOMEM);
3173 
3174 	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nhct->counter_index);
3175 	if (err)
3176 		goto err_counter_alloc;
3177 
3178 	refcount_set(&nhct->ref_count, 1);
3179 	return nhct;
3180 
3181 err_counter_alloc:
3182 	kfree(nhct);
3183 	return ERR_PTR(err);
3184 }
3185 
3186 static void
3187 mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3188 			      struct mlxsw_sp_nexthop_counter *nhct)
3189 {
3190 	mlxsw_sp_flow_counter_free(mlxsw_sp, nhct->counter_index);
3191 	kfree(nhct);
3192 }
3193 
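/* Nexthops that are members of a nexthop object group (nh->id != 0) share
 * flow counters keyed by nexthop ID in the group's nexthop_counters
 * xarray, rather than owning one each. The counters are reference
 * counted: the first getter allocates and stores the counter, later
 * getters only bump ref_count, and the last put erases and frees it.
 * This way several struct mlxsw_sp_nexthop instances that represent the
 * same nexthop object (e.g. several buckets of a resilient group) end up
 * sharing a single hardware counter.
 */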
3194 static struct mlxsw_sp_nexthop_counter *
3195 mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
3196 				struct mlxsw_sp_nexthop *nh)
3197 {
3198 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3199 	struct mlxsw_sp_nexthop_counter *nhct;
3200 	void *ptr;
3201 	int err;
3202 
3203 	nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
3204 	if (nhct) {
3205 		refcount_inc(&nhct->ref_count);
3206 		return nhct;
3207 	}
3208 
3209 	nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
3210 	if (IS_ERR(nhct))
3211 		return nhct;
3212 
3213 	ptr = xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
3214 		       GFP_KERNEL);
3215 	if (xa_is_err(ptr)) {
3216 		err = xa_err(ptr);
3217 		goto err_store;
3218 	}
3219 
3220 	return nhct;
3221 
3222 err_store:
3223 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
3224 	return ERR_PTR(err);
3225 }
3226 
3227 static void mlxsw_sp_nexthop_sh_counter_put(struct mlxsw_sp *mlxsw_sp,
3228 					    struct mlxsw_sp_nexthop *nh)
3229 {
3230 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3231 	struct mlxsw_sp_nexthop_counter *nhct;
3232 
3233 	nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
3234 	if (WARN_ON(!nhct))
3235 		return;
3236 
3237 	if (!refcount_dec_and_test(&nhct->ref_count))
3238 		return;
3239 
3240 	xa_erase(&nh_grp->nhgi->nexthop_counters, nh->id);
3241 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
3242 }
3243 
3244 int mlxsw_sp_nexthop_counter_enable(struct mlxsw_sp *mlxsw_sp,
3245 				    struct mlxsw_sp_nexthop *nh)
3246 {
3247 	const char *table_adj = MLXSW_SP_DPIPE_TABLE_NAME_ADJ;
3248 	struct mlxsw_sp_nexthop_counter *nhct;
3249 	struct devlink *devlink;
3250 	bool dpipe_stats;
3251 
3252 	if (nh->counter)
3253 		return 0;
3254 
3255 	devlink = priv_to_devlink(mlxsw_sp->core);
3256 	dpipe_stats = devlink_dpipe_table_counter_enabled(devlink, table_adj);
3257 	if (!(nh->nhgi->hw_stats || dpipe_stats))
3258 		return 0;
3259 
3260 	if (nh->id)
3261 		nhct = mlxsw_sp_nexthop_sh_counter_get(mlxsw_sp, nh);
3262 	else
3263 		nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
3264 	if (IS_ERR(nhct))
3265 		return PTR_ERR(nhct);
3266 
3267 	nh->counter = nhct;
3268 	return 0;
3269 }
3270 
3271 void mlxsw_sp_nexthop_counter_disable(struct mlxsw_sp *mlxsw_sp,
3272 				      struct mlxsw_sp_nexthop *nh)
3273 {
3274 	if (!nh->counter)
3275 		return;
3276 
3277 	if (nh->id)
3278 		mlxsw_sp_nexthop_sh_counter_put(mlxsw_sp, nh);
3279 	else
3280 		mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh->counter);
3281 	nh->counter = NULL;
3282 }
3283 
3284 static int mlxsw_sp_nexthop_counter_update(struct mlxsw_sp *mlxsw_sp,
3285 					   struct mlxsw_sp_nexthop *nh)
3286 {
3287 	if (nh->nhgi->hw_stats)
3288 		return mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
3289 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
3290 	return 0;
3291 }
3292 
3293 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3294 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3295 {
3296 	if (!nh->counter)
3297 		return -EINVAL;
3298 
3299 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter->counter_index,
3300 					 true, p_counter, NULL);
3301 }
3302 
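/* Iterate over the router's global nexthop list, e.g.:
 *
 *	struct mlxsw_sp_nexthop *nh = NULL;
 *
 *	while ((nh = mlxsw_sp_nexthop_next(router, nh)))
 *		visit(nh);
 *
 * where visit() stands in for any per-nexthop work. Passing NULL yields
 * the first entry; NULL is returned once the last entry has been visited.
 */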
3303 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3304 					       struct mlxsw_sp_nexthop *nh)
3305 {
3306 	if (!nh) {
3307 		if (list_empty(&router->nexthop_list))
3308 			return NULL;
3309 		else
3310 			return list_first_entry(&router->nexthop_list,
3311 						typeof(*nh), router_list_node);
3312 	}
3313 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3314 		return NULL;
3315 	return list_next_entry(nh, router_list_node);
3316 }
3317 
3318 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3319 {
3320 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3321 }
3322 
3323 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3324 {
3325 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3326 	    !mlxsw_sp_nexthop_is_forward(nh))
3327 		return NULL;
3328 	return nh->neigh_entry->ha;
3329 }
3330 
3331 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3332 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3333 {
3334 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3335 	u32 adj_hash_index = 0;
3336 	int i;
3337 
3338 	if (!nh->offloaded || !nhgi->adj_index_valid)
3339 		return -EINVAL;
3340 
3341 	*p_adj_index = nhgi->adj_index;
3342 	*p_adj_size = nhgi->ecmp_size;
3343 
3344 	for (i = 0; i < nhgi->count; i++) {
3345 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3346 
3347 		if (nh_iter == nh)
3348 			break;
3349 		if (nh_iter->offloaded)
3350 			adj_hash_index += nh_iter->num_adj_entries;
3351 	}
3352 
3353 	*p_adj_hash_index = adj_hash_index;
3354 	return 0;
3355 }
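/* Illustrative example: in a group whose adjacency block starts at index
 * 100, with three offloaded nexthops occupying 2, 3 and 1 entries in
 * order, the third nexthop reports *p_adj_index = 100, *p_adj_size =
 * ecmp_size and *p_adj_hash_index = 2 + 3 = 5, the offset of its first
 * entry past the preceding nexthops.
 */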
3356 
3357 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3358 {
3359 	if (WARN_ON(!nh->crif))
3360 		return NULL;
3361 	return nh->crif->rif;
3362 }
3363 
3364 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3365 {
3366 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3367 	int i;
3368 
3369 	for (i = 0; i < nhgi->count; i++) {
3370 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3371 
3372 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3373 			return true;
3374 	}
3375 	return false;
3376 }
3377 
3378 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3379 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3380 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3381 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3382 	.automatic_shrinking = true,
3383 };
3384 
3385 static struct mlxsw_sp_nexthop_group_vr_entry *
3386 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3387 				       const struct mlxsw_sp_fib *fib)
3388 {
3389 	struct mlxsw_sp_nexthop_group_vr_key key;
3390 
3391 	memset(&key, 0, sizeof(key));
3392 	key.vr_id = fib->vr->id;
3393 	key.proto = fib->proto;
3394 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3395 				      mlxsw_sp_nexthop_group_vr_ht_params);
3396 }
3397 
3398 static int
3399 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3400 				       const struct mlxsw_sp_fib *fib)
3401 {
3402 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3403 	int err;
3404 
3405 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3406 	if (!vr_entry)
3407 		return -ENOMEM;
3408 
3409 	vr_entry->key.vr_id = fib->vr->id;
3410 	vr_entry->key.proto = fib->proto;
3411 	refcount_set(&vr_entry->ref_count, 1);
3412 
3413 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3414 				     mlxsw_sp_nexthop_group_vr_ht_params);
3415 	if (err)
3416 		goto err_hashtable_insert;
3417 
3418 	list_add(&vr_entry->list, &nh_grp->vr_list);
3419 
3420 	return 0;
3421 
3422 err_hashtable_insert:
3423 	kfree(vr_entry);
3424 	return err;
3425 }
3426 
3427 static void
3428 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3429 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3430 {
3431 	list_del(&vr_entry->list);
3432 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3433 			       mlxsw_sp_nexthop_group_vr_ht_params);
3434 	kfree(vr_entry);
3435 }
3436 
3437 static int
3438 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3439 			       const struct mlxsw_sp_fib *fib)
3440 {
3441 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3442 
3443 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3444 	if (vr_entry) {
3445 		refcount_inc(&vr_entry->ref_count);
3446 		return 0;
3447 	}
3448 
3449 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3450 }
3451 
3452 static void
3453 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3454 				 const struct mlxsw_sp_fib *fib)
3455 {
3456 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3457 
3458 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3459 	if (WARN_ON_ONCE(!vr_entry))
3460 		return;
3461 
3462 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3463 		return;
3464 
3465 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3466 }
3467 
3468 struct mlxsw_sp_nexthop_group_cmp_arg {
3469 	enum mlxsw_sp_nexthop_group_type type;
3470 	union {
3471 		struct fib_info *fi;
3472 		struct mlxsw_sp_fib6_entry *fib6_entry;
3473 		u32 id;
3474 	};
3475 };
3476 
3477 static bool
3478 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3479 				    const struct in6_addr *gw, int ifindex,
3480 				    int weight)
3481 {
3482 	int i;
3483 
3484 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3485 		const struct mlxsw_sp_nexthop *nh;
3486 
3487 		nh = &nh_grp->nhgi->nexthops[i];
3488 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3489 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3490 			return true;
3491 	}
3492 
3493 	return false;
3494 }
3495 
3496 static bool
3497 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3498 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3499 {
3500 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3501 
3502 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3503 		return false;
3504 
3505 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3506 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3507 		struct in6_addr *gw;
3508 		int ifindex, weight;
3509 
3510 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3511 		weight = fib6_nh->fib_nh_weight;
3512 		gw = &fib6_nh->fib_nh_gw6;
3513 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3514 							 weight))
3515 			return false;
3516 	}
3517 
3518 	return true;
3519 }
3520 
3521 static int
3522 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3523 {
3524 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3525 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3526 
3527 	if (nh_grp->type != cmp_arg->type)
3528 		return 1;
3529 
3530 	switch (cmp_arg->type) {
3531 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3532 		return cmp_arg->fi != nh_grp->ipv4.fi;
3533 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3534 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3535 						    cmp_arg->fib6_entry);
3536 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3537 		return cmp_arg->id != nh_grp->obj.id;
3538 	default:
3539 		WARN_ON(1);
3540 		return 1;
3541 	}
3542 }
3543 
3544 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3545 {
3546 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3547 	const struct mlxsw_sp_nexthop *nh;
3548 	struct fib_info *fi;
3549 	unsigned int val;
3550 	int i;
3551 
3552 	switch (nh_grp->type) {
3553 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3554 		fi = nh_grp->ipv4.fi;
3555 		return jhash(&fi, sizeof(fi), seed);
3556 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3557 		val = nh_grp->nhgi->count;
3558 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3559 			nh = &nh_grp->nhgi->nexthops[i];
3560 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3561 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3562 		}
3563 		return jhash(&val, sizeof(val), seed);
3564 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3565 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3566 	default:
3567 		WARN_ON(1);
3568 		return 0;
3569 	}
3570 }
3571 
3572 static u32
3573 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3574 {
3575 	unsigned int val = fib6_entry->nrt6;
3576 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3577 
3578 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3579 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3580 		struct net_device *dev = fib6_nh->fib_nh_dev;
3581 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3582 
3583 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3584 		val ^= jhash(gw, sizeof(*gw), seed);
3585 	}
3586 
3587 	return jhash(&val, sizeof(val), seed);
3588 }
3589 
3590 static u32
3591 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3592 {
3593 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3594 
3595 	switch (cmp_arg->type) {
3596 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3597 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3598 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3599 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3600 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3601 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3602 	default:
3603 		WARN_ON(1);
3604 		return 0;
3605 	}
3606 }
3607 
3608 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3609 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3610 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3611 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3612 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3613 };
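/* The group hash table is looked up by struct mlxsw_sp_nexthop_group_cmp_arg
 * rather than by a field embedded in the object, which is why no
 * key_offset/key_len is given: hashfn digests the lookup argument,
 * obj_hashfn digests a stored group the same way, and obj_cmpfn decides
 * whether the two match. For the IPv6 case both sides fold the same
 * per-nexthop values (ifindex and gateway) into the hash so that equal
 * groups collide as required.
 */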
3614 
3615 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3616 					 struct mlxsw_sp_nexthop_group *nh_grp)
3617 {
3618 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3619 	    !nh_grp->nhgi->gateway)
3620 		return 0;
3621 
3622 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3623 				      &nh_grp->ht_node,
3624 				      mlxsw_sp_nexthop_group_ht_params);
3625 }
3626 
3627 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3628 					  struct mlxsw_sp_nexthop_group *nh_grp)
3629 {
3630 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3631 	    !nh_grp->nhgi->gateway)
3632 		return;
3633 
3634 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3635 			       &nh_grp->ht_node,
3636 			       mlxsw_sp_nexthop_group_ht_params);
3637 }
3638 
3639 static struct mlxsw_sp_nexthop_group *
3640 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3641 			       struct fib_info *fi)
3642 {
3643 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3644 
3645 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3646 	cmp_arg.fi = fi;
3647 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3648 				      &cmp_arg,
3649 				      mlxsw_sp_nexthop_group_ht_params);
3650 }
3651 
3652 static struct mlxsw_sp_nexthop_group *
3653 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3654 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3655 {
3656 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3657 
3658 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3659 	cmp_arg.fib6_entry = fib6_entry;
3660 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3661 				      &cmp_arg,
3662 				      mlxsw_sp_nexthop_group_ht_params);
3663 }
3664 
3665 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3666 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3667 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3668 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3669 };
3670 
3671 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3672 				   struct mlxsw_sp_nexthop *nh)
3673 {
3674 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3675 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3676 }
3677 
3678 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3679 				    struct mlxsw_sp_nexthop *nh)
3680 {
3681 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3682 			       mlxsw_sp_nexthop_ht_params);
3683 }
3684 
3685 static struct mlxsw_sp_nexthop *
3686 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3687 			struct mlxsw_sp_nexthop_key key)
3688 {
3689 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3690 				      mlxsw_sp_nexthop_ht_params);
3691 }
3692 
3693 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3694 					     enum mlxsw_sp_l3proto proto,
3695 					     u16 vr_id,
3696 					     u32 adj_index, u16 ecmp_size,
3697 					     u32 new_adj_index,
3698 					     u16 new_ecmp_size)
3699 {
3700 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3701 
3702 	mlxsw_reg_raleu_pack(raleu_pl,
3703 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3704 			     adj_index, ecmp_size, new_adj_index,
3705 			     new_ecmp_size);
3706 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3707 }
3708 
3709 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3710 					  struct mlxsw_sp_nexthop_group *nh_grp,
3711 					  u32 old_adj_index, u16 old_ecmp_size)
3712 {
3713 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3714 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3715 	int err;
3716 
3717 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3718 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3719 							vr_entry->key.proto,
3720 							vr_entry->key.vr_id,
3721 							old_adj_index,
3722 							old_ecmp_size,
3723 							nhgi->adj_index,
3724 							nhgi->ecmp_size);
3725 		if (err)
3726 			goto err_mass_update_vr;
3727 	}
3728 	return 0;
3729 
3730 err_mass_update_vr:
3731 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3732 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3733 						  vr_entry->key.vr_id,
3734 						  nhgi->adj_index,
3735 						  nhgi->ecmp_size,
3736 						  old_adj_index, old_ecmp_size);
3737 	return err;
3738 }
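/* Repoint every virtual router that references the group from the old
 * adjacency block to the new one. On failure, the VRs already updated
 * are walked back in reverse, so the group is never left half-migrated.
 */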
3739 
3740 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3741 					 u32 adj_index,
3742 					 struct mlxsw_sp_nexthop *nh,
3743 					 bool force, char *ratr_pl)
3744 {
3745 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3746 	struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
3747 	enum mlxsw_reg_ratr_op op;
3748 	u16 rif_index;
3749 
3750 	rif_index = rif ? rif->rif_index :
3751 			  mlxsw_sp->router->lb_crif->rif->rif_index;
3752 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3753 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3754 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3755 			    adj_index, rif_index);
3756 	switch (nh->action) {
3757 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3758 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3759 		break;
3760 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3761 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3762 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3763 		break;
3764 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3765 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3766 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3767 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3768 		break;
3769 	default:
3770 		WARN_ON_ONCE(1);
3771 		return -EINVAL;
3772 	}
3773 	if (nh->counter)
3774 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter->counter_index,
3775 					    true);
3776 	else
3777 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3778 
3779 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3780 }
3781 
3782 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3783 				struct mlxsw_sp_nexthop *nh, bool force,
3784 				char *ratr_pl)
3785 {
3786 	int i;
3787 
3788 	for (i = 0; i < nh->num_adj_entries; i++) {
3789 		int err;
3790 
3791 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3792 						    nh, force, ratr_pl);
3793 		if (err)
3794 			return err;
3795 	}
3796 
3797 	return 0;
3798 }
3799 
3800 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3801 					  u32 adj_index,
3802 					  struct mlxsw_sp_nexthop *nh,
3803 					  bool force, char *ratr_pl)
3804 {
3805 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3806 
3807 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3808 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3809 					force, ratr_pl);
3810 }
3811 
3812 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3813 					u32 adj_index,
3814 					struct mlxsw_sp_nexthop *nh, bool force,
3815 					char *ratr_pl)
3816 {
3817 	int i;
3818 
3819 	for (i = 0; i < nh->num_adj_entries; i++) {
3820 		int err;
3821 
3822 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3823 						     nh, force, ratr_pl);
3824 		if (err)
3825 			return err;
3826 	}
3827 
3828 	return 0;
3829 }
3830 
3831 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3832 				   struct mlxsw_sp_nexthop *nh, bool force,
3833 				   char *ratr_pl)
3834 {
3835 	/* When action is discard or trap, the nexthop must be
3836 	 * programmed as an Ethernet nexthop.
3837 	 */
3838 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3839 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3840 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3841 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3842 						   force, ratr_pl);
3843 	else
3844 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3845 						    force, ratr_pl);
3846 }
3847 
3848 static int
3849 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3850 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3851 			      bool reallocate)
3852 {
3853 	char ratr_pl[MLXSW_REG_RATR_LEN];
3854 	u32 adj_index = nhgi->adj_index; /* base */
3855 	struct mlxsw_sp_nexthop *nh;
3856 	int i;
3857 
3858 	for (i = 0; i < nhgi->count; i++) {
3859 		nh = &nhgi->nexthops[i];
3860 
3861 		if (!nh->should_offload) {
3862 			mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
3863 			nh->offloaded = 0;
3864 			continue;
3865 		}
3866 
3867 		if (nh->update || reallocate) {
3868 			int err = 0;
3869 
3870 			err = mlxsw_sp_nexthop_counter_update(mlxsw_sp, nh);
3871 			if (err)
3872 				return err;
3873 
3874 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3875 						      true, ratr_pl);
3876 			if (err)
3877 				return err;
3878 			nh->update = 0;
3879 			nh->offloaded = 1;
3880 		}
3881 		adj_index += nh->num_adj_entries;
3882 	}
3883 	return 0;
3884 }
3885 
3886 static int
3887 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3888 				    struct mlxsw_sp_nexthop_group *nh_grp)
3889 {
3890 	struct mlxsw_sp_fib_entry *fib_entry;
3891 	int err;
3892 
3893 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3894 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3895 		if (err)
3896 			return err;
3897 	}
3898 	return 0;
3899 }
3900 
3901 struct mlxsw_sp_adj_grp_size_range {
3902 	u16 start; /* Inclusive */
3903 	u16 end; /* Inclusive */
3904 };
3905 
3906 /* Ordered by range start value */
3907 static const struct mlxsw_sp_adj_grp_size_range
3908 mlxsw_sp1_adj_grp_size_ranges[] = {
3909 	{ .start = 1, .end = 64 },
3910 	{ .start = 512, .end = 512 },
3911 	{ .start = 1024, .end = 1024 },
3912 	{ .start = 2048, .end = 2048 },
3913 	{ .start = 4096, .end = 4096 },
3914 };
3915 
3916 /* Ordered by range start value */
3917 static const struct mlxsw_sp_adj_grp_size_range
3918 mlxsw_sp2_adj_grp_size_ranges[] = {
3919 	{ .start = 1, .end = 128 },
3920 	{ .start = 256, .end = 256 },
3921 	{ .start = 512, .end = 512 },
3922 	{ .start = 1024, .end = 1024 },
3923 	{ .start = 2048, .end = 2048 },
3924 	{ .start = 4096, .end = 4096 },
3925 };
3926 
3927 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3928 					   u16 *p_adj_grp_size)
3929 {
3930 	int i;
3931 
3932 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3933 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3934 
3935 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3936 
3937 		if (*p_adj_grp_size >= size_range->start &&
3938 		    *p_adj_grp_size <= size_range->end)
3939 			return;
3940 
3941 		if (*p_adj_grp_size <= size_range->end) {
3942 			*p_adj_grp_size = size_range->end;
3943 			return;
3944 		}
3945 	}
3946 }
3947 
3948 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3949 					     u16 *p_adj_grp_size,
3950 					     unsigned int alloc_size)
3951 {
3952 	int i;
3953 
3954 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3955 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3956 
3957 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3958 
3959 		if (alloc_size >= size_range->end) {
3960 			*p_adj_grp_size = size_range->end;
3961 			return;
3962 		}
3963 	}
3964 }
3965 
3966 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3967 				     u16 *p_adj_grp_size)
3968 {
3969 	unsigned int alloc_size;
3970 	int err;
3971 
3972 	/* Round up the requested group size to the next size supported
3973 	 * by the device and make sure the request can be satisfied.
3974 	 */
3975 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3976 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3977 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3978 					      *p_adj_grp_size, &alloc_size);
3979 	if (err)
3980 		return err;
3981 	/* It is possible the allocation results in more allocated
3982 	 * entries than requested. Try to use as much of them as
3983 	 * possible.
3984 	 */
3985 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3986 
3987 	return 0;
3988 }
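/* Worked example, assuming the Spectrum-1 ranges above: a request for 70
 * entries is rounded up to 512, the next supported size. If the KVD
 * linear allocator reports that such a request would actually yield 1024
 * entries, the round-down step then settles on 1024, the largest
 * supported size not exceeding the actual allocation, so that none of it
 * goes unused.
 */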
3989 
3990 static void
3991 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3992 {
3993 	int i, g = 0, sum_norm_weight = 0;
3994 	struct mlxsw_sp_nexthop *nh;
3995 
3996 	for (i = 0; i < nhgi->count; i++) {
3997 		nh = &nhgi->nexthops[i];
3998 
3999 		if (!nh->should_offload)
4000 			continue;
4001 		if (g > 0)
4002 			g = gcd(nh->nh_weight, g);
4003 		else
4004 			g = nh->nh_weight;
4005 	}
4006 
4007 	for (i = 0; i < nhgi->count; i++) {
4008 		nh = &nhgi->nexthops[i];
4009 
4010 		if (!nh->should_offload)
4011 			continue;
4012 		nh->norm_nh_weight = nh->nh_weight / g;
4013 		sum_norm_weight += nh->norm_nh_weight;
4014 	}
4015 
4016 	nhgi->sum_norm_weight = sum_norm_weight;
4017 }
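/* Example: offloadable nexthops with weights 20, 30 and 50 have a GCD of
 * 10 and normalize to 2, 3 and 5, giving sum_norm_weight = 10. Nexthops
 * that should not be offloaded are skipped and get no share.
 */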
4018 
4019 static void
4020 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
4021 {
4022 	int i, weight = 0, lower_bound = 0;
4023 	int total = nhgi->sum_norm_weight;
4024 	u16 ecmp_size = nhgi->ecmp_size;
4025 
4026 	for (i = 0; i < nhgi->count; i++) {
4027 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4028 		int upper_bound;
4029 
4030 		if (!nh->should_offload)
4031 			continue;
4032 		weight += nh->norm_nh_weight;
4033 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
4034 		nh->num_adj_entries = upper_bound - lower_bound;
4035 		lower_bound = upper_bound;
4036 	}
4037 }
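/* Continuing the example above: normalized weights 2, 3 and 5 (total 10)
 * spread over an ECMP size of 512 produce running upper bounds of 102,
 * 256 and 512, so the nexthops receive 102, 154 and 256 adjacency
 * entries respectively, preserving the 2:3:5 ratio almost exactly.
 */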
4038 
4039 static struct mlxsw_sp_nexthop *
4040 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4041 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
4042 
4043 static void
4044 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4045 					struct mlxsw_sp_nexthop_group *nh_grp)
4046 {
4047 	int i;
4048 
4049 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4050 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4051 
4052 		if (nh->offloaded)
4053 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4054 		else
4055 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4056 	}
4057 }
4058 
4059 static void
4060 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
4061 					  struct mlxsw_sp_fib6_entry *fib6_entry)
4062 {
4063 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4064 
4065 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4066 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
4067 		struct mlxsw_sp_nexthop *nh;
4068 
4069 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
4070 		if (nh && nh->offloaded)
4071 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4072 		else
4073 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4074 	}
4075 }
4076 
4077 static void
4078 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4079 					struct mlxsw_sp_nexthop_group *nh_grp)
4080 {
4081 	struct mlxsw_sp_fib6_entry *fib6_entry;
4082 
4083 	/* Unfortunately, in IPv6 the route and the nexthop are described by
4084 	 * the same struct, so we need to iterate over all the routes using the
4085 	 * nexthop group and set / clear the offload indication for them.
4086 	 */
4087 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
4088 			    common.nexthop_group_node)
4089 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
4090 }
4091 
4092 static void
4093 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4094 					const struct mlxsw_sp_nexthop *nh,
4095 					u16 bucket_index)
4096 {
4097 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
4098 	bool offload = false, trap = false;
4099 
4100 	if (nh->offloaded) {
4101 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
4102 			trap = true;
4103 		else
4104 			offload = true;
4105 	}
4106 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4107 				    bucket_index, offload, trap);
4108 }
4109 
4110 static void
4111 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4112 					   struct mlxsw_sp_nexthop_group *nh_grp)
4113 {
4114 	int i;
4115 
4116 	/* Do not update the flags if the nexthop group is being destroyed
4117 	 * since:
4118 	 * 1. The nexthop object is being deleted, in which case the flags are
4119 	 * irrelevant.
4120 	 * 2. The nexthop group was replaced by a newer group, in which case
4121 	 * the flags of the nexthop object were already updated based on the
4122 	 * new group.
4123 	 */
4124 	if (nh_grp->can_destroy)
4125 		return;
4126 
4127 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4128 			     nh_grp->nhgi->adj_index_valid, false);
4129 
4130 	/* Update flags of individual nexthop buckets in case of a resilient
4131 	 * nexthop group.
4132 	 */
4133 	if (!nh_grp->nhgi->is_resilient)
4134 		return;
4135 
4136 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4137 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4138 
4139 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
4140 	}
4141 }
4142 
4143 static void
4144 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4145 				       struct mlxsw_sp_nexthop_group *nh_grp)
4146 {
4147 	switch (nh_grp->type) {
4148 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
4149 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
4150 		break;
4151 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
4152 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
4153 		break;
4154 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
4155 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
4156 		break;
4157 	}
4158 }
4159 
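/* Central update point for a nexthop group, called whenever member
 * nexthops change state. If the set of offloadable nexthops changed, the
 * weights are re-normalized, a supported adjacency block size is chosen,
 * a fresh KVD linear block is allocated and populated, and only then are
 * the routes repointed at it (via RALEU or FIB entry updates) before the
 * old block is freed. Any failure along the way degrades the group to
 * trapping packets to the kernel via the set_trap path.
 */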
4160 static int
4161 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
4162 			       struct mlxsw_sp_nexthop_group *nh_grp)
4163 {
4164 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4165 	u16 ecmp_size, old_ecmp_size;
4166 	struct mlxsw_sp_nexthop *nh;
4167 	bool offload_change = false;
4168 	u32 adj_index;
4169 	bool old_adj_index_valid;
4170 	u32 old_adj_index;
4171 	int i, err2, err;
4172 
4173 	if (!nhgi->gateway)
4174 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4175 
4176 	for (i = 0; i < nhgi->count; i++) {
4177 		nh = &nhgi->nexthops[i];
4178 
4179 		if (nh->should_offload != nh->offloaded) {
4180 			offload_change = true;
4181 			if (nh->should_offload)
4182 				nh->update = 1;
4183 		}
4184 	}
4185 	if (!offload_change) {
4186 		/* Nothing was added or removed, so no need to reallocate. Just
4187 		 * update MAC on existing adjacency indexes.
4188 		 */
4189 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
4190 		if (err) {
4191 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4192 			goto set_trap;
4193 		}
4194 		/* Flags of individual nexthop buckets might need to be
4195 		 * updated.
4196 		 */
4197 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4198 		return 0;
4199 	}
4200 	mlxsw_sp_nexthop_group_normalize(nhgi);
4201 	if (!nhgi->sum_norm_weight) {
4202 		/* No neigh of this group is connected so we just set
4203 		 * the trap and let everything flow through the kernel.
4204 		 */
4205 		err = 0;
4206 		goto set_trap;
4207 	}
4208 
4209 	ecmp_size = nhgi->sum_norm_weight;
4210 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4211 	if (err)
4212 		/* No valid allocation size available. */
4213 		goto set_trap;
4214 
4215 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4216 				  ecmp_size, &adj_index);
4217 	if (err) {
4218 		/* We ran out of KVD linear space, just set the
4219 		 * trap and let everything flow through kernel.
4220 		 */
4221 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4222 		goto set_trap;
4223 	}
4224 	old_adj_index_valid = nhgi->adj_index_valid;
4225 	old_adj_index = nhgi->adj_index;
4226 	old_ecmp_size = nhgi->ecmp_size;
4227 	nhgi->adj_index_valid = 1;
4228 	nhgi->adj_index = adj_index;
4229 	nhgi->ecmp_size = ecmp_size;
4230 	mlxsw_sp_nexthop_group_rebalance(nhgi);
4231 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4232 	if (err) {
4233 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4234 		goto set_trap;
4235 	}
4236 
4237 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4238 
4239 	if (!old_adj_index_valid) {
4240 		/* The trap was set for fib entries, so we have to call
4241 		 * fib entry update to unset it and use adjacency index.
4242 		 */
4243 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4244 		if (err) {
4245 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4246 			goto set_trap;
4247 		}
4248 		return 0;
4249 	}
4250 
4251 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4252 					     old_adj_index, old_ecmp_size);
4253 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4254 			   old_ecmp_size, old_adj_index);
4255 	if (err) {
4256 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4257 		goto set_trap;
4258 	}
4259 
4260 	return 0;
4261 
4262 set_trap:
4263 	old_adj_index_valid = nhgi->adj_index_valid;
4264 	nhgi->adj_index_valid = 0;
4265 	for (i = 0; i < nhgi->count; i++) {
4266 		nh = &nhgi->nexthops[i];
4267 		nh->offloaded = 0;
4268 	}
4269 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4270 	if (err2)
4271 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4272 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4273 	if (old_adj_index_valid)
4274 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4275 				   nhgi->ecmp_size, nhgi->adj_index);
4276 	return err;
4277 }
4278 
4279 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4280 					    bool removing)
4281 {
4282 	if (!removing) {
4283 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4284 		nh->should_offload = 1;
4285 	} else if (nh->nhgi->is_resilient) {
4286 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4287 		nh->should_offload = 1;
4288 	} else {
4289 		nh->should_offload = 0;
4290 	}
4291 	nh->update = 1;
4292 }
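/* Resilient groups keep a fixed number of buckets, so a nexthop whose
 * neighbour became invalid cannot simply be withdrawn from the group.
 * Instead it stays offloaded with a trap action, punting the traffic of
 * its buckets to the kernel until the neighbour resolves again.
 */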
4293 
4294 static int
4295 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4296 				    struct mlxsw_sp_neigh_entry *neigh_entry)
4297 {
4298 	struct neighbour *n, *old_n = neigh_entry->key.n;
4299 	struct mlxsw_sp_nexthop *nh;
4300 	struct net_device *dev;
4301 	bool entry_connected;
4302 	u8 nud_state, dead;
4303 	int err;
4304 
4305 	nh = list_first_entry(&neigh_entry->nexthop_list,
4306 			      struct mlxsw_sp_nexthop, neigh_list_node);
4307 	dev = mlxsw_sp_nexthop_dev(nh);
4308 
4309 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4310 	if (!n) {
4311 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4312 		if (IS_ERR(n))
4313 			return PTR_ERR(n);
4314 		neigh_event_send(n, NULL);
4315 	}
4316 
4317 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4318 	neigh_entry->key.n = n;
4319 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4320 	if (err)
4321 		goto err_neigh_entry_insert;
4322 
4323 	read_lock_bh(&n->lock);
4324 	nud_state = n->nud_state;
4325 	dead = n->dead;
4326 	read_unlock_bh(&n->lock);
4327 	entry_connected = nud_state & NUD_VALID && !dead;
4328 
4329 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4330 			    neigh_list_node) {
4331 		neigh_release(old_n);
4332 		neigh_clone(n);
4333 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4334 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4335 	}
4336 
4337 	neigh_release(n);
4338 
4339 	return 0;
4340 
4341 err_neigh_entry_insert:
4342 	neigh_entry->key.n = old_n;
4343 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4344 	neigh_release(n);
4345 	return err;
4346 }
4347 
4348 static void
4349 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4350 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4351 			      bool removing, bool dead)
4352 {
4353 	struct mlxsw_sp_nexthop *nh;
4354 
4355 	if (list_empty(&neigh_entry->nexthop_list))
4356 		return;
4357 
4358 	if (dead) {
4359 		int err;
4360 
4361 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4362 							  neigh_entry);
4363 		if (err)
4364 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4365 		return;
4366 	}
4367 
4368 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4369 			    neigh_list_node) {
4370 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4371 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4372 	}
4373 }
4374 
4375 static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
4376 				       struct mlxsw_sp_crif *crif)
4377 {
4378 	if (nh->crif)
4379 		return;
4380 
4381 	nh->crif = crif;
4382 	list_add(&nh->crif_list_node, &crif->nexthop_list);
4383 }
4384 
4385 static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
4386 {
4387 	if (!nh->crif)
4388 		return;
4389 
4390 	list_del(&nh->crif_list_node);
4391 	nh->crif = NULL;
4392 }
4393 
4394 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4395 				       struct mlxsw_sp_nexthop *nh)
4396 {
4397 	struct mlxsw_sp_neigh_entry *neigh_entry;
4398 	struct net_device *dev;
4399 	struct neighbour *n;
4400 	u8 nud_state, dead;
4401 	int err;
4402 
4403 	if (WARN_ON(!nh->crif->rif))
4404 		return 0;
4405 
4406 	if (!nh->nhgi->gateway || nh->neigh_entry)
4407 		return 0;
4408 	dev = mlxsw_sp_nexthop_dev(nh);
4409 
4410 	/* Take a reference of neigh here ensuring that neigh would
4411 	 * not be destroyed before the nexthop entry is finished.
4412 	 * The reference is taken either in neigh_lookup() or
4413 	 * in neigh_create() in case n is not found.
4414 	 */
4415 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4416 	if (!n) {
4417 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4418 		if (IS_ERR(n))
4419 			return PTR_ERR(n);
4420 		neigh_event_send(n, NULL);
4421 	}
4422 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4423 	if (!neigh_entry) {
4424 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4425 		if (IS_ERR(neigh_entry)) {
4426 			err = -EINVAL;
4427 			goto err_neigh_entry_create;
4428 		}
4429 	}
4430 
4431 	/* If that is the first nexthop connected to that neigh, add to
4432 	 * nexthop_neighs_list
4433 	 */
4434 	if (list_empty(&neigh_entry->nexthop_list))
4435 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4436 			      &mlxsw_sp->router->nexthop_neighs_list);
4437 
4438 	nh->neigh_entry = neigh_entry;
4439 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4440 	read_lock_bh(&n->lock);
4441 	nud_state = n->nud_state;
4442 	dead = n->dead;
4443 	read_unlock_bh(&n->lock);
4444 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4445 
4446 	return 0;
4447 
4448 err_neigh_entry_create:
4449 	neigh_release(n);
4450 	return err;
4451 }
4452 
4453 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4454 					struct mlxsw_sp_nexthop *nh)
4455 {
4456 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4457 	struct neighbour *n;
4458 
4459 	if (!neigh_entry)
4460 		return;
4461 	n = neigh_entry->key.n;
4462 
4463 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4464 	list_del(&nh->neigh_list_node);
4465 	nh->neigh_entry = NULL;
4466 
4467 	/* If that is the last nexthop connected to that neigh, remove from
4468 	 * nexthop_neighs_list
4469 	 */
4470 	if (list_empty(&neigh_entry->nexthop_list))
4471 		list_del(&neigh_entry->nexthop_neighs_list_node);
4472 
4473 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4474 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4475 
4476 	neigh_release(n);
4477 }
4478 
4479 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4480 {
4481 	struct net_device *ul_dev;
4482 	bool is_up;
4483 
4484 	rcu_read_lock();
4485 	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4486 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4487 	rcu_read_unlock();
4488 
4489 	return is_up;
4490 }
4491 
4492 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4493 				       struct mlxsw_sp_nexthop *nh,
4494 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4495 {
4496 	struct mlxsw_sp_crif *crif;
4497 	bool removing;
4498 
4499 	if (!nh->nhgi->gateway || nh->ipip_entry)
4500 		return;
4501 
4502 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
4503 	if (WARN_ON(!crif))
4504 		return;
4505 
4506 	nh->ipip_entry = ipip_entry;
4507 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4508 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4509 	mlxsw_sp_nexthop_crif_init(nh, crif);
4510 }
4511 
4512 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4513 				       struct mlxsw_sp_nexthop *nh)
4514 {
4515 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4516 
4517 	if (!ipip_entry)
4518 		return;
4519 
4520 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4521 	nh->ipip_entry = NULL;
4522 }
4523 
4524 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4525 					const struct fib_nh *fib_nh,
4526 					enum mlxsw_sp_ipip_type *p_ipipt)
4527 {
4528 	struct net_device *dev = fib_nh->fib_nh_dev;
4529 
4530 	return dev &&
4531 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4532 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4533 }
4534 
4535 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4536 				      struct mlxsw_sp_nexthop *nh,
4537 				      const struct net_device *dev)
4538 {
4539 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4540 	struct mlxsw_sp_ipip_entry *ipip_entry;
4541 	struct mlxsw_sp_crif *crif;
4542 	int err;
4543 
4544 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4545 	if (ipip_entry) {
4546 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4547 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4548 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4549 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4550 			return 0;
4551 		}
4552 	}
4553 
4554 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4555 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
4556 	if (!crif)
4557 		return 0;
4558 
4559 	mlxsw_sp_nexthop_crif_init(nh, crif);
4560 
4561 	if (!crif->rif)
4562 		return 0;
4563 
4564 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4565 	if (err)
4566 		goto err_neigh_init;
4567 
4568 	return 0;
4569 
4570 err_neigh_init:
4571 	mlxsw_sp_nexthop_crif_fini(nh);
4572 	return err;
4573 }
4574 
4575 static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp,
4576 					  struct mlxsw_sp_nexthop *nh)
4577 {
4578 	switch (nh->type) {
4579 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4580 		return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4581 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4582 		break;
4583 	}
4584 
4585 	return 0;
4586 }
4587 
4588 static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
4589 					   struct mlxsw_sp_nexthop *nh)
4590 {
4591 	switch (nh->type) {
4592 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4593 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4594 		break;
4595 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4596 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4597 		break;
4598 	}
4599 }
4600 
4601 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4602 				       struct mlxsw_sp_nexthop *nh)
4603 {
4604 	mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4605 	mlxsw_sp_nexthop_crif_fini(nh);
4606 }
4607 
4608 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4609 				  struct mlxsw_sp_nexthop_group *nh_grp,
4610 				  struct mlxsw_sp_nexthop *nh,
4611 				  struct fib_nh *fib_nh)
4612 {
4613 	struct net_device *dev = fib_nh->fib_nh_dev;
4614 	struct in_device *in_dev;
4615 	int err;
4616 
4617 	nh->nhgi = nh_grp->nhgi;
4618 	nh->key.fib_nh = fib_nh;
4619 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4620 	nh->nh_weight = fib_nh->fib_nh_weight;
4621 #else
4622 	nh->nh_weight = 1;
4623 #endif
4624 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4625 	nh->neigh_tbl = &arp_tbl;
4626 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4627 	if (err)
4628 		return err;
4629 
4630 	err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
4631 	if (err)
4632 		goto err_counter_enable;
4633 
4634 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4635 
4636 	if (!dev)
4637 		return 0;
4638 	nh->ifindex = dev->ifindex;
4639 
4640 	rcu_read_lock();
4641 	in_dev = __in_dev_get_rcu(dev);
4642 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4643 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4644 		rcu_read_unlock();
4645 		return 0;
4646 	}
4647 	rcu_read_unlock();
4648 
4649 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4650 	if (err)
4651 		goto err_nexthop_neigh_init;
4652 
4653 	return 0;
4654 
4655 err_nexthop_neigh_init:
4656 	list_del(&nh->router_list_node);
4657 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
4658 err_counter_enable:
4659 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4660 	return err;
4661 }
4662 
4663 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4664 				   struct mlxsw_sp_nexthop *nh)
4665 {
4666 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4667 	list_del(&nh->router_list_node);
4668 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
4669 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4670 }
4671 
4672 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4673 				    unsigned long event, struct fib_nh *fib_nh)
4674 {
4675 	struct mlxsw_sp_nexthop_key key;
4676 	struct mlxsw_sp_nexthop *nh;
4677 
4678 	key.fib_nh = fib_nh;
4679 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4680 	if (!nh)
4681 		return;
4682 
4683 	switch (event) {
4684 	case FIB_EVENT_NH_ADD:
4685 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4686 		break;
4687 	case FIB_EVENT_NH_DEL:
4688 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4689 		break;
4690 	}
4691 
4692 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4693 }
4694 
4695 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4696 					struct mlxsw_sp_rif *rif)
4697 {
4698 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
4699 	struct mlxsw_sp_nexthop *nh;
4700 	bool removing;
4701 
4702 	list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
4703 		switch (nh->type) {
4704 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4705 			removing = false;
4706 			break;
4707 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4708 			removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
4709 			break;
4710 		default:
4711 			WARN_ON(1);
4712 			continue;
4713 		}
4714 
4715 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4716 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4717 	}
4718 }
4719 
4720 static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
4721 					  struct mlxsw_sp_rif *rif)
4722 {
4723 	struct mlxsw_sp_nexthop *nh, *tmp;
4724 	unsigned int n = 0;
4725 	int err;
4726 
4727 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4728 				 crif_list_node) {
4729 		err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh);
4730 		if (err)
4731 			goto err_nexthop_type_rif;
4732 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4733 		n++;
4734 	}
4735 
4736 	return 0;
4737 
4738 err_nexthop_type_rif:
4739 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4740 				 crif_list_node) {
4741 		if (!n--)
4742 			break;
4743 		mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4744 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4745 	}
4746 	return err;
4747 }
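
/* The error path above unwinds only the 'n' nexthops that were successfully
 * initialized: the second walk re-traverses the list from the head and stops
 * once 'n' entries have been torn down, leaving the CRIF's nexthop list in
 * its original state.
 */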

static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
				 crif_list_node) {
		mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
	}
}

static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_ratr_trap_action trap_action;
	char ratr_pl[MLXSW_REG_RATR_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &mlxsw_sp->router->adj_trap_index);
	if (err)
		return err;

	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
			    MLXSW_REG_RATR_TYPE_ETHERNET,
			    mlxsw_sp->router->adj_trap_index,
			    mlxsw_sp->router->lb_crif->rif->rif_index);
	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
	if (err)
		goto err_ratr_write;

	return 0;

err_ratr_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_trap_index);
	return err;
}

static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_trap_index);
}
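
/* The trap adjacency entry allocated above is shared by all nexthop groups:
 * mlxsw_sp_fib_entry_op_remote() points routes at adj_trap_index whenever a
 * group has nexthops and a RIF but no valid adjacency index, so packets are
 * trapped to the CPU instead of being dropped.
 */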

static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
		return 0;

	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
	if (err)
		return err;

	refcount_set(&mlxsw_sp->router->num_groups, 1);

	return 0;
}

static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
		return;

	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
}
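
/* The pair above implements a lazy singleton around the trap adjacency
 * entry: the first nexthop group to appear allocates it, subsequent groups
 * only bump the refcount via refcount_inc_not_zero(), and the last group to
 * disappear frees it.
 */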

static void
mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_nexthop_group *nh_grp,
			     unsigned long *activity)
{
	char *ratrad_pl;
	int i, err;

	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
	if (!ratrad_pl)
		return;

	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
			      nh_grp->nhgi->count);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
	if (err)
		goto out;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
			continue;
		bitmap_set(activity, i, 1);
	}

out:
	kfree(ratrad_pl);
}

#define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */

static void
mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_nexthop_group *nh_grp)
{
	unsigned long *activity;

	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
	if (!activity)
		return;

	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
					nh_grp->nhgi->count, activity);

	bitmap_free(activity);
}

static void
mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_router *router;
	bool reschedule = false;

	router = container_of(work, struct mlxsw_sp_router,
			      nh_grp_activity_dw.work);

	mutex_lock(&router->lock);

	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
		reschedule = true;
	}

	mutex_unlock(&router->lock);

	if (!reschedule)
		return;
	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
}
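
/* For illustration, a resilient nexthop group created from user space with
 * something along the lines of (iproute2 syntax, which may differ between
 * versions):
 *
 *   # ip nexthop add id 1 via 192.0.2.1 dev swp1
 *   # ip nexthop add id 10 group 1 type resilient buckets 32 idle_timer 120
 *
 * ends up on nh_res_grp_list, and the delayed work above then queries and
 * clears its bucket activity once per second so that the kernel's idle
 * timers reflect what the hardware actually forwarded.
 */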

static int
mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
				     const struct nh_notifier_single_info *nh,
				     struct netlink_ext_ack *extack)
{
	int err = -EINVAL;

	if (nh->is_fdb)
		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
	else if (nh->has_encap)
		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
	else
		err = 0;

	return err;
}

static int
mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
					  const struct nh_notifier_single_info *nh,
					  struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
	if (err)
		return err;

	/* Device-only nexthops (no gateway) with an IPIP device are
	 * programmed as encapsulating adjacency entries.
	 */
	if (!nh->gw_family && !nh->is_reject &&
	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
		return -EINVAL;
	}

	return 0;
}

static int
mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
				    const struct nh_notifier_grp_info *nh_grp,
				    struct netlink_ext_ack *extack)
{
	int i;

	if (nh_grp->is_fdb) {
		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
		return -EINVAL;
	}

	for (i = 0; i < nh_grp->num_nh; i++) {
		const struct nh_notifier_single_info *nh;
		int err;

		nh = &nh_grp->nh_entries[i].nh;
		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								extack);
		if (err)
			return err;
	}

	return 0;
}

static int
mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
					     const struct nh_notifier_res_table_info *nh_res_table,
					     struct netlink_ext_ack *extack)
{
	unsigned int alloc_size;
	bool valid_size = false;
	int err, i;

	if (nh_res_table->num_nh_buckets < 32) {
		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
		return -EINVAL;
	}

	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
		const struct mlxsw_sp_adj_grp_size_range *size_range;

		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];

		if (nh_res_table->num_nh_buckets >= size_range->start &&
		    nh_res_table->num_nh_buckets <= size_range->end) {
			valid_size = true;
			break;
		}
	}

	if (!valid_size) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
		return -EINVAL;
	}

	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      nh_res_table->num_nh_buckets,
					      &alloc_size);
	if (err || nh_res_table->num_nh_buckets != alloc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
		return -EINVAL;
	}

	return 0;
}
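
/* In other words, a bucket count is accepted only if it is at least 32,
 * falls inside one of the device's adjacency group size ranges and exactly
 * matches a KVDL allocation size. As a hypothetical example, with
 * power-of-two KVDL partitions "buckets 512" would be accepted while
 * "buckets 500" would fail with the extack above; the exact set of valid
 * sizes is device-dependent.
 */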

static int
mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
					const struct nh_notifier_res_table_info *nh_res_table,
					struct netlink_ext_ack *extack)
{
	int err;
	u16 i;

	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
							   nh_res_table,
							   extack);
	if (err)
		return err;

	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
		const struct nh_notifier_single_info *nh;
		int err;

		nh = &nh_res_table->nhs[i];
		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								extack);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
					 unsigned long event,
					 struct nh_notifier_info *info)
{
	struct nh_notifier_single_info *nh;

	if (event != NEXTHOP_EVENT_REPLACE &&
	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
		return 0;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
							    info->extack);
	case NH_NOTIFIER_INFO_TYPE_GRP:
		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
							   info->nh_grp,
							   info->extack);
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
							       info->nh_res_table,
							       info->extack);
	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
		nh = &info->nh_res_bucket->new_nh;
		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								 info->extack);
	default:
		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
		return -EOPNOTSUPP;
	}
}

static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
					    const struct nh_notifier_info *info)
{
	const struct net_device *dev;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		dev = info->nh->dev;
		return info->nh->gw_family || info->nh->is_reject ||
		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
	case NH_NOTIFIER_INFO_TYPE_GRP:
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		/* Already validated earlier. */
		return true;
	default:
		return false;
	}
}

static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_nexthop *nh)
{
	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
	nh->should_offload = 1;
	/* While nexthops that discard packets do not forward packets
	 * via an egress RIF, they still need to be programmed using a
	 * valid RIF, so use the loopback RIF created during init.
	 */
	nh->crif = mlxsw_sp->router->lb_crif;
}

static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_nexthop *nh)
{
	nh->crif = NULL;
	nh->should_offload = 0;
}
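
/* For illustration, a blackhole nexthop such as one created with
 * "ip nexthop add id 2 blackhole" (iproute2 syntax) is offloaded as a
 * discard adjacency entry; the loopback RIF assigned above only satisfies
 * the hardware's requirement for a valid RIF and never forwards packets.
 */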

static int
mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_nexthop_group *nh_grp,
			  struct mlxsw_sp_nexthop *nh,
			  struct nh_notifier_single_info *nh_obj, int weight)
{
	struct net_device *dev = nh_obj->dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = weight;

	switch (nh_obj->gw_family) {
	case AF_INET:
		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
		nh->neigh_tbl = &arp_tbl;
		break;
	case AF_INET6:
		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
#if IS_ENABLED(CONFIG_IPV6)
		nh->neigh_tbl = &nd_tbl;
#endif
		break;
	}

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
	nh->ifindex = dev->ifindex;
	nh->id = nh_obj->id;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_type_init;

	if (nh_obj->is_reject)
		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);

	/* In a resilient nexthop group, all the nexthops must be written to
	 * the adjacency table, even if they do not have a valid neighbour
	 * or RIF.
	 */
	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
		nh->should_offload = 1;
	}

	return 0;

err_type_init:
	list_del(&nh->router_list_node);
	return err;
}

static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_nexthop *nh)
{
	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
	nh->should_offload = 0;
}

static int
mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	bool is_resilient = false;
	bool hw_stats = false;
	unsigned int nhs;
	int err, i;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		nhs = 1;
		break;
	case NH_NOTIFIER_INFO_TYPE_GRP:
		nhs = info->nh_grp->num_nh;
		hw_stats = info->nh_grp->hw_stats;
		break;
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		nhs = info->nh_res_table->num_nh_buckets;
		hw_stats = info->nh_res_table->hw_stats;
		is_resilient = true;
		break;
	default:
		return -EINVAL;
	}

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
	nhgi->is_resilient = is_resilient;
	nhgi->count = nhs;
	nhgi->hw_stats = hw_stats;

	xa_init_flags(&nhgi->nexthop_counters, XA_FLAGS_ALLOC1);

	for (i = 0; i < nhgi->count; i++) {
		struct nh_notifier_single_info *nh_obj;
		int weight;

		nh = &nhgi->nexthops[i];
		switch (info->type) {
		case NH_NOTIFIER_INFO_TYPE_SINGLE:
			nh_obj = info->nh;
			weight = 1;
			break;
		case NH_NOTIFIER_INFO_TYPE_GRP:
			nh_obj = &info->nh_grp->nh_entries[i].nh;
			weight = info->nh_grp->nh_entries[i].weight;
			break;
		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
			nh_obj = &info->nh_res_table->nhs[i];
			weight = 1;
			break;
		default:
			err = -EINVAL;
			goto err_nexthop_obj_init;
		}
		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
						weight);
		if (err)
			goto err_nexthop_obj_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err) {
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
		goto err_group_refresh;
	}

	/* Add resilient nexthop groups to a list so that the activity of their
	 * nexthop buckets will be periodically queried and cleared.
	 */
	if (nhgi->is_resilient) {
		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
	}

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop_obj_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}
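
/* The initializer above folds all three notifier layouts into one scheme:
 * a single nexthop is treated as a group of one, a multipath group
 * contributes one weighted nexthop per entry, and a resilient group
 * contributes one nexthop per bucket with weight 1, since each bucket
 * occupies exactly one adjacency entry.
 */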

static void
mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int i;

	if (nhgi->is_resilient) {
		list_del(&nhgi->list);
		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
			cancel_delayed_work(&router->nh_grp_activity_dw);
	}

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	WARN_ON(!xa_empty(&nhgi->nexthop_counters));
	xa_destroy(&nhgi->nexthop_counters);
	kfree(nhgi);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
				  struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	nh_grp->obj.id = info->id;

	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
	if (err)
		goto err_nexthop_group_info_init;

	nh_grp->can_destroy = false;

	return nh_grp;

err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	cmp_arg.id = id;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}

static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
}

static int
mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp,
				   struct mlxsw_sp_nexthop_group *old_nh_grp,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
	int err;

	old_nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = old_nhgi;
	old_nhgi->nh_grp = nh_grp;

	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* Both the old adjacency index and the new one are valid.
		 * Routes are currently using the old one. Tell the device to
		 * replace the old adjacency index with the new one.
		 */
		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
						     old_nhgi->adj_index,
						     old_nhgi->ecmp_size);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
			goto err_out;
		}
	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
		/* The old adjacency index is valid, while the new one is not.
		 * Iterate over all the routes using the group and change them
		 * to trap packets to the CPU.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
			goto err_out;
		}
	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* The old adjacency index is invalid, while the new one is.
		 * Iterate over all the routes using the group and change them
		 * to forward packets using the new valid index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
			goto err_out;
		}
	}

	/* Make sure the flags are set / cleared based on the new nexthop group
	 * information.
	 */
	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);

	/* At this point 'nh_grp' is just a shell that is not used by anyone
	 * and its nexthop group info is the old info that was just replaced
	 * with the new one. Remove it.
	 */
	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);

	return 0;

err_out:
	old_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = nh_grp;
	old_nh_grp->nhgi = old_nhgi;
	return err;
}
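
/* The replacement works by swapping the group info (nhgi) pointers between
 * the newly created shell and the existing group, reprogramming the
 * hardware to the new adjacency index (or to trap), and finally destroying
 * the shell, which by then carries the old info. On failure the pointers
 * are swapped back, leaving the old group untouched.
 */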

static int mlxsw_sp_nexthop_obj_res_group_pre(struct mlxsw_sp *mlxsw_sp,
					      struct nh_notifier_info *info)
{
	struct nh_notifier_grp_info *grp_info = info->nh_grp;
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;
	int i;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp)
		return 0;
	nhgi = nh_grp->nhgi;

	if (nhgi->hw_stats == grp_info->hw_stats)
		return 0;

	nhgi->hw_stats = grp_info->hw_stats;

	for (i = 0; i < nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		if (nh->offloaded)
			nh->update = 1;
	}

	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	nhgi->hw_stats = !grp_info->hw_stats;
	return err;
}

static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
				    struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
	struct netlink_ext_ack *extack = info->extack;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
	if (IS_ERR(nh_grp))
		return PTR_ERR(nh_grp);

	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!old_nh_grp)
		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
	else
		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
							 old_nh_grp, extack);

	if (err) {
		nh_grp->can_destroy = true;
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
	}

	return err;
}

static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp)
		return;

	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);

	/* If the group still has routes using it, then defer the delete
	 * operation until the last route using it is deleted.
	 */
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
}

static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
					     u32 adj_index, char *ratr_pl)
{
	MLXSW_REG_ZERO(ratr, ratr_pl);
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);

	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
{
	/* Clear the opcode and activity on both the old and new payload as
	 * they are irrelevant for the comparison.
	 */
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl, 0);
	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);

	/* If the contents of the adjacency entry are consistent with the
	 * replacement request, then replacement was successful.
	 */
	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
		return 0;

	return -EINVAL;
}
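
/* Together, the query and compare helpers make non-forced bucket
 * replacement "check after write": the entry is written with the activity
 * check in effect, read back, and compared against the requested payload.
 * A mismatch indicates the hardware saw activity and rejected the write,
 * which is reported as -EINVAL.
 */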

static int
mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	bool force = info->nh_res_bucket->force;
	char ratr_pl_new[MLXSW_REG_RATR_LEN];
	char ratr_pl[MLXSW_REG_RATR_LEN];
	u32 adj_index;
	int err;

	/* No point in trying an atomic replacement if the idle timer interval
	 * is smaller than the interval in which we query and clear activity.
	 */
	if (!force && info->nh_res_bucket->idle_timer_ms <
	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
		force = true;

	adj_index = nh->nhgi->adj_index + bucket_index;
	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
		return err;
	}

	if (!force) {
		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
							ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
			return err;
		}

		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
			return err;
		}
	}

	nh->update = 0;
	nh->offloaded = 1;
	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);

	return 0;
}

static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
					       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct nh_notifier_single_info *nh_obj;
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
		return -EINVAL;
	}

	nhgi = nh_grp->nhgi;

	if (bucket_index >= nhgi->count) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
		return -EINVAL;
	}

	nh = &nhgi->nexthops[bucket_index];
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);

	nh_obj = &info->nh_res_bucket->new_nh;
	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
		goto err_nexthop_obj_init;
	}

	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
	if (err)
		goto err_nexthop_obj_bucket_adj_update;

	return 0;

err_nexthop_obj_bucket_adj_update:
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
err_nexthop_obj_init:
	nh_obj = &info->nh_res_bucket->old_nh;
	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	/* The old adjacency entry was not overwritten */
	nh->update = 0;
	nh->offloaded = 1;
	return err;
}

static void
mlxsw_sp_nexthop_obj_mp_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group_info *nhgi,
				     struct nh_notifier_grp_hw_stats_info *info)
{
	int nhi;

	for (nhi = 0; nhi < info->num_nh; nhi++) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[nhi];
		u64 packets;
		int err;

		err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
		if (err)
			continue;

		nh_grp_hw_stats_report_delta(info, nhi, packets);
	}
}

static void
mlxsw_sp_nexthop_obj_res_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_nexthop_group_info *nhgi,
				      struct nh_notifier_grp_hw_stats_info *info)
{
	int nhi = -1;
	int bucket;

	for (bucket = 0; bucket < nhgi->count; bucket++) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[bucket];
		u64 packets;
		int err;

		if (nhi == -1 || info->stats[nhi].id != nh->id) {
			for (nhi = 0; nhi < info->num_nh; nhi++)
				if (info->stats[nhi].id == nh->id)
					break;
			if (WARN_ON_ONCE(nhi == info->num_nh)) {
				nhi = -1;
				continue;
			}
		}

		err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
		if (err)
			continue;

		nh_grp_hw_stats_report_delta(info, nhi, packets);
	}
}
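
/* Consecutive buckets typically point at the same nexthop, so the resilient
 * variant caches the last stats index in 'nhi' and only re-resolves it when
 * the bucket's nexthop ID changes; each bucket's packet count is then
 * reported as a delta against that nexthop's entry in 'info'.
 */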

static void mlxsw_sp_nexthop_obj_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
					      struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (info->type != NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS)
		return;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp)
		return;
	nhgi = nh_grp->nhgi;

	if (nhgi->is_resilient)
		mlxsw_sp_nexthop_obj_res_hw_stats_get(mlxsw_sp, nhgi,
						      info->nh_grp_hw_stats);
	else
		mlxsw_sp_nexthop_obj_mp_hw_stats_get(mlxsw_sp, nhgi,
						     info->nh_grp_hw_stats);
}

static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct nh_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
	if (err)
		goto out;

	mutex_lock(&router->lock);

	switch (event) {
	case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE:
		err = mlxsw_sp_nexthop_obj_res_group_pre(router->mlxsw_sp,
							 info);
		break;
	case NEXTHOP_EVENT_REPLACE:
		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_DEL:
		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_BUCKET_REPLACE:
		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
							  info);
		break;
	case NEXTHOP_EVENT_HW_STATS_REPORT_DELTA:
		mlxsw_sp_nexthop_obj_hw_stats_get(router->mlxsw_sp, info);
		break;
	default:
		break;
	}

	mutex_unlock(&router->lock);

out:
	return notifier_from_errno(err);
}
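
/* Validation deliberately runs before router->lock is taken, since it only
 * inspects the notifier payload; all state-changing handlers in the switch
 * above run under the lock. Events the driver does not handle fall through
 * with err == 0.
 */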

static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				   struct fib_info *fi)
{
	const struct fib_nh *nh = fib_info_nh(fi, 0);

	return nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}

static int
mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct fib_nh *fib_nh;

		nh = &nhgi->nexthops[i];
		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
	nh_grp->ipv4.fi = fi;
	fib_info_hold(fi);

	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	fib_info_put(fi);
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
	fib_info_put(nh_grp->ipv4.fi);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (fi->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   fi->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
out:
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}

static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->dscp;
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->nhgi->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return true;
	default:
		return false;
	}
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
		struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (dev && dev == rt->fib6_nh->fib_nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->fib6_nh->fib_nh_gw6))
			return nh;
	}

	return NULL;
}

static void
mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib_entry_notifier_info *fen_info)
{
	u32 *p_dst = (u32 *) &fen_info->dst;
	struct fib_rt_info fri;

	fri.fi = fen_info->fi;
	fri.tb_id = fen_info->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = fen_info->dst_len;
	fri.dscp = fen_info->dscp;
	fri.type = fen_info->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = true;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.dscp = fib4_entry->dscp;
	fri.type = fib4_entry->type;
	fri.offload = should_offload;
	fri.trap = !should_offload;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.dscp = fib4_entry->dscp;
	fri.type = fib4_entry->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
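
/* These helpers drive the per-route flags user space sees, e.g. in
 * "ip route show": offload when the entry forwards in hardware, trap when
 * the hardware punts matching packets to the CPU, and offload_failed when
 * programming the device failed altogether.
 */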

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
	int i;

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	for (i = 0; i < nrt6; i++)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
				       false, false, true);
}
#else
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       should_offload, !should_offload, false);
}
#else
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       false, false, false);
}
#else
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif

static void
mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry *fib_entry,
				    enum mlxsw_reg_ralue_op op)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}

static void
mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
			      const struct mlxsw_sp_fib_entry *fib_entry,
			      enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralxx_protocol proto;
	u32 *p_dip;

	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;

	switch (fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		p_dip = (u32 *) fib_entry->fib_node->key.addr;
		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      *p_dip);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      fib_entry->fib_node->key.addr);
		break;
	}
}

static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* If the nexthop group adjacency index is valid, use it with the
	 * provided ECMP size. Otherwise, set up a trap and pass traffic
	 * to the kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = nhgi->adj_index;
		ecmp_size = nhgi->ecmp_size;
	} else if (!nhgi->adj_index_valid && nhgi->count &&
		   mlxsw_sp_nhgi_rif(nhgi)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = mlxsw_sp->router->adj_trap_index;
		ecmp_size = 1;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
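
/* Three cases, in order: a valid adjacency index forwards via the group's
 * ECMP range; a group that has nexthops and a RIF but no valid index is
 * pointed at the shared trap adjacency entry, so packets reach the CPU via
 * RTR_EGRESS0; otherwise the route itself traps with RTR_INGRESS0.
 */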
6240 
mlxsw_sp_fib_entry_op_local(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fib_entry * fib_entry,enum mlxsw_reg_ralue_op op)6241 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
6242 				       struct mlxsw_sp_fib_entry *fib_entry,
6243 				       enum mlxsw_reg_ralue_op op)
6244 {
6245 	struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
6246 	enum mlxsw_reg_ralue_trap_action trap_action;
6247 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6248 	u16 trap_id = 0;
6249 	u16 rif_index = 0;
6250 
6251 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
6252 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
6253 		rif_index = rif->rif_index;
6254 	} else {
6255 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6256 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
6257 	}
6258 
6259 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6260 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
6261 				       rif_index);
6262 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6263 }
6264 
mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fib_entry * fib_entry,enum mlxsw_reg_ralue_op op)6265 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
6266 				      struct mlxsw_sp_fib_entry *fib_entry,
6267 				      enum mlxsw_reg_ralue_op op)
6268 {
6269 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6270 
6271 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6272 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
6273 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6274 }
6275 
mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fib_entry * fib_entry,enum mlxsw_reg_ralue_op op)6276 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
6277 					   struct mlxsw_sp_fib_entry *fib_entry,
6278 					   enum mlxsw_reg_ralue_op op)
6279 {
6280 	enum mlxsw_reg_ralue_trap_action trap_action;
6281 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6282 
6283 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6284 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6285 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
6286 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6287 }
6288 
6289 static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fib_entry * fib_entry,enum mlxsw_reg_ralue_op op)6290 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6291 				  struct mlxsw_sp_fib_entry *fib_entry,
6292 				  enum mlxsw_reg_ralue_op op)
6293 {
6294 	enum mlxsw_reg_ralue_trap_action trap_action;
6295 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6296 	u16 trap_id;
6297 
6298 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6299 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6300 
6301 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6302 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
6303 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6304 }
6305 
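/* IP-in-IP decap entries first invoke the tunnel type's decap_config()
 * callback and then point the RALUE at the entry's tunnel index using
 * the IP2ME-with-tunnel action.
 */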
6306 static int
6307 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6308 				 struct mlxsw_sp_fib_entry *fib_entry,
6309 				 enum mlxsw_reg_ralue_op op)
6310 {
6311 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6312 	const struct mlxsw_sp_ipip_ops *ipip_ops;
6313 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6314 	int err;
6315 
6316 	if (WARN_ON(!ipip_entry))
6317 		return -EINVAL;
6318 
6319 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6320 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6321 				     fib_entry->decap.tunnel_index);
6322 	if (err)
6323 		return err;
6324 
6325 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6326 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6327 					   fib_entry->decap.tunnel_index);
6328 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6329 }
6330 
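/* NVE decap reuses the IP2ME-with-tunnel action; the tunnel index was
 * already configured when the NVE tunnel itself was set up.
 */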
6331 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6332 					   struct mlxsw_sp_fib_entry *fib_entry,
6333 					   enum mlxsw_reg_ralue_op op)
6334 {
6335 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6336 
6337 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6338 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6339 					   fib_entry->decap.tunnel_index);
6340 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6341 }
6342 
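/* Dispatch a RALUE operation according to the FIB entry type. */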
6343 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6344 				   struct mlxsw_sp_fib_entry *fib_entry,
6345 				   enum mlxsw_reg_ralue_op op)
6346 {
6347 	switch (fib_entry->type) {
6348 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6349 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
6350 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6351 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
6352 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6353 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
6354 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6355 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
6356 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6357 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
6358 							 op);
6359 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6360 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
6361 							fib_entry, op);
6362 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6363 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
6364 	}
6365 	return -EINVAL;
6366 }
6367 
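/* Write the entry to the device and, on success, propagate the resulting
 * offload state to the kernel's route flags.
 */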
6368 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6369 				 struct mlxsw_sp_fib_entry *fib_entry,
6370 				 enum mlxsw_reg_ralue_op op)
6371 {
6372 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
6373 
6374 	if (err)
6375 		return err;
6376 
6377 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6378 
6379 	return 0;
6380 }
6381 
6382 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6383 				     struct mlxsw_sp_fib_entry *fib_entry)
6384 {
6385 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6386 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
6387 }
6388 
6389 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6390 				  struct mlxsw_sp_fib_entry *fib_entry)
6391 {
6392 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6393 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
6394 }
6395 
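/* Derive the device entry type from the kernel route type. RTN_LOCAL
 * routes may actually terminate an IP-in-IP or NVE tunnel; otherwise
 * they trap like broadcast routes. Unicast routes are "remote" when a
 * gateway is present and "local" (directly connected) when it is not.
 */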
6396 static int
6397 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6398 			     const struct fib_entry_notifier_info *fen_info,
6399 			     struct mlxsw_sp_fib_entry *fib_entry)
6400 {
6401 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6402 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6403 	struct mlxsw_sp_router *router = mlxsw_sp->router;
6404 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6405 	int ifindex = nhgi->nexthops[0].ifindex;
6406 	struct mlxsw_sp_ipip_entry *ipip_entry;
6407 
6408 	switch (fen_info->type) {
6409 	case RTN_LOCAL:
6410 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6411 							       MLXSW_SP_L3_PROTO_IPV4, dip);
6412 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6413 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6414 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6415 							     fib_entry,
6416 							     ipip_entry);
6417 		}
6418 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6419 						 MLXSW_SP_L3_PROTO_IPV4,
6420 						 &dip)) {
6421 			u32 tunnel_index;
6422 
6423 			tunnel_index = router->nve_decap_config.tunnel_index;
6424 			fib_entry->decap.tunnel_index = tunnel_index;
6425 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6426 			return 0;
6427 		}
6428 		fallthrough;
6429 	case RTN_BROADCAST:
6430 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6431 		return 0;
6432 	case RTN_BLACKHOLE:
6433 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6434 		return 0;
6435 	case RTN_UNREACHABLE:
6436 	case RTN_PROHIBIT:
6437 		/* Packets hitting these routes need to be trapped, but at
6438 		 * a lower priority than packets directed at the host, so
6439 		 * use action type local instead of trap.
6440 		 */
6441 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6442 		return 0;
6443 	case RTN_UNICAST:
6444 		if (nhgi->gateway)
6445 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6446 		else
6447 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6448 		return 0;
6449 	default:
6450 		return -EINVAL;
6451 	}
6452 }
6453 
6454 static void
6455 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6456 			      struct mlxsw_sp_fib_entry *fib_entry)
6457 {
6458 	switch (fib_entry->type) {
6459 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6460 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6461 		break;
6462 	default:
6463 		break;
6464 	}
6465 }
6466 
6467 static void
6468 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6469 			       struct mlxsw_sp_fib4_entry *fib4_entry)
6470 {
6471 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6472 }
6473 
6474 static struct mlxsw_sp_fib4_entry *
6475 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6476 			   struct mlxsw_sp_fib_node *fib_node,
6477 			   const struct fib_entry_notifier_info *fen_info)
6478 {
6479 	struct mlxsw_sp_fib4_entry *fib4_entry;
6480 	struct mlxsw_sp_fib_entry *fib_entry;
6481 	int err;
6482 
6483 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6484 	if (!fib4_entry)
6485 		return ERR_PTR(-ENOMEM);
6486 	fib_entry = &fib4_entry->common;
6487 
6488 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6489 	if (err)
6490 		goto err_nexthop4_group_get;
6491 
6492 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6493 					     fib_node->fib);
6494 	if (err)
6495 		goto err_nexthop_group_vr_link;
6496 
6497 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6498 	if (err)
6499 		goto err_fib4_entry_type_set;
6500 
6501 	fib4_entry->fi = fen_info->fi;
6502 	fib_info_hold(fib4_entry->fi);
6503 	fib4_entry->tb_id = fen_info->tb_id;
6504 	fib4_entry->type = fen_info->type;
6505 	fib4_entry->dscp = fen_info->dscp;
6506 
6507 	fib_entry->fib_node = fib_node;
6508 
6509 	return fib4_entry;
6510 
6511 err_fib4_entry_type_set:
6512 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6513 err_nexthop_group_vr_link:
6514 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6515 err_nexthop4_group_get:
6516 	kfree(fib4_entry);
6517 	return ERR_PTR(err);
6518 }
6519 
6520 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6521 					struct mlxsw_sp_fib4_entry *fib4_entry)
6522 {
6523 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6524 
6525 	fib_info_put(fib4_entry->fi);
6526 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6527 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6528 					 fib_node->fib);
6529 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6530 	kfree(fib4_entry);
6531 }
6532 
6533 static struct mlxsw_sp_fib4_entry *
6534 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6535 			   const struct fib_entry_notifier_info *fen_info)
6536 {
6537 	struct mlxsw_sp_fib4_entry *fib4_entry;
6538 	struct mlxsw_sp_fib_node *fib_node;
6539 	struct mlxsw_sp_fib *fib;
6540 	struct mlxsw_sp_vr *vr;
6541 
6542 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6543 	if (!vr)
6544 		return NULL;
6545 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6546 
6547 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6548 					    sizeof(fen_info->dst),
6549 					    fen_info->dst_len);
6550 	if (!fib_node)
6551 		return NULL;
6552 
6553 	fib4_entry = container_of(fib_node->fib_entry,
6554 				  struct mlxsw_sp_fib4_entry, common);
6555 	if (fib4_entry->tb_id == fen_info->tb_id &&
6556 	    fib4_entry->dscp == fen_info->dscp &&
6557 	    fib4_entry->type == fen_info->type &&
6558 	    fib4_entry->fi == fen_info->fi)
6559 		return fib4_entry;
6560 
6561 	return NULL;
6562 }
6563 
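/* FIB nodes are hashed per FIB table on {address, prefix length}. */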
6564 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6565 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6566 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6567 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6568 	.automatic_shrinking = true,
6569 };
6570 
6571 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6572 				    struct mlxsw_sp_fib_node *fib_node)
6573 {
6574 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6575 				      mlxsw_sp_fib_ht_params);
6576 }
6577 
6578 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6579 				     struct mlxsw_sp_fib_node *fib_node)
6580 {
6581 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6582 			       mlxsw_sp_fib_ht_params);
6583 }
6584 
6585 static struct mlxsw_sp_fib_node *
6586 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6587 			 size_t addr_len, unsigned char prefix_len)
6588 {
6589 	struct mlxsw_sp_fib_key key;
6590 
6591 	memset(&key, 0, sizeof(key));
6592 	memcpy(key.addr, addr, addr_len);
6593 	key.prefix_len = prefix_len;
6594 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6595 }
6596 
6597 static struct mlxsw_sp_fib_node *
6598 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6599 			 size_t addr_len, unsigned char prefix_len)
6600 {
6601 	struct mlxsw_sp_fib_node *fib_node;
6602 
6603 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6604 	if (!fib_node)
6605 		return NULL;
6606 
6607 	list_add(&fib_node->list, &fib->node_list);
6608 	memcpy(fib_node->key.addr, addr, addr_len);
6609 	fib_node->key.prefix_len = prefix_len;
6610 
6611 	return fib_node;
6612 }
6613 
6614 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6615 {
6616 	list_del(&fib_node->list);
6617 	kfree(fib_node);
6618 }
6619 
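/* Account a prefix length against the FIB's LPM tree. The first user of
 * a new prefix length may require a tree that also includes that length,
 * in which case all virtual routers bound to this FIB are moved over to
 * the new tree.
 */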
6620 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6621 				      struct mlxsw_sp_fib_node *fib_node)
6622 {
6623 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6624 	struct mlxsw_sp_fib *fib = fib_node->fib;
6625 	struct mlxsw_sp_lpm_tree *lpm_tree;
6626 	int err;
6627 
6628 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6629 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6630 		goto out;
6631 
6632 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6633 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6634 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6635 					 fib->proto);
6636 	if (IS_ERR(lpm_tree))
6637 		return PTR_ERR(lpm_tree);
6638 
6639 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6640 	if (err)
6641 		goto err_lpm_tree_replace;
6642 
6643 out:
6644 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6645 	return 0;
6646 
6647 err_lpm_tree_replace:
6648 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6649 	return err;
6650 }
6651 
6652 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6653 					 struct mlxsw_sp_fib_node *fib_node)
6654 {
6655 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6656 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6657 	struct mlxsw_sp_fib *fib = fib_node->fib;
6658 	int err;
6659 
6660 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6661 		return;
6662 	/* Try to construct a new LPM tree from the current prefix usage
6663 	 * minus the unused one. If we fail, continue using the old one.
6664 	 */
6665 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6666 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6667 				    fib_node->key.prefix_len);
6668 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6669 					 fib->proto);
6670 	if (IS_ERR(lpm_tree))
6671 		return;
6672 
6673 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6674 	if (err)
6675 		goto err_lpm_tree_replace;
6676 
6677 	return;
6678 
6679 err_lpm_tree_replace:
6680 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6681 }
6682 
6683 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6684 				  struct mlxsw_sp_fib_node *fib_node,
6685 				  struct mlxsw_sp_fib *fib)
6686 {
6687 	int err;
6688 
6689 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6690 	if (err)
6691 		return err;
6692 	fib_node->fib = fib;
6693 
6694 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6695 	if (err)
6696 		goto err_fib_lpm_tree_link;
6697 
6698 	return 0;
6699 
6700 err_fib_lpm_tree_link:
6701 	fib_node->fib = NULL;
6702 	mlxsw_sp_fib_node_remove(fib, fib_node);
6703 	return err;
6704 }
6705 
6706 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6707 				   struct mlxsw_sp_fib_node *fib_node)
6708 {
6709 	struct mlxsw_sp_fib *fib = fib_node->fib;
6710 
6711 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6712 	fib_node->fib = NULL;
6713 	mlxsw_sp_fib_node_remove(fib, fib_node);
6714 }
6715 
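/* Get-or-create the FIB node for {tb_id, addr/prefix_len}. Creation
 * takes a reference on the virtual router, which mlxsw_sp_fib_node_put()
 * releases once the node no longer holds a FIB entry.
 */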
6716 static struct mlxsw_sp_fib_node *
6717 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6718 		      size_t addr_len, unsigned char prefix_len,
6719 		      enum mlxsw_sp_l3proto proto)
6720 {
6721 	struct mlxsw_sp_fib_node *fib_node;
6722 	struct mlxsw_sp_fib *fib;
6723 	struct mlxsw_sp_vr *vr;
6724 	int err;
6725 
6726 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6727 	if (IS_ERR(vr))
6728 		return ERR_CAST(vr);
6729 	fib = mlxsw_sp_vr_fib(vr, proto);
6730 
6731 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6732 	if (fib_node)
6733 		return fib_node;
6734 
6735 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6736 	if (!fib_node) {
6737 		err = -ENOMEM;
6738 		goto err_fib_node_create;
6739 	}
6740 
6741 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6742 	if (err)
6743 		goto err_fib_node_init;
6744 
6745 	return fib_node;
6746 
6747 err_fib_node_init:
6748 	mlxsw_sp_fib_node_destroy(fib_node);
6749 err_fib_node_create:
6750 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6751 	return ERR_PTR(err);
6752 }
6753 
6754 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6755 				  struct mlxsw_sp_fib_node *fib_node)
6756 {
6757 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6758 
6759 	if (fib_node->fib_entry)
6760 		return;
6761 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6762 	mlxsw_sp_fib_node_destroy(fib_node);
6763 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6764 }
6765 
6766 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6767 					struct mlxsw_sp_fib_entry *fib_entry)
6768 {
6769 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6770 	int err;
6771 
6772 	fib_node->fib_entry = fib_entry;
6773 
6774 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6775 	if (err)
6776 		goto err_fib_entry_update;
6777 
6778 	return 0;
6779 
6780 err_fib_entry_update:
6781 	fib_node->fib_entry = NULL;
6782 	return err;
6783 }
6784 
6785 static void
6786 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6787 			       struct mlxsw_sp_fib_entry *fib_entry)
6788 {
6789 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6790 
6791 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6792 	fib_node->fib_entry = NULL;
6793 }
6794 
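/* Do not let a main table entry replace a local table one: both tables
 * are squashed into the same virtual router, but the kernel consults the
 * local table first.
 */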
6795 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6796 {
6797 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6798 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6799 
6800 	if (!fib_node->fib_entry)
6801 		return true;
6802 
6803 	fib4_replaced = container_of(fib_node->fib_entry,
6804 				     struct mlxsw_sp_fib4_entry, common);
6805 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6806 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6807 		return false;
6808 
6809 	return true;
6810 }
6811 
6812 static int
6813 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6814 			     const struct fib_entry_notifier_info *fen_info)
6815 {
6816 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6817 	struct mlxsw_sp_fib_entry *replaced;
6818 	struct mlxsw_sp_fib_node *fib_node;
6819 	int err;
6820 
6821 	if (fen_info->fi->nh &&
6822 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6823 		return 0;
6824 
6825 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6826 					 &fen_info->dst, sizeof(fen_info->dst),
6827 					 fen_info->dst_len,
6828 					 MLXSW_SP_L3_PROTO_IPV4);
6829 	if (IS_ERR(fib_node)) {
6830 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6831 		return PTR_ERR(fib_node);
6832 	}
6833 
6834 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6835 	if (IS_ERR(fib4_entry)) {
6836 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6837 		err = PTR_ERR(fib4_entry);
6838 		goto err_fib4_entry_create;
6839 	}
6840 
6841 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6842 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6843 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6844 		return 0;
6845 	}
6846 
6847 	replaced = fib_node->fib_entry;
6848 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
6849 	if (err) {
6850 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6851 		goto err_fib_node_entry_link;
6852 	}
6853 
6854 	/* Nothing to replace */
6855 	if (!replaced)
6856 		return 0;
6857 
6858 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6859 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6860 				     common);
6861 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6862 
6863 	return 0;
6864 
6865 err_fib_node_entry_link:
6866 	fib_node->fib_entry = replaced;
6867 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6868 err_fib4_entry_create:
6869 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6870 	return err;
6871 }
6872 
6873 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6874 				     struct fib_entry_notifier_info *fen_info)
6875 {
6876 	struct mlxsw_sp_fib4_entry *fib4_entry;
6877 	struct mlxsw_sp_fib_node *fib_node;
6878 
6879 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6880 	if (!fib4_entry)
6881 		return;
6882 	fib_node = fib4_entry->common.fib_node;
6883 
6884 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
6885 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6886 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6887 }
6888 
6889 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6890 {
6891 	/* Multicast routes aren't supported, so ignore them. Neighbour
6892 	 * Discovery packets are specifically trapped.
6893 	 */
6894 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6895 		return true;
6896 
6897 	/* Cloned routes are irrelevant in the forwarding path. */
6898 	if (rt->fib6_flags & RTF_CACHE)
6899 		return true;
6900 
6901 	return false;
6902 }
6903 
6904 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6905 {
6906 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6907 
6908 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6909 	if (!mlxsw_sp_rt6)
6910 		return ERR_PTR(-ENOMEM);
6911 
6912 	/* In case of route replace, replaced route is deleted with
6913 	 * no notification. Take reference to prevent accessing freed
6914 	 * memory.
6915 	 */
6916 	mlxsw_sp_rt6->rt = rt;
6917 	fib6_info_hold(rt);
6918 
6919 	return mlxsw_sp_rt6;
6920 }
6921 
6922 #if IS_ENABLED(CONFIG_IPV6)
6923 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6924 {
6925 	fib6_info_release(rt);
6926 }
6927 #else
6928 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6929 {
6930 }
6931 #endif
6932 
6933 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6934 {
6935 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6936 
6937 	if (!mlxsw_sp_rt6->rt->nh)
6938 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6939 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6940 	kfree(mlxsw_sp_rt6);
6941 }
6942 
6943 static struct fib6_info *
6944 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6945 {
6946 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6947 				list)->rt;
6948 }
6949 
6950 static struct mlxsw_sp_rt6 *
6951 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6952 			    const struct fib6_info *rt)
6953 {
6954 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6955 
6956 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6957 		if (mlxsw_sp_rt6->rt == rt)
6958 			return mlxsw_sp_rt6;
6959 	}
6960 
6961 	return NULL;
6962 }
6963 
6964 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6965 					const struct fib6_info *rt,
6966 					enum mlxsw_sp_ipip_type *ret)
6967 {
6968 	return rt->fib6_nh->fib_nh_dev &&
6969 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6970 }
6971 
6972 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6973 				  struct mlxsw_sp_nexthop_group *nh_grp,
6974 				  struct mlxsw_sp_nexthop *nh,
6975 				  const struct fib6_info *rt)
6976 {
6977 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6978 	int err;
6979 
6980 	nh->nhgi = nh_grp->nhgi;
6981 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6982 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6983 #if IS_ENABLED(CONFIG_IPV6)
6984 	nh->neigh_tbl = &nd_tbl;
6985 #endif
6986 
6987 	err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
6988 	if (err)
6989 		return err;
6990 
6991 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6992 
6993 	if (!dev)
6994 		return 0;
6995 	nh->ifindex = dev->ifindex;
6996 
6997 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6998 	if (err)
6999 		goto err_nexthop_type_init;
7000 
7001 	return 0;
7002 
7003 err_nexthop_type_init:
7004 	list_del(&nh->router_list_node);
7005 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
7006 	return err;
7007 }
7008 
7009 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
7010 				   struct mlxsw_sp_nexthop *nh)
7011 {
7012 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
7013 	list_del(&nh->router_list_node);
7014 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
7015 }
7016 
7017 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
7018 				    const struct fib6_info *rt)
7019 {
7020 	return rt->fib6_nh->fib_nh_gw_family ||
7021 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
7022 }
7023 
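/* Build the nexthop group info for an IPv6 entry: one nexthop per rt6 in
 * the entry's list, allocated with struct_size(). Partially initialized
 * nexthops are unwound in reverse order on failure.
 */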
7024 static int
7025 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
7026 				  struct mlxsw_sp_nexthop_group *nh_grp,
7027 				  struct mlxsw_sp_fib6_entry *fib6_entry)
7028 {
7029 	struct mlxsw_sp_nexthop_group_info *nhgi;
7030 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7031 	struct mlxsw_sp_nexthop *nh;
7032 	int err, i;
7033 
7034 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
7035 		       GFP_KERNEL);
7036 	if (!nhgi)
7037 		return -ENOMEM;
7038 	nh_grp->nhgi = nhgi;
7039 	nhgi->nh_grp = nh_grp;
7040 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
7041 					struct mlxsw_sp_rt6, list);
7042 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
7043 	nhgi->count = fib6_entry->nrt6;
7044 	for (i = 0; i < nhgi->count; i++) {
7045 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
7046 
7047 		nh = &nhgi->nexthops[i];
7048 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
7049 		if (err)
7050 			goto err_nexthop6_init;
7051 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
7052 	}
7053 	nh_grp->nhgi = nhgi;
7054 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
7055 	if (err)
7056 		goto err_group_inc;
7057 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
7058 	if (err)
7059 		goto err_group_refresh;
7060 
7061 	return 0;
7062 
7063 err_group_refresh:
7064 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
7065 err_group_inc:
7066 	i = nhgi->count;
7067 err_nexthop6_init:
7068 	for (i--; i >= 0; i--) {
7069 		nh = &nhgi->nexthops[i];
7070 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
7071 	}
7072 	kfree(nhgi);
7073 	return err;
7074 }
7075 
7076 static void
7077 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
7078 				  struct mlxsw_sp_nexthop_group *nh_grp)
7079 {
7080 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
7081 	int i;
7082 
7083 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
7084 	for (i = nhgi->count - 1; i >= 0; i--) {
7085 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
7086 
7087 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
7088 	}
7089 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
7090 	WARN_ON_ONCE(nhgi->adj_index_valid);
7091 	kfree(nhgi);
7092 }
7093 
7094 static struct mlxsw_sp_nexthop_group *
7095 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
7096 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7097 {
7098 	struct mlxsw_sp_nexthop_group *nh_grp;
7099 	int err;
7100 
7101 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
7102 	if (!nh_grp)
7103 		return ERR_PTR(-ENOMEM);
7104 	INIT_LIST_HEAD(&nh_grp->vr_list);
7105 	err = rhashtable_init(&nh_grp->vr_ht,
7106 			      &mlxsw_sp_nexthop_group_vr_ht_params);
7107 	if (err)
7108 		goto err_nexthop_group_vr_ht_init;
7109 	INIT_LIST_HEAD(&nh_grp->fib_list);
7110 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
7111 
7112 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
7113 	if (err)
7114 		goto err_nexthop_group_info_init;
7115 
7116 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
7117 	if (err)
7118 		goto err_nexthop_group_insert;
7119 
7120 	nh_grp->can_destroy = true;
7121 
7122 	return nh_grp;
7123 
7124 err_nexthop_group_insert:
7125 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
7126 err_nexthop_group_info_init:
7127 	rhashtable_destroy(&nh_grp->vr_ht);
7128 err_nexthop_group_vr_ht_init:
7129 	kfree(nh_grp);
7130 	return ERR_PTR(err);
7131 }
7132 
7133 static void
7134 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
7135 				struct mlxsw_sp_nexthop_group *nh_grp)
7136 {
7137 	if (!nh_grp->can_destroy)
7138 		return;
7139 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
7140 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
7141 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
7142 	rhashtable_destroy(&nh_grp->vr_ht);
7143 	kfree(nh_grp);
7144 }
7145 
7146 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
7147 				       struct mlxsw_sp_fib6_entry *fib6_entry)
7148 {
7149 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7150 	struct mlxsw_sp_nexthop_group *nh_grp;
7151 
7152 	if (rt->nh) {
7153 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
7154 							   rt->nh->id);
7155 		if (WARN_ON_ONCE(!nh_grp))
7156 			return -EINVAL;
7157 		goto out;
7158 	}
7159 
7160 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
7161 	if (!nh_grp) {
7162 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
7163 		if (IS_ERR(nh_grp))
7164 			return PTR_ERR(nh_grp);
7165 	}
7166 
7167 	/* The route and the nexthop are described by the same struct, so we
7168 	 * need to update the nexthop offload indication for the new route.
7169 	 */
7170 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
7171 
7172 out:
7173 	list_add_tail(&fib6_entry->common.nexthop_group_node,
7174 		      &nh_grp->fib_list);
7175 	fib6_entry->common.nh_group = nh_grp;
7176 
7177 	return 0;
7178 }
7179 
7180 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
7181 					struct mlxsw_sp_fib_entry *fib_entry)
7182 {
7183 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
7184 
7185 	list_del(&fib_entry->nexthop_group_node);
7186 	if (!list_empty(&nh_grp->fib_list))
7187 		return;
7188 
7189 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
7190 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
7191 		return;
7192 	}
7193 
7194 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
7195 }
7196 
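/* Rebind the entry to a nexthop group matching its current rt6 list and
 * update the device, so the entry stops using the old group's adjacency
 * index. On failure, the entry is rolled back to the old group.
 */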
7197 static int
7198 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
7199 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7200 {
7201 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
7202 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7203 	int err;
7204 
7205 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
7206 	fib6_entry->common.nh_group = NULL;
7207 	list_del(&fib6_entry->common.nexthop_group_node);
7208 
7209 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7210 	if (err)
7211 		goto err_nexthop6_group_get;
7212 
7213 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
7214 					     fib_node->fib);
7215 	if (err)
7216 		goto err_nexthop_group_vr_link;
7217 
7218 	/* In case this entry is offloaded, then the adjacency index
7219 	 * currently associated with it in the device's table is that
7220 	 * of the old group. Start using the new one instead.
7221 	 */
7222 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
7223 	if (err)
7224 		goto err_fib_entry_update;
7225 
7226 	if (list_empty(&old_nh_grp->fib_list))
7227 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
7228 
7229 	return 0;
7230 
7231 err_fib_entry_update:
7232 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7233 					 fib_node->fib);
7234 err_nexthop_group_vr_link:
7235 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7236 err_nexthop6_group_get:
7237 	list_add_tail(&fib6_entry->common.nexthop_group_node,
7238 		      &old_nh_grp->fib_list);
7239 	fib6_entry->common.nh_group = old_nh_grp;
7240 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
7241 	return err;
7242 }
7243 
7244 static int
7245 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
7246 				struct mlxsw_sp_fib6_entry *fib6_entry,
7247 				struct fib6_info **rt_arr, unsigned int nrt6)
7248 {
7249 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7250 	int err, i;
7251 
7252 	for (i = 0; i < nrt6; i++) {
7253 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7254 		if (IS_ERR(mlxsw_sp_rt6)) {
7255 			err = PTR_ERR(mlxsw_sp_rt6);
7256 			goto err_rt6_unwind;
7257 		}
7258 
7259 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7260 		fib6_entry->nrt6++;
7261 	}
7262 
7263 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7264 	if (err)
7265 		goto err_rt6_unwind;
7266 
7267 	return 0;
7268 
7269 err_rt6_unwind:
7270 	for (; i > 0; i--) {
7271 		fib6_entry->nrt6--;
7272 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7273 					       struct mlxsw_sp_rt6, list);
7274 		list_del(&mlxsw_sp_rt6->list);
7275 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7276 	}
7277 	return err;
7278 }
7279 
7280 static void
7281 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
7282 				struct mlxsw_sp_fib6_entry *fib6_entry,
7283 				struct fib6_info **rt_arr, unsigned int nrt6)
7284 {
7285 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7286 	int i;
7287 
7288 	for (i = 0; i < nrt6; i++) {
7289 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
7290 							   rt_arr[i]);
7291 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
7292 			continue;
7293 
7294 		fib6_entry->nrt6--;
7295 		list_del(&mlxsw_sp_rt6->list);
7296 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7297 	}
7298 
7299 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7300 }
7301 
7302 static int
7303 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
7304 				   struct mlxsw_sp_fib_entry *fib_entry,
7305 				   const struct fib6_info *rt)
7306 {
7307 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
7308 	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
7309 	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
7310 	struct mlxsw_sp_router *router = mlxsw_sp->router;
7311 	int ifindex = nhgi->nexthops[0].ifindex;
7312 	struct mlxsw_sp_ipip_entry *ipip_entry;
7313 
7314 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7315 	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
7316 						       MLXSW_SP_L3_PROTO_IPV6,
7317 						       dip);
7318 
7319 	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
7320 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
7321 		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
7322 						     ipip_entry);
7323 	}
7324 	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
7325 					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
7326 		u32 tunnel_index;
7327 
7328 		tunnel_index = router->nve_decap_config.tunnel_index;
7329 		fib_entry->decap.tunnel_index = tunnel_index;
7330 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
7331 	}
7332 
7333 	return 0;
7334 }
7335 
7336 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7337 					struct mlxsw_sp_fib_entry *fib_entry,
7338 					const struct fib6_info *rt)
7339 {
7340 	if (rt->fib6_flags & RTF_LOCAL)
7341 		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7342 							  rt);
7343 	if (rt->fib6_flags & RTF_ANYCAST)
7344 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7345 	else if (rt->fib6_type == RTN_BLACKHOLE)
7346 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7347 	else if (rt->fib6_flags & RTF_REJECT)
7348 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7349 	else if (fib_entry->nh_group->nhgi->gateway)
7350 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7351 	else
7352 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7353 
7354 	return 0;
7355 }
7356 
7357 static void
7358 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7359 {
7360 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7361 
7362 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7363 				 list) {
7364 		fib6_entry->nrt6--;
7365 		list_del(&mlxsw_sp_rt6->list);
7366 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7367 	}
7368 }
7369 
7370 static struct mlxsw_sp_fib6_entry *
7371 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7372 			   struct mlxsw_sp_fib_node *fib_node,
7373 			   struct fib6_info **rt_arr, unsigned int nrt6)
7374 {
7375 	struct mlxsw_sp_fib6_entry *fib6_entry;
7376 	struct mlxsw_sp_fib_entry *fib_entry;
7377 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7378 	int err, i;
7379 
7380 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7381 	if (!fib6_entry)
7382 		return ERR_PTR(-ENOMEM);
7383 	fib_entry = &fib6_entry->common;
7384 
7385 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
7386 
7387 	for (i = 0; i < nrt6; i++) {
7388 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7389 		if (IS_ERR(mlxsw_sp_rt6)) {
7390 			err = PTR_ERR(mlxsw_sp_rt6);
7391 			goto err_rt6_unwind;
7392 		}
7393 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7394 		fib6_entry->nrt6++;
7395 	}
7396 
7397 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7398 	if (err)
7399 		goto err_rt6_unwind;
7400 
7401 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7402 					     fib_node->fib);
7403 	if (err)
7404 		goto err_nexthop_group_vr_link;
7405 
7406 	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7407 	if (err)
7408 		goto err_fib6_entry_type_set;
7409 
7410 	fib_entry->fib_node = fib_node;
7411 
7412 	return fib6_entry;
7413 
7414 err_fib6_entry_type_set:
7415 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7416 err_nexthop_group_vr_link:
7417 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7418 err_rt6_unwind:
7419 	for (; i > 0; i--) {
7420 		fib6_entry->nrt6--;
7421 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7422 					       struct mlxsw_sp_rt6, list);
7423 		list_del(&mlxsw_sp_rt6->list);
7424 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7425 	}
7426 	kfree(fib6_entry);
7427 	return ERR_PTR(err);
7428 }
7429 
7430 static void
7431 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7432 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7433 {
7434 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7435 }
7436 
7437 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7438 					struct mlxsw_sp_fib6_entry *fib6_entry)
7439 {
7440 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7441 
7442 	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7443 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7444 					 fib_node->fib);
7445 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7446 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7447 	WARN_ON(fib6_entry->nrt6);
7448 	kfree(fib6_entry);
7449 }
7450 
7451 static struct mlxsw_sp_fib6_entry *
7452 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7453 			   const struct fib6_info *rt)
7454 {
7455 	struct mlxsw_sp_fib6_entry *fib6_entry;
7456 	struct mlxsw_sp_fib_node *fib_node;
7457 	struct mlxsw_sp_fib *fib;
7458 	struct fib6_info *cmp_rt;
7459 	struct mlxsw_sp_vr *vr;
7460 
7461 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7462 	if (!vr)
7463 		return NULL;
7464 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7465 
7466 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7467 					    sizeof(rt->fib6_dst.addr),
7468 					    rt->fib6_dst.plen);
7469 	if (!fib_node)
7470 		return NULL;
7471 
7472 	fib6_entry = container_of(fib_node->fib_entry,
7473 				  struct mlxsw_sp_fib6_entry, common);
7474 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7475 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7476 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7477 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7478 		return fib6_entry;
7479 
7480 	return NULL;
7481 }
7482 
7483 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7484 {
7485 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7486 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7487 	struct fib6_info *rt, *rt_replaced;
7488 
7489 	if (!fib_node->fib_entry)
7490 		return true;
7491 
7492 	fib6_replaced = container_of(fib_node->fib_entry,
7493 				     struct mlxsw_sp_fib6_entry,
7494 				     common);
7495 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7496 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7497 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7498 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7499 		return false;
7500 
7501 	return true;
7502 }
7503 
7504 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7505 					struct fib6_info **rt_arr,
7506 					unsigned int nrt6)
7507 {
7508 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7509 	struct mlxsw_sp_fib_entry *replaced;
7510 	struct mlxsw_sp_fib_node *fib_node;
7511 	struct fib6_info *rt = rt_arr[0];
7512 	int err;
7513 
7514 	if (rt->fib6_src.plen)
7515 		return -EINVAL;
7516 
7517 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7518 		return 0;
7519 
7520 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7521 		return 0;
7522 
7523 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7524 					 &rt->fib6_dst.addr,
7525 					 sizeof(rt->fib6_dst.addr),
7526 					 rt->fib6_dst.plen,
7527 					 MLXSW_SP_L3_PROTO_IPV6);
7528 	if (IS_ERR(fib_node))
7529 		return PTR_ERR(fib_node);
7530 
7531 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7532 						nrt6);
7533 	if (IS_ERR(fib6_entry)) {
7534 		err = PTR_ERR(fib6_entry);
7535 		goto err_fib6_entry_create;
7536 	}
7537 
7538 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7539 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7540 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7541 		return 0;
7542 	}
7543 
7544 	replaced = fib_node->fib_entry;
7545 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
7546 	if (err)
7547 		goto err_fib_node_entry_link;
7548 
7549 	/* Nothing to replace */
7550 	if (!replaced)
7551 		return 0;
7552 
7553 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7554 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7555 				     common);
7556 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7557 
7558 	return 0;
7559 
7560 err_fib_node_entry_link:
7561 	fib_node->fib_entry = replaced;
7562 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7563 err_fib6_entry_create:
7564 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7565 	return err;
7566 }
7567 
7568 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7569 				       struct fib6_info **rt_arr,
7570 				       unsigned int nrt6)
7571 {
7572 	struct mlxsw_sp_fib6_entry *fib6_entry;
7573 	struct mlxsw_sp_fib_node *fib_node;
7574 	struct fib6_info *rt = rt_arr[0];
7575 	int err;
7576 
7577 	if (rt->fib6_src.plen)
7578 		return -EINVAL;
7579 
7580 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7581 		return 0;
7582 
7583 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7584 					 &rt->fib6_dst.addr,
7585 					 sizeof(rt->fib6_dst.addr),
7586 					 rt->fib6_dst.plen,
7587 					 MLXSW_SP_L3_PROTO_IPV6);
7588 	if (IS_ERR(fib_node))
7589 		return PTR_ERR(fib_node);
7590 
7591 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7592 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7593 		return -EINVAL;
7594 	}
7595 
7596 	fib6_entry = container_of(fib_node->fib_entry,
7597 				  struct mlxsw_sp_fib6_entry, common);
7598 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
7599 					      nrt6);
7600 	if (err)
7601 		goto err_fib6_entry_nexthop_add;
7602 
7603 	return 0;
7604 
7605 err_fib6_entry_nexthop_add:
7606 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7607 	return err;
7608 }
7609 
7610 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7611 				     struct fib6_info **rt_arr,
7612 				     unsigned int nrt6)
7613 {
7614 	struct mlxsw_sp_fib6_entry *fib6_entry;
7615 	struct mlxsw_sp_fib_node *fib_node;
7616 	struct fib6_info *rt = rt_arr[0];
7617 
7618 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7619 		return;
7620 
7621 	/* Multipath routes are first added to the FIB trie and only then
7622 	 * notified. If we vetoed the addition, we will get a delete
7623 	 * notification for a route we do not have. Therefore, do not warn if
7624 	 * route was not found.
7625 	 */
7626 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7627 	if (!fib6_entry)
7628 		return;
7629 
7630 	/* If not all the nexthops are deleted, then only reduce the nexthop
7631 	 * group.
7632 	 */
7633 	if (nrt6 != fib6_entry->nrt6) {
7634 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
7635 						nrt6);
7636 		return;
7637 	}
7638 
7639 	fib_node = fib6_entry->common.fib_node;
7640 
7641 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
7642 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7643 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7644 }
7645 
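/* Multicast route notifications carry an RTNL family; map it to the
 * corresponding per-VR multicast routing table.
 */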
7646 static struct mlxsw_sp_mr_table *
7647 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7648 {
7649 	if (family == RTNL_FAMILY_IPMR)
7650 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7651 	else
7652 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7653 }
7654 
7655 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7656 				     struct mfc_entry_notifier_info *men_info,
7657 				     bool replace)
7658 {
7659 	struct mlxsw_sp_mr_table *mrt;
7660 	struct mlxsw_sp_vr *vr;
7661 
7662 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7663 	if (IS_ERR(vr))
7664 		return PTR_ERR(vr);
7665 
7666 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7667 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7668 }
7669 
7670 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7671 				      struct mfc_entry_notifier_info *men_info)
7672 {
7673 	struct mlxsw_sp_mr_table *mrt;
7674 	struct mlxsw_sp_vr *vr;
7675 
7676 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7677 	if (WARN_ON(!vr))
7678 		return;
7679 
7680 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7681 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7682 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7683 }
7684 
7685 static int
7686 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7687 			      struct vif_entry_notifier_info *ven_info)
7688 {
7689 	struct mlxsw_sp_mr_table *mrt;
7690 	struct mlxsw_sp_rif *rif;
7691 	struct mlxsw_sp_vr *vr;
7692 
7693 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7694 	if (IS_ERR(vr))
7695 		return PTR_ERR(vr);
7696 
7697 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7698 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7699 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7700 				   ven_info->vif_index,
7701 				   ven_info->vif_flags, rif);
7702 }
7703 
7704 static void
7705 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7706 			      struct vif_entry_notifier_info *ven_info)
7707 {
7708 	struct mlxsw_sp_mr_table *mrt;
7709 	struct mlxsw_sp_vr *vr;
7710 
7711 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7712 	if (WARN_ON(!vr))
7713 		return;
7714 
7715 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7716 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7717 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7718 }
7719 
7720 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7721 				     struct mlxsw_sp_fib_node *fib_node)
7722 {
7723 	struct mlxsw_sp_fib4_entry *fib4_entry;
7724 
7725 	fib4_entry = container_of(fib_node->fib_entry,
7726 				  struct mlxsw_sp_fib4_entry, common);
7727 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7728 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7729 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7730 }
7731 
7732 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7733 				     struct mlxsw_sp_fib_node *fib_node)
7734 {
7735 	struct mlxsw_sp_fib6_entry *fib6_entry;
7736 
7737 	fib6_entry = container_of(fib_node->fib_entry,
7738 				  struct mlxsw_sp_fib6_entry, common);
7739 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7740 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7741 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7742 }
7743 
7744 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7745 				    struct mlxsw_sp_fib_node *fib_node)
7746 {
7747 	switch (fib_node->fib->proto) {
7748 	case MLXSW_SP_L3_PROTO_IPV4:
7749 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7750 		break;
7751 	case MLXSW_SP_L3_PROTO_IPV6:
7752 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7753 		break;
7754 	}
7755 }
7756 
7757 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7758 				  struct mlxsw_sp_vr *vr,
7759 				  enum mlxsw_sp_l3proto proto)
7760 {
7761 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7762 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7763 
7764 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7765 		bool do_break = &tmp->list == &fib->node_list;
7766 
7767 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7768 		if (do_break)
7769 			break;
7770 	}
7771 }
7772 
7773 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7774 {
7775 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
7776 	int i, j;
7777 
7778 	for (i = 0; i < max_vrs; i++) {
7779 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7780 
7781 		if (!mlxsw_sp_vr_is_used(vr))
7782 			continue;
7783 
7784 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7785 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7786 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7787 
7788 		/* If the virtual router was only used for IPv4, then it's no
7789 		 * longer used.
7790 		 */
7791 		if (!mlxsw_sp_vr_is_used(vr))
7792 			continue;
7793 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7794 	}
7795 }
7796 
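/* FIB notifications are delivered in atomic context. The notifier copies
 * each event into a work item, holding references on the relevant route
 * objects, and the actual programming happens in process context under
 * the router lock.
 */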
7797 struct mlxsw_sp_fib6_event_work {
7798 	struct fib6_info **rt_arr;
7799 	unsigned int nrt6;
7800 };
7801 
7802 struct mlxsw_sp_fib_event_work {
7803 	struct work_struct work;
7804 	netdevice_tracker dev_tracker;
7805 	union {
7806 		struct mlxsw_sp_fib6_event_work fib6_work;
7807 		struct fib_entry_notifier_info fen_info;
7808 		struct fib_rule_notifier_info fr_info;
7809 		struct fib_nh_notifier_info fnh_info;
7810 		struct mfc_entry_notifier_info men_info;
7811 		struct vif_entry_notifier_info ven_info;
7812 	};
7813 	struct mlxsw_sp *mlxsw_sp;
7814 	unsigned long event;
7815 };
7816 
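/* Snapshot an IPv6 route and its siblings into an array, taking a
 * reference on each, so that the multipath route can be processed as a
 * whole once the work item runs.
 */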
7817 static int
7818 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
7819 			       struct fib6_entry_notifier_info *fen6_info)
7820 {
7821 	struct fib6_info *rt = fen6_info->rt;
7822 	struct fib6_info **rt_arr;
7823 	struct fib6_info *iter;
7824 	unsigned int nrt6;
7825 	int i = 0;
7826 
7827 	nrt6 = fen6_info->nsiblings + 1;
7828 
7829 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7830 	if (!rt_arr)
7831 		return -ENOMEM;
7832 
7833 	fib6_work->rt_arr = rt_arr;
7834 	fib6_work->nrt6 = nrt6;
7835 
7836 	rt_arr[0] = rt;
7837 	fib6_info_hold(rt);
7838 
7839 	if (!fen6_info->nsiblings)
7840 		return 0;
7841 
7842 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7843 		if (i == fen6_info->nsiblings)
7844 			break;
7845 
7846 		rt_arr[i + 1] = iter;
7847 		fib6_info_hold(iter);
7848 		i++;
7849 	}
7850 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7851 
7852 	return 0;
7853 }
7854 
7855 static void
7856 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
7857 {
7858 	int i;
7859 
7860 	for (i = 0; i < fib6_work->nrt6; i++)
7861 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
7862 	kfree(fib6_work->rt_arr);
7863 }
7864 
7865 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
7866 {
7867 	struct mlxsw_sp_fib_event_work *fib_work =
7868 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7869 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7870 	int err;
7871 
7872 	mutex_lock(&mlxsw_sp->router->lock);
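	/* Routing changes may invalidate the egress decisions of mirroring
	 * (SPAN) sessions; have them re-resolved.
	 */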
7873 	mlxsw_sp_span_respin(mlxsw_sp);
7874 
7875 	switch (fib_work->event) {
7876 	case FIB_EVENT_ENTRY_REPLACE:
7877 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
7878 						   &fib_work->fen_info);
7879 		if (err) {
7880 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7881 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7882 							      &fib_work->fen_info);
7883 		}
7884 		fib_info_put(fib_work->fen_info.fi);
7885 		break;
7886 	case FIB_EVENT_ENTRY_DEL:
7887 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
7888 		fib_info_put(fib_work->fen_info.fi);
7889 		break;
7890 	case FIB_EVENT_NH_ADD:
7891 	case FIB_EVENT_NH_DEL:
7892 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
7893 					fib_work->fnh_info.fib_nh);
7894 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
7895 		break;
7896 	}
7897 	mutex_unlock(&mlxsw_sp->router->lock);
7898 	kfree(fib_work);
7899 }
7900 
7901 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
7902 {
7903 	struct mlxsw_sp_fib_event_work *fib_work =
7904 		    container_of(work, struct mlxsw_sp_fib_event_work, work);
7905 	struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
7906 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7907 	int err;
7908 
7909 	mutex_lock(&mlxsw_sp->router->lock);
7910 	mlxsw_sp_span_respin(mlxsw_sp);
7911 
7912 	switch (fib_work->event) {
7913 	case FIB_EVENT_ENTRY_REPLACE:
7914 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
7915 						   fib6_work->rt_arr,
7916 						   fib6_work->nrt6);
7917 		if (err) {
7918 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7919 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7920 							      fib6_work->rt_arr,
7921 							      fib6_work->nrt6);
7922 		}
7923 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7924 		break;
7925 	case FIB_EVENT_ENTRY_APPEND:
7926 		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
7927 						  fib6_work->rt_arr,
7928 						  fib6_work->nrt6);
7929 		if (err) {
7930 			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7931 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7932 							      fib6_work->rt_arr,
7933 							      fib6_work->nrt6);
7934 		}
7935 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7936 		break;
7937 	case FIB_EVENT_ENTRY_DEL:
7938 		mlxsw_sp_router_fib6_del(mlxsw_sp,
7939 					 fib6_work->rt_arr,
7940 					 fib6_work->nrt6);
7941 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7942 		break;
7943 	}
7944 	mutex_unlock(&mlxsw_sp->router->lock);
7945 	kfree(fib_work);
7946 }
7947 
7948 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
7949 {
7950 	struct mlxsw_sp_fib_event_work *fib_work =
7951 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7952 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7953 	bool replace;
7954 	int err;
7955 
7956 	rtnl_lock();
7957 	mutex_lock(&mlxsw_sp->router->lock);
7958 	switch (fib_work->event) {
7959 	case FIB_EVENT_ENTRY_REPLACE:
7960 	case FIB_EVENT_ENTRY_ADD:
7961 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
7962 
7963 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
7964 						replace);
7965 		if (err)
7966 			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7967 		mr_cache_put(fib_work->men_info.mfc);
7968 		break;
7969 	case FIB_EVENT_ENTRY_DEL:
7970 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
7971 		mr_cache_put(fib_work->men_info.mfc);
7972 		break;
7973 	case FIB_EVENT_VIF_ADD:
7974 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7975 						    &fib_work->ven_info);
7976 		if (err)
7977 			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7978 		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7979 		break;
7980 	case FIB_EVENT_VIF_DEL:
7981 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
7982 					      &fib_work->ven_info);
7983 		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7984 		break;
7985 	}
7986 	mutex_unlock(&mlxsw_sp->router->lock);
7987 	rtnl_unlock();
7988 	kfree(fib_work);
7989 }
7990 
7991 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
7992 				       struct fib_notifier_info *info)
7993 {
7994 	struct fib_entry_notifier_info *fen_info;
7995 	struct fib_nh_notifier_info *fnh_info;
7996 
7997 	switch (fib_work->event) {
7998 	case FIB_EVENT_ENTRY_REPLACE:
7999 	case FIB_EVENT_ENTRY_DEL:
8000 		fen_info = container_of(info, struct fib_entry_notifier_info,
8001 					info);
8002 		fib_work->fen_info = *fen_info;
8003 		/* Take a reference on the fib_info to prevent it from being
8004 		 * freed while the work is queued. Release it afterwards.
8005 		 */
8006 		fib_info_hold(fib_work->fen_info.fi);
8007 		break;
8008 	case FIB_EVENT_NH_ADD:
8009 	case FIB_EVENT_NH_DEL:
8010 		fnh_info = container_of(info, struct fib_nh_notifier_info,
8011 					info);
8012 		fib_work->fnh_info = *fnh_info;
8013 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
8014 		break;
8015 	}
8016 }
8017 
8018 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
8019 				      struct fib_notifier_info *info)
8020 {
8021 	struct fib6_entry_notifier_info *fen6_info;
8022 	int err;
8023 
8024 	switch (fib_work->event) {
8025 	case FIB_EVENT_ENTRY_REPLACE:
8026 	case FIB_EVENT_ENTRY_APPEND:
8027 	case FIB_EVENT_ENTRY_DEL:
8028 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
8029 					 info);
8030 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
8031 						     fen6_info);
8032 		if (err)
8033 			return err;
8034 		break;
8035 	}
8036 
8037 	return 0;
8038 }
8039 
8040 static void
8041 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
8042 			    struct fib_notifier_info *info)
8043 {
8044 	switch (fib_work->event) {
8045 	case FIB_EVENT_ENTRY_REPLACE:
8046 	case FIB_EVENT_ENTRY_ADD:
8047 	case FIB_EVENT_ENTRY_DEL:
8048 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
8049 		mr_cache_hold(fib_work->men_info.mfc);
8050 		break;
8051 	case FIB_EVENT_VIF_ADD:
8052 	case FIB_EVENT_VIF_DEL:
8053 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
8054 		netdev_hold(fib_work->ven_info.dev, &fib_work->dev_tracker,
8055 			    GFP_ATOMIC);
8056 		break;
8057 	}
8058 }
8059 
8060 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
8061 					  struct fib_notifier_info *info,
8062 					  struct mlxsw_sp *mlxsw_sp)
8063 {
8064 	struct netlink_ext_ack *extack = info->extack;
8065 	struct fib_rule_notifier_info *fr_info;
8066 	struct fib_rule *rule;
8067 	int err = 0;
8068 
8069 	/* nothing to do at the moment */
8070 	if (event == FIB_EVENT_RULE_DEL)
8071 		return 0;
8072 
8073 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
8074 	rule = fr_info->rule;
8075 
8076 	/* Rule only affects locally generated traffic */
8077 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
8078 		return 0;
8079 
8080 	switch (info->family) {
8081 	case AF_INET:
8082 		if (!fib4_rule_default(rule) && !rule->l3mdev)
8083 			err = -EOPNOTSUPP;
8084 		break;
8085 	case AF_INET6:
8086 		if (!fib6_rule_default(rule) && !rule->l3mdev)
8087 			err = -EOPNOTSUPP;
8088 		break;
8089 	case RTNL_FAMILY_IPMR:
8090 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
8091 			err = -EOPNOTSUPP;
8092 		break;
8093 	case RTNL_FAMILY_IP6MR:
8094 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
8095 			err = -EOPNOTSUPP;
8096 		break;
8097 	}
8098 
8099 	if (err < 0)
8100 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
8101 
8102 	return err;
8103 }
8104 
8105 /* Called with rcu_read_lock() */
8106 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
8107 				     unsigned long event, void *ptr)
8108 {
8109 	struct mlxsw_sp_fib_event_work *fib_work;
8110 	struct fib_notifier_info *info = ptr;
8111 	struct mlxsw_sp_router *router;
8112 	int err;
8113 
8114 	if (info->family != AF_INET && info->family != AF_INET6 &&
8115 	    info->family != RTNL_FAMILY_IPMR &&
8116 	    info->family != RTNL_FAMILY_IP6MR)
8117 		return NOTIFY_DONE;
8118 
8119 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
8120 
8121 	switch (event) {
8122 	case FIB_EVENT_RULE_ADD:
8123 	case FIB_EVENT_RULE_DEL:
8124 		err = mlxsw_sp_router_fib_rule_event(event, info,
8125 						     router->mlxsw_sp);
8126 		return notifier_from_errno(err);
8127 	case FIB_EVENT_ENTRY_ADD:
8128 	case FIB_EVENT_ENTRY_REPLACE:
8129 	case FIB_EVENT_ENTRY_APPEND:
8130 		if (info->family == AF_INET) {
8131 			struct fib_entry_notifier_info *fen_info = ptr;
8132 
8133 			if (fen_info->fi->fib_nh_is_v6) {
8134 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
8135 				return notifier_from_errno(-EINVAL);
8136 			}
8137 		}
8138 		break;
8139 	}
8140 
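	/* Still under rcu_read_lock() here, hence the atomic allocation. */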
8141 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
8142 	if (!fib_work)
8143 		return NOTIFY_BAD;
8144 
8145 	fib_work->mlxsw_sp = router->mlxsw_sp;
8146 	fib_work->event = event;
8147 
8148 	switch (info->family) {
8149 	case AF_INET:
8150 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
8151 		mlxsw_sp_router_fib4_event(fib_work, info);
8152 		break;
8153 	case AF_INET6:
8154 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
8155 		err = mlxsw_sp_router_fib6_event(fib_work, info);
8156 		if (err)
8157 			goto err_fib_event;
8158 		break;
8159 	case RTNL_FAMILY_IP6MR:
8160 	case RTNL_FAMILY_IPMR:
8161 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
8162 		mlxsw_sp_router_fibmr_event(fib_work, info);
8163 		break;
8164 	}
8165 
8166 	mlxsw_core_schedule_work(&fib_work->work);
8167 
8168 	return NOTIFY_DONE;
8169 
8170 err_fib_event:
8171 	kfree(fib_work);
8172 	return NOTIFY_BAD;
8173 }
8174 
8175 static struct mlxsw_sp_rif *
8176 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
8177 			 const struct net_device *dev)
8178 {
8179 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8180 	int i;
8181 
8182 	for (i = 0; i < max_rifs; i++)
8183 		if (mlxsw_sp->router->rifs[i] &&
8184 		    mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
8185 			return mlxsw_sp->router->rifs[i];
8186 
8187 	return NULL;
8188 }
8189 
8190 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
8191 			 const struct net_device *dev)
8192 {
8193 	struct mlxsw_sp_rif *rif;
8194 
8195 	mutex_lock(&mlxsw_sp->router->lock);
8196 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8197 	mutex_unlock(&mlxsw_sp->router->lock);
8198 
8199 	return rif;
8200 }
8201 
8202 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
8203 {
8204 	struct mlxsw_sp_rif *rif;
8205 	u16 vid = 0;
8206 
8207 	mutex_lock(&mlxsw_sp->router->lock);
8208 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8209 	if (!rif)
8210 		goto out;
8211 
8212 	/* We only return the VID for VLAN RIFs. Otherwise we return an
8213 	 * invalid value (0).
8214 	 */
8215 	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
8216 		goto out;
8217 
8218 	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
8219 
8220 out:
8221 	mutex_unlock(&mlxsw_sp->router->lock);
8222 	return vid;
8223 }
8224 
8225 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
8226 {
8227 	char ritr_pl[MLXSW_REG_RITR_LEN];
8228 	int err;
8229 
8230 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
8231 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8232 	if (err)
8233 		return err;
8234 
8235 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
8236 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8237 }
8238 
8239 static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
8240 					 struct mlxsw_sp_rif *rif)
8241 {
8242 	int err;
8243 
8244 	err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif);
8245 	if (err)
8246 		return err;
8247 
8248 	err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif);
8249 	if (err)
8250 		goto err_nexthop;
8251 
8252 	return 0;
8253 
8254 err_nexthop:
8255 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8256 	return err;
8257 }
8258 
8259 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
8260 					  struct mlxsw_sp_rif *rif)
8261 {
8262 	/* Signal to nexthop cleanup that the RIF is going away. */
8263 	rif->crif->rif = NULL;
8264 
8265 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
8266 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
8267 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8268 }
8269 
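/* Check whether the device still has any IPv4 or IPv6 addresses
 * assigned. Must be called under rcu_read_lock(); see the wrapper
 * below for a locked variant.
 */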
8270 static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8271 {
8272 	struct inet6_dev *inet6_dev;
8273 	struct in_device *idev;
8274 
8275 	idev = __in_dev_get_rcu(dev);
8276 	if (idev && idev->ifa_list)
8277 		return false;
8278 
8279 	inet6_dev = __in6_dev_get(dev);
8280 	if (inet6_dev && !list_empty(&inet6_dev->addr_list))
8281 		return false;
8282 
8283 	return true;
8284 }
8285 
8286 static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8287 {
8288 	bool addr_list_empty;
8289 
8290 	rcu_read_lock();
8291 	addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
8292 	rcu_read_unlock();
8293 
8294 	return addr_list_empty;
8295 }
8296 
8297 static bool
8298 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
8299 			   unsigned long event)
8300 {
8301 	bool addr_list_empty;
8302 
8303 	switch (event) {
8304 	case NETDEV_UP:
8305 		return rif == NULL;
8306 	case NETDEV_DOWN:
8307 		addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);
8308 
8309 		/* macvlans do not have a RIF, but rather piggyback on the
8310 		 * RIF of their lower device.
8311 		 */
8312 		if (netif_is_macvlan(dev) && addr_list_empty)
8313 			return true;
8314 
8315 		if (rif && addr_list_empty &&
8316 		    !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
8317 			return true;
8318 		/* It is possible we already removed the RIF ourselves
8319 		 * if it was assigned to a netdev that is now a bridge
8320 		 * or LAG slave.
8321 		 */
8322 		return false;
8323 	}
8324 
8325 	return false;
8326 }
8327 
8328 static enum mlxsw_sp_rif_type
8329 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8330 		      const struct net_device *dev)
8331 {
8332 	enum mlxsw_sp_fid_type type;
8333 
8334 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8335 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
8336 
8337 	/* Otherwise RIF type is derived from the type of the underlying FID. */
8338 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8339 		type = MLXSW_SP_FID_TYPE_8021Q;
8340 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8341 		type = MLXSW_SP_FID_TYPE_8021Q;
8342 	else if (netif_is_bridge_master(dev))
8343 		type = MLXSW_SP_FID_TYPE_8021D;
8344 	else
8345 		type = MLXSW_SP_FID_TYPE_RFID;
8346 
8347 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8348 }
8349 
8350 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
8351 				    u8 rif_entries)
8352 {
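	/* gen_pool_alloc() returns 0 on failure, so the pool is based at a
	 * non-zero offset to keep 0 unambiguous; subtract the offset to
	 * recover the actual RIF index.
	 */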
8353 	*p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
8354 				      rif_entries);
8355 	if (*p_rif_index == 0)
8356 		return -ENOBUFS;
8357 	*p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
8358 
8359 	/* RIF indexes must be aligned to the allocation size. */
8360 	WARN_ON_ONCE(*p_rif_index % rif_entries);
8361 
8362 	return 0;
8363 }
8364 
8365 static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8366 				    u8 rif_entries)
8367 {
8368 	gen_pool_free(mlxsw_sp->router->rifs_table,
8369 		      MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
8370 }
8371 
8372 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8373 					       u16 vr_id,
8374 					       struct mlxsw_sp_crif *crif)
8375 {
8376 	struct net_device *l3_dev = crif ? crif->key.dev : NULL;
8377 	struct mlxsw_sp_rif *rif;
8378 
8379 	rif = kzalloc(rif_size, GFP_KERNEL);
8380 	if (!rif)
8381 		return NULL;
8382 
8383 	INIT_LIST_HEAD(&rif->neigh_list);
8384 	if (l3_dev) {
8385 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
8386 		rif->mtu = l3_dev->mtu;
8387 	}
8388 	rif->vr_id = vr_id;
8389 	rif->rif_index = rif_index;
8390 	if (crif) {
8391 		rif->crif = crif;
8392 		crif->rif = rif;
8393 	}
8394 
8395 	return rif;
8396 }
8397 
8398 static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
8399 {
8400 	WARN_ON(!list_empty(&rif->neigh_list));
8401 
8402 	if (rif->crif)
8403 		rif->crif->rif = NULL;
8404 	kfree(rif);
8405 }
8406 
8407 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8408 					   u16 rif_index)
8409 {
8410 	return mlxsw_sp->router->rifs[rif_index];
8411 }
8412 
8413 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8414 {
8415 	return rif->rif_index;
8416 }
8417 
8418 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8419 {
8420 	return lb_rif->common.rif_index;
8421 }
8422 
8423 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8424 {
8425 	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
8426 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
8427 	struct mlxsw_sp_vr *ul_vr;
8428 
8429 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8430 	if (WARN_ON(IS_ERR(ul_vr)))
8431 		return 0;
8432 
8433 	return ul_vr->id;
8434 }
8435 
8436 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8437 {
8438 	return lb_rif->ul_rif_id;
8439 }
8440 
8441 static bool
8442 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
8443 {
8444 	return mlxsw_sp_rif_counter_valid_get(rif,
8445 					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
8446 	       mlxsw_sp_rif_counter_valid_get(rif,
8447 					      MLXSW_SP_RIF_COUNTER_INGRESS);
8448 }
8449 
8450 static int
8451 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8452 {
8453 	int err;
8454 
8455 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8456 	if (err)
8457 		return err;
8458 
8459 	/* Clear stale data. */
8460 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8461 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8462 					       NULL);
8463 	if (err)
8464 		goto err_clear_ingress;
8465 
8466 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8467 	if (err)
8468 		goto err_alloc_egress;
8469 
8470 	/* Clear stale data. */
8471 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8472 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8473 					       NULL);
8474 	if (err)
8475 		goto err_clear_egress;
8476 
8477 	return 0;
8478 
8479 err_clear_egress:
8480 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8481 err_alloc_egress:
8482 err_clear_ingress:
8483 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8484 	return err;
8485 }
8486 
8487 static void
8488 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8489 {
8490 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8491 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8492 }
8493 
8494 static void
8495 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8496 					  struct netdev_notifier_offload_xstats_info *info)
8497 {
8498 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8499 		return;
8500 	netdev_offload_xstats_report_used(info->report_used);
8501 }
8502 
8503 static int
8504 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
8505 				    struct rtnl_hw_stats64 *p_stats)
8506 {
8507 	struct mlxsw_sp_rif_counter_set_basic ingress;
8508 	struct mlxsw_sp_rif_counter_set_basic egress;
8509 	int err;
8510 
8511 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8512 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8513 					       &ingress);
8514 	if (err)
8515 		return err;
8516 
8517 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8518 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8519 					       &egress);
8520 	if (err)
8521 		return err;
8522 
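	/* Sum the "good" unicast, multicast and broadcast counters of a
	 * counter set, for the given suffix (packets or bytes).
	 */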
8523 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
8524 		((SET.good_unicast_ ## SFX) +		\
8525 		 (SET.good_multicast_ ## SFX) +		\
8526 		 (SET.good_broadcast_ ## SFX))
8527 
8528 	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8529 	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8530 	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8531 	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8532 	p_stats->rx_errors = ingress.error_packets;
8533 	p_stats->tx_errors = egress.error_packets;
8534 	p_stats->rx_dropped = ingress.discard_packets;
8535 	p_stats->tx_dropped = egress.discard_packets;
8536 	p_stats->multicast = ingress.good_multicast_packets +
8537 			     ingress.good_broadcast_packets;
8538 
8539 #undef MLXSW_SP_ROUTER_ALL_GOOD
8540 
8541 	return 0;
8542 }
8543 
8544 static int
8545 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8546 					   struct netdev_notifier_offload_xstats_info *info)
8547 {
8548 	struct rtnl_hw_stats64 stats = {};
8549 	int err;
8550 
8551 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8552 		return 0;
8553 
8554 	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8555 	if (err)
8556 		return err;
8557 
8558 	netdev_offload_xstats_report_delta(info->report_delta, &stats);
8559 	return 0;
8560 }
8561 
8562 struct mlxsw_sp_router_hwstats_notify_work {
8563 	struct work_struct work;
8564 	struct net_device *dev;
8565 	netdevice_tracker dev_tracker;
8566 };
8567 
8568 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8569 {
8570 	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8571 		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8572 			     work);
8573 
8574 	rtnl_lock();
8575 	rtnl_offload_xstats_notify(hws_work->dev);
8576 	rtnl_unlock();
8577 	netdev_put(hws_work->dev, &hws_work->dev_tracker);
8578 	kfree(hws_work);
8579 }
8580 
8581 static void
8582 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8583 {
8584 	struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8585 
8586 	/* To collect notification payload, the core ends up sending another
8587 	 * notifier block message, which would deadlock on the attempt to
8588 	 * acquire the router lock again. Just postpone the notification until
8589 	 * later.
8590 	 */
8591 
8592 	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8593 	if (!hws_work)
8594 		return;
8595 
8596 	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8597 	netdev_hold(dev, &hws_work->dev_tracker, GFP_KERNEL);
8598 	hws_work->dev = dev;
8599 	mlxsw_core_schedule_work(&hws_work->work);
8600 }
8601 
8602 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8603 {
8604 	return mlxsw_sp_rif_dev(rif)->ifindex;
8605 }
8606 
8607 bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
8608 {
8609 	return !!mlxsw_sp_rif_dev(rif);
8610 }
8611 
8612 bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
8613 			 const struct net_device *dev)
8614 {
8615 	return mlxsw_sp_rif_dev(rif) == dev;
8616 }
8617 
8618 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8619 {
8620 	struct rtnl_hw_stats64 stats = {};
8621 
8622 	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8623 		netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
8624 						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8625 						 &stats);
8626 }
8627 
8628 static struct mlxsw_sp_rif *
8629 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8630 		    const struct mlxsw_sp_rif_params *params,
8631 		    struct netlink_ext_ack *extack)
8632 {
8633 	u8 rif_entries = params->double_entry ? 2 : 1;
8634 	u32 tb_id = l3mdev_fib_table(params->dev);
8635 	const struct mlxsw_sp_rif_ops *ops;
8636 	struct mlxsw_sp_fid *fid = NULL;
8637 	enum mlxsw_sp_rif_type type;
8638 	struct mlxsw_sp_crif *crif;
8639 	struct mlxsw_sp_rif *rif;
8640 	struct mlxsw_sp_vr *vr;
8641 	u16 rif_index;
8642 	int i, err;
8643 
8644 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8645 	ops = mlxsw_sp->router->rif_ops_arr[type];
8646 
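	/* A device that is not nested inside a VRF has no l3mdev FIB
	 * table (tb_id of 0); fall back to the main table in that case.
	 */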
8647 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8648 	if (IS_ERR(vr))
8649 		return ERR_CAST(vr);
8650 	vr->rif_count++;
8651 
8652 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
8653 	if (err) {
8654 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8655 		goto err_rif_index_alloc;
8656 	}
8657 
8658 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
8659 	if (WARN_ON(!crif)) {
8660 		err = -ENOENT;
8661 		goto err_crif_lookup;
8662 	}
8663 
8664 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
8665 	if (!rif) {
8666 		err = -ENOMEM;
8667 		goto err_rif_alloc;
8668 	}
8669 	netdev_hold(params->dev, &rif->dev_tracker, GFP_KERNEL);
8670 	mlxsw_sp->router->rifs[rif_index] = rif;
8671 	rif->mlxsw_sp = mlxsw_sp;
8672 	rif->ops = ops;
8673 	rif->rif_entries = rif_entries;
8674 
8675 	if (ops->setup)
8676 		ops->setup(rif, params);
8677 
8678 	if (ops->fid_get) {
8679 		fid = ops->fid_get(rif, params, extack);
8680 		if (IS_ERR(fid)) {
8681 			err = PTR_ERR(fid);
8682 			goto err_fid_get;
8683 		}
8684 		rif->fid = fid;
8685 	}
8686 
8687 	err = ops->configure(rif, extack);
8688 	if (err)
8689 		goto err_configure;
8690 
8691 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8692 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8693 		if (err)
8694 			goto err_mr_rif_add;
8695 	}
8696 
8697 	err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif);
8698 	if (err)
8699 		goto err_rif_made_sync;
8700 
8701 	if (netdev_offload_xstats_enabled(params->dev,
8702 					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8703 		err = mlxsw_sp_router_port_l3_stats_enable(rif);
8704 		if (err)
8705 			goto err_stats_enable;
8706 		mlxsw_sp_router_hwstats_notify_schedule(params->dev);
8707 	} else {
8708 		mlxsw_sp_rif_counters_alloc(rif);
8709 	}
8710 
8711 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
8712 	return rif;
8713 
8714 err_stats_enable:
8715 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8716 err_rif_made_sync:
8717 err_mr_rif_add:
8718 	for (i--; i >= 0; i--)
8719 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8720 	ops->deconfigure(rif);
8721 err_configure:
8722 	if (fid)
8723 		mlxsw_sp_fid_put(fid);
8724 err_fid_get:
8725 	mlxsw_sp->router->rifs[rif_index] = NULL;
8726 	netdev_put(params->dev, &rif->dev_tracker);
8727 	mlxsw_sp_rif_free(rif);
8728 err_rif_alloc:
8729 err_crif_lookup:
8730 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8731 err_rif_index_alloc:
8732 	vr->rif_count--;
8733 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8734 	return ERR_PTR(err);
8735 }
8736 
8737 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8738 {
8739 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
8740 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8741 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8742 	struct mlxsw_sp_crif *crif = rif->crif;
8743 	struct mlxsw_sp_fid *fid = rif->fid;
8744 	u8 rif_entries = rif->rif_entries;
8745 	u16 rif_index = rif->rif_index;
8746 	struct mlxsw_sp_vr *vr;
8747 	int i;
8748 
8749 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
8750 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8751 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8752 
8753 	if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8754 		mlxsw_sp_rif_push_l3_stats(rif);
8755 		mlxsw_sp_router_port_l3_stats_disable(rif);
8756 		mlxsw_sp_router_hwstats_notify_schedule(dev);
8757 	} else {
8758 		mlxsw_sp_rif_counters_free(rif);
8759 	}
8760 
8761 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8762 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8763 	ops->deconfigure(rif);
8764 	if (fid)
8765 		/* Loopback RIFs are not associated with a FID. */
8766 		mlxsw_sp_fid_put(fid);
8767 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8768 	netdev_put(dev, &rif->dev_tracker);
8769 	mlxsw_sp_rif_free(rif);
8770 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8771 	vr->rif_count--;
8772 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8773 
8774 	if (crif->can_destroy)
8775 		mlxsw_sp_crif_free(crif);
8776 }
8777 
8778 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8779 				 struct net_device *dev)
8780 {
8781 	struct mlxsw_sp_rif *rif;
8782 
8783 	mutex_lock(&mlxsw_sp->router->lock);
8784 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8785 	if (!rif)
8786 		goto out;
8787 	mlxsw_sp_rif_destroy(rif);
8788 out:
8789 	mutex_unlock(&mlxsw_sp->router->lock);
8790 }
8791 
8792 static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
8793 					    struct net_device *br_dev,
8794 					    u16 vid)
8795 {
8796 	struct net_device *upper_dev;
8797 	struct mlxsw_sp_crif *crif;
8798 
8799 	rcu_read_lock();
8800 	upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
8801 	rcu_read_unlock();
8802 
8803 	if (!upper_dev)
8804 		return;
8805 
8806 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
8807 	if (!crif || !crif->rif)
8808 		return;
8809 
8810 	mlxsw_sp_rif_destroy(crif->rif);
8811 }
8812 
8813 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8814 					  struct net_device *l3_dev,
8815 					  int lower_pvid,
8816 					  unsigned long event,
8817 					  struct netlink_ext_ack *extack);
8818 
8819 int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
8820 				    struct net_device *br_dev,
8821 				    u16 new_vid, bool is_pvid,
8822 				    struct netlink_ext_ack *extack)
8823 {
8824 	struct mlxsw_sp_rif *old_rif;
8825 	struct mlxsw_sp_rif *new_rif;
8826 	struct net_device *upper_dev;
8827 	u16 old_pvid = 0;
8828 	u16 new_pvid;
8829 	int err = 0;
8830 
8831 	mutex_lock(&mlxsw_sp->router->lock);
8832 	old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
8833 	if (old_rif) {
8834 		/* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
8835 		 * gotten a PVID notification.
8836 		 */
8837 		if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
8838 			old_rif = NULL;
8839 		else
8840 			old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
8841 	}
8842 
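	/* Either new_vid becomes the PVID, or, if it used to be the PVID
	 * and no longer is, the bridge is left without one. Other VLAN
	 * changes do not affect the RIF.
	 */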
8843 	if (is_pvid)
8844 		new_pvid = new_vid;
8845 	else if (old_pvid == new_vid)
8846 		new_pvid = 0;
8847 	else
8848 		goto out;
8849 
8850 	if (old_pvid == new_pvid)
8851 		goto out;
8852 
8853 	if (new_pvid) {
8854 		struct mlxsw_sp_rif_params params = {
8855 			.dev = br_dev,
8856 			.vid = new_pvid,
8857 		};
8858 
8859 		/* If there is a VLAN upper with the same VID as the new PVID,
8860 		 * destroy its RIF, if it has one.
8861 		 */
8862 		mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);
8863 
8864 		if (mlxsw_sp_dev_addr_list_empty(br_dev))
8865 			goto out;
8866 		new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8867 		if (IS_ERR(new_rif)) {
8868 			err = PTR_ERR(new_rif);
8869 			goto out;
8870 		}
8871 
8872 		if (old_pvid)
8873 			mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
8874 						     true);
8875 	} else {
8876 		mlxsw_sp_rif_destroy(old_rif);
8877 	}
8878 
8879 	if (old_pvid) {
8880 		rcu_read_lock();
8881 		upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
8882 						     old_pvid);
8883 		rcu_read_unlock();
8884 		if (upper_dev)
8885 			err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
8886 							     upper_dev,
8887 							     new_pvid,
8888 							     NETDEV_UP, extack);
8889 	}
8890 
8891 out:
8892 	mutex_unlock(&mlxsw_sp->router->lock);
8893 	return err;
8894 }
8895 
8896 static void
8897 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8898 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8899 {
8900 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8901 
8902 	params->vid = mlxsw_sp_port_vlan->vid;
8903 	params->lag = mlxsw_sp_port->lagged;
8904 	if (params->lag)
8905 		params->lag_id = mlxsw_sp_port->lag_id;
8906 	else
8907 		params->system_port = mlxsw_sp_port->local_port;
8908 }
8909 
8910 static struct mlxsw_sp_rif_subport *
8911 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8912 {
8913 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8914 }
8915 
8916 int mlxsw_sp_rif_subport_port(const struct mlxsw_sp_rif *rif,
8917 			      u16 *port, bool *is_lag)
8918 {
8919 	struct mlxsw_sp_rif_subport *rif_subport;
8920 
8921 	if (WARN_ON(rif->ops->type != MLXSW_SP_RIF_TYPE_SUBPORT))
8922 		return -EINVAL;
8923 
8924 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8925 	*is_lag = rif_subport->lag;
8926 	*port = *is_lag ? rif_subport->lag_id : rif_subport->system_port;
8927 	return 0;
8928 }
8929 
8930 static struct mlxsw_sp_rif *
8931 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8932 			 const struct mlxsw_sp_rif_params *params,
8933 			 struct netlink_ext_ack *extack)
8934 {
8935 	struct mlxsw_sp_rif_subport *rif_subport;
8936 	struct mlxsw_sp_rif *rif;
8937 
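	/* Sub-port RIFs are reference counted, since several {port, VID}
	 * pairs can map to the same RIF (e.g. the member ports of a LAG).
	 */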
8938 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8939 	if (!rif)
8940 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8941 
8942 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8943 	refcount_inc(&rif_subport->ref_count);
8944 	return rif;
8945 }
8946 
8947 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8948 {
8949 	struct mlxsw_sp_rif_subport *rif_subport;
8950 
8951 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8952 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8953 		return;
8954 
8955 	mlxsw_sp_rif_destroy(rif);
8956 }
8957 
8958 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8959 						struct mlxsw_sp_rif_mac_profile *profile,
8960 						struct netlink_ext_ack *extack)
8961 {
8962 	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8963 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8964 	int id;
8965 
8966 	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8967 		       max_rif_mac_profiles, GFP_KERNEL);
8968 
8969 	if (id >= 0) {
8970 		profile->id = id;
8971 		return 0;
8972 	}
8973 
8974 	if (id == -ENOSPC)
8975 		NL_SET_ERR_MSG_MOD(extack,
8976 				   "Exceeded number of supported router interface MAC profiles");
8977 
8978 	return id;
8979 }
8980 
8981 static struct mlxsw_sp_rif_mac_profile *
8982 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8983 {
8984 	struct mlxsw_sp_rif_mac_profile *profile;
8985 
8986 	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8987 			     mac_profile);
8988 	WARN_ON(!profile);
8989 	return profile;
8990 }
8991 
8992 static struct mlxsw_sp_rif_mac_profile *
8993 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8994 {
8995 	struct mlxsw_sp_rif_mac_profile *profile;
8996 
8997 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8998 	if (!profile)
8999 		return NULL;
9000 
9001 	ether_addr_copy(profile->mac_prefix, mac);
9002 	refcount_set(&profile->ref_count, 1);
9003 	return profile;
9004 }
9005 
9006 static struct mlxsw_sp_rif_mac_profile *
9007 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
9008 {
9009 	struct mlxsw_sp_router *router = mlxsw_sp->router;
9010 	struct mlxsw_sp_rif_mac_profile *profile;
9011 	int id;
9012 
9013 	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
9014 		if (ether_addr_equal_masked(profile->mac_prefix, mac,
9015 					    mlxsw_sp->mac_mask))
9016 			return profile;
9017 	}
9018 
9019 	return NULL;
9020 }
9021 
9022 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
9023 {
9024 	const struct mlxsw_sp *mlxsw_sp = priv;
9025 
9026 	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
9027 }
9028 
9029 static u64 mlxsw_sp_rifs_occ_get(void *priv)
9030 {
9031 	const struct mlxsw_sp *mlxsw_sp = priv;
9032 
9033 	return atomic_read(&mlxsw_sp->router->rifs_count);
9034 }
9035 
9036 static struct mlxsw_sp_rif_mac_profile *
9037 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
9038 				struct netlink_ext_ack *extack)
9039 {
9040 	struct mlxsw_sp_rif_mac_profile *profile;
9041 	int err;
9042 
9043 	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
9044 	if (!profile)
9045 		return ERR_PTR(-ENOMEM);
9046 
9047 	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
9048 	if (err)
9049 		goto profile_index_alloc_err;
9050 
9051 	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
9052 	return profile;
9053 
9054 profile_index_alloc_err:
9055 	kfree(profile);
9056 	return ERR_PTR(err);
9057 }
9058 
9059 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
9060 					     u8 mac_profile)
9061 {
9062 	struct mlxsw_sp_rif_mac_profile *profile;
9063 
9064 	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
9065 	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
9066 	kfree(profile);
9067 }
9068 
9069 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
9070 					const char *mac, u8 *p_mac_profile,
9071 					struct netlink_ext_ack *extack)
9072 {
9073 	struct mlxsw_sp_rif_mac_profile *profile;
9074 
9075 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
9076 	if (profile) {
9077 		refcount_inc(&profile->ref_count);
9078 		goto out;
9079 	}
9080 
9081 	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
9082 	if (IS_ERR(profile))
9083 		return PTR_ERR(profile);
9084 
9085 out:
9086 	*p_mac_profile = profile->id;
9087 	return 0;
9088 }
9089 
9090 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
9091 					 u8 mac_profile)
9092 {
9093 	struct mlxsw_sp_rif_mac_profile *profile;
9094 
9095 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
9096 			   mac_profile);
9097 	if (WARN_ON(!profile))
9098 		return;
9099 
9100 	if (!refcount_dec_and_test(&profile->ref_count))
9101 		return;
9102 
9103 	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
9104 }
9105 
9106 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
9107 {
9108 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9109 	struct mlxsw_sp_rif_mac_profile *profile;
9110 
9111 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
9112 			   rif->mac_profile_id);
9113 	if (WARN_ON(!profile))
9114 		return false;
9115 
9116 	return refcount_read(&profile->ref_count) > 1;
9117 }
9118 
9119 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
9120 					 const char *new_mac)
9121 {
9122 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9123 	struct mlxsw_sp_rif_mac_profile *profile;
9124 
9125 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
9126 			   rif->mac_profile_id);
9127 	if (WARN_ON(!profile))
9128 		return -EINVAL;
9129 
9130 	ether_addr_copy(profile->mac_prefix, new_mac);
9131 	return 0;
9132 }
9133 
9134 static int
9135 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
9136 				 struct mlxsw_sp_rif *rif,
9137 				 const char *new_mac,
9138 				 struct netlink_ext_ack *extack)
9139 {
9140 	u8 mac_profile;
9141 	int err;
9142 
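	/* If this RIF is the sole user of its MAC profile and no existing
	 * profile already matches the new MAC, the profile can simply be
	 * edited in place instead of allocating a new one.
	 */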
9143 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
9144 	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
9145 		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
9146 
9147 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
9148 					   &mac_profile, extack);
9149 	if (err)
9150 		return err;
9151 
9152 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
9153 	rif->mac_profile_id = mac_profile;
9154 	return 0;
9155 }
9156 
9157 static int
9158 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
9159 				 struct net_device *l3_dev,
9160 				 struct netlink_ext_ack *extack)
9161 {
9162 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
9163 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
9164 	struct mlxsw_sp_rif_params params;
9165 	u16 vid = mlxsw_sp_port_vlan->vid;
9166 	struct mlxsw_sp_rif *rif;
9167 	struct mlxsw_sp_fid *fid;
9168 	int err;
9169 
9170 	params = (struct mlxsw_sp_rif_params) {
9171 		.dev = l3_dev,
9172 		.vid = vid,
9173 	};
9174 
9175 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
9176 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
9177 	if (IS_ERR(rif))
9178 		return PTR_ERR(rif);
9179 
9180 	/* FID was already created, just take a reference */
9181 	fid = rif->ops->fid_get(rif, &params, extack);
9182 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
9183 	if (err)
9184 		goto err_fid_port_vid_map;
9185 
9186 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
9187 	if (err)
9188 		goto err_port_vid_learning_set;
9189 
9190 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
9191 					BR_STATE_FORWARDING);
9192 	if (err)
9193 		goto err_port_vid_stp_set;
9194 
9195 	mlxsw_sp_port_vlan->fid = fid;
9196 
9197 	return 0;
9198 
9199 err_port_vid_stp_set:
9200 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
9201 err_port_vid_learning_set:
9202 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
9203 err_fid_port_vid_map:
9204 	mlxsw_sp_fid_put(fid);
9205 	mlxsw_sp_rif_subport_put(rif);
9206 	return err;
9207 }
9208 
9209 static void
9210 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
9211 {
9212 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
9213 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
9214 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
9215 	u16 vid = mlxsw_sp_port_vlan->vid;
9216 
9217 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
9218 		return;
9219 
9220 	mlxsw_sp_port_vlan->fid = NULL;
9221 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
9222 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
9223 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
9224 	mlxsw_sp_fid_put(fid);
9225 	mlxsw_sp_rif_subport_put(rif);
9226 }
9227 
9228 static int
9229 mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
9230 					struct net_device *l3_dev,
9231 					struct netlink_ext_ack *extack)
9232 {
9233 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
9234 
9235 	lockdep_assert_held(&mlxsw_sp->router->lock);
9236 
9237 	if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
9238 		return 0;
9239 
9240 	return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
9241 						extack);
9242 }
9243 
9244 void
9245 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
9246 {
9247 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
9248 
9249 	mutex_lock(&mlxsw_sp->router->lock);
9250 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9251 	mutex_unlock(&mlxsw_sp->router->lock);
9252 }
9253 
9254 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
9255 					     struct net_device *port_dev,
9256 					     unsigned long event, u16 vid,
9257 					     struct netlink_ext_ack *extack)
9258 {
9259 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
9260 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9261 
9262 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
9263 	if (WARN_ON(!mlxsw_sp_port_vlan))
9264 		return -EINVAL;
9265 
9266 	switch (event) {
9267 	case NETDEV_UP:
9268 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
9269 							l3_dev, extack);
9270 	case NETDEV_DOWN:
9271 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9272 		break;
9273 	}
9274 
9275 	return 0;
9276 }
9277 
9278 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
9279 					unsigned long event, bool nomaster,
9280 					struct netlink_ext_ack *extack)
9281 {
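	/* A port enslaved to a bridge or LAG is represented by the RIF of
	 * its master device, so address events on the port itself are
	 * ignored unless the caller asked to bypass the master check.
	 */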
9282 	if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
9283 			  netif_is_lag_port(port_dev)))
9284 		return 0;
9285 
9286 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
9287 						 MLXSW_SP_DEFAULT_VID, extack);
9288 }
9289 
9290 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
9291 					 struct net_device *lag_dev,
9292 					 unsigned long event, u16 vid,
9293 					 struct netlink_ext_ack *extack)
9294 {
9295 	struct net_device *port_dev;
9296 	struct list_head *iter;
9297 	int err;
9298 
9299 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
9300 		if (mlxsw_sp_port_dev_check(port_dev)) {
9301 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
9302 								port_dev,
9303 								event, vid,
9304 								extack);
9305 			if (err)
9306 				return err;
9307 		}
9308 	}
9309 
9310 	return 0;
9311 }
9312 
9313 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
9314 				       unsigned long event, bool nomaster,
9315 				       struct netlink_ext_ack *extack)
9316 {
9317 	if (!nomaster && netif_is_bridge_port(lag_dev))
9318 		return 0;
9319 
9320 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
9321 					     MLXSW_SP_DEFAULT_VID, extack);
9322 }
9323 
9324 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
9325 					  struct net_device *l3_dev,
9326 					  int lower_pvid,
9327 					  unsigned long event,
9328 					  struct netlink_ext_ack *extack)
9329 {
9330 	struct mlxsw_sp_rif_params params = {
9331 		.dev = l3_dev,
9332 	};
9333 	struct mlxsw_sp_rif *rif;
9334 	int err;
9335 
9336 	switch (event) {
9337 	case NETDEV_UP:
9338 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
9339 			u16 proto;
9340 
9341 			br_vlan_get_proto(l3_dev, &proto);
9342 			if (proto == ETH_P_8021AD) {
9343 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
9344 				return -EOPNOTSUPP;
9345 			}
9346 			err = br_vlan_get_pvid(l3_dev, &params.vid);
9347 			if (err)
9348 				return err;
9349 			if (!params.vid)
9350 				return 0;
9351 		} else if (is_vlan_dev(l3_dev)) {
9352 			params.vid = vlan_dev_vlan_id(l3_dev);
9353 
9354 			/* If the VID matches the PVID of the bridge below, the
9355 			 * bridge owns the RIF for this VLAN. Don't do anything.
9356 			 */
9357 			if ((int)params.vid == lower_pvid)
9358 				return 0;
9359 		}
9360 
9361 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
9362 		if (IS_ERR(rif))
9363 			return PTR_ERR(rif);
9364 		break;
9365 	case NETDEV_DOWN:
9366 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9367 		mlxsw_sp_rif_destroy(rif);
9368 		break;
9369 	}
9370 
9371 	return 0;
9372 }
9373 
9374 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
9375 					struct net_device *vlan_dev,
9376 					unsigned long event, bool nomaster,
9377 					struct netlink_ext_ack *extack)
9378 {
9379 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
9380 	u16 vid = vlan_dev_vlan_id(vlan_dev);
9381 	u16 lower_pvid;
9382 	int err;
9383 
9384 	if (!nomaster && netif_is_bridge_port(vlan_dev))
9385 		return 0;
9386 
9387 	if (mlxsw_sp_port_dev_check(real_dev)) {
9388 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
9389 							 event, vid, extack);
9390 	} else if (netif_is_lag_master(real_dev)) {
9391 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
9392 						     vid, extack);
9393 	} else if (netif_is_bridge_master(real_dev) &&
9394 		   br_vlan_enabled(real_dev)) {
9395 		err = br_vlan_get_pvid(real_dev, &lower_pvid);
9396 		if (err)
9397 			return err;
9398 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
9399 						      lower_pvid, event,
9400 						      extack);
9401 	}
9402 
9403 	return 0;
9404 }
9405 
9406 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
9407 {
9408 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
9409 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9410 
9411 	return ether_addr_equal_masked(mac, vrrp4, mask);
9412 }
9413 
9414 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
9415 {
9416 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
9417 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9418 
9419 	return ether_addr_equal_masked(mac, vrrp6, mask);
9420 }
9421 
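/* The VRRP virtual MAC is 00:00:5e:00:01:{VRID} for IPv4 and
 * 00:00:5e:00:02:{VRID} for IPv6 (RFC 5798), so the VRID to program
 * into the router interface is simply the last byte of the MAC.
 */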
9422 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9423 				const u8 *mac, bool adding)
9424 {
9425 	char ritr_pl[MLXSW_REG_RITR_LEN];
9426 	u8 vrrp_id = adding ? mac[5] : 0;
9427 	int err;
9428 
9429 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
9430 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
9431 		return 0;
9432 
9433 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9434 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9435 	if (err)
9436 		return err;
9437 
9438 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
9439 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
9440 	else
9441 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
9442 
9443 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9444 }
9445 
9446 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9447 				    const struct net_device *macvlan_dev,
9448 				    struct netlink_ext_ack *extack)
9449 {
9450 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9451 	struct mlxsw_sp_rif *rif;
9452 	int err;
9453 
9454 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9455 	if (!rif)
9456 		return 0;
9457 
9458 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9459 				  mlxsw_sp_fid_index(rif->fid), true);
9460 	if (err)
9461 		return err;
9462 
9463 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9464 				   macvlan_dev->dev_addr, true);
9465 	if (err)
9466 		goto err_rif_vrrp_add;
9467 
9468 	/* Make sure the bridge driver does not have this MAC pointing at
9469 	 * some other port.
9470 	 */
9471 	if (rif->ops->fdb_del)
9472 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9473 
9474 	return 0;
9475 
9476 err_rif_vrrp_add:
9477 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9478 			    mlxsw_sp_fid_index(rif->fid), false);
9479 	return err;
9480 }
9481 
__mlxsw_sp_rif_macvlan_del(struct mlxsw_sp * mlxsw_sp,const struct net_device * macvlan_dev)9482 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9483 				       const struct net_device *macvlan_dev)
9484 {
9485 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9486 	struct mlxsw_sp_rif *rif;
9487 
9488 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9489 	/* If we do not have a RIF, then we already took care of
9490 	 * removing the macvlan's MAC during RIF deletion.
9491 	 */
9492 	if (!rif)
9493 		return;
9494 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9495 			     false);
9496 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9497 			    mlxsw_sp_fid_index(rif->fid), false);
9498 }
9499 
mlxsw_sp_rif_macvlan_del(struct mlxsw_sp * mlxsw_sp,const struct net_device * macvlan_dev)9500 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9501 			      const struct net_device *macvlan_dev)
9502 {
9503 	mutex_lock(&mlxsw_sp->router->lock);
9504 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9505 	mutex_unlock(&mlxsw_sp->router->lock);
9506 }
9507 
mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp * mlxsw_sp,struct net_device * macvlan_dev,unsigned long event,struct netlink_ext_ack * extack)9508 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9509 					   struct net_device *macvlan_dev,
9510 					   unsigned long event,
9511 					   struct netlink_ext_ack *extack)
9512 {
9513 	switch (event) {
9514 	case NETDEV_UP:
9515 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9516 	case NETDEV_DOWN:
9517 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9518 		break;
9519 	}
9520 
9521 	return 0;
9522 }
9523 
__mlxsw_sp_inetaddr_event(struct mlxsw_sp * mlxsw_sp,struct net_device * dev,unsigned long event,bool nomaster,struct netlink_ext_ack * extack)9524 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9525 				     struct net_device *dev,
9526 				     unsigned long event, bool nomaster,
9527 				     struct netlink_ext_ack *extack)
9528 {
9529 	if (mlxsw_sp_port_dev_check(dev))
9530 		return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
9531 						    extack);
9532 	else if (netif_is_lag_master(dev))
9533 		return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
9534 						   extack);
9535 	else if (netif_is_bridge_master(dev))
9536 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
9537 						      extack);
9538 	else if (is_vlan_dev(dev))
9539 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9540 						    nomaster, extack);
9541 	else if (netif_is_macvlan(dev))
9542 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9543 						       extack);
9544 	else
9545 		return 0;
9546 }
9547 
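/* NETDEV_UP is instead handled from the address validator notifiers below,
 * presumably so that a configuration failure can veto the address addition
 * and be reported through the validator's extack. This notifier therefore
 * only handles the teardown side.
 */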
static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
	mutex_lock(&router->lock);
	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
					NULL);
out:
	mutex_unlock(&router->lock);
	return notifier_from_errno(err);
}

static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
					 unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
					ivi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

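/* Unlike its IPv4 counterpart, the inet6addr notifier runs in atomic
 * context (with rcu_read_lock() held), while RIF manipulation needs to
 * sleep. The event is therefore deferred to a work item, with a reference
 * held on the netdevice until the work has run.
 */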
struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	unsigned long event;
};

static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	netdev_put(dev, &inet6addr_work->dev_tracker);
	kfree(inet6addr_work);
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;
	struct mlxsw_sp_router *router;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	netdev_hold(dev, &inet6addr_work->dev_tracker, GFP_ATOMIC);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}

static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
					  unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
					i6vi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu, u8 mac_profile)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

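/* Propagate a MAC or MTU change on the RIF's netdevice to the device: drop
 * the FDB entry for the old MAC, move to a MAC profile matching the new
 * address, rewrite the RITR entry, install an FDB entry for the new MAC
 * and, on an MTU change, update the multicast routing tables. Each step is
 * rolled back in reverse order on failure.
 */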
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_rif *rif,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u8 old_mac_profile;
	u16 fid_index;
	int err;

	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	old_mac_profile = rif->mac_profile_id;
	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
					       extack);
	if (err)
		goto err_rif_mac_profile_replace;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu, rif->mac_profile_id);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
			  old_mac_profile);
err_rif_edit:
	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
err_rif_mac_profile_replace:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

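/* Veto a MAC change before it happens if it would require allocating a new
 * RIF MAC profile and none are available. A change is allowed when a
 * matching profile already exists, when there is still room for a new one,
 * or when this RIF is its profile's only user, so the profile can simply be
 * edited in place.
 */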
static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
			    struct netdev_notifier_pre_changeaddr_info *info)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_mac_profile *profile;
	struct netlink_ext_ack *extack;
	u8 max_rif_mac_profiles;
	u64 occ;

	extack = netdev_notifier_info_to_extack(&info->info);

	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
	if (profile)
		return 0;

	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
	if (occ < max_rif_mac_profiles)
		return 0;

	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
	return -ENOBUFS;
}

static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *dev)
{
	struct vlan_dev_priv *vlan;

	if (netif_is_lag_master(dev) ||
	    netif_is_bridge_master(dev) ||
	    mlxsw_sp_port_dev_check(dev) ||
	    mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
	    netif_is_l3_master(dev))
		return true;

	if (!is_vlan_dev(dev))
		return false;

	vlan = vlan_dev_priv(dev);
	return netif_is_lag_master(vlan->real_dev) ||
	       netif_is_bridge_master(vlan->real_dev) ||
	       mlxsw_sp_port_dev_check(vlan->real_dev);
}

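/* A CRIF (candidate RIF) is kept for every netdevice that could plausibly
 * back a RIF, for the lifetime of the netdevice. It anchors the nexthops
 * that resolve through the device even while no RIF exists for it yet.
 */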
static struct mlxsw_sp_crif *
mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;
	int err;

	if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
		return NULL;

	crif = mlxsw_sp_crif_alloc(dev);
	if (!crif)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_crif_insert(router, crif);
	if (err)
		goto err_netdev_insert;

	return crif;

err_netdev_insert:
	mlxsw_sp_crif_free(crif);
	return ERR_PTR(err);
}

static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
				     struct mlxsw_sp_crif *crif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	mlxsw_sp_crif_remove(router, crif);

	list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
		mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);

	if (crif->rif)
		crif->can_destroy = true;
	else
		mlxsw_sp_crif_free(crif);
}

static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
				       struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
		return 0;

	crif = mlxsw_sp_crif_register(router, dev);
	return PTR_ERR_OR_ZERO(crif);
}

static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
					  struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
		return;

	/* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
	 * the NETDEV_UNREGISTER message, so we can get here twice. If that's
	 * what happened, the netdevice state is NETREG_UNREGISTERED. In that
	 * case, we expect to have collected the CRIF already, and warn if it
	 * still exists. Otherwise we expect the CRIF to exist.
	 */
	crif = mlxsw_sp_crif_lookup(router, dev);
	if (dev->reg_state == NETREG_UNREGISTERED) {
		if (!WARN_ON(crif))
			return;
	}
	if (WARN_ON(!crif))
		return;

	mlxsw_sp_crif_unregister(router, crif);
}

static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
{
	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return true;
	}

	return false;
}

static int
mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
					unsigned long event,
					struct netdev_notifier_offload_xstats_info *info)
{
	switch (info->type) {
	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		break;
	default:
		return 0;
	}

	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
		return mlxsw_sp_router_port_l3_stats_enable(rif);
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
		mlxsw_sp_router_port_l3_stats_disable(rif);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
	}

	WARN_ON_ONCE(1);
	return 0;
}

static int
mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *dev,
				      unsigned long event,
				      struct netdev_notifier_offload_xstats_info *info)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
}

static bool mlxsw_sp_is_router_event(unsigned long event)
{
	switch (event) {
	case NETDEV_PRE_CHANGEADDR:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGEMTU:
		return true;
	default:
		return false;
	}
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
						unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
	case NETDEV_PRE_CHANGEADDR:
		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
					  extack);

	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
					 extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int
mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
			     struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	/* We do not create a RIF for a macvlan, but only use it to
	 * direct more MAC addresses to the router.
	 */
	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}

	return err;
}

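/* Context for replaying NETDEV_UP on the IP addresses of netdevices in a
 * driver-relevant subtree. 'done' counts successful replays so that a
 * failure mid-walk can be unwound precisely; 'deslavement' selects whether
 * the replayed event should ignore the device's master.
 */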
struct mlxsw_sp_router_replay_inetaddr_up {
	struct mlxsw_sp *mlxsw_sp;
	struct netlink_ext_ack *extack;
	unsigned int done;
	bool deslavement;
};

static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev,
					      struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
	bool nomaster = ctx->deslavement;
	struct mlxsw_sp_crif *crif;
	int err;

	if (mlxsw_sp_dev_addr_list_empty(dev))
		return 0;

	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
	if (!crif || crif->rif)
		return 0;

	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
		return 0;

	err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP,
					nomaster, ctx->extack);
	if (err)
		return err;

	ctx->done++;
	return 0;
}

static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev,
						struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
	bool nomaster = ctx->deslavement;
	struct mlxsw_sp_crif *crif;

	if (!ctx->done)
		return 0;

	if (mlxsw_sp_dev_addr_list_empty(dev))
		return 0;

	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
	if (!crif || !crif->rif)
		return 0;

	/* We are rolling back NETDEV_UP, so ask for that. */
	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
		return 0;

	__mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster,
				  NULL);

	ctx->done--;
	return 0;
}

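/* When a netdevice gains a master, replay NETDEV_UP on it and on all of its
 * uppers, so that RIFs are created for any of them that already carry IP
 * addresses. On failure the walk is unwound via the 'done' counter above.
 */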
int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *upper_dev,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
		.mlxsw_sp = mlxsw_sp,
		.extack = extack,
		.deslavement = false,
	};
	struct netdev_nested_priv priv = {
		.data = &ctx,
	};
	int err;

	err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv);
	if (err)
		return err;

	err = netdev_walk_all_upper_dev_rcu(upper_dev,
					    mlxsw_sp_router_replay_inetaddr_up,
					    &priv);
	if (err)
		goto err_replay_up;

	return 0;

err_replay_up:
	netdev_walk_all_upper_dev_rcu(upper_dev,
				      mlxsw_sp_router_unreplay_inetaddr_up,
				      &priv);
	mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv);
	return err;
}

void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *dev)
{
	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
		.mlxsw_sp = mlxsw_sp,
		.deslavement = true,
	};
	struct netdev_nested_priv priv = {
		.data = &ctx,
	};

	mlxsw_sp_router_replay_inetaddr_up(dev, &priv);
}

static int
mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, struct net_device *dev,
				       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
							    vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan,
						       dev, extack);
}

static void
mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			       struct net_device *dev)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
							    vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

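/* When a port joins a LAG that already has RIFs, attach the port to the
 * router configuration of the LAG itself (via the default VID) and of every
 * VLAN upper of the LAG. The 'done' counter allows unwinding exactly the
 * VLAN uppers that were joined if a later join fails.
 */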
static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *lag_dev,
					   struct netlink_ext_ack *extack)
{
	u16 default_vid = MLXSW_SP_DEFAULT_VID;
	struct net_device *upper_dev;
	struct list_head *iter;
	int done = 0;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid,
						     lag_dev, extack);
	if (err)
		return err;

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid,
							     upper_dev, extack);
		if (err)
			goto err_router_join_dev;

		++done;
	}

	return 0;

err_router_join_dev:
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;
		if (!done--)
			break;

		vid = vlan_dev_vlan_id(upper_dev);
		mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
	}

	mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
	return err;
}

static void
__mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	u16 default_vid = MLXSW_SP_DEFAULT_VID;
	struct net_device *upper_dev;
	struct list_head *iter;
	u16 vid;

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
	}

	mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
}

int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	int err;

	mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
	err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);

	return err;
}

void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
	__mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
}

static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_router *router;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
	mlxsw_sp = router->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);

	if (event == NETDEV_REGISTER) {
		err = mlxsw_sp_netdevice_register(router, dev);
		if (err)
			/* No need to roll this back, UNREGISTER will collect it
			 * anyhow.
			 */
			goto out;
	}

	if (mlxsw_sp_is_offload_xstats_event(event))
		err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
							    event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_is_router_event(event))
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);

	if (event == NETDEV_UNREGISTER)
		mlxsw_sp_netdevice_unregister(router, dev);

out:
	mutex_unlock(&mlxsw_sp->router->lock);

	return notifier_from_errno(err);
}

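/* When a RIF is created for a netdevice that already has macvlan uppers,
 * their MACs must be replayed into the FDB here, since the usual
 * inetaddr-driven path only covers macvlans created after the RIF.
 */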
struct mlxsw_sp_macvlan_replay {
	struct mlxsw_sp *mlxsw_sp;
	struct netlink_ext_ack *extack;
};

static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev,
					 struct netdev_nested_priv *priv)
{
	const struct mlxsw_sp_macvlan_replay *rms = priv->data;
	struct netlink_ext_ack *extack = rms->extack;
	struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack);
}

static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_macvlan_replay rms = {
		.mlxsw_sp = rif->mlxsw_sp,
		.extack = extack,
	};
	struct netdev_nested_priv priv = {
		.data = &rms,
	};

	return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif),
					     mlxsw_sp_macvlan_replay_upper,
					     &priv);
}

static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
					struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				   mlxsw_sp_fid_index(rif->fid), false);
}

static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct netdev_nested_priv priv = {
		.data = (void *)rif,
	};

	if (!netif_is_macvlan_port(dev))
		return 0;

	return netdev_walk_all_upper_dev_rcu(dev,
					     __mlxsw_sp_rif_macvlan_flush, &priv);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_set(&rif_subport->ref_count, 1);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 efid;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	efid = mlxsw_sp_fid_index(rif->fid);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  efid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

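/* The configure callbacks below all follow roughly the same pattern: take
 * a reference on a MAC profile, program the RITR entry, replay pre-existing
 * macvlan uppers, install an FDB entry that steers the RIF's MAC to the
 * router, and finally link the FID to the RIF. Deconfigure (and the error
 * path) undoes the steps in reverse order.
 */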
static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
					  struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		goto err_rif_subport_op;

	err = mlxsw_sp_macvlan_replay(rif, extack);
	if (err)
		goto err_macvlan_replay;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_rif_macvlan_flush(rif);
err_macvlan_replay:
	mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_unset(fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     const struct mlxsw_sp_rif_params *params,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
	enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

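/* The router port is a virtual port, one past the highest port number, used
 * as the flood destination for multicast and broadcast traffic that must
 * reach the router on FID-based interfaces.
 */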
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
				      struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
	if (err)
		goto err_rif_fid_op;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_macvlan_replay(rif, extack);
	if (err)
		goto err_macvlan_replay;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_rif_macvlan_flush(rif);
err_macvlan_replay:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_fid_op(rif, fid_index, false);
err_rif_fid_op:
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_unset(fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_fid_op(rif, fid_index, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 const struct mlxsw_sp_rif_params *params,
			 struct netlink_ext_ack *extack)
{
	int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);

	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
}

static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info = {};
	struct net_device *dev;

	dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = 0;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
};

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  const struct mlxsw_sp_rif_params *params,
			  struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct net_device *br_dev;

	if (WARN_ON(!params->vid))
		return ERR_PTR(-EINVAL);

	if (is_vlan_dev(dev)) {
		br_dev = vlan_dev_real_dev(dev);
		if (WARN_ON(!netif_is_bridge_master(br_dev)))
			return ERR_PTR(-EINVAL);
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
}

static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct switchdev_notifier_fdb_info info = {};
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct net_device *br_dev;
	struct net_device *dev;

	br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
	dev = br_fdb_find_port(br_dev, mac, vid);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
				bool enable)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
				    dev->mtu, dev->dev_addr,
				    rif->mac_profile_id, vid, efid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
				       struct netlink_ext_ack *extack)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
	if (err)
		goto err_rif_vlan_fid_op;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_macvlan_replay(rif, extack);
	if (err)
		goto err_macvlan_replay;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_rif_macvlan_flush(rif);
err_macvlan_replay:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
err_rif_vlan_fid_op:
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_fid_rif_unset(rif->fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

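/* On Spectrum-1 the VLAN RIF is created with eFID 0, whereas Spectrum-2 and
 * later pass the FID index as the egress FID of the RIF.
 */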
static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
					struct netlink_ext_ack *extack)
{
	return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp1_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
};

static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
					struct netlink_ext_ack *extack)
{
	u16 efid = mlxsw_sp_fid_index(rif->fid);

	return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp2_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
};

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

static int
mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct net_device *dev = mlxsw_sp_rif_dev(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	lb_rif->ul_rif_id = 0;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp1_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
};

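/* On Spectrum-2 and later, IP-in-IP loopbacks are split into an overlay
 * loopback RIF and a shared underlay RIF (UL RIF). One UL RIF is created
 * per virtual router and reference-counted, so all tunnels whose underlay
 * resolves in the same VR share it.
 */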
10875 static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif * ul_rif,bool enable)10876 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
10877 {
10878 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10879 	char ritr_pl[MLXSW_REG_RITR_LEN];
10880 
10881 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
10882 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
10883 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
10884 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
10885 
10886 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10887 }
10888 
10889 static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_vr * vr,struct mlxsw_sp_crif * ul_crif,struct netlink_ext_ack * extack)10890 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
10891 		       struct mlxsw_sp_crif *ul_crif,
10892 		       struct netlink_ext_ack *extack)
10893 {
10894 	struct mlxsw_sp_rif *ul_rif;
10895 	u8 rif_entries = 1;
10896 	u16 rif_index;
10897 	int err;
10898 
10899 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
10900 	if (err) {
10901 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
10902 		return ERR_PTR(err);
10903 	}
10904 
10905 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id,
10906 				    ul_crif);
10907 	if (!ul_rif) {
10908 		err = -ENOMEM;
10909 		goto err_rif_alloc;
10910 	}
10911 
10912 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
10913 	ul_rif->mlxsw_sp = mlxsw_sp;
10914 	ul_rif->rif_entries = rif_entries;
10915 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
10916 	if (err)
10917 		goto ul_rif_op_err;
10918 
10919 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
10920 	return ul_rif;
10921 
10922 ul_rif_op_err:
10923 	mlxsw_sp->router->rifs[rif_index] = NULL;
10924 	mlxsw_sp_rif_free(ul_rif);
10925 err_rif_alloc:
10926 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10927 	return ERR_PTR(err);
10928 }
10929 
mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif * ul_rif)10930 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
10931 {
10932 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10933 	u8 rif_entries = ul_rif->rif_entries;
10934 	u16 rif_index = ul_rif->rif_index;
10935 
10936 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
10937 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
10938 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
10939 	mlxsw_sp_rif_free(ul_rif);
10940 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10941 }
10942 
10943 static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp * mlxsw_sp,u32 tb_id,struct mlxsw_sp_crif * ul_crif,struct netlink_ext_ack * extack)10944 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
10945 		    struct mlxsw_sp_crif *ul_crif,
10946 		    struct netlink_ext_ack *extack)
10947 {
10948 	struct mlxsw_sp_vr *vr;
10949 	int err;
10950 
10951 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
10952 	if (IS_ERR(vr))
10953 		return ERR_CAST(vr);
10954 
10955 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
10956 		return vr->ul_rif;
10957 
10958 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
10959 	if (IS_ERR(vr->ul_rif)) {
10960 		err = PTR_ERR(vr->ul_rif);
10961 		goto err_ul_rif_create;
10962 	}
10963 
10964 	vr->rif_count++;
10965 	refcount_set(&vr->ul_rif_refcnt, 1);
10966 
10967 	return vr->ul_rif;
10968 
10969 err_ul_rif_create:
10970 	mlxsw_sp_vr_put(mlxsw_sp, vr);
10971 	return ERR_PTR(err);
10972 }
10973 
mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif * ul_rif)10974 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
10975 {
10976 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10977 	struct mlxsw_sp_vr *vr;
10978 
10979 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
10980 
10981 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
10982 		return;
10983 
10984 	vr->rif_count--;
10985 	mlxsw_sp_ul_rif_destroy(ul_rif);
10986 	mlxsw_sp_vr_put(mlxsw_sp, vr);
10987 }
10988 
mlxsw_sp_router_ul_rif_get(struct mlxsw_sp * mlxsw_sp,u32 ul_tb_id,u16 * ul_rif_index)10989 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
10990 			       u16 *ul_rif_index)
10991 {
10992 	struct mlxsw_sp_rif *ul_rif;
10993 	int err = 0;
10994 
10995 	mutex_lock(&mlxsw_sp->router->lock);
10996 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
10997 	if (IS_ERR(ul_rif)) {
10998 		err = PTR_ERR(ul_rif);
10999 		goto out;
11000 	}
11001 	*ul_rif_index = ul_rif->rif_index;
11002 out:
11003 	mutex_unlock(&mlxsw_sp->router->lock);
11004 	return err;
11005 }
11006 
11007 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
11008 {
11009 	struct mlxsw_sp_rif *ul_rif;
11010 
11011 	mutex_lock(&mlxsw_sp->router->lock);
11012 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
11013 	if (WARN_ON(!ul_rif))
11014 		goto out;
11015 
11016 	mlxsw_sp_ul_rif_put(ul_rif);
11017 out:
11018 	mutex_unlock(&mlxsw_sp->router->lock);
11019 }
11020 
11021 static int
11022 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
11023 				struct netlink_ext_ack *extack)
11024 {
11025 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
11026 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
11027 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
11028 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
11029 	struct mlxsw_sp_rif *ul_rif;
11030 	int err;
11031 
11032 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
11033 	if (IS_ERR(ul_rif))
11034 		return PTR_ERR(ul_rif);
11035 
11036 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
11037 	if (err)
11038 		goto err_loopback_op;
11039 
11040 	lb_rif->ul_vr_id = 0;
11041 	lb_rif->ul_rif_id = ul_rif->rif_index;
11042 
11043 	return 0;
11044 
11045 err_loopback_op:
11046 	mlxsw_sp_ul_rif_put(ul_rif);
11047 	return err;
11048 }
11049 
11050 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
11051 {
11052 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
11053 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
11054 	struct mlxsw_sp_rif *ul_rif;
11055 
11056 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
11057 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
11058 	mlxsw_sp_ul_rif_put(ul_rif);
11059 }
11060 
11061 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
11062 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
11063 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
11064 	.setup			= mlxsw_sp_rif_ipip_lb_setup,
11065 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
11066 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
11067 };
11068 
11069 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
11070 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
11071 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp2_rif_vlan_ops,
11072 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
11073 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
11074 };
11075 
11076 static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
11077 {
11078 	struct gen_pool *rifs_table;
11079 	int err;
11080 
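	/* Order-0 granularity, i.e. the allocation unit is a single RIF
	 * entry; -1 means no NUMA node affinity for the pool metadata.
	 */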
11081 	rifs_table = gen_pool_create(0, -1);
11082 	if (!rifs_table)
11083 		return -ENOMEM;
11084 
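	/* Align each allocation to the order of its size, so that RIFs
	 * spanning multiple entries (e.g. double-entry RIFs) start at a
	 * suitably aligned index.
	 */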
11085 	gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
11086 			  NULL);
11087 
11088 	err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
11089 			   MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
11090 	if (err)
11091 		goto err_gen_pool_add;
11092 
11093 	mlxsw_sp->router->rifs_table = rifs_table;
11094 
11095 	return 0;
11096 
11097 err_gen_pool_add:
11098 	gen_pool_destroy(rifs_table);
11099 	return err;
11100 }
11101 
11102 static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
11103 {
11104 	gen_pool_destroy(mlxsw_sp->router->rifs_table);
11105 }
11106 
11107 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
11108 {
11109 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
11110 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
11111 	struct mlxsw_core *core = mlxsw_sp->core;
11112 	int err;
11113 
11114 	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
11115 		return -EIO;
11116 	mlxsw_sp->router->max_rif_mac_profile =
11117 		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
11118 
11119 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
11120 					 sizeof(struct mlxsw_sp_rif *),
11121 					 GFP_KERNEL);
11122 	if (!mlxsw_sp->router->rifs)
11123 		return -ENOMEM;
11124 
11125 	err = mlxsw_sp_rifs_table_init(mlxsw_sp);
11126 	if (err)
11127 		goto err_rifs_table_init;
11128 
11129 	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
11130 	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
11131 	atomic_set(&mlxsw_sp->router->rifs_count, 0);
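	/* Let devlink query the current RIF MAC profile and RIF occupancy. */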
11132 	devl_resource_occ_get_register(devlink,
11133 				       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
11134 				       mlxsw_sp_rif_mac_profiles_occ_get,
11135 				       mlxsw_sp);
11136 	devl_resource_occ_get_register(devlink,
11137 				       MLXSW_SP_RESOURCE_RIFS,
11138 				       mlxsw_sp_rifs_occ_get,
11139 				       mlxsw_sp);
11140 
11141 	return 0;
11142 
11143 err_rifs_table_init:
11144 	kfree(mlxsw_sp->router->rifs);
11145 	return err;
11146 }
11147 
11148 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
11149 {
11150 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
11151 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
11152 	int i;
11153 
11154 	WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
11155 	for (i = 0; i < max_rifs; i++)
11156 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
11157 
11158 	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
11159 	devl_resource_occ_get_unregister(devlink,
11160 					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
11161 	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
11162 	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
11163 	mlxsw_sp_rifs_table_fini(mlxsw_sp);
11164 	kfree(mlxsw_sp->router->rifs);
11165 }
11166 
11167 static int
11168 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
11169 {
11170 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
11171 
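	/* ttlc = true: the underlay TTL is copied from the overlay packet,
	 * so the fixed-TTL argument (0) is unused (assumed TIGCR semantics).
	 */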
11172 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
11173 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
11174 }
11175 
11176 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
11177 {
11178 	int err;
11179 
11180 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
11181 
11182 	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
11183 	if (err)
11184 		return err;
11185 	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
11186 	if (err)
11187 		return err;
11188 
11189 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
11190 }
11191 
11192 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
11193 {
11194 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
11195 	return mlxsw_sp_ipips_init(mlxsw_sp);
11196 }
11197 
11198 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
11199 {
11200 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
11201 	return mlxsw_sp_ipips_init(mlxsw_sp);
11202 }
11203 
11204 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
11205 {
11206 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
11207 }
11208 
11209 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
11210 {
11211 	struct mlxsw_sp_router *router;
11212 
11213 	/* Flush pending FIB notifications and then flush the device's
11214 	 * table before requesting another dump. The FIB notification
11215 	 * block is unregistered, so no need to take RTNL.
11216 	 */
11217 	mlxsw_core_flush_owq();
11218 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
11219 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
11220 }
11221 
11222 #ifdef CONFIG_IP_ROUTE_MULTIPATH
11223 struct mlxsw_sp_mp_hash_config {
11224 	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
11225 	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
11226 	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
11227 	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
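	/* Some configurations (e.g. hashing on inner headers) require the
	 * device to parse deeper into the packet than it does by default.
	 */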
11228 	bool inc_parsing_depth;
11229 };
11230 
11231 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
11232 	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
11233 
11234 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
11235 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
11236 
11237 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
11238 	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
11239 
11240 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
11241 {
11242 	unsigned long *inner_headers = config->inner_headers;
11243 	unsigned long *inner_fields = config->inner_fields;
11244 
11245 	/* IPv4 inner */
11246 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
11247 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
11248 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
11249 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
11250 	/* IPv6 inner */
11251 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
11252 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
11253 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
11254 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
11255 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
11256 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
11257 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
11258 	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
11259 }
11260 
11261 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
11262 {
11263 	unsigned long *headers = config->headers;
11264 	unsigned long *fields = config->fields;
11265 
11266 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
11267 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
11268 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
11269 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
11270 }
11271 
11272 static void
11273 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
11274 			      u32 hash_fields)
11275 {
11276 	unsigned long *inner_headers = config->inner_headers;
11277 	unsigned long *inner_fields = config->inner_fields;
11278 
11279 	/* IPv4 Inner */
11280 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
11281 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
11282 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
11283 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
11284 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
11285 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
11286 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
11287 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
11288 	/* IPv6 inner */
11289 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
11290 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
11291 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
11292 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
11293 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
11294 	}
11295 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
11296 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
11297 		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
11298 	}
11299 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
11300 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
11301 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
11302 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
11303 	/* L4 inner */
11304 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
11305 	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
11306 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
11307 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
11308 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
11309 		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
11310 }
11311 
11312 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
11313 				   struct mlxsw_sp_mp_hash_config *config)
11314 {
11315 	struct net *net = mlxsw_sp_net(mlxsw_sp);
11316 	unsigned long *headers = config->headers;
11317 	unsigned long *fields = config->fields;
11318 	u32 hash_fields;
11319 
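	/* Mirror net.ipv4.fib_multipath_hash_policy: 0 - L3, 1 - L4,
	 * 2 - L3 or inner L3 if present, 3 - custom set of fields.
	 */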
11320 	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
11321 	case 0:
11322 		mlxsw_sp_mp4_hash_outer_addr(config);
11323 		break;
11324 	case 1:
11325 		mlxsw_sp_mp4_hash_outer_addr(config);
11326 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
11327 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
11328 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11329 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11330 		break;
11331 	case 2:
11332 		/* Outer */
11333 		mlxsw_sp_mp4_hash_outer_addr(config);
11334 		/* Inner */
11335 		mlxsw_sp_mp_hash_inner_l3(config);
11336 		break;
11337 	case 3:
11338 		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
11339 		/* Outer */
11340 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
11341 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
11342 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
11343 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
11344 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
11345 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
11346 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
11347 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
11348 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
11349 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
11350 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11351 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
11352 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11353 		/* Inner */
11354 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
11355 		break;
11356 	}
11357 }
11358 
11359 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
11360 {
11361 	unsigned long *headers = config->headers;
11362 	unsigned long *fields = config->fields;
11363 
11364 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
11365 	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
11366 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
11367 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
11368 	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
11369 	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
11370 }
11371 
11372 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
11373 				   struct mlxsw_sp_mp_hash_config *config)
11374 {
11375 	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
11376 	unsigned long *headers = config->headers;
11377 	unsigned long *fields = config->fields;
11378 
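	/* Mirror net.ipv6.fib_multipath_hash_policy: 0 - L3 (including the
	 * flow label), 1 - L4, 2 - L3 or inner L3 if present, 3 - custom
	 * set of fields.
	 */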
11379 	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
11380 	case 0:
11381 		mlxsw_sp_mp6_hash_outer_addr(config);
11382 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11383 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11384 		break;
11385 	case 1:
11386 		mlxsw_sp_mp6_hash_outer_addr(config);
11387 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
11388 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11389 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11390 		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11391 		break;
11392 	case 2:
11393 		/* Outer */
11394 		mlxsw_sp_mp6_hash_outer_addr(config);
11395 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11396 		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11397 		/* Inner */
11398 		mlxsw_sp_mp_hash_inner_l3(config);
11399 		config->inc_parsing_depth = true;
11400 		break;
11401 	case 3:
11402 		/* Outer */
11403 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
11404 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
11405 		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
11406 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
11407 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
11408 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
11409 		}
11410 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
11411 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
11412 			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
11413 		}
11414 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
11415 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
11416 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
11417 			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
11418 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
11419 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
11420 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
11421 			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
11422 		/* Inner */
11423 		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
11424 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
11425 			config->inc_parsing_depth = true;
11426 		break;
11427 	}
11428 }
11429 
11430 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
11431 						 bool old_inc_parsing_depth,
11432 						 bool new_inc_parsing_depth)
11433 {
11434 	int err;
11435 
11436 	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
11437 		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
11438 		if (err)
11439 			return err;
11440 		mlxsw_sp->router->inc_parsing_depth = true;
11441 	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
11442 		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
11443 		mlxsw_sp->router->inc_parsing_depth = false;
11444 	}
11445 
11446 	return 0;
11447 }
11448 
11449 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
11450 {
11451 	bool old_inc_parsing_depth, new_inc_parsing_depth;
11452 	struct mlxsw_sp_mp_hash_config config = {};
11453 	char recr2_pl[MLXSW_REG_RECR2_LEN];
11454 	unsigned long bit;
11455 	u32 seed;
11456 	int err;
11457 
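	/* Seed the hash with the switch's base MAC so that neighboring
	 * devices are likely to use different seeds, avoiding hash
	 * polarization between identical switches.
	 */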
11458 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
11459 	mlxsw_reg_recr2_pack(recr2_pl, seed);
11460 	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
11461 	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
11462 
11463 	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
11464 	new_inc_parsing_depth = config.inc_parsing_depth;
11465 	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
11466 						    old_inc_parsing_depth,
11467 						    new_inc_parsing_depth);
11468 	if (err)
11469 		return err;
11470 
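	/* Translate the computed outer/inner header and field bitmaps into
	 * the RECR2 register payload.
	 */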
11471 	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
11472 		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
11473 	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
11474 		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
11475 	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
11476 		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
11477 	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
11478 		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
11479 
11480 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
11481 	if (err)
11482 		goto err_reg_write;
11483 
11484 	return 0;
11485 
11486 err_reg_write:
11487 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
11488 					      old_inc_parsing_depth);
11489 	return err;
11490 }
11491 
11492 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
11493 {
11494 	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
11495 
11496 	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
11497 					      false);
11498 }
11499 #else
11500 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
11501 {
11502 	return 0;
11503 }
11504 
11505 static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
11506 {
11507 }
11508 #endif
11509 
11510 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
11511 {
11512 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
11513 	unsigned int i;
11514 
11515 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
11516 
11517 	/* HW determines the switch priority based on the DSCP bits, but the
11518 	 * kernel still does so based on the full ToS byte. Because the bit
11519 	 * positions differ, translate each DSCP value to the ToS value the
11520 	 * kernel would observe, skipping the two least-significant ECN bits.
11521 	 */
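	/* For example, DSCP 46 (EF) is programmed with the priority the
	 * kernel would compute for ToS 0xb8 (46 << 2).
	 */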
11522 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
11523 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
11524 
11525 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
11526 }
11527 
11528 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
11529 {
11530 	struct net *net = mlxsw_sp_net(mlxsw_sp);
11531 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
11532 	u64 max_rifs;
11533 	bool usp;
11534 
11535 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
11536 		return -EIO;
11537 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
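	/* usp mirrors net.ipv4.ip_forward_update_priority: whether to update
	 * the switch priority of a forwarded packet.
	 */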
11538 	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
11539 
11540 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
11541 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
11542 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
11543 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
11544 }
11545 
11546 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11547 {
11548 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
11549 
11550 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
11551 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
11552 }
11553 
11554 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
11555 				struct netlink_ext_ack *extack)
11556 {
11557 	struct mlxsw_sp_router *router = mlxsw_sp->router;
11558 	struct mlxsw_sp_rif *lb_rif;
11559 	int err;
11560 
11561 	router->lb_crif = mlxsw_sp_crif_alloc(NULL);
11562 	if (!router->lb_crif)
11563 		return -ENOMEM;
11564 
11565 	/* Create a generic loopback RIF associated with the main table
11566 	 * (default VRF). Any table can be used, but the main table exists
11567 	 * anyway, so we do not waste resources. Loopback RIFs are usually
11568 	 * created with a NULL CRIF, but this RIF is used as a fallback RIF
11569 	 * for blackhole nexthops, and nexthops expect to have a valid CRIF.
11570 	 */
11571 	lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif,
11572 				     extack);
11573 	if (IS_ERR(lb_rif)) {
11574 		err = PTR_ERR(lb_rif);
11575 		goto err_ul_rif_get;
11576 	}
11577 
11578 	return 0;
11579 
11580 err_ul_rif_get:
11581 	mlxsw_sp_crif_free(router->lb_crif);
11582 	return err;
11583 }
11584 
11585 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
11586 {
11587 	mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
11588 	mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
11589 }
11590 
11591 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
11592 {
11593 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
11594 
11595 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
11596 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
11597 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
11598 
11599 	return 0;
11600 }
11601 
11602 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
11603 	.init = mlxsw_sp1_router_init,
11604 	.ipips_init = mlxsw_sp1_ipips_init,
11605 };
11606 
11607 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
11608 {
11609 	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
11610 
11611 	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
11612 	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
11613 	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
11614 
11615 	return 0;
11616 }
11617 
11618 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
11619 	.init = mlxsw_sp2_router_init,
11620 	.ipips_init = mlxsw_sp2_ipips_init,
11621 };
11622 
11623 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
11624 			 struct netlink_ext_ack *extack)
11625 {
11626 	struct mlxsw_sp_router *router;
11627 	struct notifier_block *nb;
11628 	int err;
11629 
11630 	router = kzalloc(sizeof(*router), GFP_KERNEL);
11631 	if (!router)
11632 		return -ENOMEM;
11633 	mutex_init(&router->lock);
11634 	mlxsw_sp->router = router;
11635 	router->mlxsw_sp = mlxsw_sp;
11636 
11637 	err = mlxsw_sp->router_ops->init(mlxsw_sp);
11638 	if (err)
11639 		goto err_router_ops_init;
11640 
11641 	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
11642 	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
11643 			  mlxsw_sp_nh_grp_activity_work);
11644 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
11645 	err = __mlxsw_sp_router_init(mlxsw_sp);
11646 	if (err)
11647 		goto err_router_init;
11648 
11649 	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
11650 	if (err)
11651 		goto err_ipips_init;
11652 
11653 	err = rhashtable_init(&mlxsw_sp->router->crif_ht,
11654 			      &mlxsw_sp_crif_ht_params);
11655 	if (err)
11656 		goto err_crif_ht_init;
11657 
11658 	err = mlxsw_sp_rifs_init(mlxsw_sp);
11659 	if (err)
11660 		goto err_rifs_init;
11661 
11662 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
11663 			      &mlxsw_sp_nexthop_ht_params);
11664 	if (err)
11665 		goto err_nexthop_ht_init;
11666 
11667 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
11668 			      &mlxsw_sp_nexthop_group_ht_params);
11669 	if (err)
11670 		goto err_nexthop_group_ht_init;
11671 
11672 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
11673 	err = mlxsw_sp_lpm_init(mlxsw_sp);
11674 	if (err)
11675 		goto err_lpm_init;
11676 
11677 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
11678 	if (err)
11679 		goto err_mr_init;
11680 
11681 	err = mlxsw_sp_vrs_init(mlxsw_sp);
11682 	if (err)
11683 		goto err_vrs_init;
11684 
11685 	err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
11686 	if (err)
11687 		goto err_lb_rif_init;
11688 
11689 	err = mlxsw_sp_neigh_init(mlxsw_sp);
11690 	if (err)
11691 		goto err_neigh_init;
11692 
11693 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
11694 	if (err)
11695 		goto err_mp_hash_init;
11696 
11697 	err = mlxsw_sp_dscp_init(mlxsw_sp);
11698 	if (err)
11699 		goto err_dscp_init;
11700 
11701 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
11702 	err = register_inetaddr_notifier(&router->inetaddr_nb);
11703 	if (err)
11704 		goto err_register_inetaddr_notifier;
11705 
11706 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
11707 	err = register_inet6addr_notifier(&router->inet6addr_nb);
11708 	if (err)
11709 		goto err_register_inet6addr_notifier;
11710 
11711 	router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
11712 	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11713 	if (err)
11714 		goto err_register_inetaddr_valid_notifier;
11715 
11716 	nb = &router->inet6addr_valid_nb;
11717 	nb->notifier_call = mlxsw_sp_inet6addr_valid_event;
11718 	err = register_inet6addr_validator_notifier(nb);
11719 	if (err)
11720 		goto err_register_inet6addr_valid_notifier;
11721 
11722 	mlxsw_sp->router->netevent_nb.notifier_call =
11723 		mlxsw_sp_router_netevent_event;
11724 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
11725 	if (err)
11726 		goto err_register_netevent_notifier;
11727 
11728 	mlxsw_sp->router->netdevice_nb.notifier_call =
11729 		mlxsw_sp_router_netdevice_event;
11730 	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11731 					      &mlxsw_sp->router->netdevice_nb);
11732 	if (err)
11733 		goto err_register_netdev_notifier;
11734 
11735 	mlxsw_sp->router->nexthop_nb.notifier_call =
11736 		mlxsw_sp_nexthop_obj_event;
11737 	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11738 					&mlxsw_sp->router->nexthop_nb,
11739 					extack);
11740 	if (err)
11741 		goto err_register_nexthop_notifier;
11742 
11743 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
11744 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
11745 				    &mlxsw_sp->router->fib_nb,
11746 				    mlxsw_sp_router_fib_dump_flush, extack);
11747 	if (err)
11748 		goto err_register_fib_notifier;
11749 
11750 	return 0;
11751 
11752 err_register_fib_notifier:
11753 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11754 				    &mlxsw_sp->router->nexthop_nb);
11755 err_register_nexthop_notifier:
11756 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11757 					  &router->netdevice_nb);
11758 err_register_netdev_notifier:
11759 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
11760 err_register_netevent_notifier:
11761 	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11762 err_register_inet6addr_valid_notifier:
11763 	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11764 err_register_inetaddr_valid_notifier:
11765 	unregister_inet6addr_notifier(&router->inet6addr_nb);
11766 err_register_inet6addr_notifier:
11767 	unregister_inetaddr_notifier(&router->inetaddr_nb);
11768 err_register_inetaddr_notifier:
11769 	mlxsw_core_flush_owq();
11770 err_dscp_init:
11771 	mlxsw_sp_mp_hash_fini(mlxsw_sp);
11772 err_mp_hash_init:
11773 	mlxsw_sp_neigh_fini(mlxsw_sp);
11774 err_neigh_init:
11775 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
11776 err_lb_rif_init:
11777 	mlxsw_sp_vrs_fini(mlxsw_sp);
11778 err_vrs_init:
11779 	mlxsw_sp_mr_fini(mlxsw_sp);
11780 err_mr_init:
11781 	mlxsw_sp_lpm_fini(mlxsw_sp);
11782 err_lpm_init:
11783 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
11784 err_nexthop_group_ht_init:
11785 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
11786 err_nexthop_ht_init:
11787 	mlxsw_sp_rifs_fini(mlxsw_sp);
11788 err_rifs_init:
11789 	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
11790 err_crif_ht_init:
11791 	mlxsw_sp_ipips_fini(mlxsw_sp);
11792 err_ipips_init:
11793 	__mlxsw_sp_router_fini(mlxsw_sp);
11794 err_router_init:
11795 	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
11796 err_router_ops_init:
11797 	mutex_destroy(&mlxsw_sp->router->lock);
11798 	kfree(mlxsw_sp->router);
11799 	return err;
11800 }
11801 
11802 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
11803 {
11804 	struct mlxsw_sp_router *router = mlxsw_sp->router;
11805 
11806 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
11807 	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
11808 				    &router->nexthop_nb);
11809 	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
11810 					  &router->netdevice_nb);
11811 	unregister_netevent_notifier(&router->netevent_nb);
11812 	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
11813 	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
11814 	unregister_inet6addr_notifier(&router->inet6addr_nb);
11815 	unregister_inetaddr_notifier(&router->inetaddr_nb);
11816 	mlxsw_core_flush_owq();
11817 	mlxsw_sp_mp_hash_fini(mlxsw_sp);
11818 	mlxsw_sp_neigh_fini(mlxsw_sp);
11819 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
11820 	mlxsw_sp_vrs_fini(mlxsw_sp);
11821 	mlxsw_sp_mr_fini(mlxsw_sp);
11822 	mlxsw_sp_lpm_fini(mlxsw_sp);
11823 	rhashtable_destroy(&router->nexthop_group_ht);
11824 	rhashtable_destroy(&router->nexthop_ht);
11825 	mlxsw_sp_rifs_fini(mlxsw_sp);
11826 	rhashtable_destroy(&router->crif_ht);
11827 	mlxsw_sp_ipips_fini(mlxsw_sp);
11828 	__mlxsw_sp_router_fini(mlxsw_sp);
11829 	cancel_delayed_work_sync(&router->nh_grp_activity_dw);
11830 	mutex_destroy(&router->lock);
11831 	kfree(router);
11832 }
11833