1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/inetdevice.h>
7 #include <net/inet_dscp.h>
8 #include <net/switchdev.h>
9 #include <linux/rhashtable.h>
10 #include <net/nexthop.h>
11 #include <net/arp.h>
12 #include <linux/if_vlan.h>
13 #include <linux/if_macvlan.h>
14 #include <net/netevent.h>
15 
16 #include "prestera.h"
17 #include "prestera_router_hw.h"
18 
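/* This file implements the "kernel arbiter" (prestera_k_arb_*): it mirrors
 * kernel FIB entries and neighbours into hw LPM and nexthop objects. Kernel
 * state is cached in two rhashtables (kern_fib_cache_ht and
 * kern_neigh_cache_ht) so that overlapping routes and neighbour changes can
 * be re-evaluated when either side changes.
 */
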
19 #define PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
20 #define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */
21 
22 struct prestera_kern_neigh_cache_key {
23 	struct prestera_ip_addr addr;
24 	struct net_device *dev;
25 };
26 
27 struct prestera_kern_neigh_cache {
28 	struct prestera_kern_neigh_cache_key key;
29 	struct rhash_head ht_node;
30 	struct list_head kern_fib_cache_list;
31 	/* Holds prepared nh_neigh info while the neighbour is in_kernel */
32 	struct prestera_neigh_info nh_neigh_info;
33 	/* Indicates whether the neighbour is reachable by a direct route */
34 	bool reachable;
35 	/* Keeps the cache entry alive while the neigh is present in the kernel */
36 	bool in_kernel;
37 };
38 
39 struct prestera_kern_fib_cache_key {
40 	struct prestera_ip_addr addr;
41 	u32 prefix_len;
42 	u32 kern_tb_id; /* tb_id from the kernel (before prestera_fix_tb_id()) */
43 };
44 
45 /* Tracks a kernel route and the kernel neighbours it subscribes to */
46 struct prestera_kern_fib_cache {
47 	struct prestera_kern_fib_cache_key key;
48 	struct {
49 		struct prestera_fib_key fib_key;
50 		enum prestera_fib_type fib_type;
51 		struct prestera_nexthop_group_key nh_grp_key;
52 	} lpm_info; /* hold prepared lpm info */
54 	struct rhash_head ht_node; /* node of prestera_router */
55 	struct prestera_kern_neigh_cache_head {
56 		struct prestera_kern_fib_cache *this;
57 		struct list_head head;
58 		struct prestera_kern_neigh_cache *n_cache;
59 	} kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX];
60 	union {
61 		struct fib_notifier_info info; /* points to either the v4 or v6 info */
62 		struct fib_entry_notifier_info fen4_info;
63 	};
64 	bool reachable; /* the route is not overlapped by an entry in another table */
65 };
66 
67 static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = {
68 	.key_offset  = offsetof(struct prestera_kern_neigh_cache, key),
69 	.head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node),
70 	.key_len     = sizeof(struct prestera_kern_neigh_cache_key),
71 	.automatic_shrinking = true,
72 };
73 
74 static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
75 	.key_offset  = offsetof(struct prestera_kern_fib_cache, key),
76 	.head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
77 	.key_len     = sizeof(struct prestera_kern_fib_cache_key),
78 	.automatic_shrinking = true,
79 };
80 
81 /* Map kernel table ids onto the hw vr: UNSPEC/LOCAL/DEFAULT collapse into MAIN */
82 static u32 prestera_fix_tb_id(u32 tb_id)
83 {
84 	if (tb_id == RT_TABLE_UNSPEC ||
85 	    tb_id == RT_TABLE_LOCAL ||
86 	    tb_id == RT_TABLE_DEFAULT)
87 		tb_id = RT_TABLE_MAIN;
88 
89 	return tb_id;
90 }
91 
92 static void
93 prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info,
94 				     struct prestera_kern_fib_cache_key *key)
95 {
96 	struct fib_entry_notifier_info *fen_info =
97 		container_of(info, struct fib_entry_notifier_info, info);
98 
99 	memset(key, 0, sizeof(*key));
100 	key->addr.v = PRESTERA_IPV4;
101 	key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
102 	key->prefix_len = fen_info->dst_len;
103 	key->kern_tb_id = fen_info->tb_id;
104 }
105 
106 static int prestera_util_nhc2nc_key(struct prestera_switch *sw,
107 				    struct fib_nh_common *nhc,
108 				    struct prestera_kern_neigh_cache_key *nk)
109 {
110 	memset(nk, 0, sizeof(*nk));
111 	if (nhc->nhc_gw_family == AF_INET) {
112 		nk->addr.v = PRESTERA_IPV4;
113 		nk->addr.u.ipv4 = nhc->nhc_gw.ipv4;
114 	} else {
115 		nk->addr.v = PRESTERA_IPV6;
116 		nk->addr.u.ipv6 = nhc->nhc_gw.ipv6;
117 	}
118 
119 	nk->dev = nhc->nhc_dev;
120 	return 0;
121 }
122 
123 static void
124 prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck,
125 			    struct prestera_nh_neigh_key *nk)
126 {
127 	memset(nk, 0, sizeof(*nk));
128 	nk->addr = ck->addr;
129 	nk->rif = (void *)ck->dev;
130 }
131 
132 static bool
133 prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw,
134 				 struct fib_nh_common *nhc,
135 				 struct prestera_kern_neigh_cache_key *nk)
136 {
137 	struct prestera_kern_neigh_cache_key tk;
138 	int err;
139 
140 	err = prestera_util_nhc2nc_key(sw, nhc, &tk);
141 	if (err)
142 		return false;
143 
144 	if (memcmp(&tk, nk, sizeof(tk)))
145 		return false;
146 
147 	return true;
148 }
149 
150 static int
151 prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
152 			   struct prestera_kern_neigh_cache_key *key)
153 {
154 	memset(key, 0, sizeof(*key));
155 	if (n->tbl->family == AF_INET) {
156 		key->addr.v = PRESTERA_IPV4;
157 		key->addr.u.ipv4 = *(__be32 *)n->primary_key;
158 	} else {
159 		return -ENOENT;
160 	}
161 
162 	key->dev = n->dev;
163 
164 	return 0;
165 }
166 
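/* A fib_info is "direct" (a connected route) if it has exactly one nexthop
 * and that nexthop has no gateway; otherwise the route is a gatewayed ("nh")
 * route.
 */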
167 static bool __prestera_fi_is_direct(struct fib_info *fi)
168 {
169 	struct fib_nh *fib_nh;
170 
171 	if (fib_info_num_path(fi) == 1) {
172 		fib_nh = fib_info_nh(fi, 0);
173 		if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
174 			return true;
175 	}
176 
177 	return false;
178 }
179 
180 static bool prestera_fi_is_direct(struct fib_info *fi)
181 {
182 	if (fi->fib_type != RTN_UNICAST)
183 		return false;
184 
185 	return __prestera_fi_is_direct(fi);
186 }
187 
188 static bool prestera_fi_is_nh(struct fib_info *fi)
189 {
190 	if (fi->fib_type != RTN_UNICAST)
191 		return false;
192 
193 	return !__prestera_fi_is_direct(fi);
194 }
195 
196 static bool __prestera_fi6_is_direct(struct fib6_info *fi)
197 {
198 	if (!fi->fib6_nh->nh_common.nhc_gw_family)
199 		return true;
200 
201 	return false;
202 }
203 
204 static bool prestera_fi6_is_direct(struct fib6_info *fi)
205 {
206 	if (fi->fib6_type != RTN_UNICAST)
207 		return false;
208 
209 	return __prestera_fi6_is_direct(fi);
210 }
211 
212 static bool prestera_fi6_is_nh(struct fib6_info *fi)
213 {
214 	if (fi->fib6_type != RTN_UNICAST)
215 		return false;
216 
217 	return !__prestera_fi6_is_direct(fi);
218 }
219 
220 static bool prestera_fib_info_is_direct(struct fib_notifier_info *info)
221 {
222 	struct fib6_entry_notifier_info *fen6_info =
223 		container_of(info, struct fib6_entry_notifier_info, info);
224 	struct fib_entry_notifier_info *fen_info =
225 		container_of(info, struct fib_entry_notifier_info, info);
226 
227 	if (info->family == AF_INET)
228 		return prestera_fi_is_direct(fen_info->fi);
229 	else
230 		return prestera_fi6_is_direct(fen6_info->rt);
231 }
232 
233 static bool prestera_fib_info_is_nh(struct fib_notifier_info *info)
234 {
235 	struct fib6_entry_notifier_info *fen6_info =
236 		container_of(info, struct fib6_entry_notifier_info, info);
237 	struct fib_entry_notifier_info *fen_info =
238 		container_of(info, struct fib_entry_notifier_info, info);
239 
240 	if (info->family == AF_INET)
241 		return prestera_fi_is_nh(fen_info->fi);
242 	else
243 		return prestera_fi6_is_nh(fen6_info->rt);
244 }
245 
246 /* must be called with rcu_read_lock() */
247 static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id,
248 					__be32 *addr)
249 {
250 	struct flowi4 fl4;
251 
252 	/* TODO: walkthrough appropriate tables in kernel
253 	 * to know if the same prefix exists in several tables
254 	 */
255 	memset(&fl4, 0, sizeof(fl4));
256 	fl4.daddr = *addr;
257 	return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */);
258 }
259 
260 static bool
261 __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
262 				       struct net_device *dev)
263 {
264 	struct fib_nh *fib_nh;
265 	struct fib_result res;
266 	bool reachable;
267 
268 	reachable = false;
269 
270 	if (!prestera_util_kern_get_route(&res, tb_id, addr))
271 		if (prestera_fi_is_direct(res.fi)) {
272 			fib_nh = fib_info_nh(res.fi, 0);
273 			if (dev == fib_nh->fib_nh_dev)
274 				reachable = true;
275 		}
276 
277 	return reachable;
278 }
279 
280 /* Check if neigh route is reachable */
281 static bool
282 prestera_util_kern_n_is_reachable(u32 tb_id,
283 				  struct prestera_ip_addr *addr,
284 				  struct net_device *dev)
285 {
286 	if (addr->v == PRESTERA_IPV4)
287 		return __prestera_util_kern_n_is_reachable_v4(tb_id,
288 							      &addr->u.ipv4,
289 							      dev);
290 	else
291 		return false;
292 }
293 
294 static void prestera_util_kern_set_neigh_offload(struct neighbour *n,
295 						 bool offloaded)
296 {
297 	if (offloaded)
298 		n->flags |= NTF_OFFLOADED;
299 	else
300 		n->flags &= ~NTF_OFFLOADED;
301 }
302 
303 static void
304 prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap)
305 {
306 	if (offloaded)
307 		nhc->nhc_flags |= RTNH_F_OFFLOAD;
308 	else
309 		nhc->nhc_flags &= ~RTNH_F_OFFLOAD;
310 
311 	if (trap)
312 		nhc->nhc_flags |= RTNH_F_TRAP;
313 	else
314 		nhc->nhc_flags &= ~RTNH_F_TRAP;
315 }
316 
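/* Return the n-th fib_nh_common of a notifier info: for IPv4 this walks the
 * fib_info nexthops, for IPv6 the first nexthop comes from the route itself
 * and the rest from its fib6_siblings list.
 */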
317 static struct fib_nh_common *
318 prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
319 {
320 	struct fib6_entry_notifier_info *fen6_info;
321 	struct fib_entry_notifier_info *fen4_info;
322 	struct fib6_info *iter;
323 
324 	if (info->family == AF_INET) {
325 		fen4_info = container_of(info, struct fib_entry_notifier_info,
326 					 info);
327 		return &fib_info_nh(fen4_info->fi, n)->nh_common;
328 	} else if (info->family == AF_INET6) {
329 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
330 					 info);
331 		if (!n)
332 			return &fen6_info->rt->fib6_nh->nh_common;
333 
334 		list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
335 				    fib6_siblings) {
336 			if (!--n)
337 				return &iter->fib6_nh->nh_common;
338 		}
339 	}
340 
341 	/* If the family is incorrect, then the callers have a BUG.
342 	 * If the requested index was not found, that is also a bug: a valid
343 	 * index must be produced by the nhs helper, which checks list length.
344 	 */
345 	WARN(1, "Invalid parameters passed to %s n=%d i=%p",
346 	     __func__, n, info);
347 	return NULL;
348 }
349 
350 static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info)
351 {
352 	struct fib6_entry_notifier_info *fen6_info;
353 	struct fib_entry_notifier_info *fen4_info;
354 
355 	if (info->family == AF_INET) {
356 		fen4_info = container_of(info, struct fib_entry_notifier_info,
357 					 info);
358 		return fib_info_num_path(fen4_info->fi);
359 	} else if (info->family == AF_INET6) {
360 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
361 					 info);
362 		return fen6_info->rt->fib6_nsiblings + 1;
363 	}
364 
365 	return 0;
366 }
367 
368 static unsigned char
369 prestera_kern_fib_info_type(struct fib_notifier_info *info)
370 {
371 	struct fib6_entry_notifier_info *fen6_info;
372 	struct fib_entry_notifier_info *fen4_info;
373 
374 	if (info->family == AF_INET) {
375 		fen4_info = container_of(info, struct fib_entry_notifier_info,
376 					 info);
377 		return fen4_info->fi->fib_type;
378 	} else if (info->family == AF_INET6) {
379 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
380 					 info);
381 		/* TODO: ECMP in ipv6 is represented as several routes, each
382 		 * with a single nh.
383 		 */
384 		return fen6_info->rt->fib6_type;
385 	}
386 
387 	return RTN_UNSPEC;
388 }
389 
390 /* A uc_nh route whose key equals its single nexthop is treated as a neighbour route */
391 static bool
392 prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node)
393 {
394 	if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH)
395 		return false;
396 
397 	if (fib_node->info.nh_grp->nh_neigh_head[1].neigh)
398 		return false;
399 
400 	if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh)
401 		return false;
402 
403 	if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr,
404 		   &fib_node->key.addr, sizeof(struct prestera_ip_addr)))
405 		return false;
406 
407 	return true;
408 }
409 
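/* Map a net_device to a prestera interface type: vlan-over-bridge and bridge
 * masters are VID interfaces, LAG masters are LAG interfaces, macvlan is
 * resolved through its lower device, everything else is a port.
 */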
410 static int prestera_dev_if_type(const struct net_device *dev)
411 {
412 	struct macvlan_dev *vlan;
413 
414 	if (is_vlan_dev(dev) &&
415 	    netif_is_bridge_master(vlan_dev_real_dev(dev))) {
416 		return PRESTERA_IF_VID_E;
417 	} else if (netif_is_bridge_master(dev)) {
418 		return PRESTERA_IF_VID_E;
419 	} else if (netif_is_lag_master(dev)) {
420 		return PRESTERA_IF_LAG_E;
421 	} else if (netif_is_macvlan(dev)) {
422 		vlan = netdev_priv(dev);
423 		return prestera_dev_if_type(vlan->lowerdev);
424 	} else {
425 		return PRESTERA_IF_PORT_E;
426 	}
427 }
428 
429 static int
430 prestera_neigh_iface_init(struct prestera_switch *sw,
431 			  struct prestera_iface *iface,
432 			  struct neighbour *n)
433 {
434 	struct prestera_port *port;
435 
436 	iface->vlan_id = 0; /* TODO: vlan egress */
437 	iface->type = prestera_dev_if_type(n->dev);
438 	if (iface->type != PRESTERA_IF_PORT_E)
439 		return -EINVAL;
440 
441 	if (!prestera_netdev_check(n->dev))
442 		return -EINVAL;
443 
444 	port = netdev_priv(n->dev);
445 	iface->dev_port.hw_dev_num = port->dev_id;
446 	iface->dev_port.port_num = port->hw_id;
447 
448 	return 0;
449 }
450 
451 static struct prestera_kern_neigh_cache *
452 prestera_kern_neigh_cache_find(struct prestera_switch *sw,
453 			       struct prestera_kern_neigh_cache_key *key)
454 {
455 	struct prestera_kern_neigh_cache *n_cache;
456 
457 	n_cache =
458 	 rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
459 				__prestera_kern_neigh_cache_ht_params);
460 	return IS_ERR(n_cache) ? NULL : n_cache;
461 }
462 
463 static void
464 __prestera_kern_neigh_cache_destruct(struct prestera_switch *sw,
465 				     struct prestera_kern_neigh_cache *n_cache)
466 {
467 	dev_put(n_cache->key.dev);
468 }
469 
470 static void
471 __prestera_kern_neigh_cache_destroy(struct prestera_switch *sw,
472 				    struct prestera_kern_neigh_cache *n_cache)
473 {
474 	rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht,
475 			       &n_cache->ht_node,
476 			       __prestera_kern_neigh_cache_ht_params);
477 	__prestera_kern_neigh_cache_destruct(sw, n_cache);
478 	kfree(n_cache);
479 }
480 
481 static struct prestera_kern_neigh_cache *
482 __prestera_kern_neigh_cache_create(struct prestera_switch *sw,
483 				   struct prestera_kern_neigh_cache_key *key)
484 {
485 	struct prestera_kern_neigh_cache *n_cache;
486 	int err;
487 
488 	n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL);
489 	if (!n_cache)
490 		goto err_kzalloc;
491 
492 	memcpy(&n_cache->key, key, sizeof(*key));
493 	dev_hold(n_cache->key.dev);
494 
495 	INIT_LIST_HEAD(&n_cache->kern_fib_cache_list);
496 	err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht,
497 				     &n_cache->ht_node,
498 				     __prestera_kern_neigh_cache_ht_params);
499 	if (err)
500 		goto err_ht_insert;
501 
502 	return n_cache;
503 
504 err_ht_insert:
505 	dev_put(n_cache->key.dev);
506 	kfree(n_cache);
507 err_kzalloc:
508 	return NULL;
509 }
510 
511 static struct prestera_kern_neigh_cache *
512 prestera_kern_neigh_cache_get(struct prestera_switch *sw,
513 			      struct prestera_kern_neigh_cache_key *key)
514 {
515 	struct prestera_kern_neigh_cache *n_cache;
516 
517 	n_cache = prestera_kern_neigh_cache_find(sw, key);
518 	if (!n_cache)
519 		n_cache = __prestera_kern_neigh_cache_create(sw, key);
520 
521 	return n_cache;
522 }
523 
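/* Release a neigh cache entry: it is destroyed once it is neither present in
 * the kernel nor referenced by any fib cache entry.
 */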
524 static struct prestera_kern_neigh_cache *
525 prestera_kern_neigh_cache_put(struct prestera_switch *sw,
526 			      struct prestera_kern_neigh_cache *n_cache)
527 {
528 	if (!n_cache->in_kernel &&
529 	    list_empty(&n_cache->kern_fib_cache_list)) {
530 		__prestera_kern_neigh_cache_destroy(sw, n_cache);
531 		return NULL;
532 	}
533 
534 	return n_cache;
535 }
536 
537 static struct prestera_kern_fib_cache *
538 prestera_kern_fib_cache_find(struct prestera_switch *sw,
539 			     struct prestera_kern_fib_cache_key *key)
540 {
541 	struct prestera_kern_fib_cache *fib_cache;
542 
543 	fib_cache =
544 	 rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
545 				__prestera_kern_fib_cache_ht_params);
546 	return fib_cache;
547 }
548 
549 static void
550 __prestera_kern_fib_cache_destruct(struct prestera_switch *sw,
551 				   struct prestera_kern_fib_cache *fib_cache)
552 {
553 	struct prestera_kern_neigh_cache *n_cache;
554 	int i;
555 
556 	for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) {
557 		n_cache = fib_cache->kern_neigh_cache_head[i].n_cache;
558 		if (n_cache) {
559 			list_del(&fib_cache->kern_neigh_cache_head[i].head);
560 			prestera_kern_neigh_cache_put(sw, n_cache);
561 		}
562 	}
563 
564 	fib_info_put(fib_cache->fen4_info.fi);
565 }
566 
567 static void
568 prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
569 				struct prestera_kern_fib_cache *fib_cache)
570 {
571 	rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
572 			       &fib_cache->ht_node,
573 			       __prestera_kern_fib_cache_ht_params);
574 	__prestera_kern_fib_cache_destruct(sw, fib_cache);
575 	kfree(fib_cache);
576 }
577 
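/* For a nexthop (gatewayed) route, bind a neigh cache entry to each gateway
 * and link it into kern_neigh_cache_head[]. Failures are not propagated -
 * the route is then programmed without a nexthop group (as a trap).
 */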
578 static int
579 __prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw,
580 				     struct prestera_kern_fib_cache *fc)
581 {
582 	struct prestera_kern_neigh_cache_key nc_key;
583 	struct prestera_kern_neigh_cache *n_cache;
584 	struct fib_nh_common *nhc;
585 	int i, nhs, err;
586 
587 	if (!prestera_fib_info_is_nh(&fc->info))
588 		return 0;
589 
590 	nhs = prestera_kern_fib_info_nhs(&fc->info);
591 	if (nhs > PRESTERA_NHGR_SIZE_MAX)
592 		return 0;
593 
594 	for (i = 0; i < nhs; i++) {
595 		nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i);
596 		err = prestera_util_nhc2nc_key(sw, nhc, &nc_key);
597 		if (err)
598 			return 0;
599 
600 		n_cache = prestera_kern_neigh_cache_get(sw, &nc_key);
601 		if (!n_cache)
602 			return 0;
603 
604 		fc->kern_neigh_cache_head[i].this = fc;
605 		fc->kern_neigh_cache_head[i].n_cache = n_cache;
606 		list_add(&fc->kern_neigh_cache_head[i].head,
607 			 &n_cache->kern_fib_cache_list);
608 	}
609 
610 	return 0;
611 }
612 
613 /* Operations on fi (offload, etc) must be wrapped in utils.
614  * This function only creates the storage.
615  */
616 static struct prestera_kern_fib_cache *
617 prestera_kern_fib_cache_create(struct prestera_switch *sw,
618 			       struct prestera_kern_fib_cache_key *key,
619 			       struct fib_notifier_info *info)
620 {
621 	struct fib_entry_notifier_info *fen_info =
622 		container_of(info, struct fib_entry_notifier_info, info);
623 	struct prestera_kern_fib_cache *fib_cache;
624 	int err;
625 
626 	fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
627 	if (!fib_cache)
628 		goto err_kzalloc;
629 
630 	memcpy(&fib_cache->key, key, sizeof(*key));
631 	fib_info_hold(fen_info->fi);
632 	memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info));
633 
634 	err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
635 				     &fib_cache->ht_node,
636 				     __prestera_kern_fib_cache_ht_params);
637 	if (err)
638 		goto err_ht_insert;
639 
640 	/* Handle nexthops; an error here is not critical - the route stays
641 	 * cached and can still be offloaded as a trap entry.
642 	 */
643 	__prestera_kern_fib_cache_create_nhs(sw, fib_cache);
644 
646 	return fib_cache;
647 
648 err_ht_insert:
649 	fib_info_put(fen_info->fi);
650 	kfree(fib_cache);
651 err_kzalloc:
652 	return NULL;
653 }
654 
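/* Sync the RTNH_F_OFFLOAD/RTNH_F_TRAP flags on the kernel nexthops of a
 * route; when a neigh cache entry is given, only the matching nexthop is
 * updated.
 */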
655 static void
656 __prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw,
657 				    struct prestera_kern_fib_cache *fibc,
658 				    struct prestera_kern_neigh_cache *nc,
659 				    bool offloaded, bool trap)
660 {
661 	struct fib_nh_common *nhc;
662 	int i, nhs;
663 
664 	nhs = prestera_kern_fib_info_nhs(&fibc->info);
665 	for (i = 0; i < nhs; i++) {
666 		nhc = prestera_kern_fib_info_nhc(&fibc->info, i);
667 		if (!nc) {
668 			prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
669 			continue;
670 		}
671 
672 		if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) {
673 			prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
674 			break;
675 		}
676 	}
677 }
678 
679 static void
680 __prestera_k_arb_n_offload_set(struct prestera_switch *sw,
681 			       struct prestera_kern_neigh_cache *nc,
682 			       bool offloaded)
683 {
684 	struct neighbour *n;
685 
686 	n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
687 			 nc->key.dev);
688 	if (!n)
689 		return;
690 
691 	prestera_util_kern_set_neigh_offload(n, offloaded);
692 	neigh_release(n);
693 }
694 
695 static void
696 __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
697 				     struct prestera_kern_fib_cache *fc,
698 				     bool fail, bool offload, bool trap)
699 {
700 	struct fib_rt_info fri;
701 
702 	switch (fc->key.addr.v) {
703 	case PRESTERA_IPV4:
704 		fri.fi = fc->fen4_info.fi;
705 		fri.tb_id = fc->key.kern_tb_id;
706 		fri.dst = fc->key.addr.u.ipv4;
707 		fri.dst_len = fc->key.prefix_len;
708 		fri.dscp = fc->fen4_info.dscp;
709 		fri.type = fc->fen4_info.type;
710 		/* flags begin */
711 		fri.offload = offload;
712 		fri.trap = trap;
713 		fri.offload_failed = fail;
714 		/* flags end */
715 		fib_alias_hw_flags_set(&init_net, &fri);
716 		return;
717 	case PRESTERA_IPV6:
718 		/* TODO */
719 		return;
720 	}
721 }
722 
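/* Install or remove the host LPM entry (full-length prefix) that forwards
 * traffic for a resolved neighbour through a single-nexthop group, taking
 * care not to clash with a fib entry that already uses the same prefix.
 */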
723 static void
724 __prestera_k_arb_n_lpm_set(struct prestera_switch *sw,
725 			   struct prestera_kern_neigh_cache *n_cache,
726 			   bool enabled)
727 {
728 	struct prestera_nexthop_group_key nh_grp_key;
729 	struct prestera_kern_fib_cache_key fc_key;
730 	struct prestera_kern_fib_cache *fib_cache;
731 	struct prestera_fib_node *fib_node;
732 	struct prestera_fib_key fib_key;
733 
734 	/* Exception for an fc with a full-length prefix: the LPM entry is already used by the fib */
735 	memset(&fc_key, 0, sizeof(fc_key));
736 	fc_key.addr = n_cache->key.addr;
737 	fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
738 	/* It would be better to use the tb_id of the route that points to this
739 	 * neighbour, but instead it is taken from the rif, and the rif is
740 	 * inconsistent here: in_rif and out_rif should be separated.
741 	 * Also note: each fib that points to this neigh should get a separate
742 	 *            neigh lpm entry (one for each ingress vr).
743 	 */
744 	fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev);
745 	fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
746 	memset(&fib_key, 0, sizeof(fib_key));
747 	fib_key.addr = n_cache->key.addr;
748 	fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
749 	fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id);
750 	fib_node = prestera_fib_node_find(sw, &fib_key);
751 	if (!fib_cache || !fib_cache->reachable) {
752 		if (!enabled && fib_node) {
753 			if (prestera_fib_node_util_is_neighbour(fib_node))
754 				prestera_fib_node_destroy(sw, fib_node);
755 			return;
756 		}
757 	}
758 
759 	if (enabled && !fib_node) {
760 		memset(&nh_grp_key, 0, sizeof(nh_grp_key));
761 		prestera_util_nc_key2nh_key(&n_cache->key,
762 					    &nh_grp_key.neigh[0]);
763 		fib_node = prestera_fib_node_create(sw, &fib_key,
764 						    PRESTERA_FIB_TYPE_UC_NH,
765 						    &nh_grp_key);
766 		if (!fib_node)
767 			pr_err("%s failed ip=%pI4n", "prestera_fib_node_create",
768 			       &fib_key.addr.u.ipv4);
769 		return;
770 	}
771 }
772 
773 static void
774 __prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw,
775 				   struct prestera_kern_neigh_cache *nc)
776 {
777 	if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev),
778 					      &nc->key.addr, nc->key.dev))
779 		nc->reachable = true;
780 	else
781 		nc->reachable = false;
782 }
783 
784 /* Kernel neighbour -> neigh_cache info */
785 static void
786 __prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw,
787 				 struct prestera_kern_neigh_cache *nc)
788 {
789 	struct neighbour *n;
790 	int err;
791 
792 	memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info));
793 	n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev);
794 	if (!n)
795 		goto out;
796 
797 	read_lock_bh(&n->lock);
798 	if (n->nud_state & NUD_VALID && !n->dead) {
799 		err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface,
800 						n);
801 		if (err)
802 			goto n_read_out;
803 
804 		memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN);
805 		nc->nh_neigh_info.connected = true;
806 	}
807 n_read_out:
808 	read_unlock_bh(&n->lock);
809 out:
810 	nc->in_kernel = nc->nh_neigh_info.connected;
811 	if (n)
812 		neigh_release(n);
813 }
814 
815 /* neigh_cache info -> lpm update */
816 static void
817 __prestera_k_arb_nc_apply(struct prestera_switch *sw,
818 			  struct prestera_kern_neigh_cache *nc)
819 {
820 	struct prestera_kern_neigh_cache_head *nhead;
821 	struct prestera_nh_neigh_key nh_key;
822 	struct prestera_nh_neigh *nh_neigh;
823 	int err;
824 
825 	__prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel);
826 	__prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel);
827 
828 	prestera_util_nc_key2nh_key(&nc->key, &nh_key);
829 	nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
830 	if (!nh_neigh)
831 		goto out;
832 
833 	/* Do hw update only if something changed to prevent nh flap */
834 	if (memcmp(&nc->nh_neigh_info, &nh_neigh->info,
835 		   sizeof(nh_neigh->info))) {
836 		memcpy(&nh_neigh->info, &nc->nh_neigh_info,
837 		       sizeof(nh_neigh->info));
838 		err = prestera_nh_neigh_set(sw, nh_neigh);
839 		if (err) {
840 			pr_err("%s failed with err=%d ip=%pI4n mac=%pM",
841 			       "prestera_nh_neigh_set", err,
842 			       &nh_neigh->key.addr.u.ipv4,
843 			       &nh_neigh->info.ha[0]);
844 			goto out;
845 		}
846 	}
847 
848 out:
849 	list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) {
850 		__prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc,
851 						    nc->in_kernel,
852 						    !nc->in_kernel);
853 	}
854 }
855 
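/* Translate a cached kernel route into lpm_info: select the prestera fib
 * type (UC_NH, TRAP or DROP), fill the nexthop group key and build the hw
 * fib key with the fixed table id.
 */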
856 static int
857 __prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
858 				     struct prestera_kern_fib_cache *fc)
859 {
860 	struct fib_nh_common *nhc;
861 	int nh_cnt;
862 
863 	memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
864 
865 	switch (prestera_kern_fib_info_type(&fc->info)) {
866 	case RTN_UNICAST:
867 		if (prestera_fib_info_is_direct(&fc->info) &&
868 		    fc->key.prefix_len ==
869 			PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) {
870 			/* This is a special case: when the prefix length is
871 			 * 32, a direct route conflicts in the lpm - once the
872 			 * TRAP entry is added, there is no room left for the
873 			 * neighbour entry. So represent a /32 direct route as
874 			 * NH, and the neighbour will be resolved as the
875 			 * nexthop of this route.
876 			 */
877 			nhc = prestera_kern_fib_info_nhc(&fc->info, 0);
878 			fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH;
879 			fc->lpm_info.nh_grp_key.neigh[0].addr =
880 				fc->key.addr;
881 			fc->lpm_info.nh_grp_key.neigh[0].rif =
882 				nhc->nhc_dev;
883 
884 			break;
885 		}
886 
887 		/* The nh_grp_key could also be taken from fi, which would be
888 		 * more correct: the cache does not always match what is actually
889 		 * written to the lpm. For now the nh cache is used here as well.
890 		 */
891 		for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
892 			if (!fc->kern_neigh_cache_head[nh_cnt].n_cache)
893 				break;
894 
895 			fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr =
896 				fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr;
897 			fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif =
898 				fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev;
899 		}
900 
901 		fc->lpm_info.fib_type = nh_cnt ?
902 					PRESTERA_FIB_TYPE_UC_NH :
903 					PRESTERA_FIB_TYPE_TRAP;
904 		break;
905 	/* Unsupported. Leave it for kernel: */
906 	case RTN_BROADCAST:
907 	case RTN_MULTICAST:
908 	/* Routes we must trap by design: */
909 	case RTN_LOCAL:
910 	case RTN_UNREACHABLE:
911 	case RTN_PROHIBIT:
912 		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
913 		break;
914 	case RTN_BLACKHOLE:
915 		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
916 		break;
917 	default:
918 		dev_err(sw->dev->dev, "Unsupported fib_type");
919 		return -EOPNOTSUPP;
920 	}
921 
922 	fc->lpm_info.fib_key.addr = fc->key.addr;
923 	fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
924 	fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);
925 
926 	return 0;
927 }
928 
929 static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
930 				      struct prestera_kern_fib_cache *fc,
931 				      bool enabled)
932 {
933 	struct prestera_fib_node *fib_node;
934 
935 	fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
936 	if (fib_node)
937 		prestera_fib_node_destroy(sw, fib_node);
938 
939 	if (!enabled)
940 		return 0;
941 
942 	fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
943 					    fc->lpm_info.fib_type,
944 					    &fc->lpm_info.nh_grp_key);
945 
946 	if (!fib_node) {
947 		dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
948 			&fc->key.addr.u.ipv4, fc->key.prefix_len,
949 			fc->key.kern_tb_id);
950 		return -ENOENT;
951 	}
952 
953 	return 0;
954 }
955 
956 static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
957 				     struct prestera_kern_fib_cache *fc)
958 {
959 	int err;
960 
961 	err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
962 	if (err)
963 		return err;
964 
965 	err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
966 	if (err) {
967 		__prestera_k_arb_fib_lpm_offload_set(sw, fc,
968 						     true, false, false);
969 		return err;
970 	}
971 
972 	switch (fc->lpm_info.fib_type) {
973 	case PRESTERA_FIB_TYPE_UC_NH:
974 		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
975 						     fc->reachable, false);
976 		break;
977 	case PRESTERA_FIB_TYPE_TRAP:
978 		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
979 						     false, fc->reachable);
980 		break;
981 	case PRESTERA_FIB_TYPE_DROP:
982 		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
983 						     fc->reachable);
984 		break;
985 	case PRESTERA_FIB_TYPE_INVALID:
986 		break;
987 	}
988 
989 	return 0;
990 }
991 
992 static struct prestera_kern_fib_cache *
993 __prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
994 				   struct prestera_kern_fib_cache *fc)
995 {
996 	struct prestera_kern_fib_cache_key fc_key;
997 	struct prestera_kern_fib_cache *rfc;
998 
999 	/* TODO: parse kernel rules */
1000 	rfc = NULL;
1001 	if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
1002 		memcpy(&fc_key, &fc->key, sizeof(fc_key));
1003 		fc_key.kern_tb_id = RT_TABLE_MAIN;
1004 		rfc = prestera_kern_fib_cache_find(sw, &fc_key);
1005 	}
1006 
1007 	return rfc;
1008 }
1009 
1010 static struct prestera_kern_fib_cache *
1011 __prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
1012 				     struct prestera_kern_fib_cache *fc)
1013 {
1014 	struct prestera_kern_fib_cache_key fc_key;
1015 	struct prestera_kern_fib_cache *rfc;
1016 
1017 	/* TODO: parse kernel rules */
1018 	rfc = NULL;
1019 	if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
1020 		memcpy(&fc_key, &fc->key, sizeof(fc_key));
1021 		fc_key.kern_tb_id = RT_TABLE_LOCAL;
1022 		rfc = prestera_kern_fib_cache_find(sw, &fc_key);
1023 	}
1024 
1025 	return rfc;
1026 }
1027 
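/* Query hw activity for a cached neighbour; if hw has seen traffic (or the
 * neighbour is missing from the kernel while implicit resolving is enabled),
 * poke the kernel with neigh_event_send() to (re)resolve it.
 */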
1028 static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw,
1029 					  struct prestera_kern_neigh_cache *nc)
1030 {
1031 	struct prestera_nh_neigh_key nh_key;
1032 	struct prestera_nh_neigh *nh_neigh;
1033 	struct neighbour *n;
1034 	bool hw_active;
1035 
1036 	prestera_util_nc_key2nh_key(&nc->key, &nh_key);
1037 	nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
1038 	if (!nh_neigh) {
1039 		pr_err("Cannot find nh_neigh for cached %pI4n",
1040 		       &nc->key.addr.u.ipv4);
1041 		return;
1042 	}
1043 
1044 	hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh);
1045 
1046 #ifdef PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
1047 	if (!hw_active && nc->in_kernel)
1048 		goto out;
1049 #else /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
1050 	if (!hw_active)
1051 		goto out;
1052 #endif /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
1053 
1054 	if (nc->key.addr.v == PRESTERA_IPV4) {
1055 		n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
1056 				 nc->key.dev);
1057 		if (!n)
1058 			n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4,
1059 					 nc->key.dev);
1060 	} else {
1061 		n = NULL;
1062 	}
1063 
1064 	if (!IS_ERR(n) && n) {
1065 		neigh_event_send(n, NULL);
1066 		neigh_release(n);
1067 	} else {
1068 		pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4);
1069 	}
1070 
1071 out:
1072 	return;
1073 }
1074 
1075 /* Propagate hw state to kernel */
1076 static void prestera_k_arb_hw_evt(struct prestera_switch *sw)
1077 {
1078 	struct prestera_kern_neigh_cache *n_cache;
1079 	struct rhashtable_iter iter;
1080 
1081 	rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
1082 	rhashtable_walk_start(&iter);
1083 	while (1) {
1084 		n_cache = rhashtable_walk_next(&iter);
1085 
1086 		if (!n_cache)
1087 			break;
1088 
1089 		if (IS_ERR(n_cache))
1090 			continue;
1091 
1092 		rhashtable_walk_stop(&iter);
1093 		__prestera_k_arb_hw_state_upd(sw, n_cache);
1094 		rhashtable_walk_start(&iter);
1095 	}
1096 	rhashtable_walk_stop(&iter);
1097 	rhashtable_walk_exit(&iter);
1098 }
1099 
1100 /* Propagate kernel event to hw */
1101 static void prestera_k_arb_n_evt(struct prestera_switch *sw,
1102 				 struct neighbour *n)
1103 {
1104 	struct prestera_kern_neigh_cache_key n_key;
1105 	struct prestera_kern_neigh_cache *n_cache;
1106 	int err;
1107 
1108 	err = prestera_util_neigh2nc_key(sw, n, &n_key);
1109 	if (err)
1110 		return;
1111 
1112 	n_cache = prestera_kern_neigh_cache_find(sw, &n_key);
1113 	if (!n_cache) {
1114 		n_cache = prestera_kern_neigh_cache_get(sw, &n_key);
1115 		if (!n_cache)
1116 			return;
1117 		__prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
1118 	}
1119 
1120 	__prestera_k_arb_nc_kern_n_fetch(sw, n_cache);
1121 	__prestera_k_arb_nc_apply(sw, n_cache);
1122 
1123 	prestera_kern_neigh_cache_put(sw, n_cache);
1124 }
1125 
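/* A fib change can alter which neighbours are reachable via direct routes,
 * so re-fetch the fib view of every cached neighbour and re-apply it.
 */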
1126 static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw)
1127 {
1128 	struct prestera_kern_neigh_cache *n_cache;
1129 	struct rhashtable_iter iter;
1130 
1131 	rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
1132 	rhashtable_walk_start(&iter);
1133 	while (1) {
1134 		n_cache = rhashtable_walk_next(&iter);
1135 
1136 		if (!n_cache)
1137 			break;
1138 
1139 		if (IS_ERR(n_cache))
1140 			continue;
1141 
1142 		rhashtable_walk_stop(&iter);
1143 		__prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
1144 		__prestera_k_arb_nc_apply(sw, n_cache);
1145 		rhashtable_walk_start(&iter);
1146 	}
1147 	rhashtable_walk_stop(&iter);
1148 	rhashtable_walk_exit(&iter);
1149 }
1150 
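/* Handle a kernel FIB replace/delete: tear down the old cache entry, rebuild
 * it on replace, re-evaluate RT_TABLE_LOCAL/RT_TABLE_MAIN overlaps (only the
 * top-most entry stays reachable) and finally refresh the neighbour caches.
 */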
1151 static int
1152 prestera_k_arb_fib_evt(struct prestera_switch *sw,
1153 		       bool replace, /* replace or del */
1154 		       struct fib_notifier_info *info)
1155 {
1156 	struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
1157 	struct prestera_kern_fib_cache_key fc_key;
1158 	struct prestera_kern_fib_cache *fib_cache;
1159 	int err;
1160 
1161 	prestera_util_fen_info2fib_cache_key(info, &fc_key);
1162 	fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
1163 	if (fib_cache) {
1164 		fib_cache->reachable = false;
1165 		err = __prestera_k_arb_fc_apply(sw, fib_cache);
1166 		if (err)
1167 			dev_err(sw->dev->dev,
1168 				"Applying destroyed fib_cache failed");
1169 
1170 		bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
1171 		tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
1172 		if (!tfib_cache && bfib_cache) {
1173 			bfib_cache->reachable = true;
1174 			err = __prestera_k_arb_fc_apply(sw, bfib_cache);
1175 			if (err)
1176 				dev_err(sw->dev->dev,
1177 					"Applying fib_cache btm failed");
1178 		}
1179 
1180 		prestera_kern_fib_cache_destroy(sw, fib_cache);
1181 	}
1182 
1183 	if (replace) {
1184 		fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info);
1185 		if (!fib_cache) {
1186 			dev_err(sw->dev->dev, "fib_cache == NULL");
1187 			return -ENOENT;
1188 		}
1189 
1190 		bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
1191 		tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
1192 		if (!tfib_cache)
1193 			fib_cache->reachable = true;
1194 
1195 		if (bfib_cache) {
1196 			bfib_cache->reachable = false;
1197 			err = __prestera_k_arb_fc_apply(sw, bfib_cache);
1198 			if (err)
1199 				dev_err(sw->dev->dev,
1200 					"Applying fib_cache btm failed");
1201 		}
1202 
1203 		err = __prestera_k_arb_fc_apply(sw, fib_cache);
1204 		if (err)
1205 			dev_err(sw->dev->dev, "Applying fib_cache failed");
1206 	}
1207 
1208 	/* Update all neighs to resolve overlaps and apply the related entries */
1209 	__prestera_k_arb_fib_evt2nc(sw);
1210 
1211 	return 0;
1212 }
1213 
1214 static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg)
1215 {
1216 	struct prestera_kern_neigh_cache *n_cache = ptr;
1217 	struct prestera_switch *sw = arg;
1218 
1219 	if (!list_empty(&n_cache->kern_fib_cache_list)) {
1220 		WARN_ON(1); /* BUG */
1221 		return;
1222 	}
1223 	__prestera_k_arb_n_offload_set(sw, n_cache, false);
1224 	n_cache->in_kernel = false;
1225 	/* No need to destroy lpm.
1226 	 * It will be aborted by destroy_ht
1227 	 */
1228 	__prestera_kern_neigh_cache_destruct(sw, n_cache);
1229 	kfree(n_cache);
1230 }
1231 
1232 static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg)
1233 {
1234 	struct prestera_kern_fib_cache *fib_cache = ptr;
1235 	struct prestera_switch *sw = arg;
1236 
1237 	__prestera_k_arb_fib_lpm_offload_set(sw, fib_cache,
1238 					     false, false,
1239 					     false);
1240 	__prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL,
1241 					    false, false);
1242 	/* No need to destroy lpm.
1243 	 * It will be aborted by destroy_ht
1244 	 */
1245 	__prestera_kern_fib_cache_destruct(sw, fib_cache);
1246 	kfree(fib_cache);
1247 }
1248 
1249 static void prestera_k_arb_abort(struct prestera_switch *sw)
1250 {
1251 	/* Remove all arbiter entries and the related hw objects. */
1252 	/* Sequence:
1253 	 *   1) Clear the arbiter tables, but don't touch hw
1254 	 *   2) Clear hw
1255 	 * This approach is used because an arbiter object is not directly
1256 	 * mapped to hw, so deleting one arbiter object may even lead to the
1257 	 * creation of a hw object (e.g. in the case of overlapped routes).
1258 	 */
1259 	rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht,
1260 				    __prestera_k_arb_abort_fib_ht_cb,
1261 				    sw);
1262 	rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht,
1263 				    __prestera_k_arb_abort_neigh_ht_cb,
1264 				    sw);
1265 }
1266 
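/* Create the port RIF on NETDEV_UP and destroy it on NETDEV_DOWN, holding a
 * reference on the net_device while the RIF exists.
 */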
1267 static int __prestera_inetaddr_port_event(struct net_device *port_dev,
1268 					  unsigned long event,
1269 					  struct netlink_ext_ack *extack)
1270 {
1271 	struct prestera_port *port = netdev_priv(port_dev);
1272 	struct prestera_rif_entry_key re_key = {};
1273 	struct prestera_rif_entry *re;
1274 	u32 kern_tb_id;
1275 	int err;
1276 
1277 	err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
1278 	if (err) {
1279 		NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix");
1280 		return err;
1281 	}
1282 
1283 	kern_tb_id = l3mdev_fib_table(port_dev);
1284 	re_key.iface.type = PRESTERA_IF_PORT_E;
1285 	re_key.iface.dev_port.hw_dev_num  = port->dev_id;
1286 	re_key.iface.dev_port.port_num  = port->hw_id;
1287 	re = prestera_rif_entry_find(port->sw, &re_key);
1288 
1289 	switch (event) {
1290 	case NETDEV_UP:
1291 		if (re) {
1292 			NL_SET_ERR_MSG_MOD(extack, "RIF already exists");
1293 			return -EEXIST;
1294 		}
1295 		re = prestera_rif_entry_create(port->sw, &re_key,
1296 					       prestera_fix_tb_id(kern_tb_id),
1297 					       port_dev->dev_addr);
1298 		if (!re) {
1299 			NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
1300 			return -EINVAL;
1301 		}
1302 		dev_hold(port_dev);
1303 		break;
1304 	case NETDEV_DOWN:
1305 		if (!re) {
1306 			NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
1307 			return -EEXIST;
1308 		}
1309 		prestera_rif_entry_destroy(port->sw, re);
1310 		dev_put(port_dev);
1311 		break;
1312 	}
1313 
1314 	return 0;
1315 }
1316 
1317 static int __prestera_inetaddr_event(struct prestera_switch *sw,
1318 				     struct net_device *dev,
1319 				     unsigned long event,
1320 				     struct netlink_ext_ack *extack)
1321 {
1322 	if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) ||
1323 	    netif_is_lag_port(dev))
1324 		return 0;
1325 
1326 	return __prestera_inetaddr_port_event(dev, event, extack);
1327 }
1328 
1329 static int __prestera_inetaddr_cb(struct notifier_block *nb,
1330 				  unsigned long event, void *ptr)
1331 {
1332 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
1333 	struct net_device *dev = ifa->ifa_dev->dev;
1334 	struct prestera_router *router = container_of(nb,
1335 						      struct prestera_router,
1336 						      inetaddr_nb);
1337 	struct in_device *idev;
1338 	int err = 0;
1339 
1340 	if (event != NETDEV_DOWN)
1341 		goto out;
1342 
1343 	/* Ignore unless this is the last address being removed */
1344 	idev = __in_dev_get_rtnl(dev);
1345 	if (idev && idev->ifa_list)
1346 		goto out;
1347 
1348 	err = __prestera_inetaddr_event(router->sw, dev, event, NULL);
1349 out:
1350 	return notifier_from_errno(err);
1351 }
1352 
1353 static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
1354 					unsigned long event, void *ptr)
1355 {
1356 	struct in_validator_info *ivi = (struct in_validator_info *)ptr;
1357 	struct net_device *dev = ivi->ivi_dev->dev;
1358 	struct prestera_router *router = container_of(nb,
1359 						      struct prestera_router,
1360 						      inetaddr_valid_nb);
1361 	struct in_device *idev;
1362 	int err = 0;
1363 
1364 	if (event != NETDEV_UP)
1365 		goto out;
1366 
1367 	/* Ignore unless this is the first address being added */
1368 	idev = __in_dev_get_rtnl(dev);
1369 	if (idev && idev->ifa_list)
1370 		goto out;
1371 
1372 	if (ipv4_is_multicast(ivi->ivi_addr)) {
1373 		NL_SET_ERR_MSG_MOD(ivi->extack,
1374 				   "Multicast addr on RIF is not supported");
1375 		err = -EINVAL;
1376 		goto out;
1377 	}
1378 
1379 	err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack);
1380 out:
1381 	return notifier_from_errno(err);
1382 }
1383 
1384 struct prestera_fib_event_work {
1385 	struct work_struct work;
1386 	struct prestera_switch *sw;
1387 	struct fib_entry_notifier_info fen_info;
1388 	unsigned long event;
1389 };
1390 
1391 static void __prestera_router_fib_event_work(struct work_struct *work)
1392 {
1393 	struct prestera_fib_event_work *fib_work =
1394 			container_of(work, struct prestera_fib_event_work, work);
1395 	struct prestera_switch *sw = fib_work->sw;
1396 	int err;
1397 
1398 	rtnl_lock();
1399 
1400 	switch (fib_work->event) {
1401 	case FIB_EVENT_ENTRY_REPLACE:
1402 		err = prestera_k_arb_fib_evt(sw, true,
1403 					     &fib_work->fen_info.info);
1404 		if (err)
1405 			goto err_out;
1406 
1407 		break;
1408 	case FIB_EVENT_ENTRY_DEL:
1409 		err = prestera_k_arb_fib_evt(sw, false,
1410 					     &fib_work->fen_info.info);
1411 		if (err)
1412 			goto err_out;
1413 
1414 		break;
1415 	}
1416 
1417 	goto out;
1418 
1419 err_out:
1420 	dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
1421 		&fib_work->fen_info.dst,
1422 		fib_work->fen_info.dst_len);
1423 out:
1424 	fib_info_put(fib_work->fen_info.fi);
1425 	rtnl_unlock();
1426 	kfree(fib_work);
1427 }
1428 
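/* FIB notifier callback: IPv4 replace/delete events are copied into a work
 * item (holding a fib_info reference) and processed asynchronously under
 * rtnl by __prestera_router_fib_event_work().
 */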
1429 /* Called with rcu_read_lock() */
1430 static int __prestera_router_fib_event(struct notifier_block *nb,
1431 				       unsigned long event, void *ptr)
1432 {
1433 	struct prestera_fib_event_work *fib_work;
1434 	struct fib_entry_notifier_info *fen_info;
1435 	struct fib_notifier_info *info = ptr;
1436 	struct prestera_router *router;
1437 
1438 	if (info->family != AF_INET)
1439 		return NOTIFY_DONE;
1440 
1441 	router = container_of(nb, struct prestera_router, fib_nb);
1442 
1443 	switch (event) {
1444 	case FIB_EVENT_ENTRY_REPLACE:
1445 	case FIB_EVENT_ENTRY_DEL:
1446 		fen_info = container_of(info, struct fib_entry_notifier_info,
1447 					info);
1448 		if (!fen_info->fi)
1449 			return NOTIFY_DONE;
1450 
1451 		fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
1452 		if (WARN_ON(!fib_work))
1453 			return NOTIFY_BAD;
1454 
1455 		fib_info_hold(fen_info->fi);
1456 		fib_work->fen_info = *fen_info;
1457 		fib_work->event = event;
1458 		fib_work->sw = router->sw;
1459 		INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
1460 		prestera_queue_work(&fib_work->work);
1461 		break;
1462 	default:
1463 		return NOTIFY_DONE;
1464 	}
1465 
1466 	return NOTIFY_DONE;
1467 }
1468 
1469 struct prestera_netevent_work {
1470 	struct work_struct work;
1471 	struct prestera_switch *sw;
1472 	struct neighbour *n;
1473 };
1474 
1475 static void prestera_router_neigh_event_work(struct work_struct *work)
1476 {
1477 	struct prestera_netevent_work *net_work =
1478 		container_of(work, struct prestera_netevent_work, work);
1479 	struct prestera_switch *sw = net_work->sw;
1480 	struct neighbour *n = net_work->n;
1481 
1482 	/* A neigh is not a hw-related object; it is stored only in the kernel, so rtnl is enough here */
1483 	rtnl_lock();
1484 
1485 	prestera_k_arb_n_evt(sw, n);
1486 
1487 	neigh_release(n);
1488 	rtnl_unlock();
1489 	kfree(net_work);
1490 }
1491 
1492 static int prestera_router_netevent_event(struct notifier_block *nb,
1493 					  unsigned long event, void *ptr)
1494 {
1495 	struct prestera_netevent_work *net_work;
1496 	struct prestera_router *router;
1497 	struct neighbour *n = ptr;
1498 
1499 	router = container_of(nb, struct prestera_router, netevent_nb);
1500 
1501 	switch (event) {
1502 	case NETEVENT_NEIGH_UPDATE:
1503 		if (n->tbl->family != AF_INET)
1504 			return NOTIFY_DONE;
1505 
1506 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
1507 		if (WARN_ON(!net_work))
1508 			return NOTIFY_BAD;
1509 
1510 		neigh_clone(n);
1511 		net_work->n = n;
1512 		net_work->sw = router->sw;
1513 		INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
1514 		prestera_queue_work(&net_work->work);
1515 	}
1516 
1517 	return NOTIFY_DONE;
1518 }
1519 
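/* Periodic worker: propagate hw neighbour activity to the kernel and re-arm
 * itself every PRESTERA_NH_PROBE_INTERVAL ms.
 */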
1520 static void prestera_router_update_neighs_work(struct work_struct *work)
1521 {
1522 	struct prestera_router *router;
1523 
1524 	router = container_of(work, struct prestera_router,
1525 			      neighs_update.dw.work);
1526 	rtnl_lock();
1527 
1528 	prestera_k_arb_hw_evt(router->sw);
1529 
1530 	rtnl_unlock();
1531 	prestera_queue_delayed_work(&router->neighs_update.dw,
1532 				    msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL));
1533 }
1534 
1535 static int prestera_neigh_work_init(struct prestera_switch *sw)
1536 {
1537 	INIT_DELAYED_WORK(&sw->router->neighs_update.dw,
1538 			  prestera_router_update_neighs_work);
1539 	prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0);
1540 	return 0;
1541 }
1542 
1543 static void prestera_neigh_work_fini(struct prestera_switch *sw)
1544 {
1545 	cancel_delayed_work_sync(&sw->router->neighs_update.dw);
1546 }
1547 
1548 int prestera_router_init(struct prestera_switch *sw)
1549 {
1550 	struct prestera_router *router;
1551 	int err, nhgrp_cache_bytes;
1552 
1553 	router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
1554 	if (!router)
1555 		return -ENOMEM;
1556 
1557 	sw->router = router;
1558 	router->sw = sw;
1559 
1560 	err = prestera_router_hw_init(sw);
1561 	if (err)
1562 		goto err_router_lib_init;
1563 
1564 	err = rhashtable_init(&router->kern_fib_cache_ht,
1565 			      &__prestera_kern_fib_cache_ht_params);
1566 	if (err)
1567 		goto err_kern_fib_cache_ht_init;
1568 
1569 	err = rhashtable_init(&router->kern_neigh_cache_ht,
1570 			      &__prestera_kern_neigh_cache_ht_params);
1571 	if (err)
1572 		goto err_kern_neigh_cache_ht_init;
1573 
1574 	nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1;
1575 	router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL);
1576 	if (!router->nhgrp_hw_state_cache) {
1577 		err = -ENOMEM;
1578 		goto err_nh_state_cache_alloc;
1579 	}
1580 
1581 	err = prestera_neigh_work_init(sw);
1582 	if (err)
1583 		goto err_neigh_work_init;
1584 
1585 	router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
1586 	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
1587 	if (err)
1588 		goto err_register_inetaddr_validator_notifier;
1589 
1590 	router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb;
1591 	err = register_inetaddr_notifier(&router->inetaddr_nb);
1592 	if (err)
1593 		goto err_register_inetaddr_notifier;
1594 
1595 	router->netevent_nb.notifier_call = prestera_router_netevent_event;
1596 	err = register_netevent_notifier(&router->netevent_nb);
1597 	if (err)
1598 		goto err_register_netevent_notifier;
1599 
1600 	router->fib_nb.notifier_call = __prestera_router_fib_event;
1601 	err = register_fib_notifier(&init_net, &router->fib_nb,
1602 				    /* TODO: flush fib entries */ NULL, NULL);
1603 	if (err)
1604 		goto err_register_fib_notifier;
1605 
1606 	return 0;
1607 
1608 err_register_fib_notifier:
1609 	unregister_netevent_notifier(&router->netevent_nb);
1610 err_register_netevent_notifier:
1611 	unregister_inetaddr_notifier(&router->inetaddr_nb);
1612 err_register_inetaddr_notifier:
1613 	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
1614 err_register_inetaddr_validator_notifier:
1615 	prestera_neigh_work_fini(sw);
1616 err_neigh_work_init:
1617 	kfree(router->nhgrp_hw_state_cache);
1618 err_nh_state_cache_alloc:
1619 	rhashtable_destroy(&router->kern_neigh_cache_ht);
1620 err_kern_neigh_cache_ht_init:
1621 	rhashtable_destroy(&router->kern_fib_cache_ht);
1622 err_kern_fib_cache_ht_init:
1623 	prestera_router_hw_fini(sw);
1624 err_router_lib_init:
1625 	kfree(sw->router);
1626 	return err;
1627 }
1628 
1629 void prestera_router_fini(struct prestera_switch *sw)
1630 {
1631 	unregister_fib_notifier(&init_net, &sw->router->fib_nb);
1632 	unregister_netevent_notifier(&sw->router->netevent_nb);
1633 	unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
1634 	unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
1635 	prestera_neigh_work_fini(sw);
1636 	prestera_queue_drain();
1637 
1638 	prestera_k_arb_abort(sw);
1639 
1640 	kfree(sw->router->nhgrp_hw_state_cache);
1641 	rhashtable_destroy(&sw->router->kern_fib_cache_ht);
1642 	prestera_router_hw_fini(sw);
1643 	kfree(sw->router);
1644 	sw->router = NULL;
1645 }
1646