// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/ip.h>
#include <linux/uaccess.h>
#include <linux/btf_ids.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif

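/* Per-cpu lists of rt6_info that are not attached to the FIB tree; they
 * let rt6_uncached_list_flush_dev() re-home device references when a
 * netdevice is unregistered.
 */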
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = blackhole_netdev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

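/* Pick the address used as the neighbour lookup key: the gateway when one
 * is configured, else the destination from the skb, else the caller's daddr.
 */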
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
				dst->dev, skb, daddr);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
	.family			= AF_INET6,
	.gc			= ip6_dst_gc,
	.gc_thresh		= 1024,
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.mtu			= ip6_mtu,
	.cow_metrics		= dst_cow_metrics_generic,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.default_advmss		= ip6_default_advmss,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.check			= ip6_dst_check,
	.destroy		= ip6_dst_destroy,
	.cow_metrics		= dst_cow_metrics_generic,
	.update_pmtu		= dst_blackhole_update_pmtu,
	.redirect		= dst_blackhole_redirect,
	.mtu			= dst_blackhole_mtu,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol	= RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

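/* Zero every rt6_info field that follows the embedded dst_entry and reset
 * the uncached-list linkage; dst_alloc() has already set up the dst itself.
 */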
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}

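/* Multipath route selection: hash the flow (or reuse the hash already
 * computed for an ICMPv6 error) and pick the first sibling whose
 * fib_nh_upper_bound covers the hash value.
 */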
void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
{
	struct fib6_info *sibling, *next_sibling;
	struct fib6_info *match = res->f6i;

	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
		goto out;

	if (match->nh && have_oif_match && res->nh)
		return;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash &&
	    (!match->nh || nexthop_is_multipath(match->nh)))
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (unlikely(match->nh)) {
		nexthop_path_fib6_result(res, fl6->mp_hash);
		return;
	}

	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
		goto out;

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		const struct fib6_nh *nh = sibling->fib6_nh;
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

out:
	res->f6i = match;
	res->nh = match->fib6_nh;
}

/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
			       const struct in6_addr *saddr, int oif, int flags)
{
	const struct net_device *dev;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		return false;

	dev = nh->fib_nh_dev;
	if (oif) {
		if (dev->ifindex == oif)
			return true;
	} else {
		if (ipv6_chk_addr(net, saddr, dev,
				  flags & RT6_LOOKUP_F_IFACE))
			return true;
	}

	return false;
}

struct fib6_nh_dm_arg {
	struct net		*net;
	const struct in6_addr	*saddr;
	int			oif;
	int			flags;
	struct fib6_nh		*nh;
};

static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_dm_arg *arg = _arg;

	arg->nh = nh;
	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
				  arg->flags);
}

/* returns fib6_nh from nexthop or NULL */
static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
					struct fib6_result *res,
					const struct in6_addr *saddr,
					int oif, int flags)
{
	struct fib6_nh_dm_arg arg = {
		.net   = net,
		.saddr = saddr,
		.oif   = oif,
		.flags = flags,
	};

	if (nexthop_is_blackhole(nh))
		return NULL;

	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
		return arg.nh;

	return NULL;
}

static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
{
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	if (!oif && ipv6_addr_any(saddr)) {
		if (unlikely(f6i->nh)) {
			nh = nexthop_fib6_nh(f6i->nh);
			if (nexthop_is_blackhole(f6i->nh))
				goto out_blackhole;
		} else {
			nh = f6i->fib6_nh;
		}
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			goto out;
	}

	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		bool matched = false;

		if (unlikely(spf6i->nh)) {
			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
					      oif, flags);
			if (nh)
				matched = true;
		} else {
			nh = spf6i->fib6_nh;
			if (__rt6_device_match(net, nh, saddr, oif, flags))
				matched = true;
		}
		if (matched) {
			res->f6i = spf6i;
			goto out;
		}
	}

	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
		goto out;
	}

	if (unlikely(f6i->nh)) {
		nh = nexthop_fib6_nh(f6i->nh);
		if (nexthop_is_blackhole(f6i->nh))
			goto out_blackhole;
	} else {
		nh = f6i->fib6_nh;
	}

	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
	}
out:
	res->nh = nh;
	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;
	return;

out_blackhole:
	res->fib6_flags |= RTF_REJECT;
	res->fib6_type = RTN_BLACKHOLE;
	res->nh = nh;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}

static void rt6_probe(struct fib6_nh *fib6_nh)
{
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	unsigned long last_probe;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!fib6_nh->fib_nh_gw_family)
		return;

	nh_gw = &fib6_nh->fib_nh_gw6;
	dev = fib6_nh->fib_nh_dev;
	rcu_read_lock_bh();
	last_probe = READ_ONCE(fib6_nh->last_probe);
	idev = __in6_dev_get(dev);
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else if (time_after(jiffies, last_probe +
				       idev->cnf.rtr_probe_interval)) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (!work || cmpxchg(&fib6_nh->last_probe,
			     last_probe, jiffies) != last_probe) {
		kfree(work);
	} else {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		dev_hold(dev);
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);
		if (n < 0)
			return n;
	}
	return m;
}

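/* Score @nh against the best candidate seen so far (*mpri); on a better
 * score, record it and report whether round-robin should be attempted.
 */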
static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
{
	bool match_do_rr = false;
	bool rc = false;
	int m;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		goto out;

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(nh);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		rc = true;
	}
out:
	return rc;
}

struct fib6_nh_frl_arg {
	u32	flags;
	int	oif;
	int	strict;
	int	*mpri;
	bool	*do_rr;
	struct fib6_nh *nh;
};

static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_frl_arg *arg = _arg;

	arg->nh = nh;
	return find_match(nh, arg->flags, arg->oif, arg->strict,
			  arg->mpri, arg->do_rr);
}

static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
{
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		bool matched = false;
		struct fib6_nh *nh;

		if (cont && f6i->fib6_metric != metric) {
			*cont = f6i;
			return;
		}

		if (fib6_check_expired(f6i))
			continue;

		if (unlikely(f6i->nh)) {
			struct fib6_nh_frl_arg arg = {
				.flags  = f6i->fib6_flags,
				.oif    = oif,
				.strict = strict,
				.mpri   = mpri,
				.do_rr  = do_rr
			};

			if (nexthop_is_blackhole(f6i->nh)) {
				res->fib6_flags = RTF_REJECT;
				res->fib6_type = RTN_BLACKHOLE;
				res->f6i = f6i;
				res->nh = nexthop_fib6_nh(f6i->nh);
				return;
			}
			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
						     &arg)) {
				matched = true;
				nh = arg.nh;
			}
		} else {
			nh = f6i->fib6_nh;
			if (find_match(nh, f6i->fib6_flags, oif, strict,
				       mpri, do_rr))
				matched = true;
		}
		if (matched) {
			res->f6i = f6i;
			res->nh = nh;
			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;
		}
	}
}

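/* Walk the leaf list in two passes: first from the round-robin head to the
 * end of the list, then from the start of the list up to the round-robin
 * head, so every entry with the same metric is considered once. Entries
 * with a different metric are collected in @cont and only scanned if
 * nothing matched.
 */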
static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
{
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;
	int mpri = -1;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)
		return;

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}

static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;
	bool do_rr = false;
	int key_plen;

	/* make sure this function or its helpers sets f6i */
	res->f6i = NULL;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		goto out;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (this might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		goto out;

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

out:
	if (!res->f6i) {
		res->f6i = net->ipv6.fib6_null_entry;
		res->nh = res->f6i->fib6_nh;
		res->fib6_flags = res->f6i->fib6_flags;
		res->fib6_type = res->f6i->fib6_type;
	}
}

static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info))
		return -EINVAL;

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3)
		return -EINVAL;
	else if (rinfo->prefix_len > 128)
		return -EINVAL;
	else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2)
			return -EINVAL;
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1)
			return -EINVAL;
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt, false);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		fib6_info_release(rt);
	}
	return 0;
}
#endif

/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}

static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;

	return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;

	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}

/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;

	ip6_rt_init_dst(rt, res);

	rt->rt6i_dst = f6i->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_flags = res->fib6_flags;
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
		rt->rt6i_flags |= RTF_GATEWAY;
	}
	rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = f6i->fib6_src;
#endif
}

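/* Walk back up the tree (descending into source subtrees along the way)
 * until a node carrying route info is found; returns NULL at the root.
 */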
static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_node_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

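/* Try to take a reference on *prt; if the dst is already being freed,
 * substitute the null entry (when @net is given) or NULL and report failure.
 */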
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (net) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
	struct rt6_info *nrt;

	if (!fib6_info_hold_safe(f6i))
		goto fallback;

	flags = fib6_info_dst_flags(f6i);
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (!nrt) {
		fib6_info_release(f6i);
		goto fallback;
	}

	ip6_rt_copy_init(nrt, res);
	return nrt;

fallback:
	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);
	return nrt;
}

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_result res = {};
	struct fib6_node *fn;
	struct rt6_info *rt;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	res.f6i = rcu_dereference(fn->leaf);
	if (!res.f6i)
		res.f6i = net->ipv6.fib6_null_entry;
	else
		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
				 flags);

	if (res.f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;

		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
		goto out;
	} else if (res.fib6_flags & RTF_REJECT) {
		goto do_create;
	}

	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);
	} else {
do_create:
		rt = ip6_create_rt_rcu(&res);
	}

out:
	trace_fib6_table_lookup(net, &res, table, fl6);

	rcu_read_unlock();

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason, the
 * route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}

static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		fib6_info_release(f6i);
		return NULL;
	}

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(res)) {
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(f6i);
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;

	if (f6i->nh)
		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));

	return pcpu_rt;
}

static bool rt6_is_valid(const struct rt6_info *rt6)
{
	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt;

	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);

	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
		struct rt6_info *prev, **p;

		p = this_cpu_ptr(res->nh->rt6i_pcpu);
		prev = xchg(p, NULL);
		if (prev) {
			dst_dev_put(&prev->dst);
			dst_release(&prev->dst);
		}

		pcpu_rt = NULL;
	}

	return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);
	if (!pcpu_rt)
		return NULL;

	p = this_cpu_ptr(res->nh->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);
	}

	return pcpu_rt;
}

/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct fib6_info *from;
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge the exception completely so the held resources can be
	 * released: some [sk] cache may keep the dst around for an
	 * unlimited time
	 */
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	dst_release(&rt6_ex->rt6i->dst);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}

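/* Hash (daddr, saddr) into a bucket index; the once-initialized random seed
 * keeps the bucket distribution unpredictable to remote senders.
 */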
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

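/* Effective MTU for the result: the route PMTU if set, else the device's
 * IPv6 MTU, capped at IP6_MAX_MTU and reduced by any lwtunnel headroom.
 */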
fib6_mtu(const struct fib6_result * res)1568 static unsigned int fib6_mtu(const struct fib6_result *res)
1569 {
1570 const struct fib6_nh *nh = res->nh;
1571 unsigned int mtu;
1572
1573 if (res->f6i->fib6_pmtu) {
1574 mtu = res->f6i->fib6_pmtu;
1575 } else {
1576 struct net_device *dev = nh->fib_nh_dev;
1577 struct inet6_dev *idev;
1578
1579 rcu_read_lock();
1580 idev = __in6_dev_get(dev);
1581 mtu = idev->cnf.mtu6;
1582 rcu_read_unlock();
1583 }
1584
1585 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1586
1587 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1588 }
1589
1590 #define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL
1591
1592 /* used when the flushed bit is not relevant, only access to the bucket
1593 * (ie., all bucket users except rt6_insert_exception);
1594 *
1595 * called under rcu lock; sometimes called with rt6_exception_lock held
1596 */
1597 static
fib6_nh_get_excptn_bucket(const struct fib6_nh * nh,spinlock_t * lock)1598 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1599 spinlock_t *lock)
1600 {
1601 struct rt6_exception_bucket *bucket;
1602
1603 if (lock)
1604 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1605 lockdep_is_held(lock));
1606 else
1607 bucket = rcu_dereference(nh->rt6i_exception_bucket);
1608
1609 /* remove bucket flushed bit if set */
1610 if (bucket) {
1611 unsigned long p = (unsigned long)bucket;
1612
1613 p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1614 bucket = (struct rt6_exception_bucket *)p;
1615 }
1616
1617 return bucket;
1618 }
1619
fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket * bucket)1620 static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1621 {
1622 unsigned long p = (unsigned long)bucket;
1623
1624 return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1625 }
1626
1627 /* called with rt6_exception_lock held */
fib6_nh_excptn_bucket_set_flushed(struct fib6_nh * nh,spinlock_t * lock)1628 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1629 spinlock_t *lock)
1630 {
1631 struct rt6_exception_bucket *bucket;
1632 unsigned long p;
1633
1634 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1635 lockdep_is_held(lock));
1636
1637 p = (unsigned long)bucket;
1638 p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1639 bucket = (struct rt6_exception_bucket *)p;
1640 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1641 }
1642
rt6_insert_exception(struct rt6_info * nrt,const struct fib6_result * res)1643 static int rt6_insert_exception(struct rt6_info *nrt,
1644 const struct fib6_result *res)
1645 {
1646 struct net *net = dev_net(nrt->dst.dev);
1647 struct rt6_exception_bucket *bucket;
1648 struct fib6_info *f6i = res->f6i;
1649 struct in6_addr *src_key = NULL;
1650 struct rt6_exception *rt6_ex;
1651 struct fib6_nh *nh = res->nh;
1652 int err = 0;
1653
1654 spin_lock_bh(&rt6_exception_lock);
1655
1656 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1657 lockdep_is_held(&rt6_exception_lock));
1658 if (!bucket) {
1659 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1660 GFP_ATOMIC);
1661 if (!bucket) {
1662 err = -ENOMEM;
1663 goto out;
1664 }
1665 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1666 } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1667 err = -EINVAL;
1668 goto out;
1669 }
1670
1671 #ifdef CONFIG_IPV6_SUBTREES
1672 /* fib6_src.plen != 0 indicates f6i is in subtree
1673 * and exception table is indexed by a hash of
1674 * both fib6_dst and fib6_src.
1675 * Otherwise, the exception table is indexed by
1676 * a hash of only fib6_dst.
1677 */
1678 if (f6i->fib6_src.plen)
1679 src_key = &nrt->rt6i_src.addr;
1680 #endif
1681 /* rt6_mtu_change() might lower mtu on f6i.
1682 * Only insert this exception route if its mtu
1683 * is less than f6i's mtu value.
1684 */
1685 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1686 err = -EINVAL;
1687 goto out;
1688 }
1689
1690 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1691 src_key);
1692 if (rt6_ex)
1693 rt6_remove_exception(bucket, rt6_ex);
1694
1695 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1696 if (!rt6_ex) {
1697 err = -ENOMEM;
1698 goto out;
1699 }
1700 rt6_ex->rt6i = nrt;
1701 rt6_ex->stamp = jiffies;
1702 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1703 bucket->depth++;
1704 net->ipv6.rt6_stats->fib_rt_cache++;
1705
1706 if (bucket->depth > FIB6_MAX_DEPTH)
1707 rt6_exception_remove_oldest(bucket);
1708
1709 out:
1710 spin_unlock_bh(&rt6_exception_lock);
1711
1712 /* Update fn->fn_sernum to invalidate all cached dst */
1713 if (!err) {
1714 spin_lock_bh(&f6i->fib6_table->tb6_lock);
1715 fib6_update_sernum(net, f6i);
1716 spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1717 fib6_force_start_gc(net);
1718 }
1719
1720 return err;
1721 }
1722
fib6_nh_flush_exceptions(struct fib6_nh * nh,struct fib6_info * from)1723 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1724 {
1725 struct rt6_exception_bucket *bucket;
1726 struct rt6_exception *rt6_ex;
1727 struct hlist_node *tmp;
1728 int i;
1729
1730 spin_lock_bh(&rt6_exception_lock);
1731
1732 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1733 if (!bucket)
1734 goto out;
1735
1736 /* Prevent rt6_insert_exception() to recreate the bucket list */
1737 if (!from)
1738 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1739
1740 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1741 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1742 if (!from ||
1743 rcu_access_pointer(rt6_ex->rt6i->from) == from)
1744 rt6_remove_exception(bucket, rt6_ex);
1745 }
1746 WARN_ON_ONCE(!from && bucket->depth);
1747 bucket++;
1748 }
1749 out:
1750 spin_unlock_bh(&rt6_exception_lock);
1751 }
1752
rt6_nh_flush_exceptions(struct fib6_nh * nh,void * arg)1753 static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1754 {
1755 struct fib6_info *f6i = arg;
1756
1757 fib6_nh_flush_exceptions(nh, f6i);
1758
1759 return 0;
1760 }
1761
rt6_flush_exceptions(struct fib6_info * f6i)1762 void rt6_flush_exceptions(struct fib6_info *f6i)
1763 {
1764 if (f6i->nh)
1765 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1766 f6i);
1767 else
1768 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1769 }
1770
1771 /* Find cached rt in the hash table inside passed in rt
1772 * Caller has to hold rcu_read_lock()
1773 */
rt6_find_cached_rt(const struct fib6_result * res,const struct in6_addr * daddr,const struct in6_addr * saddr)1774 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1775 const struct in6_addr *daddr,
1776 const struct in6_addr *saddr)
1777 {
1778 const struct in6_addr *src_key = NULL;
1779 struct rt6_exception_bucket *bucket;
1780 struct rt6_exception *rt6_ex;
1781 struct rt6_info *ret = NULL;
1782
1783 #ifdef CONFIG_IPV6_SUBTREES
1784 /* fib6i_src.plen != 0 indicates f6i is in subtree
1785 * and exception table is indexed by a hash of
1786 * both fib6_dst and fib6_src.
1787 * However, the src addr used to create the hash
1788 * might not be exactly the passed in saddr which
1789 * is a /128 addr from the flow.
1790 * So we need to use f6i->fib6_src to redo lookup
1791 * if the passed in saddr does not find anything.
1792 * (See the logic in ip6_rt_cache_alloc() on how
1793 * rt->rt6i_src is updated.)
1794 */
1795 if (res->f6i->fib6_src.plen)
1796 src_key = saddr;
1797 find_ex:
1798 #endif
1799 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1800 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1801
1802 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1803 ret = rt6_ex->rt6i;
1804
1805 #ifdef CONFIG_IPV6_SUBTREES
1806 /* Use fib6_src as src_key and redo lookup */
1807 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1808 src_key = &res->f6i->fib6_src.addr;
1809 goto find_ex;
1810 }
1811 #endif
1812
1813 return ret;
1814 }
1815
1816 /* Remove the passed in cached rt from the hash table that contains it */
fib6_nh_remove_exception(const struct fib6_nh * nh,int plen,const struct rt6_info * rt)1817 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1818 const struct rt6_info *rt)
1819 {
1820 const struct in6_addr *src_key = NULL;
1821 struct rt6_exception_bucket *bucket;
1822 struct rt6_exception *rt6_ex;
1823 int err;
1824
1825 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1826 return -ENOENT;
1827
1828 spin_lock_bh(&rt6_exception_lock);
1829 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1830
1831 #ifdef CONFIG_IPV6_SUBTREES
1832 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1833 * and exception table is indexed by a hash of
1834 * both rt6i_dst and rt6i_src.
1835 * Otherwise, the exception table is indexed by
1836 * a hash of only rt6i_dst.
1837 */
1838 if (plen)
1839 src_key = &rt->rt6i_src.addr;
1840 #endif
1841 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1842 &rt->rt6i_dst.addr,
1843 src_key);
1844 if (rt6_ex) {
1845 rt6_remove_exception(bucket, rt6_ex);
1846 err = 0;
1847 } else {
1848 err = -ENOENT;
1849 }
1850
1851 spin_unlock_bh(&rt6_exception_lock);
1852 return err;
1853 }
1854
1855 struct fib6_nh_excptn_arg {
1856 struct rt6_info *rt;
1857 int plen;
1858 };
1859
rt6_nh_remove_exception_rt(struct fib6_nh * nh,void * _arg)1860 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1861 {
1862 struct fib6_nh_excptn_arg *arg = _arg;
1863 int err;
1864
1865 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1866 if (err == 0)
1867 return 1;
1868
1869 return 0;
1870 }
1871
rt6_remove_exception_rt(struct rt6_info * rt)1872 static int rt6_remove_exception_rt(struct rt6_info *rt)
1873 {
1874 struct fib6_info *from;
1875
1876 from = rcu_dereference(rt->from);
1877 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1878 return -EINVAL;
1879
1880 if (from->nh) {
1881 struct fib6_nh_excptn_arg arg = {
1882 .rt = rt,
1883 .plen = from->fib6_src.plen
1884 };
1885 int rc;
1886
1887 /* rc = 1 means an entry was found */
1888 rc = nexthop_for_each_fib6_nh(from->nh,
1889 rt6_nh_remove_exception_rt,
1890 &arg);
1891 return rc ? 0 : -ENOENT;
1892 }
1893
1894 return fib6_nh_remove_exception(from->fib6_nh,
1895 from->fib6_src.plen, rt);
1896 }

/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
				     const struct rt6_info *rt)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;

	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;
}

struct fib6_nh_match_arg {
	const struct net_device *dev;
	const struct in6_addr *gw;
	struct fib6_nh *match;
};

/* determine if fib6_nh has given device and gateway */
static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_match_arg *arg = _arg;

	if (arg->dev != nh->fib_nh_dev ||
	    (arg->gw && !nh->fib_nh_gw_family) ||
	    (!arg->gw && nh->fib_nh_gw_family) ||
	    (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
		return 0;

	arg->match = nh;

	/* found a match, break the loop */
	return 1;
}

static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct fib6_info *from;
	struct fib6_nh *fib6_nh;

	rcu_read_lock();

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		goto unlock;

	if (from->nh) {
		struct fib6_nh_match_arg arg = {
			.dev = rt->dst.dev,
			.gw = &rt->rt6i_gateway,
		};

		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);

		if (!arg.match)
			goto unlock;
		fib6_nh = arg.match;
	} else {
		fib6_nh = from->fib6_nh;
	}
	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
unlock:
	rcu_read_unlock();
}

static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */

	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
}
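
/* Worked example: if the route PMTU is 1400 and the link MTU
 * (idev->cnf.mtu6) is 1500, lowering the device MTU to 1280 is allowed
 * (1400 >= 1280), but raising it to 9000 is not: 1400 != 1500 means
 * some other hop imposed the PMTU, so it must be kept.  If the route
 * PMTU were 1500 (equal to the local MTU), the raise would be allowed
 * and PMTU discovery would rediscover any smaller MTU on the path.
 */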

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       const struct fib6_nh *nh, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
		}
		bucket++;
	}
}

#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
					    const struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* We are pruning and obsoleting aged-out and non-gateway exceptions
	 * even if others still hold references to them, so that on the next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
	 * expired, independently of their aging, as per RFC 8201 section 4.
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);

		if (!(neigh && (neigh->flags & NTF_ROUTER))) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}

static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
				   struct fib6_gc_args *gc_args,
				   unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}

struct fib6_nh_age_excptn_arg {
	struct fib6_gc_args *gc_args;
	unsigned long now;
};

static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_age_excptn_arg *arg = _arg;

	fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
	return 0;
}

void rt6_age_exceptions(struct fib6_info *f6i,
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	if (f6i->nh) {
		struct fib6_nh_age_excptn_arg arg = {
			.gc_args = gc_args,
			.now = now
		};

		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
					 &arg);
	} else {
		fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
	}
}
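
/* Usage note (sketch): rt6_age_exceptions() is the per-route hook of
 * the fib6 garbage collector - fib6_run_gc() walks the tree and ends up
 * here for every fib6_info, passing the aging timeout via gc_args.  The
 * gc_args->more counter bumped above is what tells the collector that
 * live exceptions remain and another pass is worth scheduling.
 */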

/* must be called with rcu lock held */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
		      struct flowi6 *fl6, struct fib6_result *res, int strict)
{
	struct fib6_node *fn, *saved_fn;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt6_select(net, fn, oif, res, strict);
	if (res->f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	trace_fib6_table_lookup(net, res, table, fl6);

	return 0;
}
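
/* Illustrative call pattern (this is exactly what ip6_pol_route() below
 * does): look the flow up under RCU, then pick a path among siblings:
 *
 *	struct fib6_result res = {};
 *
 *	rcu_read_lock();
 *	fib6_table_lookup(net, table, oif, fl6, &res, strict);
 *	if (res.f6i != net->ipv6.fib6_null_entry)
 *		fib6_select_path(net, &res, fl6, oif, false, skb, strict);
 *	rcu_read_unlock();
 */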

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_result res = {};
	struct rt6_info *rt = NULL;
	int strict = 0;

	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
		     !rcu_read_lock_held());

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fib6_table_lookup(net, table, oif, fl6, &res, strict);
	if (res.f6i == net->ipv6.fib6_null_entry)
		goto out;

	fib6_select_path(net, &res, fl6, oif, false, skb, strict);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		goto out;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !res.nh->fib_nh_gw_family)) {
		/* Create an RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is
		 * different from the fl6->daddr used to look up the route
		 * here.
		 */
		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

		if (rt) {
			/* 1 refcnt is taken during ip6_rt_cache_alloc().
			 * As rt6_uncached_list_add() does not consume refcnt,
			 * this refcnt is always returned to the caller even
			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
			 */
			rt6_uncached_list_add(rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
			rcu_read_unlock();

			return rt;
		}
	} else {
		/* Get a percpu copy */
		local_bh_disable();
		rt = rt6_get_pcpu_route(&res);

		if (!rt)
			rt = rt6_make_pcpu_route(net, &res);

		local_bh_enable();
	}
out:
	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
		ip6_hold_safe(net, &rt);
	rcu_read_unlock();

	return rt;
}
EXPORT_SYMBOL_GPL(ip6_pol_route);

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	if (!icmpv6_is_err(icmph->icmp6_type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}

/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash;

	switch (ip6_multipath_hash_policy(net)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			struct flow_keys keys;

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, 0);
				flkeys = &keys;
			}

			/* Inner can be v4 or v6 */
			if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
				hash_keys.tags.flow_label = flkeys->tags.flow_label;
				hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	return mhash >> 1;
}
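
/* Policy summary (sketch; the selector is the per-netns value returned
 * by ip6_multipath_hash_policy() above):
 *
 *	0 - L3 keys: src addr, dst addr, flow label, next header; for
 *	    ICMPv6 errors the keys come from the inner (offending)
 *	    header via ip6_multipath_l3_keys().
 *	1 - L4 five-tuple: src/dst addr, src/dst port, next header.
 *	2 - L3 keys taken from the inner header of encapsulated packets
 *	    (v4 or v6), falling back to policy-0 behaviour otherwise.
 *
 * The result is shifted right by one so it stays non-negative for
 * callers that compare it as a signed value against nexthop upper
 * bounds.
 */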

/* Called with rcu held */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
	skb_dst_drop(skb);
	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
						      &fl6, skb, flags));
}

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}

struct dst_entry *ip6_route_output_flags_noref(struct net *net,
					       const struct sock *sk,
					       struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (ipv6_addr_type(&fl6->daddr) &
	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
		struct dst_entry *dst;

		/* This function does not take refcnt on the dst */
		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	flags |= RT6_LOOKUP_F_DST_NOREF;
	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);

struct dst_entry *ip6_route_output_flags(struct net *net,
					 const struct sock *sk,
					 struct flowi6 *fl6,
					 int flags)
{
	struct dst_entry *dst;
	struct rt6_info *rt6;

	rcu_read_lock();
	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
	rt6 = (struct rt6_info *)dst;
	/* For dst cached in uncached_list, refcnt is already taken. */
	if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
	}
	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
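
/* Typical caller pattern (cf. ip6_update_pmtu() later in this file),
 * going through the flags==0 wrapper declared in <net/ip6_route.h>:
 *
 *	dst = ip6_route_output(net, NULL, &fl6);
 *	if (!dst->error)
 *		... use dst ...
 *	dst_release(dst);
 *
 * The returned dst always carries a reference (a null-route entry with
 * dst->error set rather than a NULL pointer on failure), so the caller
 * must dst_release() it.
 */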

struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *)dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

/*
 *	Destination cache support functions
 */

static bool fib6_check(struct fib6_info *f6i, u32 cookie)
{
	u32 rt_cookie = 0;

	if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
		return false;

	if (fib6_check_expired(f6i))
		return false;

	return true;
}

static struct dst_entry *rt6_check(struct rt6_info *rt,
				   struct fib6_info *from,
				   u32 cookie)
{
	u32 rt_cookie = 0;

	if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
	    rt_cookie != cookie)
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
					    struct fib6_info *from,
					    u32 cookie)
{
	if (!__rt6_check_expired(rt) &&
	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    fib6_check(from, cookie))
		return &rt->dst;
	else
		return NULL;
}

INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
							u32 cookie)
{
	struct dst_entry *dst_ret;
	struct fib6_info *from;
	struct rt6_info *rt;

	rt = container_of(dst, struct rt6_info, dst);

	if (rt->sernum)
		return rt6_is_valid(rt) ? dst : NULL;

	rcu_read_lock();

	/* All IPv6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	from = rcu_dereference(rt->from);

	if (from && (rt->rt6i_flags & RTF_PCPU ||
	    unlikely(!list_empty(&rt->rt6i_uncached))))
		dst_ret = rt6_dst_from_check(rt, from, cookie);
	else
		dst_ret = rt6_check(rt, from, cookie);

	rcu_read_unlock();

	return dst_ret;
}
EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
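
/* Validation sketch: socket code saves the cookie when it stores the
 * dst and revalidates through dst->ops->check() before reuse, exactly
 * as ip6_sk_update_pmtu() does later in this file:
 *
 *	dst = __sk_dst_get(sk);
 *	if (dst && dst->obsolete &&
 *	    !dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
 *		... dst is stale, a fresh route lookup is needed ...
 */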

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			rcu_read_lock();
			if (rt6_check_expired(rt)) {
				rt6_remove_exception_rt(rt);
				dst = NULL;
			}
			rcu_read_unlock();
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *)skb_dst(skb);
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;

			from = rcu_dereference(rt->from);
			if (from) {
				fn = rcu_dereference(from->fib6_node);
				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
					fn->fn_sernum = -1;
			}
		}
		rcu_read_unlock();
	}
}

static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
		struct fib6_info *from;

		rcu_read_lock();
		from = rcu_dereference(rt0->from);
		if (from)
			rt0->dst.expires = from->expires;
		rcu_read_unlock();
	}

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}

static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
	rt->rt6i_flags |= RTF_MODIFIED;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
}

static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu,
				 bool confirm_neigh)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU).
	 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
	 * [see also comment in rt6_mtu_change_route()]
	 */

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}

	if (confirm_neigh)
		dst_confirm_neigh(dst, daddr);

	if (mtu < IPV6_MIN_MTU)
		return;
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct fib6_result res = {};
		struct rt6_info *nrt6;

		rcu_read_lock();
		res.f6i = rcu_dereference(rt6->from);
		if (!res.f6i)
			goto out_unlock;

		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;

		if (res.f6i->nh) {
			struct fib6_nh_match_arg arg = {
				.dev = dst->dev,
				.gw = &rt6->rt6i_gateway,
			};

			nexthop_for_each_fib6_nh(res.f6i->nh,
						 fib6_nh_find_match, &arg);

			/* fib6_info uses a nexthop that has no fib6_nh
			 * matching dst->dev + gw; this should be impossible.
			 */
			if (!arg.match)
				goto out_unlock;

			res.nh = arg.match;
		} else {
			res.nh = res.f6i->fib6_nh;
		}

		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			if (rt6_insert_exception(nrt6, &res))
				dst_release_immediate(&nrt6->dst);
		}
out_unlock:
		rcu_read_unlock();
	}
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
			     confirm_neigh);
}

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;

	if (!oif && skb->dev)
		oif = l3mdev_master_ifindex(skb->dev);

	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}

static bool ip6_redirect_nh_match(const struct fib6_result *res,
				  struct flowi6 *fl6,
				  const struct in6_addr *gw,
				  struct rt6_info **ret)
{
	const struct fib6_nh *nh = res->nh;

	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
		return false;

	/* rt_cache's gateway might be different from its 'parent'
	 * in the case of an IP redirect.
	 * So we keep searching in the exception table if the gateway
	 * is different.
	 */
	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
		struct rt6_info *rt_cache;

		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
		if (rt_cache &&
		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
			*ret = rt_cache;
			return true;
		}
		return false;
	}
	return true;
}

struct fib6_nh_rd_arg {
	struct fib6_result *res;
	struct flowi6 *fl6;
	const struct in6_addr *gw;
	struct rt6_info **ret;
};

static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_rd_arg *arg = _arg;

	arg->res->nh = nh;
	return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
}

/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};

INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *ret = NULL;
	struct fib6_result res = {};
	struct fib6_nh_rd_arg arg = {
		.res = &res,
		.fl6 = fl6,
		.gw = &rdfl->gateway,
		.ret = &ret
	};
	struct fib6_info *rt;
	struct fib6_node *fn;

	/* l3mdev_update_flow overrides oif if the device is enslaved; in
	 * this case we must match on the real ingress device, so reset it
	 */
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		fl6->flowi6_oif = skb->dev->ifindex;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from an appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		res.f6i = rt;
		if (fib6_check_expired(rt))
			continue;
		if (rt->fib6_flags & RTF_REJECT)
			break;
		if (unlikely(rt->nh)) {
			if (nexthop_is_blackhole(rt->nh))
				continue;
			/* on match, res->nh is filled in and potentially ret */
			if (nexthop_for_each_fib6_nh(rt->nh,
						     fib6_nh_redirect_match,
						     &arg))
				goto out;
		} else {
			res.nh = rt->fib6_nh;
			if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
						  &ret))
				goto out;
		}
	}

	if (!rt)
		rt = net->ipv6.fib6_null_entry;
	else if (rt->fib6_flags & RTF_REJECT) {
		ret = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

	res.f6i = rt;
	res.nh = rt->fib6_nh;
out:
	if (ret) {
		ip6_hold_safe(net, &ret);
	} else {
		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;
		ret = ip6_create_rt_rcu(&res);
	}

	rcu_read_unlock();

	trace_fib6_table_lookup(net, &res, table, fl6);
	return ret;
}

static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6, skb,
				flags, __ip6_route_redirect);
}

void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.daddr = msg->dest,
		.saddr = iph->daddr,
		.flowi6_uid = sock_net_uid(net, NULL),
	};

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);

static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}
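
/* Worked example: on a 1500-byte MTU link the advertised MSS becomes
 * 1500 - sizeof(struct ipv6hdr) (40) - sizeof(struct tcphdr) (20)
 * = 1440 bytes, clamped from below by the ip6_rt_min_advmss sysctl and
 * capped at IPV6_MAXPLEN for jumbo-sized dsts.
 */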

INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
EXPORT_INDIRECT_CALLABLE(ip6_mtu);

/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 *
 * based on ip6_dst_mtu_forward and exception logic of
 * rt6_find_cached_rt; called with rcu_read_lock
 */
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
		      const struct in6_addr *daddr,
		      const struct in6_addr *saddr)
{
	const struct fib6_nh *nh = res->nh;
	struct fib6_info *f6i = res->f6i;
	struct inet6_dev *idev;
	struct rt6_info *rt;
	u32 mtu = 0;

	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
		mtu = f6i->fib6_pmtu;
		if (mtu)
			goto out;
	}

	rt = rt6_find_cached_rt(res, daddr, saddr);
	if (unlikely(rt)) {
		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
	} else {
		struct net_device *dev = nh->fib_nh_dev;

		mtu = IPV6_MIN_MTU;
		idev = __in6_dev_get(dev);
		if (idev && idev->cnf.mtu6 > mtu)
			mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
out:
	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}
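
/* Example of the selection order above: a route installed with a locked
 * MTU metric ("ip -6 route add ... mtu lock 1280" style configuration)
 * always reports 1280 here.  Without the lock, a cached PMTU exception
 * learned for daddr is used, and failing that the egress device's mtu6
 * applies (minus any lwtunnel encap headroom in all cases).
 */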

struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (entries > rt_max_size)
		entries = dst_entries_get_slow(ops);

	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
	return entries > rt_max_size;
}
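
/* Behaviour sketch: gc is skipped while the entry count stays at or
 * below ip6_rt_max_size and less than ip6_rt_gc_min_interval has
 * elapsed since the last run.  ip6_rt_gc_expire is the aging timeout
 * handed to fib6_run_gc(); it is bumped on each invocation, reset to
 * half of ip6_rt_gc_timeout once the table shrinks below gc_thresh,
 * and decays by 1/2^elasticity on the way out - with a value of 512
 * and an elasticity of 9, for example, it loses exactly one per call,
 * so sustained pressure gradually shortens the timeout and makes
 * collection more aggressive.
 */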

static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
			       const struct in6_addr *gw_addr, u32 tbid,
			       int flags, struct fib6_result *res)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	int err;

	table = fib6_get_table(net, tbid);
	if (!table)
		return -EINVAL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;

	err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
	if (!err && res->f6i != net->ipv6.fib6_null_entry)
		fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
				 cfg->fc_ifindex != 0, NULL, flags);

	return err;
}

static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
				     const struct net_device *dev,
				     struct netlink_ext_ack *extack)
{
	u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct fib6_result res = {};
	int err;

	err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
	if (!err && !(res.fib6_flags & RTF_REJECT) &&
	    /* ignore match if it is the default route */
	    !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
	    (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop has invalid gateway or device mismatch");
		err = -EINVAL;
	}

	return err;
}

static int ip6_route_check_nh(struct net *net,
			      struct fib6_config *cfg,
			      struct net_device **_dev,
			      struct inet6_dev **idev)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct net_device *dev = _dev ? *_dev : NULL;
	int flags = RT6_LOOKUP_F_IFACE;
	struct fib6_result res = {};
	int err = -EHOSTUNREACH;

	if (cfg->fc_table) {
		err = ip6_nh_lookup_table(net, cfg, gw_addr,
					  cfg->fc_table, flags, &res);
		/* gw_addr can not require a gateway or resolve to a reject
		 * route.  If a device is given, it must match the result.
		 */
		if (err || res.fib6_flags & RTF_REJECT ||
		    res.nh->fib_nh_gw_family ||
		    (dev && dev != res.nh->fib_nh_dev))
			err = -EHOSTUNREACH;
	}

	if (err < 0) {
		struct flowi6 fl6 = {
			.flowi6_oif = cfg->fc_ifindex,
			.daddr = *gw_addr,
		};

		err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
		if (err || res.fib6_flags & RTF_REJECT ||
		    res.nh->fib_nh_gw_family)
			err = -EHOSTUNREACH;

		if (err)
			return err;

		fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
				 cfg->fc_ifindex != 0, NULL, flags);
	}

	err = 0;
	if (dev) {
		if (dev != res.nh->fib_nh_dev)
			err = -EHOSTUNREACH;
	} else {
		*_dev = dev = res.nh->fib_nh_dev;
		dev_hold(dev);
		*idev = in6_dev_get(dev);
	}

	return err;
}

static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
			   struct net_device **_dev, struct inet6_dev **idev,
			   struct netlink_ext_ack *extack)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	int gwa_type = ipv6_addr_type(gw_addr);
	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
	const struct net_device *dev = *_dev;
	bool need_addr_check = !dev;
	int err = -EINVAL;

	/* If gw_addr is local we will fail to detect this in case the
	 * address is still TENTATIVE (DAD in progress).  rt6_lookup()
	 * will return the already-added prefix route via the interface
	 * that the prefix route was assigned to, which might be
	 * non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using non-link-local
		 * addresses as nexthop addresses.
		 * Otherwise, the router will not be able to send redirects.
		 * It is very good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

		rcu_read_lock();

		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);

		rcu_read_unlock();

		if (err)
			goto out;
	}

	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}

static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
{
	if ((flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(flags & (RTF_ANYCAST | RTF_LOCAL))))
		return true;

	return false;
}
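
/* Example: a plain route to 2001:db8::/32 whose device is the loopback
 * interface is treated as a reject route here - the prefix is not a
 * loopback address and the route is neither anycast nor local, so
 * forwarding it to lo could only loop packets inside the stack.
 */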

int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
		 struct fib6_config *cfg, gfp_t gfp_flags,
		 struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	int addr_type;
	int err;

	fib6_nh->fib_nh_family = AF_INET6;
#ifdef CONFIG_IPV6_ROUTER_PREF
	fib6_nh->last_probe = jiffies;
#endif
	if (cfg->fc_is_fdb) {
		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
		fib6_nh->fib_nh_gw_family = AF_INET6;
		return 0;
	}

	err = -ENODEV;
	if (cfg->fc_ifindex) {
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_flags & RTNH_F_ONLINK) {
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			goto out;
		}

		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		}

		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
	}

	fib6_nh->fib_nh_weight = 1;

	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	addr_type = ipv6_addr_type(&cfg->fc_dst);
	if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		goto pcpu_alloc;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
		if (err)
			goto out;

		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
		fib6_nh->fib_nh_gw_family = AF_INET6;
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
		err = -EACCES;
		goto out;
	}

	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	}

	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
	    !netif_carrier_ok(dev))
		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;

	err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
				 cfg->fc_encap_type, cfg, gfp_flags, extack);
	if (err)
		goto out;

pcpu_alloc:
	fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
	if (!fib6_nh->rt6i_pcpu) {
		err = -ENOMEM;
		goto out;
	}

	fib6_nh->fib_nh_dev = dev;
	fib6_nh->fib_nh_oif = dev->ifindex;
	err = 0;
out:
	if (idev)
		in6_dev_put(idev);

	if (err) {
		lwtstate_put(fib6_nh->fib_nh_lws);
		fib6_nh->fib_nh_lws = NULL;
		if (dev)
			dev_put(dev);
	}

	return err;
}

void fib6_nh_release(struct fib6_nh *fib6_nh)
{
	struct rt6_exception_bucket *bucket;

	rcu_read_lock();

	fib6_nh_flush_exceptions(fib6_nh, NULL);
	bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
	if (bucket) {
		rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
		kfree(bucket);
	}

	rcu_read_unlock();

	if (fib6_nh->rt6i_pcpu) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct rt6_info **ppcpu_rt;
			struct rt6_info *pcpu_rt;

			ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
			pcpu_rt = *ppcpu_rt;
			if (pcpu_rt) {
				dst_dev_put(&pcpu_rt->dst);
				dst_release(&pcpu_rt->dst);
				*ppcpu_rt = NULL;
			}
		}

		free_percpu(fib6_nh->rt6i_pcpu);
	}

	fib_nh_common_release(&fib6_nh->nh_common);
}

static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
					       gfp_t gfp_flags,
					       struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct fib6_info *rt = NULL;
	struct nexthop *nh = NULL;
	struct fib6_table *table;
	struct fib6_nh *fib6_nh;
	int err = -EINVAL;
	int addr_type;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

	if (cfg->fc_type > RTN_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid route type");
		goto out;
	}

	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif
	if (cfg->fc_nh_id) {
		nh = nexthop_find_by_id(net, cfg->fc_nh_id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
			goto out;
		}
		err = fib6_check_nexthop(nh, cfg, extack);
		if (err)
			goto out;
	}

	err = -ENOBUFS;
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	err = -ENOMEM;
	rt = fib6_info_alloc(gfp_flags, !nh);
	if (!rt)
		goto out;

	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
					       extack);
	if (IS_ERR(rt->fib6_metrics)) {
		err = PTR_ERR(rt->fib6_metrics);
		/* Do not leave garbage there. */
		rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
		goto out;
	}

	if (cfg->fc_flags & RTF_ADDRCONF)
		rt->dst_nocount = true;

	if (cfg->fc_flags & RTF_EXPIRES)
		fib6_set_expires(rt, jiffies +
				 clock_t_to_jiffies(cfg->fc_expires));
	else
		fib6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->fib6_protocol = cfg->fc_protocol;

	rt->fib6_table = table;
	rt->fib6_metric = cfg->fc_metric;
	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;

	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->fib6_dst.plen = cfg->fc_dst_len;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->fib6_src.plen = cfg->fc_src_len;
#endif
	if (nh) {
		if (rt->fib6_src.plen) {
			NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
			goto out;
		}
		if (!nexthop_get(nh)) {
			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
			goto out;
		}
		rt->nh = nh;
		fib6_nh = nexthop_fib6_nh(rt->nh);
	} else {
		err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
		if (err)
			goto out;

		fib6_nh = rt->fib6_nh;

		/* We cannot add true routes via loopback here, they would
		 * result in kernel looping; promote them to reject routes
		 */
		addr_type = ipv6_addr_type(&cfg->fc_dst);
		if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
				   addr_type))
			rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
	}

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		struct net_device *dev = fib6_nh->fib_nh_dev;

		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
		rt->fib6_prefsrc.plen = 128;
	} else
		rt->fib6_prefsrc.plen = 0;

	return rt;
out:
	fib6_info_release(rt);
	return ERR_PTR(err);
}

int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
		  struct netlink_ext_ack *extack)
{
	struct fib6_info *rt;
	int err;

	rt = ip6_route_info_create(cfg, gfp_flags, extack);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
	fib6_info_release(rt);

	return err;
}
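
/* Illustrative sketch (hypothetical addresses): an RTM_NEWROUTE request
 * for "2001:db8::/64 via fe80::1 dev eth0" is parsed into a fib6_config
 * and ends up here, roughly as:
 *
 *	struct fib6_config cfg = {
 *		.fc_table	= RT6_TABLE_MAIN,
 *		.fc_dst		= ...,		(2001:db8::)
 *		.fc_dst_len	= 64,
 *		.fc_gateway	= ...,		(fe80::1)
 *		.fc_ifindex	= ...,		(eth0's ifindex)
 *		.fc_flags	= RTF_UP | RTF_GATEWAY,
 *		.fc_nlinfo	= { .nl_net = net },
 *	};
 *	err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
 */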
3734
__ip6_del_rt(struct fib6_info * rt,struct nl_info * info)3735 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3736 {
3737 struct net *net = info->nl_net;
3738 struct fib6_table *table;
3739 int err;
3740
3741 if (rt == net->ipv6.fib6_null_entry) {
3742 err = -ENOENT;
3743 goto out;
3744 }
3745
3746 table = rt->fib6_table;
3747 spin_lock_bh(&table->tb6_lock);
3748 err = fib6_del(rt, info);
3749 spin_unlock_bh(&table->tb6_lock);
3750
3751 out:
3752 fib6_info_release(rt);
3753 return err;
3754 }
3755
3756 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3757 {
3758 struct nl_info info = {
3759 .nl_net = net,
3760 .skip_notify = skip_notify
3761 };
3762
3763 return __ip6_del_rt(rt, &info);
3764 }
3765
3766 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3767 {
3768 struct nl_info *info = &cfg->fc_nlinfo;
3769 struct net *net = info->nl_net;
3770 struct sk_buff *skb = NULL;
3771 struct fib6_table *table;
3772 int err = -ENOENT;
3773
3774 if (rt == net->ipv6.fib6_null_entry)
3775 goto out_put;
3776 table = rt->fib6_table;
3777 spin_lock_bh(&table->tb6_lock);
3778
3779 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3780 struct fib6_info *sibling, *next_sibling;
3781 struct fib6_node *fn;
3782
3783 /* prefer to send a single notification with all hops */
3784 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3785 if (skb) {
3786 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3787
3788 if (rt6_fill_node(net, skb, rt, NULL,
3789 NULL, NULL, 0, RTM_DELROUTE,
3790 info->portid, seq, 0) < 0) {
3791 kfree_skb(skb);
3792 skb = NULL;
3793 } else
3794 info->skip_notify = 1;
3795 }
3796
3797 /* 'rt' points to the first sibling route. If it is not the
3798 * leaf, then we do not need to send a notification. Otherwise,
3799 * we need to check if the last sibling has a next route or not
3800 * and emit a replace or delete notification, respectively.
3801 */
3802 info->skip_notify_kernel = 1;
3803 fn = rcu_dereference_protected(rt->fib6_node,
3804 lockdep_is_held(&table->tb6_lock));
3805 if (rcu_access_pointer(fn->leaf) == rt) {
3806 struct fib6_info *last_sibling, *replace_rt;
3807
3808 last_sibling = list_last_entry(&rt->fib6_siblings,
3809 struct fib6_info,
3810 fib6_siblings);
3811 replace_rt = rcu_dereference_protected(
3812 last_sibling->fib6_next,
3813 lockdep_is_held(&table->tb6_lock));
3814 if (replace_rt)
3815 call_fib6_entry_notifiers_replace(net,
3816 replace_rt);
3817 else
3818 call_fib6_multipath_entry_notifiers(net,
3819 FIB_EVENT_ENTRY_DEL,
3820 rt, rt->fib6_nsiblings,
3821 NULL);
3822 }
3823 list_for_each_entry_safe(sibling, next_sibling,
3824 &rt->fib6_siblings,
3825 fib6_siblings) {
3826 err = fib6_del(sibling, info);
3827 if (err)
3828 goto out_unlock;
3829 }
3830 }
3831
3832 err = fib6_del(rt, info);
3833 out_unlock:
3834 spin_unlock_bh(&table->tb6_lock);
3835 out_put:
3836 fib6_info_release(rt);
3837
3838 if (skb) {
3839 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3840 info->nlh, gfp_any());
3841 }
3842 return err;
3843 }
3844
3845 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3846 {
3847 int rc = -ESRCH;
3848
3849 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3850 goto out;
3851
3852 if (cfg->fc_flags & RTF_GATEWAY &&
3853 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3854 goto out;
3855
3856 rc = rt6_remove_exception_rt(rt);
3857 out:
3858 return rc;
3859 }
3860
3861 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3862 struct fib6_nh *nh)
3863 {
3864 struct fib6_result res = {
3865 .f6i = rt,
3866 .nh = nh,
3867 };
3868 struct rt6_info *rt_cache;
3869
3870 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3871 if (rt_cache)
3872 return __ip6_del_cached_rt(rt_cache, cfg);
3873
3874 return 0;
3875 }
3876
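/* Routes using an external nexthop object keep cached exception
 * routes per fib6_nh, so deletion has to walk every fib6_nh in the
 * group and try to remove a matching exception from each.
 */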
3877 struct fib6_nh_del_cached_rt_arg {
3878 struct fib6_config *cfg;
3879 struct fib6_info *f6i;
3880 };
3881
3882 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
3883 {
3884 struct fib6_nh_del_cached_rt_arg *arg = _arg;
3885 int rc;
3886
3887 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
3888 return rc != -ESRCH ? rc : 0;
3889 }
3890
3891 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
3892 {
3893 struct fib6_nh_del_cached_rt_arg arg = {
3894 .cfg = cfg,
3895 .f6i = f6i
3896 };
3897
3898 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
3899 }
3900
3901 static int ip6_route_del(struct fib6_config *cfg,
3902 struct netlink_ext_ack *extack)
3903 {
3904 struct fib6_table *table;
3905 struct fib6_info *rt;
3906 struct fib6_node *fn;
3907 int err = -ESRCH;
3908
3909 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3910 if (!table) {
3911 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3912 return err;
3913 }
3914
3915 rcu_read_lock();
3916
3917 fn = fib6_locate(&table->tb6_root,
3918 &cfg->fc_dst, cfg->fc_dst_len,
3919 &cfg->fc_src, cfg->fc_src_len,
3920 !(cfg->fc_flags & RTF_CACHE));
3921
3922 if (fn) {
3923 for_each_fib6_node_rt_rcu(fn) {
3924 struct fib6_nh *nh;
3925
3926 if (rt->nh && cfg->fc_nh_id &&
3927 rt->nh->id != cfg->fc_nh_id)
3928 continue;
3929
3930 if (cfg->fc_flags & RTF_CACHE) {
3931 int rc = 0;
3932
3933 if (rt->nh) {
3934 rc = ip6_del_cached_rt_nh(cfg, rt);
3935 } else if (cfg->fc_nh_id) {
3936 continue;
3937 } else {
3938 nh = rt->fib6_nh;
3939 rc = ip6_del_cached_rt(cfg, rt, nh);
3940 }
3941 if (rc != -ESRCH) {
3942 rcu_read_unlock();
3943 return rc;
3944 }
3945 continue;
3946 }
3947
3948 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3949 continue;
3950 if (cfg->fc_protocol &&
3951 cfg->fc_protocol != rt->fib6_protocol)
3952 continue;
3953
3954 if (rt->nh) {
3955 if (!fib6_info_hold_safe(rt))
3956 continue;
3957 rcu_read_unlock();
3958
3959 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3960 }
3961 if (cfg->fc_nh_id)
3962 continue;
3963
3964 nh = rt->fib6_nh;
3965 if (cfg->fc_ifindex &&
3966 (!nh->fib_nh_dev ||
3967 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3968 continue;
3969 if (cfg->fc_flags & RTF_GATEWAY &&
3970 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3971 continue;
3972 if (!fib6_info_hold_safe(rt))
3973 continue;
3974 rcu_read_unlock();
3975
3976 /* if gateway was specified only delete the one hop */
3977 if (cfg->fc_flags & RTF_GATEWAY)
3978 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3979
3980 return __ip6_del_rt_siblings(rt, cfg);
3981 }
3982 }
3983 rcu_read_unlock();
3984
3985 return err;
3986 }
3987
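/* Handle an ICMPv6 Redirect (RFC 4861, section 8): validate the
 * message and its ND options, update the neighbour cache for the
 * new first hop, install a cached (exception) route towards the
 * destination via the new gateway and raise a netevent for
 * interested listeners.
 */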
3988 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3989 {
3990 struct netevent_redirect netevent;
3991 struct rt6_info *rt, *nrt = NULL;
3992 struct fib6_result res = {};
3993 struct ndisc_options ndopts;
3994 struct inet6_dev *in6_dev;
3995 struct neighbour *neigh;
3996 struct rd_msg *msg;
3997 int optlen, on_link;
3998 u8 *lladdr;
3999
4000 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4001 optlen -= sizeof(*msg);
4002
4003 if (optlen < 0) {
4004 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4005 return;
4006 }
4007
4008 msg = (struct rd_msg *)icmp6_hdr(skb);
4009
4010 if (ipv6_addr_is_multicast(&msg->dest)) {
4011 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4012 return;
4013 }
4014
4015 on_link = 0;
4016 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4017 on_link = 1;
4018 } else if (ipv6_addr_type(&msg->target) !=
4019 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4020 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4021 return;
4022 }
4023
4024 in6_dev = __in6_dev_get(skb->dev);
4025 if (!in6_dev)
4026 return;
4027 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
4028 return;
4029
4030 /* RFC2461 8.1:
4031 * The IP source address of the Redirect MUST be the same as the current
4032 * first-hop router for the specified ICMP Destination Address.
4033 */
4034
4035 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4036 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4037 return;
4038 }
4039
4040 lladdr = NULL;
4041 if (ndopts.nd_opts_tgt_lladdr) {
4042 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4043 skb->dev);
4044 if (!lladdr) {
4045 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4046 return;
4047 }
4048 }
4049
4050 rt = (struct rt6_info *) dst;
4051 if (rt->rt6i_flags & RTF_REJECT) {
4052 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4053 return;
4054 }
4055
4056 /* Redirect received -> path was valid.
4057 * Look, redirects are sent only in response to data packets,
4058 * so that this nexthop apparently is reachable. --ANK
4059 */
4060 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4061
4062 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4063 if (!neigh)
4064 return;
4065
4066 /*
4067 * We have finally decided to accept it.
4068 */
4069
4070 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4071 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4072 NEIGH_UPDATE_F_OVERRIDE|
4073 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4074 NEIGH_UPDATE_F_ISROUTER)),
4075 NDISC_REDIRECT, &ndopts);
4076
4077 rcu_read_lock();
4078 res.f6i = rcu_dereference(rt->from);
4079 if (!res.f6i)
4080 goto out;
4081
4082 if (res.f6i->nh) {
4083 struct fib6_nh_match_arg arg = {
4084 .dev = dst->dev,
4085 .gw = &rt->rt6i_gateway,
4086 };
4087
4088 nexthop_for_each_fib6_nh(res.f6i->nh,
4089 fib6_nh_find_match, &arg);
4090
4091 /* The fib6_info uses a nexthop object none of whose fib6_nh
4092 * entries use dst->dev; this should be impossible.
4093 */
4094 if (!arg.match)
4095 goto out;
4096 res.nh = arg.match;
4097 } else {
4098 res.nh = res.f6i->fib6_nh;
4099 }
4100
4101 res.fib6_flags = res.f6i->fib6_flags;
4102 res.fib6_type = res.f6i->fib6_type;
4103 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4104 if (!nrt)
4105 goto out;
4106
4107 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4108 if (on_link)
4109 nrt->rt6i_flags &= ~RTF_GATEWAY;
4110
4111 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4112
4113 /* rt6_insert_exception() will take care of duplicated exceptions */
4114 if (rt6_insert_exception(nrt, &res)) {
4115 dst_release_immediate(&nrt->dst);
4116 goto out;
4117 }
4118
4119 netevent.old = &rt->dst;
4120 netevent.new = &nrt->dst;
4121 netevent.daddr = &msg->dest;
4122 netevent.neigh = neigh;
4123 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4124
4125 out:
4126 rcu_read_unlock();
4127 neigh_release(neigh);
4128 }
4129
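/* Route Information Options (RFC 4191): router advertisements can
 * carry more-specific routes in addition to the default route; they
 * are installed with RTF_ROUTEINFO and protocol RTPROT_RA.
 */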
4130 #ifdef CONFIG_IPV6_ROUTE_INFO
4131 static struct fib6_info *rt6_get_route_info(struct net *net,
4132 const struct in6_addr *prefix, int prefixlen,
4133 const struct in6_addr *gwaddr,
4134 struct net_device *dev)
4135 {
4136 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4137 int ifindex = dev->ifindex;
4138 struct fib6_node *fn;
4139 struct fib6_info *rt = NULL;
4140 struct fib6_table *table;
4141
4142 table = fib6_get_table(net, tb_id);
4143 if (!table)
4144 return NULL;
4145
4146 rcu_read_lock();
4147 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4148 if (!fn)
4149 goto out;
4150
4151 for_each_fib6_node_rt_rcu(fn) {
4152 /* these routes do not use nexthops */
4153 if (rt->nh)
4154 continue;
4155 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4156 continue;
4157 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4158 !rt->fib6_nh->fib_nh_gw_family)
4159 continue;
4160 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4161 continue;
4162 if (!fib6_info_hold_safe(rt))
4163 continue;
4164 break;
4165 }
4166 out:
4167 rcu_read_unlock();
4168 return rt;
4169 }
4170
4171 static struct fib6_info *rt6_add_route_info(struct net *net,
4172 const struct in6_addr *prefix, int prefixlen,
4173 const struct in6_addr *gwaddr,
4174 struct net_device *dev,
4175 unsigned int pref)
4176 {
4177 struct fib6_config cfg = {
4178 .fc_metric = IP6_RT_PRIO_USER,
4179 .fc_ifindex = dev->ifindex,
4180 .fc_dst_len = prefixlen,
4181 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4182 RTF_UP | RTF_PREF(pref),
4183 .fc_protocol = RTPROT_RA,
4184 .fc_type = RTN_UNICAST,
4185 .fc_nlinfo.portid = 0,
4186 .fc_nlinfo.nlh = NULL,
4187 .fc_nlinfo.nl_net = net,
4188 };
4189
4190 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4191 cfg.fc_dst = *prefix;
4192 cfg.fc_gateway = *gwaddr;
4193
4194 /* We should treat it as a default route if prefix length is 0. */
4195 if (!prefixlen)
4196 cfg.fc_flags |= RTF_DEFAULT;
4197
4198 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4199
4200 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4201 }
4202 #endif
4203
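/* Look up the RA-learned default router entry (RTF_ADDRCONF and
 * RTF_DEFAULT both set) that points at addr through dev, taking a
 * reference on it if found.
 */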
4204 struct fib6_info *rt6_get_dflt_router(struct net *net,
4205 const struct in6_addr *addr,
4206 struct net_device *dev)
4207 {
4208 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4209 struct fib6_info *rt;
4210 struct fib6_table *table;
4211
4212 table = fib6_get_table(net, tb_id);
4213 if (!table)
4214 return NULL;
4215
4216 rcu_read_lock();
4217 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4218 struct fib6_nh *nh;
4219
4220 /* RA routes do not use nexthops */
4221 if (rt->nh)
4222 continue;
4223
4224 nh = rt->fib6_nh;
4225 if (dev == nh->fib_nh_dev &&
4226 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4227 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4228 break;
4229 }
4230 if (rt && !fib6_info_hold_safe(rt))
4231 rt = NULL;
4232 rcu_read_unlock();
4233 return rt;
4234 }
4235
4236 struct fib6_info *rt6_add_dflt_router(struct net *net,
4237 const struct in6_addr *gwaddr,
4238 struct net_device *dev,
4239 unsigned int pref,
4240 u32 defrtr_usr_metric)
4241 {
4242 struct fib6_config cfg = {
4243 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4244 .fc_metric = defrtr_usr_metric,
4245 .fc_ifindex = dev->ifindex,
4246 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4247 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4248 .fc_protocol = RTPROT_RA,
4249 .fc_type = RTN_UNICAST,
4250 .fc_nlinfo.portid = 0,
4251 .fc_nlinfo.nlh = NULL,
4252 .fc_nlinfo.nl_net = net,
4253 };
4254
4255 cfg.fc_gateway = *gwaddr;
4256
4257 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4258 struct fib6_table *table;
4259
4260 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4261 if (table)
4262 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4263 }
4264
4265 return rt6_get_dflt_router(net, gwaddr, dev);
4266 }
4267
4268 static void __rt6_purge_dflt_routers(struct net *net,
4269 struct fib6_table *table)
4270 {
4271 struct fib6_info *rt;
4272
4273 restart:
4274 rcu_read_lock();
4275 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4276 struct net_device *dev = fib6_info_nh_dev(rt);
4277 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4278
4279 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4280 (!idev || idev->cnf.accept_ra != 2) &&
4281 fib6_info_hold_safe(rt)) {
4282 rcu_read_unlock();
4283 ip6_del_rt(net, rt, false);
4284 goto restart;
4285 }
4286 }
4287 rcu_read_unlock();
4288
4289 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4290 }
4291
4292 void rt6_purge_dflt_routers(struct net *net)
4293 {
4294 struct fib6_table *table;
4295 struct hlist_head *head;
4296 unsigned int h;
4297
4298 rcu_read_lock();
4299
4300 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4301 head = &net->ipv6.fib_table_hash[h];
4302 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4303 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4304 __rt6_purge_dflt_routers(net, table);
4305 }
4306 }
4307
4308 rcu_read_unlock();
4309 }
4310
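/* Translate the legacy SIOCADDRT/SIOCDELRT in6_rtmsg into a
 * fib6_config so the ioctl path can share ip6_route_add() and
 * ip6_route_del() with the netlink path.
 */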
4311 static void rtmsg_to_fib6_config(struct net *net,
4312 struct in6_rtmsg *rtmsg,
4313 struct fib6_config *cfg)
4314 {
4315 *cfg = (struct fib6_config){
4316 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4317 : RT6_TABLE_MAIN,
4318 .fc_ifindex = rtmsg->rtmsg_ifindex,
4319 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
4320 .fc_expires = rtmsg->rtmsg_info,
4321 .fc_dst_len = rtmsg->rtmsg_dst_len,
4322 .fc_src_len = rtmsg->rtmsg_src_len,
4323 .fc_flags = rtmsg->rtmsg_flags,
4324 .fc_type = rtmsg->rtmsg_type,
4325
4326 .fc_nlinfo.nl_net = net,
4327
4328 .fc_dst = rtmsg->rtmsg_dst,
4329 .fc_src = rtmsg->rtmsg_src,
4330 .fc_gateway = rtmsg->rtmsg_gateway,
4331 };
4332 }
4333
4334 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4335 {
4336 struct fib6_config cfg;
4337 int err;
4338
4339 if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4340 return -EINVAL;
4341 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4342 return -EPERM;
4343
4344 rtmsg_to_fib6_config(net, rtmsg, &cfg);
4345
4346 rtnl_lock();
4347 switch (cmd) {
4348 case SIOCADDRT:
4349 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4350 break;
4351 case SIOCDELRT:
4352 err = ip6_route_del(&cfg, NULL);
4353 break;
4354 }
4355 rtnl_unlock();
4356 return err;
4357 }
4358
4359 /*
4360 * Drop the packet on the floor
4361 */
4362
4363 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4364 {
4365 struct dst_entry *dst = skb_dst(skb);
4366 struct net *net = dev_net(dst->dev);
4367 struct inet6_dev *idev;
4368 int type;
4369
4370 if (netif_is_l3_master(skb->dev) &&
4371 dst->dev == net->loopback_dev)
4372 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4373 else
4374 idev = ip6_dst_idev(dst);
4375
4376 switch (ipstats_mib_noroutes) {
4377 case IPSTATS_MIB_INNOROUTES:
4378 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4379 if (type == IPV6_ADDR_ANY) {
4380 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4381 break;
4382 }
4383 fallthrough;
4384 case IPSTATS_MIB_OUTNOROUTES:
4385 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4386 break;
4387 }
4388
4389 /* Start over by dropping the dst for l3mdev case */
4390 if (netif_is_l3_master(skb->dev))
4391 skb_dst_drop(skb);
4392
4393 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4394 kfree_skb(skb);
4395 return 0;
4396 }
4397
4398 static int ip6_pkt_discard(struct sk_buff *skb)
4399 {
4400 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4401 }
4402
4403 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4404 {
4405 skb->dev = skb_dst(skb)->dev;
4406 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4407 }
4408
4409 static int ip6_pkt_prohibit(struct sk_buff *skb)
4410 {
4411 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4412 }
4413
4414 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4415 {
4416 skb->dev = skb_dst(skb)->dev;
4417 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4418 }
4419
4420 /*
4421 * Allocate a dst for local (unicast / anycast) address.
4422 */
4423
4424 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4425 struct inet6_dev *idev,
4426 const struct in6_addr *addr,
4427 bool anycast, gfp_t gfp_flags)
4428 {
4429 struct fib6_config cfg = {
4430 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4431 .fc_ifindex = idev->dev->ifindex,
4432 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4433 .fc_dst = *addr,
4434 .fc_dst_len = 128,
4435 .fc_protocol = RTPROT_KERNEL,
4436 .fc_nlinfo.nl_net = net,
4437 .fc_ignore_dev_down = true,
4438 };
4439 struct fib6_info *f6i;
4440
4441 if (anycast) {
4442 cfg.fc_type = RTN_ANYCAST;
4443 cfg.fc_flags |= RTF_ANYCAST;
4444 } else {
4445 cfg.fc_type = RTN_LOCAL;
4446 cfg.fc_flags |= RTF_LOCAL;
4447 }
4448
4449 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4450 if (!IS_ERR(f6i))
4451 f6i->dst_nocount = true;
4452 return f6i;
4453 }
4454
4455 /* remove deleted ip from prefsrc entries */
4456 struct arg_dev_net_ip {
4457 struct net_device *dev;
4458 struct net *net;
4459 struct in6_addr *addr;
4460 };
4461
4462 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4463 {
4464 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4465 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4466 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4467
4468 if (!rt->nh &&
4469 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4470 rt != net->ipv6.fib6_null_entry &&
4471 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4472 spin_lock_bh(&rt6_exception_lock);
4473 /* remove prefsrc entry */
4474 rt->fib6_prefsrc.plen = 0;
4475 spin_unlock_bh(&rt6_exception_lock);
4476 }
4477 return 0;
4478 }
4479
4480 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4481 {
4482 struct net *net = dev_net(ifp->idev->dev);
4483 struct arg_dev_net_ip adni = {
4484 .dev = ifp->idev->dev,
4485 .net = net,
4486 .addr = &ifp->addr,
4487 };
4488 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4489 }
4490
4491 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4492
4493 /* Remove routers and update dst entries when a gateway turns into a host. */
4494 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4495 {
4496 struct in6_addr *gateway = (struct in6_addr *)arg;
4497 struct fib6_nh *nh;
4498
4499 /* RA routes do not use nexthops */
4500 if (rt->nh)
4501 return 0;
4502
4503 nh = rt->fib6_nh;
4504 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4505 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4506 return -1;
4507
4508 /* Further clean up cached routes in exception table.
4509 * This is needed because cached route may have a different
4510 * gateway than its 'parent' in the case of an ip redirect.
4511 */
4512 fib6_nh_exceptions_clean_tohost(nh, gateway);
4513
4514 return 0;
4515 }
4516
4517 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4518 {
4519 fib6_clean_all(net, fib6_clean_tohost, gateway);
4520 }
4521
4522 struct arg_netdev_event {
4523 const struct net_device *dev;
4524 union {
4525 unsigned char nh_flags;
4526 unsigned long event;
4527 };
4528 };
4529
4530 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4531 {
4532 struct fib6_info *iter;
4533 struct fib6_node *fn;
4534
4535 fn = rcu_dereference_protected(rt->fib6_node,
4536 lockdep_is_held(&rt->fib6_table->tb6_lock));
4537 iter = rcu_dereference_protected(fn->leaf,
4538 lockdep_is_held(&rt->fib6_table->tb6_lock));
4539 while (iter) {
4540 if (iter->fib6_metric == rt->fib6_metric &&
4541 rt6_qualify_for_ecmp(iter))
4542 return iter;
4543 iter = rcu_dereference_protected(iter->fib6_next,
4544 lockdep_is_held(&rt->fib6_table->tb6_lock));
4545 }
4546
4547 return NULL;
4548 }
4549
4550 /* only called for fib entries with builtin fib6_nh */
4551 static bool rt6_is_dead(const struct fib6_info *rt)
4552 {
4553 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4554 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4555 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4556 return true;
4557
4558 return false;
4559 }
4560
4561 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4562 {
4563 struct fib6_info *iter;
4564 int total = 0;
4565
4566 if (!rt6_is_dead(rt))
4567 total += rt->fib6_nh->fib_nh_weight;
4568
4569 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4570 if (!rt6_is_dead(iter))
4571 total += iter->fib6_nh->fib_nh_weight;
4572 }
4573
4574 return total;
4575 }
4576
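/* Hash-threshold multipath: each live nexthop gets an upper bound
 * equal to its cumulative weight scaled into the 31-bit hash space,
 * i.e. (weight_so_far << 31) / total - 1, rounded; the flow hash is
 * compared against these bounds to pick a nexthop. Dead nexthops get
 * an upper bound of -1 so they are never selected.
 */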
4577 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4578 {
4579 int upper_bound = -1;
4580
4581 if (!rt6_is_dead(rt)) {
4582 *weight += rt->fib6_nh->fib_nh_weight;
4583 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4584 total) - 1;
4585 }
4586 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4587 }
4588
4589 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4590 {
4591 struct fib6_info *iter;
4592 int weight = 0;
4593
4594 rt6_upper_bound_set(rt, &weight, total);
4595
4596 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4597 rt6_upper_bound_set(iter, &weight, total);
4598 }
4599
4600 void rt6_multipath_rebalance(struct fib6_info *rt)
4601 {
4602 struct fib6_info *first;
4603 int total;
4604
4605 /* In case the entire multipath route was marked for flushing,
4606 * then there is no need to rebalance upon the removal of every
4607 * sibling route.
4608 */
4609 if (!rt->fib6_nsiblings || rt->should_flush)
4610 return;
4611
4612 /* During lookup routes are evaluated in order, so we need to
4613 * make sure upper bounds are assigned from the first sibling
4614 * onwards.
4615 */
4616 first = rt6_multipath_first_sibling(rt);
4617 if (WARN_ON_ONCE(!first))
4618 return;
4619
4620 total = rt6_multipath_total_weight(first);
4621 rt6_multipath_upper_bound_set(first, total);
4622 }
4623
4624 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4625 {
4626 const struct arg_netdev_event *arg = p_arg;
4627 struct net *net = dev_net(arg->dev);
4628
4629 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4630 rt->fib6_nh->fib_nh_dev == arg->dev) {
4631 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4632 fib6_update_sernum_upto_root(net, rt);
4633 rt6_multipath_rebalance(rt);
4634 }
4635
4636 return 0;
4637 }
4638
4639 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4640 {
4641 struct arg_netdev_event arg = {
4642 .dev = dev,
4643 {
4644 .nh_flags = nh_flags,
4645 },
4646 };
4647
4648 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4649 arg.nh_flags |= RTNH_F_LINKDOWN;
4650
4651 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4652 }
4653
4654 /* only called for fib entries with inline fib6_nh */
4655 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4656 const struct net_device *dev)
4657 {
4658 struct fib6_info *iter;
4659
4660 if (rt->fib6_nh->fib_nh_dev == dev)
4661 return true;
4662 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4663 if (iter->fib6_nh->fib_nh_dev == dev)
4664 return true;
4665
4666 return false;
4667 }
4668
4669 static void rt6_multipath_flush(struct fib6_info *rt)
4670 {
4671 struct fib6_info *iter;
4672
4673 rt->should_flush = 1;
4674 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4675 iter->should_flush = 1;
4676 }
4677
4678 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4679 const struct net_device *down_dev)
4680 {
4681 struct fib6_info *iter;
4682 unsigned int dead = 0;
4683
4684 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4685 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4686 dead++;
4687 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4688 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4689 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4690 dead++;
4691
4692 return dead;
4693 }
4694
4695 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4696 const struct net_device *dev,
4697 unsigned char nh_flags)
4698 {
4699 struct fib6_info *iter;
4700
4701 if (rt->fib6_nh->fib_nh_dev == dev)
4702 rt->fib6_nh->fib_nh_flags |= nh_flags;
4703 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4704 if (iter->fib6_nh->fib_nh_dev == dev)
4705 iter->fib6_nh->fib_nh_flags |= nh_flags;
4706 }
4707
4708 /* called with write lock held for table with rt */
4709 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4710 {
4711 const struct arg_netdev_event *arg = p_arg;
4712 const struct net_device *dev = arg->dev;
4713 struct net *net = dev_net(dev);
4714
4715 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4716 return 0;
4717
4718 switch (arg->event) {
4719 case NETDEV_UNREGISTER:
4720 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4721 case NETDEV_DOWN:
4722 if (rt->should_flush)
4723 return -1;
4724 if (!rt->fib6_nsiblings)
4725 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4726 if (rt6_multipath_uses_dev(rt, dev)) {
4727 unsigned int count;
4728
4729 count = rt6_multipath_dead_count(rt, dev);
4730 if (rt->fib6_nsiblings + 1 == count) {
4731 rt6_multipath_flush(rt);
4732 return -1;
4733 }
4734 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4735 RTNH_F_LINKDOWN);
4736 fib6_update_sernum(net, rt);
4737 rt6_multipath_rebalance(rt);
4738 }
4739 return -2;
4740 case NETDEV_CHANGE:
4741 if (rt->fib6_nh->fib_nh_dev != dev ||
4742 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4743 break;
4744 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4745 rt6_multipath_rebalance(rt);
4746 break;
4747 }
4748
4749 return 0;
4750 }
4751
4752 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4753 {
4754 struct arg_netdev_event arg = {
4755 .dev = dev,
4756 {
4757 .event = event,
4758 },
4759 };
4760 struct net *net = dev_net(dev);
4761
4762 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4763 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4764 else
4765 fib6_clean_all(net, fib6_ifdown, &arg);
4766 }
4767
4768 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4769 {
4770 rt6_sync_down_dev(dev, event);
4771 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4772 neigh_ifdown(&nd_tbl, dev);
4773 }
4774
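/* State passed through fib6_clean_all() when a device MTU change
 * has to be propagated to routes and their cached exceptions.
 */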
4775 struct rt6_mtu_change_arg {
4776 struct net_device *dev;
4777 unsigned int mtu;
4778 struct fib6_info *f6i;
4779 };
4780
4781 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4782 {
4783 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4784 struct fib6_info *f6i = arg->f6i;
4785
4786 /* For an administrative MTU increase, there is no way to discover
4787 * an IPv6 PMTU increase, so the PMTU must be raised here.
4788 * Since RFC 1981 doesn't cover administrative MTU increases,
4789 * updating the PMTU on increase is a MUST (e.g. for jumbo frames).
4790 */
4791 if (nh->fib_nh_dev == arg->dev) {
4792 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4793 u32 mtu = f6i->fib6_pmtu;
4794
4795 if (mtu >= arg->mtu ||
4796 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4797 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4798
4799 spin_lock_bh(&rt6_exception_lock);
4800 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4801 spin_unlock_bh(&rt6_exception_lock);
4802 }
4803
4804 return 0;
4805 }
4806
4807 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4808 {
4809 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4810 struct inet6_dev *idev;
4811
4812 /* In IPv6, PMTU discovery is not optional,
4813 * so an RTAX_MTU lock cannot disable it.
4814 * We still use this lock to block changes
4815 * caused by addrconf/ndisc.
4816 */
4817
4818 idev = __in6_dev_get(arg->dev);
4819 if (!idev)
4820 return 0;
4821
4822 if (fib6_metric_locked(f6i, RTAX_MTU))
4823 return 0;
4824
4825 arg->f6i = f6i;
4826 if (f6i->nh) {
4827 /* fib6_nh_mtu_change only returns 0, so this is safe */
4828 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4829 arg);
4830 }
4831
4832 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4833 }
4834
4835 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4836 {
4837 struct rt6_mtu_change_arg arg = {
4838 .dev = dev,
4839 .mtu = mtu,
4840 };
4841
4842 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4843 }
4844
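/* Netlink attribute policy for RTM_*ROUTE requests; attribute types
 * above RTA_DPORT are subject to strict validation.
 */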
4845 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4846 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4847 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4848 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4849 [RTA_OIF] = { .type = NLA_U32 },
4850 [RTA_IIF] = { .type = NLA_U32 },
4851 [RTA_PRIORITY] = { .type = NLA_U32 },
4852 [RTA_METRICS] = { .type = NLA_NESTED },
4853 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4854 [RTA_PREF] = { .type = NLA_U8 },
4855 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4856 [RTA_ENCAP] = { .type = NLA_NESTED },
4857 [RTA_EXPIRES] = { .type = NLA_U32 },
4858 [RTA_UID] = { .type = NLA_U32 },
4859 [RTA_MARK] = { .type = NLA_U32 },
4860 [RTA_TABLE] = { .type = NLA_U32 },
4861 [RTA_IP_PROTO] = { .type = NLA_U8 },
4862 [RTA_SPORT] = { .type = NLA_U16 },
4863 [RTA_DPORT] = { .type = NLA_U16 },
4864 [RTA_NH_ID] = { .type = NLA_U32 },
4865 };
4866
4867 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4868 struct fib6_config *cfg,
4869 struct netlink_ext_ack *extack)
4870 {
4871 struct rtmsg *rtm;
4872 struct nlattr *tb[RTA_MAX+1];
4873 unsigned int pref;
4874 int err;
4875
4876 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4877 rtm_ipv6_policy, extack);
4878 if (err < 0)
4879 goto errout;
4880
4881 err = -EINVAL;
4882 rtm = nlmsg_data(nlh);
4883
4884 *cfg = (struct fib6_config){
4885 .fc_table = rtm->rtm_table,
4886 .fc_dst_len = rtm->rtm_dst_len,
4887 .fc_src_len = rtm->rtm_src_len,
4888 .fc_flags = RTF_UP,
4889 .fc_protocol = rtm->rtm_protocol,
4890 .fc_type = rtm->rtm_type,
4891
4892 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4893 .fc_nlinfo.nlh = nlh,
4894 .fc_nlinfo.nl_net = sock_net(skb->sk),
4895 };
4896
4897 if (rtm->rtm_type == RTN_UNREACHABLE ||
4898 rtm->rtm_type == RTN_BLACKHOLE ||
4899 rtm->rtm_type == RTN_PROHIBIT ||
4900 rtm->rtm_type == RTN_THROW)
4901 cfg->fc_flags |= RTF_REJECT;
4902
4903 if (rtm->rtm_type == RTN_LOCAL)
4904 cfg->fc_flags |= RTF_LOCAL;
4905
4906 if (rtm->rtm_flags & RTM_F_CLONED)
4907 cfg->fc_flags |= RTF_CACHE;
4908
4909 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4910
4911 if (tb[RTA_NH_ID]) {
4912 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
4913 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
4914 NL_SET_ERR_MSG(extack,
4915 "Nexthop specification and nexthop id are mutually exclusive");
4916 goto errout;
4917 }
4918 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
4919 }
4920
4921 if (tb[RTA_GATEWAY]) {
4922 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4923 cfg->fc_flags |= RTF_GATEWAY;
4924 }
4925 if (tb[RTA_VIA]) {
4926 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4927 goto errout;
4928 }
4929
4930 if (tb[RTA_DST]) {
4931 int plen = (rtm->rtm_dst_len + 7) >> 3;
4932
4933 if (nla_len(tb[RTA_DST]) < plen)
4934 goto errout;
4935
4936 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4937 }
4938
4939 if (tb[RTA_SRC]) {
4940 int plen = (rtm->rtm_src_len + 7) >> 3;
4941
4942 if (nla_len(tb[RTA_SRC]) < plen)
4943 goto errout;
4944
4945 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4946 }
4947
4948 if (tb[RTA_PREFSRC])
4949 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4950
4951 if (tb[RTA_OIF])
4952 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4953
4954 if (tb[RTA_PRIORITY])
4955 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4956
4957 if (tb[RTA_METRICS]) {
4958 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4959 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4960 }
4961
4962 if (tb[RTA_TABLE])
4963 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4964
4965 if (tb[RTA_MULTIPATH]) {
4966 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4967 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4968
4969 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4970 cfg->fc_mp_len, extack);
4971 if (err < 0)
4972 goto errout;
4973 }
4974
4975 if (tb[RTA_PREF]) {
4976 pref = nla_get_u8(tb[RTA_PREF]);
4977 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4978 pref != ICMPV6_ROUTER_PREF_HIGH)
4979 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4980 cfg->fc_flags |= RTF_PREF(pref);
4981 }
4982
4983 if (tb[RTA_ENCAP])
4984 cfg->fc_encap = tb[RTA_ENCAP];
4985
4986 if (tb[RTA_ENCAP_TYPE]) {
4987 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4988
4989 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
4990 if (err < 0)
4991 goto errout;
4992 }
4993
4994 if (tb[RTA_EXPIRES]) {
4995 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4996
4997 if (addrconf_finite_timeout(timeout)) {
4998 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4999 cfg->fc_flags |= RTF_EXPIRES;
5000 }
5001 }
5002
5003 err = 0;
5004 errout:
5005 return err;
5006 }
5007
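/* One entry per nexthop parsed out of an RTA_MULTIPATH request: the
 * fib6_info instances are created first and only inserted into the
 * FIB in a second pass.
 */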
5008 struct rt6_nh {
5009 struct fib6_info *fib6_info;
5010 struct fib6_config r_cfg;
5011 struct list_head next;
5012 };
5013
5014 static int ip6_route_info_append(struct net *net,
5015 struct list_head *rt6_nh_list,
5016 struct fib6_info *rt,
5017 struct fib6_config *r_cfg)
5018 {
5019 struct rt6_nh *nh;
5020 int err = -EEXIST;
5021
5022 list_for_each_entry(nh, rt6_nh_list, next) {
5023 /* check if fib6_info already exists */
5024 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5025 return err;
5026 }
5027
5028 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5029 if (!nh)
5030 return -ENOMEM;
5031 nh->fib6_info = rt;
5032 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5033 list_add_tail(&nh->next, rt6_nh_list);
5034
5035 return 0;
5036 }
5037
5038 static void ip6_route_mpath_notify(struct fib6_info *rt,
5039 struct fib6_info *rt_last,
5040 struct nl_info *info,
5041 __u16 nlflags)
5042 {
5043 /* If this is an APPEND route, then rt points to the first route
5044 * inserted and rt_last points to the last route inserted. Userspace
5045 * wants a consistent dump of the route which starts at the first
5046 * nexthop. Since sibling routes are always added at the end of
5047 * the list, find the first sibling of the last route appended.
5048 */
5049 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5050 rt = list_first_entry(&rt_last->fib6_siblings,
5051 struct fib6_info,
5052 fib6_siblings);
5053 }
5054
5055 if (rt)
5056 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5057 }
5058
5059 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5060 {
5061 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5062 bool should_notify = false;
5063 struct fib6_info *leaf;
5064 struct fib6_node *fn;
5065
5066 rcu_read_lock();
5067 fn = rcu_dereference(rt->fib6_node);
5068 if (!fn)
5069 goto out;
5070
5071 leaf = rcu_dereference(fn->leaf);
5072 if (!leaf)
5073 goto out;
5074
5075 if (rt == leaf ||
5076 (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5077 rt6_qualify_for_ecmp(leaf)))
5078 should_notify = true;
5079 out:
5080 rcu_read_unlock();
5081
5082 return should_notify;
5083 }
5084
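/* Add a multipath route: build one fib6_info per rtnexthop, insert
 * them one by one so they become siblings of the first, send a
 * single RTM_NEWROUTE notification covering all hops and unwind any
 * routes already inserted if a later insertion fails.
 */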
5085 static int ip6_route_multipath_add(struct fib6_config *cfg,
5086 struct netlink_ext_ack *extack)
5087 {
5088 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5089 struct nl_info *info = &cfg->fc_nlinfo;
5090 struct fib6_config r_cfg;
5091 struct rtnexthop *rtnh;
5092 struct fib6_info *rt;
5093 struct rt6_nh *err_nh;
5094 struct rt6_nh *nh, *nh_safe;
5095 __u16 nlflags;
5096 int remaining;
5097 int attrlen;
5098 int err = 1;
5099 int nhn = 0;
5100 int replace = (cfg->fc_nlinfo.nlh &&
5101 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5102 LIST_HEAD(rt6_nh_list);
5103
5104 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5105 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5106 nlflags |= NLM_F_APPEND;
5107
5108 remaining = cfg->fc_mp_len;
5109 rtnh = (struct rtnexthop *)cfg->fc_mp;
5110
5111 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5112 * fib6_info structs per nexthop
5113 */
5114 while (rtnh_ok(rtnh, remaining)) {
5115 memcpy(&r_cfg, cfg, sizeof(*cfg));
5116 if (rtnh->rtnh_ifindex)
5117 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5118
5119 attrlen = rtnh_attrlen(rtnh);
5120 if (attrlen > 0) {
5121 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5122
5123 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5124 if (nla) {
5125 r_cfg.fc_gateway = nla_get_in6_addr(nla);
5126 r_cfg.fc_flags |= RTF_GATEWAY;
5127 }
5128 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5129 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5130 if (nla)
5131 r_cfg.fc_encap_type = nla_get_u16(nla);
5132 }
5133
5134 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5135 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5136 if (IS_ERR(rt)) {
5137 err = PTR_ERR(rt);
5138 rt = NULL;
5139 goto cleanup;
5140 }
5141 if (!rt6_qualify_for_ecmp(rt)) {
5142 err = -EINVAL;
5143 NL_SET_ERR_MSG(extack,
5144 "Device only routes can not be added for IPv6 using the multipath API.");
5145 fib6_info_release(rt);
5146 goto cleanup;
5147 }
5148
5149 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5150
5151 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5152 rt, &r_cfg);
5153 if (err) {
5154 fib6_info_release(rt);
5155 goto cleanup;
5156 }
5157
5158 rtnh = rtnh_next(rtnh, &remaining);
5159 }
5160
5161 if (list_empty(&rt6_nh_list)) {
5162 NL_SET_ERR_MSG(extack,
5163 "Invalid nexthop configuration - no valid nexthops");
5164 return -EINVAL;
5165 }
5166
5167 /* for add and replace send one notification with all nexthops.
5168 * Skip the notification in fib6_add_rt2node and send one with
5169 * the full route when done
5170 */
5171 info->skip_notify = 1;
5172
5173 /* For add and replace, send one notification with all nexthops. For
5174 * append, send one notification with all appended nexthops.
5175 */
5176 info->skip_notify_kernel = 1;
5177
5178 err_nh = NULL;
5179 list_for_each_entry(nh, &rt6_nh_list, next) {
5180 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5181 fib6_info_release(nh->fib6_info);
5182
5183 if (!err) {
5184 /* save reference to last route successfully inserted */
5185 rt_last = nh->fib6_info;
5186
5187 /* save reference to first route for notification */
5188 if (!rt_notif)
5189 rt_notif = nh->fib6_info;
5190 }
5191
5192 /* nh->fib6_info is used or freed at this point, reset to NULL */
5193 nh->fib6_info = NULL;
5194 if (err) {
5195 if (replace && nhn)
5196 NL_SET_ERR_MSG_MOD(extack,
5197 "multipath route replace failed (check consistency of installed routes)");
5198 err_nh = nh;
5199 goto add_errout;
5200 }
5201
5202 /* Because each route is added as if it were a single route, we
5203 * remove these flags after the first nexthop: if there is a
5204 * collision, we have already failed to add the first nexthop
5205 * (fib6_add_rt2node() has rejected it); when replacing, the old
5206 * nexthops have been replaced by the first new one, and the rest
5207 * should be appended to it.
5208 */
5209 if (cfg->fc_nlinfo.nlh) {
5210 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5211 NLM_F_REPLACE);
5212 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5213 }
5214 nhn++;
5215 }
5216
5217 /* An in-kernel notification should only be sent in case the new
5218 * multipath route is added as the first route in the node, or if
5219 * it was appended to it. We pass 'rt_notif' since it is the first
5220 * sibling and might allow us to skip some checks in the replace case.
5221 */
5222 if (ip6_route_mpath_should_notify(rt_notif)) {
5223 enum fib_event_type fib_event;
5224
5225 if (rt_notif->fib6_nsiblings != nhn - 1)
5226 fib_event = FIB_EVENT_ENTRY_APPEND;
5227 else
5228 fib_event = FIB_EVENT_ENTRY_REPLACE;
5229
5230 err = call_fib6_multipath_entry_notifiers(info->nl_net,
5231 fib_event, rt_notif,
5232 nhn - 1, extack);
5233 if (err) {
5234 /* Delete all the siblings that were just added */
5235 err_nh = NULL;
5236 goto add_errout;
5237 }
5238 }
5239
5240 /* success ... tell user about new route */
5241 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5242 goto cleanup;
5243
5244 add_errout:
5245 /* send notification for routes that were added so that
5246 * the delete notifications sent by ip6_route_del are
5247 * coherent
5248 */
5249 if (rt_notif)
5250 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5251
5252 /* Delete routes that were already added */
5253 list_for_each_entry(nh, &rt6_nh_list, next) {
5254 if (err_nh == nh)
5255 break;
5256 ip6_route_del(&nh->r_cfg, extack);
5257 }
5258
5259 cleanup:
5260 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5261 if (nh->fib6_info)
5262 fib6_info_release(nh->fib6_info);
5263 list_del(&nh->next);
5264 kfree(nh);
5265 }
5266
5267 return err;
5268 }
5269
5270 static int ip6_route_multipath_del(struct fib6_config *cfg,
5271 struct netlink_ext_ack *extack)
5272 {
5273 struct fib6_config r_cfg;
5274 struct rtnexthop *rtnh;
5275 int last_err = 0;
5276 int remaining;
5277 int attrlen;
5278 int err;
5279
5280 remaining = cfg->fc_mp_len;
5281 rtnh = (struct rtnexthop *)cfg->fc_mp;
5282
5283 /* Parse a Multipath Entry */
5284 while (rtnh_ok(rtnh, remaining)) {
5285 memcpy(&r_cfg, cfg, sizeof(*cfg));
5286 if (rtnh->rtnh_ifindex)
5287 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5288
5289 attrlen = rtnh_attrlen(rtnh);
5290 if (attrlen > 0) {
5291 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5292
5293 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5294 if (nla) {
5295 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
5296 r_cfg.fc_flags |= RTF_GATEWAY;
5297 }
5298 }
5299 err = ip6_route_del(&r_cfg, extack);
5300 if (err)
5301 last_err = err;
5302
5303 rtnh = rtnh_next(rtnh, &remaining);
5304 }
5305
5306 return last_err;
5307 }
5308
5309 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5310 struct netlink_ext_ack *extack)
5311 {
5312 struct fib6_config cfg;
5313 int err;
5314
5315 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5316 if (err < 0)
5317 return err;
5318
5319 if (cfg.fc_nh_id &&
5320 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5321 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5322 return -EINVAL;
5323 }
5324
5325 if (cfg.fc_mp)
5326 return ip6_route_multipath_del(&cfg, extack);
5327 else {
5328 cfg.fc_delete_all_nh = 1;
5329 return ip6_route_del(&cfg, extack);
5330 }
5331 }
5332
5333 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5334 struct netlink_ext_ack *extack)
5335 {
5336 struct fib6_config cfg;
5337 int err;
5338
5339 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5340 if (err < 0)
5341 return err;
5342
5343 if (cfg.fc_metric == 0)
5344 cfg.fc_metric = IP6_RT_PRIO_USER;
5345
5346 if (cfg.fc_mp)
5347 return ip6_route_multipath_add(&cfg, extack);
5348 else
5349 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5350 }
5351
5352 /* add the overhead of this fib6_nh to nexthop_len */
5353 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5354 {
5355 int *nexthop_len = arg;
5356
5357 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5358 + NLA_ALIGN(sizeof(struct rtnexthop))
5359 + nla_total_size(16); /* RTA_GATEWAY */
5360
5361 if (nh->fib_nh_lws) {
5362 /* RTA_ENCAP */
5363 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5364 /* RTA_ENCAP_TYPE (u16) */
5365 *nexthop_len += nla_total_size(2);
5366 }
5367
5368 return 0;
5369 }
5370
5371 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5372 {
5373 int nexthop_len;
5374
5375 if (f6i->nh) {
5376 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5377 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5378 &nexthop_len);
5379 } else {
5380 struct fib6_nh *nh = f6i->fib6_nh;
5381
5382 nexthop_len = 0;
5383 if (f6i->fib6_nsiblings) {
5384 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
5385 + NLA_ALIGN(sizeof(struct rtnexthop))
5386 + nla_total_size(16) /* RTA_GATEWAY */
5387 + lwtunnel_get_encap_size(nh->fib_nh_lws);
5388
5389 nexthop_len *= f6i->fib6_nsiblings;
5390 }
5391 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5392 }
5393
5394 return NLMSG_ALIGN(sizeof(struct rtmsg))
5395 + nla_total_size(16) /* RTA_SRC */
5396 + nla_total_size(16) /* RTA_DST */
5397 + nla_total_size(16) /* RTA_GATEWAY */
5398 + nla_total_size(16) /* RTA_PREFSRC */
5399 + nla_total_size(4) /* RTA_TABLE */
5400 + nla_total_size(4) /* RTA_IIF */
5401 + nla_total_size(4) /* RTA_OIF */
5402 + nla_total_size(4) /* RTA_PRIORITY */
5403 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5404 + nla_total_size(sizeof(struct rta_cacheinfo))
5405 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5406 + nla_total_size(1) /* RTA_PREF */
5407 + nexthop_len;
5408 }
5409
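/* Dump nexthop details for a route using an external nexthop object:
 * groups are emitted as RTA_MULTIPATH, a single nexthop as flat
 * attributes, to stay compatible with userspace that predates
 * nexthop objects.
 */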
5410 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5411 unsigned char *flags)
5412 {
5413 if (nexthop_is_multipath(nh)) {
5414 struct nlattr *mp;
5415
5416 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5417 if (!mp)
5418 goto nla_put_failure;
5419
5420 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5421 goto nla_put_failure;
5422
5423 nla_nest_end(skb, mp);
5424 } else {
5425 struct fib6_nh *fib6_nh;
5426
5427 fib6_nh = nexthop_fib6_nh(nh);
5428 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5429 flags, false) < 0)
5430 goto nla_put_failure;
5431 }
5432
5433 return 0;
5434
5435 nla_put_failure:
5436 return -EMSGSIZE;
5437 }
5438
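/* Fill a routing netlink message from either a FIB entry (rt) or a
 * cached clone (dst, a rt6_info). When dest/src are set they carry
 * the addresses of an actual lookup and force host prefix lengths
 * (/128) in the dump.
 */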
5439 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5440 struct fib6_info *rt, struct dst_entry *dst,
5441 struct in6_addr *dest, struct in6_addr *src,
5442 int iif, int type, u32 portid, u32 seq,
5443 unsigned int flags)
5444 {
5445 struct rt6_info *rt6 = (struct rt6_info *)dst;
5446 struct rt6key *rt6_dst, *rt6_src;
5447 u32 *pmetrics, table, rt6_flags;
5448 unsigned char nh_flags = 0;
5449 struct nlmsghdr *nlh;
5450 struct rtmsg *rtm;
5451 long expires = 0;
5452
5453 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5454 if (!nlh)
5455 return -EMSGSIZE;
5456
5457 if (rt6) {
5458 rt6_dst = &rt6->rt6i_dst;
5459 rt6_src = &rt6->rt6i_src;
5460 rt6_flags = rt6->rt6i_flags;
5461 } else {
5462 rt6_dst = &rt->fib6_dst;
5463 rt6_src = &rt->fib6_src;
5464 rt6_flags = rt->fib6_flags;
5465 }
5466
5467 rtm = nlmsg_data(nlh);
5468 rtm->rtm_family = AF_INET6;
5469 rtm->rtm_dst_len = rt6_dst->plen;
5470 rtm->rtm_src_len = rt6_src->plen;
5471 rtm->rtm_tos = 0;
5472 if (rt->fib6_table)
5473 table = rt->fib6_table->tb6_id;
5474 else
5475 table = RT6_TABLE_UNSPEC;
5476 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5477 if (nla_put_u32(skb, RTA_TABLE, table))
5478 goto nla_put_failure;
5479
5480 rtm->rtm_type = rt->fib6_type;
5481 rtm->rtm_flags = 0;
5482 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5483 rtm->rtm_protocol = rt->fib6_protocol;
5484
5485 if (rt6_flags & RTF_CACHE)
5486 rtm->rtm_flags |= RTM_F_CLONED;
5487
5488 if (dest) {
5489 if (nla_put_in6_addr(skb, RTA_DST, dest))
5490 goto nla_put_failure;
5491 rtm->rtm_dst_len = 128;
5492 } else if (rtm->rtm_dst_len)
5493 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5494 goto nla_put_failure;
5495 #ifdef CONFIG_IPV6_SUBTREES
5496 if (src) {
5497 if (nla_put_in6_addr(skb, RTA_SRC, src))
5498 goto nla_put_failure;
5499 rtm->rtm_src_len = 128;
5500 } else if (rtm->rtm_src_len &&
5501 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5502 goto nla_put_failure;
5503 #endif
5504 if (iif) {
5505 #ifdef CONFIG_IPV6_MROUTE
5506 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5507 int err = ip6mr_get_route(net, skb, rtm, portid);
5508
5509 if (err == 0)
5510 return 0;
5511 if (err < 0)
5512 goto nla_put_failure;
5513 } else
5514 #endif
5515 if (nla_put_u32(skb, RTA_IIF, iif))
5516 goto nla_put_failure;
5517 } else if (dest) {
5518 struct in6_addr saddr_buf;
5519 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5520 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5521 goto nla_put_failure;
5522 }
5523
5524 if (rt->fib6_prefsrc.plen) {
5525 struct in6_addr saddr_buf;
5526 saddr_buf = rt->fib6_prefsrc.addr;
5527 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5528 goto nla_put_failure;
5529 }
5530
5531 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5532 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5533 goto nla_put_failure;
5534
5535 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5536 goto nla_put_failure;
5537
5538 /* For multipath routes, walk the siblings list and add
5539 * each as a nexthop within RTA_MULTIPATH.
5540 */
5541 if (rt6) {
5542 if (rt6_flags & RTF_GATEWAY &&
5543 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5544 goto nla_put_failure;
5545
5546 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5547 goto nla_put_failure;
5548
5549 if (dst->lwtstate &&
5550 lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
5551 goto nla_put_failure;
5552 } else if (rt->fib6_nsiblings) {
5553 struct fib6_info *sibling, *next_sibling;
5554 struct nlattr *mp;
5555
5556 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5557 if (!mp)
5558 goto nla_put_failure;
5559
5560 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5561 rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
5562 goto nla_put_failure;
5563
5564 list_for_each_entry_safe(sibling, next_sibling,
5565 &rt->fib6_siblings, fib6_siblings) {
5566 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5567 sibling->fib6_nh->fib_nh_weight,
5568 AF_INET6) < 0)
5569 goto nla_put_failure;
5570 }
5571
5572 nla_nest_end(skb, mp);
5573 } else if (rt->nh) {
5574 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5575 goto nla_put_failure;
5576
5577 if (nexthop_is_blackhole(rt->nh))
5578 rtm->rtm_type = RTN_BLACKHOLE;
5579
5580 if (net->ipv4.sysctl_nexthop_compat_mode &&
5581 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5582 goto nla_put_failure;
5583
5584 rtm->rtm_flags |= nh_flags;
5585 } else {
5586 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5587 &nh_flags, false) < 0)
5588 goto nla_put_failure;
5589
5590 rtm->rtm_flags |= nh_flags;
5591 }
5592
5593 if (rt6_flags & RTF_EXPIRES) {
5594 expires = dst ? dst->expires : rt->expires;
5595 expires -= jiffies;
5596 }
5597
5598 if (!dst) {
5599 if (rt->offload)
5600 rtm->rtm_flags |= RTM_F_OFFLOAD;
5601 if (rt->trap)
5602 rtm->rtm_flags |= RTM_F_TRAP;
5603 if (rt->offload_failed)
5604 rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5605 }
5606
5607 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5608 goto nla_put_failure;
5609
5610 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5611 goto nla_put_failure;
5612
5613
5614 nlmsg_end(skb, nlh);
5615 return 0;
5616
5617 nla_put_failure:
5618 nlmsg_cancel(skb, nlh);
5619 return -EMSGSIZE;
5620 }
5621
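/* Per-nexthop callback for nexthop_for_each_fib6_nh(): returning
 * nonzero stops the walk, so this reports a match as soon as one
 * nexthop uses the device passed in @arg.
 */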
5622 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5623 {
5624 const struct net_device *dev = arg;
5625
5626 if (nh->fib_nh_dev == dev)
5627 return 1;
5628
5629 return 0;
5630 }
5631
5632 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5633 const struct net_device *dev)
5634 {
5635 if (f6i->nh) {
5636 struct net_device *_dev = (struct net_device *)dev;
5637
5638 return !!nexthop_for_each_fib6_nh(f6i->nh,
5639 fib6_info_nh_uses_dev,
5640 _dev);
5641 }
5642
5643 if (f6i->fib6_nh->fib_nh_dev == dev)
5644 return true;
5645
5646 if (f6i->fib6_nsiblings) {
5647 struct fib6_info *sibling, *next_sibling;
5648
5649 list_for_each_entry_safe(sibling, next_sibling,
5650 &f6i->fib6_siblings, fib6_siblings) {
5651 if (sibling->fib6_nh->fib_nh_dev == dev)
5652 return true;
5653 }
5654 }
5655
5656 return false;
5657 }
5658
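/* Walker state for dumping the exception (cached) routes hanging off
 * a fib6_nh: @skip is the number of entries already handled by an
 * earlier partial dump, @count the number handled in this pass so the
 * caller knows where to resume.
 */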
5659 struct fib6_nh_exception_dump_walker {
5660 struct rt6_rtnl_dump_arg *dump;
5661 struct fib6_info *rt;
5662 unsigned int flags;
5663 unsigned int skip;
5664 unsigned int count;
5665 };
5666
5667 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5668 {
5669 struct fib6_nh_exception_dump_walker *w = arg;
5670 struct rt6_rtnl_dump_arg *dump = w->dump;
5671 struct rt6_exception_bucket *bucket;
5672 struct rt6_exception *rt6_ex;
5673 int i, err;
5674
5675 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5676 if (!bucket)
5677 return 0;
5678
5679 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5680 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5681 if (w->skip) {
5682 w->skip--;
5683 continue;
5684 }
5685
5686 /* Expiration of entries doesn't bump sernum, insertion
5687 * does. Removal is triggered by insertion, so we can
5688 * rely on the fact that if entries change between two
5689 * partial dumps, this node is scanned again completely,
5690 * see rt6_insert_exception() and fib6_dump_table().
5691 *
5692 * Count expired entries we go through as handled
5693 * entries that we'll skip next time, in case of partial
5694 * node dump. Otherwise, if entries expire meanwhile,
5695 * we'll skip the wrong amount.
5696 */
5697 if (rt6_check_expired(rt6_ex->rt6i)) {
5698 w->count++;
5699 continue;
5700 }
5701
5702 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5703 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5704 RTM_NEWROUTE,
5705 NETLINK_CB(dump->cb->skb).portid,
5706 dump->cb->nlh->nlmsg_seq, w->flags);
5707 if (err)
5708 return err;
5709
5710 w->count++;
5711 }
5712 bucket++;
5713 }
5714
5715 return 0;
5716 }
5717
5718 /* Return -1 if done with node, number of handled routes on partial dump */
5719 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5720 {
5721 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5722 struct fib_dump_filter *filter = &arg->filter;
5723 unsigned int flags = NLM_F_MULTI;
5724 struct net *net = arg->net;
5725 int count = 0;
5726
5727 if (rt == net->ipv6.fib6_null_entry)
5728 return -1;
5729
5730 if ((filter->flags & RTM_F_PREFIX) &&
5731 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5732 /* success since this is not a prefix route */
5733 return -1;
5734 }
5735 if (filter->filter_set &&
5736 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5737 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5738 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5739 return -1;
5740 }
5741
5742 if (filter->filter_set ||
5743 !filter->dump_routes || !filter->dump_exceptions) {
5744 flags |= NLM_F_DUMP_FILTERED;
5745 }
5746
5747 if (filter->dump_routes) {
5748 if (skip) {
5749 skip--;
5750 } else {
5751 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5752 0, RTM_NEWROUTE,
5753 NETLINK_CB(arg->cb->skb).portid,
5754 arg->cb->nlh->nlmsg_seq, flags)) {
5755 return 0;
5756 }
5757 count++;
5758 }
5759 }
5760
5761 if (filter->dump_exceptions) {
5762 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5763 .rt = rt,
5764 .flags = flags,
5765 .skip = skip,
5766 .count = 0 };
5767 int err;
5768
5769 rcu_read_lock();
5770 if (rt->nh) {
5771 err = nexthop_for_each_fib6_nh(rt->nh,
5772 rt6_nh_dump_exceptions,
5773 &w);
5774 } else {
5775 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5776 }
5777 rcu_read_unlock();
5778
5779 if (err)
5780 return count += w.count;
5781 }
5782
5783 return -1;
5784 }
5785
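/* Validate an RTM_GETROUTE request. Sockets that enabled strict
 * checking get full validation of the header fields and attributes;
 * legacy sockets fall back to the permissive deprecated parse for
 * backwards compatibility.
 */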
5786 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5787 const struct nlmsghdr *nlh,
5788 struct nlattr **tb,
5789 struct netlink_ext_ack *extack)
5790 {
5791 struct rtmsg *rtm;
5792 int i, err;
5793
5794 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5795 NL_SET_ERR_MSG_MOD(extack,
5796 "Invalid header for get route request");
5797 return -EINVAL;
5798 }
5799
5800 if (!netlink_strict_get_check(skb))
5801 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5802 rtm_ipv6_policy, extack);
5803
5804 rtm = nlmsg_data(nlh);
5805 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5806 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5807 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5808 rtm->rtm_type) {
5809 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5810 return -EINVAL;
5811 }
5812 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5813 NL_SET_ERR_MSG_MOD(extack,
5814 "Invalid flags for get route request");
5815 return -EINVAL;
5816 }
5817
5818 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5819 rtm_ipv6_policy, extack);
5820 if (err)
5821 return err;
5822
5823 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5824 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5825 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5826 return -EINVAL;
5827 }
5828
5829 for (i = 0; i <= RTA_MAX; i++) {
5830 if (!tb[i])
5831 continue;
5832
5833 switch (i) {
5834 case RTA_SRC:
5835 case RTA_DST:
5836 case RTA_IIF:
5837 case RTA_OIF:
5838 case RTA_MARK:
5839 case RTA_UID:
5840 case RTA_SPORT:
5841 case RTA_DPORT:
5842 case RTA_IP_PROTO:
5843 break;
5844 default:
5845 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
5846 return -EINVAL;
5847 }
5848 }
5849
5850 return 0;
5851 }
5852
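/* RTM_GETROUTE handler (exercised by e.g. "ip -6 route get <addr>"):
 * build a flowi6 from the request attributes, do an input-path lookup
 * when RTA_IIF is present and an output lookup otherwise, then
 * unicast the result back to the requester. With RTM_F_FIB_MATCH the
 * matching FIB entry is reported instead of the resolved dst.
 */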
5853 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5854 struct netlink_ext_ack *extack)
5855 {
5856 struct net *net = sock_net(in_skb->sk);
5857 struct nlattr *tb[RTA_MAX+1];
5858 int err, iif = 0, oif = 0;
5859 struct fib6_info *from;
5860 struct dst_entry *dst;
5861 struct rt6_info *rt;
5862 struct sk_buff *skb;
5863 struct rtmsg *rtm;
5864 struct flowi6 fl6 = {};
5865 bool fibmatch;
5866
5867 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
5868 if (err < 0)
5869 goto errout;
5870
5871 err = -EINVAL;
5872 rtm = nlmsg_data(nlh);
5873 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
5874 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
5875
5876 if (tb[RTA_SRC]) {
5877 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
5878 goto errout;
5879
5880 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
5881 }
5882
5883 if (tb[RTA_DST]) {
5884 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
5885 goto errout;
5886
5887 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
5888 }
5889
5890 if (tb[RTA_IIF])
5891 iif = nla_get_u32(tb[RTA_IIF]);
5892
5893 if (tb[RTA_OIF])
5894 oif = nla_get_u32(tb[RTA_OIF]);
5895
5896 if (tb[RTA_MARK])
5897 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
5898
5899 if (tb[RTA_UID])
5900 fl6.flowi6_uid = make_kuid(current_user_ns(),
5901 nla_get_u32(tb[RTA_UID]));
5902 else
5903 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
5904
5905 if (tb[RTA_SPORT])
5906 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
5907
5908 if (tb[RTA_DPORT])
5909 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
5910
5911 if (tb[RTA_IP_PROTO]) {
5912 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
5913 &fl6.flowi6_proto, AF_INET6,
5914 extack);
5915 if (err)
5916 goto errout;
5917 }
5918
5919 if (iif) {
5920 struct net_device *dev;
5921 int flags = 0;
5922
5923 rcu_read_lock();
5924
5925 dev = dev_get_by_index_rcu(net, iif);
5926 if (!dev) {
5927 rcu_read_unlock();
5928 err = -ENODEV;
5929 goto errout;
5930 }
5931
5932 fl6.flowi6_iif = iif;
5933
5934 if (!ipv6_addr_any(&fl6.saddr))
5935 flags |= RT6_LOOKUP_F_HAS_SADDR;
5936
5937 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
5938
5939 rcu_read_unlock();
5940 } else {
5941 fl6.flowi6_oif = oif;
5942
5943 dst = ip6_route_output(net, NULL, &fl6);
5944 }
5945
5946
5947 rt = container_of(dst, struct rt6_info, dst);
5948 if (rt->dst.error) {
5949 err = rt->dst.error;
5950 ip6_rt_put(rt);
5951 goto errout;
5952 }
5953
5954 if (rt == net->ipv6.ip6_null_entry) {
5955 err = rt->dst.error;
5956 ip6_rt_put(rt);
5957 goto errout;
5958 }
5959
5960 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
5961 if (!skb) {
5962 ip6_rt_put(rt);
5963 err = -ENOBUFS;
5964 goto errout;
5965 }
5966
5967 skb_dst_set(skb, &rt->dst);
5968
5969 rcu_read_lock();
5970 from = rcu_dereference(rt->from);
5971 if (from) {
5972 if (fibmatch)
5973 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
5974 iif, RTM_NEWROUTE,
5975 NETLINK_CB(in_skb).portid,
5976 nlh->nlmsg_seq, 0);
5977 else
5978 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
5979 &fl6.saddr, iif, RTM_NEWROUTE,
5980 NETLINK_CB(in_skb).portid,
5981 nlh->nlmsg_seq, 0);
5982 } else {
5983 err = -ENETUNREACH;
5984 }
5985 rcu_read_unlock();
5986
5987 if (err < 0) {
5988 kfree_skb(skb);
5989 goto errout;
5990 }
5991
5992 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
5993 errout:
5994 return err;
5995 }
5996
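/* Notify RTNLGRP_IPV6_ROUTE listeners of a route event. The skb is
 * sized by rt6_nlmsg_size(), which must cover every attribute
 * rt6_fill_node() can emit; hence the -EMSGSIZE warning below.
 */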
5997 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
5998 unsigned int nlm_flags)
5999 {
6000 struct sk_buff *skb;
6001 struct net *net = info->nl_net;
6002 u32 seq;
6003 int err;
6004
6005 err = -ENOBUFS;
6006 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6007
6008 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6009 if (!skb)
6010 goto errout;
6011
6012 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6013 event, info->portid, seq, nlm_flags);
6014 if (err < 0) {
6015 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6016 WARN_ON(err == -EMSGSIZE);
6017 kfree_skb(skb);
6018 goto errout;
6019 }
6020 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6021 info->nlh, gfp_any());
6022 return;
6023 errout:
6024 if (err < 0)
6025 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6026 }
6027
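/* Like inet6_rt_notify(), but always announces RTM_NEWROUTE with
 * NLM_F_REPLACE, for routes that were updated in place.
 */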
6028 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6029 struct nl_info *info)
6030 {
6031 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6032 struct sk_buff *skb;
6033 int err = -ENOBUFS;
6034
6035 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6036 if (!skb)
6037 goto errout;
6038
6039 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6040 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6041 if (err < 0) {
6042 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6043 WARN_ON(err == -EMSGSIZE);
6044 kfree_skb(skb);
6045 goto errout;
6046 }
6047 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6048 info->nlh, gfp_any());
6049 return;
6050 errout:
6051 if (err < 0)
6052 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6053 }
6054
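/* Update the hardware offload state of a route and, subject to the
 * fib_notify_on_flag_change sysctl (0: never notify, 1: notify on any
 * flag change, 2: notify only when offload_failed changes), tell
 * RTNLGRP_IPV6_ROUTE listeners about it.
 */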
6055 void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
6056 bool offload, bool trap, bool offload_failed)
6057 {
6058 struct sk_buff *skb;
6059 int err;
6060
6061 if (f6i->offload == offload && f6i->trap == trap &&
6062 f6i->offload_failed == offload_failed)
6063 return;
6064
6065 f6i->offload = offload;
6066 f6i->trap = trap;
6067
6068 /* 2 means send notifications only if offload_failed was changed. */
6069 if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
6070 f6i->offload_failed == offload_failed)
6071 return;
6072
6073 f6i->offload_failed = offload_failed;
6074
6075 if (!rcu_access_pointer(f6i->fib6_node))
6076 /* The route was removed from the tree, do not send
6077 * notification.
6078 */
6079 return;
6080
6081 if (!net->ipv6.sysctl.fib_notify_on_flag_change)
6082 return;
6083
6084 skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL);
6085 if (!skb) {
6086 err = -ENOBUFS;
6087 goto errout;
6088 }
6089
6090 err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0,
6091 0, 0);
6092 if (err < 0) {
6093 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6094 WARN_ON(err == -EMSGSIZE);
6095 kfree_skb(skb);
6096 goto errout;
6097 }
6098
6099 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL);
6100 return;
6101
6102 errout:
6103 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6104 }
6105 EXPORT_SYMBOL(fib6_info_hw_flags_set);
6106
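/* Loopback device notifier: the special null/prohibit/blackhole
 * routes have no device of their own, so point them at the netns
 * loopback on NETDEV_REGISTER and drop the idev references exactly
 * once on NETDEV_UNREGISTER.
 */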
6107 static int ip6_route_dev_notify(struct notifier_block *this,
6108 unsigned long event, void *ptr)
6109 {
6110 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6111 struct net *net = dev_net(dev);
6112
6113 if (!(dev->flags & IFF_LOOPBACK))
6114 return NOTIFY_OK;
6115
6116 if (event == NETDEV_REGISTER) {
6117 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6118 net->ipv6.ip6_null_entry->dst.dev = dev;
6119 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6120 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6121 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6122 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6123 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6124 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6125 #endif
6126 } else if (event == NETDEV_UNREGISTER &&
6127 dev->reg_state != NETREG_UNREGISTERED) {
6128 /* NETDEV_UNREGISTER can be fired multiple times by
6129 * netdev_wait_allrefs(); make sure we only do this once.
6130 */
6131 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6132 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6133 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6134 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6135 #endif
6136 }
6137
6138 return NOTIFY_OK;
6139 }
6140
6141 /*
6142 * /proc
6143 */
6144
6145 #ifdef CONFIG_PROC_FS
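/* /proc/net/rt6_stats: seven hex fields, in order: fib nodes, route
 * nodes, route allocations, route entries, cached routes, dst entries
 * and discarded routes.
 */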
6146 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6147 {
6148 struct net *net = (struct net *)seq->private;
6149 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6150 net->ipv6.rt6_stats->fib_nodes,
6151 net->ipv6.rt6_stats->fib_route_nodes,
6152 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6153 net->ipv6.rt6_stats->fib_rt_entries,
6154 net->ipv6.rt6_stats->fib_rt_cache,
6155 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6156 net->ipv6.rt6_stats->fib_discarded_routes);
6157
6158 return 0;
6159 }
6160 #endif /* CONFIG_PROC_FS */
6161
6162 #ifdef CONFIG_SYSCTL
6163
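/* Write-only sysctl (mode 0200): writing to net.ipv6.route.flush
 * kicks off an immediate garbage-collection pass over cached routes;
 * reads are rejected with -EINVAL.
 */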
6164 static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6165 void *buffer, size_t *lenp, loff_t *ppos)
6166 {
6167 struct net *net;
6168 int delay;
6169 int ret;
6170 if (!write)
6171 return -EINVAL;
6172
6173 net = (struct net *)ctl->extra1;
6174 delay = net->ipv6.sysctl.flush_delay;
6175 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6176 if (ret)
6177 return ret;
6178
6179 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6180 return 0;
6181 }
6182
6183 static struct ctl_table ipv6_route_table_template[] = {
6184 {
6185 .procname = "flush",
6186 .data = &init_net.ipv6.sysctl.flush_delay,
6187 .maxlen = sizeof(int),
6188 .mode = 0200,
6189 .proc_handler = ipv6_sysctl_rtcache_flush
6190 },
6191 {
6192 .procname = "gc_thresh",
6193 .data = &ip6_dst_ops_template.gc_thresh,
6194 .maxlen = sizeof(int),
6195 .mode = 0644,
6196 .proc_handler = proc_dointvec,
6197 },
6198 {
6199 .procname = "max_size",
6200 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
6201 .maxlen = sizeof(int),
6202 .mode = 0644,
6203 .proc_handler = proc_dointvec,
6204 },
6205 {
6206 .procname = "gc_min_interval",
6207 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6208 .maxlen = sizeof(int),
6209 .mode = 0644,
6210 .proc_handler = proc_dointvec_jiffies,
6211 },
6212 {
6213 .procname = "gc_timeout",
6214 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6215 .maxlen = sizeof(int),
6216 .mode = 0644,
6217 .proc_handler = proc_dointvec_jiffies,
6218 },
6219 {
6220 .procname = "gc_interval",
6221 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
6222 .maxlen = sizeof(int),
6223 .mode = 0644,
6224 .proc_handler = proc_dointvec_jiffies,
6225 },
6226 {
6227 .procname = "gc_elasticity",
6228 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6229 .maxlen = sizeof(int),
6230 .mode = 0644,
6231 .proc_handler = proc_dointvec,
6232 },
6233 {
6234 .procname = "mtu_expires",
6235 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6236 .maxlen = sizeof(int),
6237 .mode = 0644,
6238 .proc_handler = proc_dointvec_jiffies,
6239 },
6240 {
6241 .procname = "min_adv_mss",
6242 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
6243 .maxlen = sizeof(int),
6244 .mode = 0644,
6245 .proc_handler = proc_dointvec,
6246 },
6247 {
6248 .procname = "gc_min_interval_ms",
6249 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6250 .maxlen = sizeof(int),
6251 .mode = 0644,
6252 .proc_handler = proc_dointvec_ms_jiffies,
6253 },
6254 {
6255 .procname = "skip_notify_on_dev_down",
6256 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6257 .maxlen = sizeof(int),
6258 .mode = 0644,
6259 .proc_handler = proc_dointvec_minmax,
6260 .extra1 = SYSCTL_ZERO,
6261 .extra2 = SYSCTL_ONE,
6262 },
6263 { }
6264 };
6265
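/* Clone the sysctl template for a new netns and repoint each entry's
 * ->data at the per-netns fields. The indices below must stay in sync
 * with the order of ipv6_route_table_template.
 */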
6266 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6267 {
6268 struct ctl_table *table;
6269
6270 table = kmemdup(ipv6_route_table_template,
6271 sizeof(ipv6_route_table_template),
6272 GFP_KERNEL);
6273
6274 if (table) {
6275 table[0].data = &net->ipv6.sysctl.flush_delay;
6276 table[0].extra1 = net;
6277 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6278 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
6279 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6280 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6281 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6282 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6283 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6284 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6285 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6286 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6287
6288 /* Don't export sysctls to unprivileged users */
6289 if (net->user_ns != &init_user_ns)
6290 table[0].procname = NULL;
6291 }
6292
6293 return table;
6294 }
6295 #endif
6296
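/* Per-netns init: set up dst_ops, clone the null (and, with policy
 * routing, prohibit/blackhole) template routes and seed the sysctl
 * defaults. Failures unwind in reverse allocation order via the
 * labels at the bottom.
 */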
6297 static int __net_init ip6_route_net_init(struct net *net)
6298 {
6299 int ret = -ENOMEM;
6300
6301 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6302 sizeof(net->ipv6.ip6_dst_ops));
6303
6304 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6305 goto out_ip6_dst_ops;
6306
6307 net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6308 if (!net->ipv6.fib6_null_entry)
6309 goto out_ip6_dst_entries;
6310 memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6311 sizeof(*net->ipv6.fib6_null_entry));
6312
6313 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6314 sizeof(*net->ipv6.ip6_null_entry),
6315 GFP_KERNEL);
6316 if (!net->ipv6.ip6_null_entry)
6317 goto out_fib6_null_entry;
6318 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6319 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6320 ip6_template_metrics, true);
6321 INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);
6322
6323 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6324 net->ipv6.fib6_has_custom_rules = false;
6325 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6326 sizeof(*net->ipv6.ip6_prohibit_entry),
6327 GFP_KERNEL);
6328 if (!net->ipv6.ip6_prohibit_entry)
6329 goto out_ip6_null_entry;
6330 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6331 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6332 ip6_template_metrics, true);
6333 INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
6334
6335 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6336 sizeof(*net->ipv6.ip6_blk_hole_entry),
6337 GFP_KERNEL);
6338 if (!net->ipv6.ip6_blk_hole_entry)
6339 goto out_ip6_prohibit_entry;
6340 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6341 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6342 ip6_template_metrics, true);
6343 INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
6344 #ifdef CONFIG_IPV6_SUBTREES
6345 net->ipv6.fib6_routes_require_src = 0;
6346 #endif
6347 #endif
6348
6349 net->ipv6.sysctl.flush_delay = 0;
6350 net->ipv6.sysctl.ip6_rt_max_size = 4096;
6351 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6352 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6353 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6354 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6355 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6356 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6357 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6358
6359 net->ipv6.ip6_rt_gc_expire = 30*HZ;
6360
6361 ret = 0;
6362 out:
6363 return ret;
6364
6365 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6366 out_ip6_prohibit_entry:
6367 kfree(net->ipv6.ip6_prohibit_entry);
6368 out_ip6_null_entry:
6369 kfree(net->ipv6.ip6_null_entry);
6370 #endif
6371 out_fib6_null_entry:
6372 kfree(net->ipv6.fib6_null_entry);
6373 out_ip6_dst_entries:
6374 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6375 out_ip6_dst_ops:
6376 goto out;
6377 }
6378
6379 static void __net_exit ip6_route_net_exit(struct net *net)
6380 {
6381 kfree(net->ipv6.fib6_null_entry);
6382 kfree(net->ipv6.ip6_null_entry);
6383 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6384 kfree(net->ipv6.ip6_prohibit_entry);
6385 kfree(net->ipv6.ip6_blk_hole_entry);
6386 #endif
6387 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6388 }
6389
6390 static int __net_init ip6_route_net_init_late(struct net *net)
6391 {
6392 #ifdef CONFIG_PROC_FS
6393 proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
6394 sizeof(struct ipv6_route_iter));
6395 proc_create_net_single("rt6_stats", 0444, net->proc_net,
6396 rt6_stats_seq_show, NULL);
6397 #endif
6398 return 0;
6399 }
6400
6401 static void __net_exit ip6_route_net_exit_late(struct net *net)
6402 {
6403 #ifdef CONFIG_PROC_FS
6404 remove_proc_entry("ipv6_route", net->proc_net);
6405 remove_proc_entry("rt6_stats", net->proc_net);
6406 #endif
6407 }
6408
6409 static struct pernet_operations ip6_route_net_ops = {
6410 .init = ip6_route_net_init,
6411 .exit = ip6_route_net_exit,
6412 };
6413
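/* Per-netns inetpeer base, used among other things for ICMPv6 rate
 * limiting state.
 */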
6414 static int __net_init ipv6_inetpeer_init(struct net *net)
6415 {
6416 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6417
6418 if (!bp)
6419 return -ENOMEM;
6420 inet_peer_base_init(bp);
6421 net->ipv6.peers = bp;
6422 return 0;
6423 }
6424
6425 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6426 {
6427 struct inet_peer_base *bp = net->ipv6.peers;
6428
6429 net->ipv6.peers = NULL;
6430 inetpeer_invalidate_tree(bp);
6431 kfree(bp);
6432 }
6433
6434 static struct pernet_operations ipv6_inetpeer_ops = {
6435 .init = ipv6_inetpeer_init,
6436 .exit = ipv6_inetpeer_exit,
6437 };
6438
6439 static struct pernet_operations ip6_route_net_late_ops = {
6440 .init = ip6_route_net_init_late,
6441 .exit = ip6_route_net_exit_late,
6442 };
6443
6444 static struct notifier_block ip6_route_dev_notifier = {
6445 .notifier_call = ip6_route_dev_notify,
6446 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6447 };
6448
6449 void __init ip6_route_init_special_entries(void)
6450 {
6451 /* The loopback device is registered before this code runs, so the
6452 * loopback reference in rt6_info has not been taken; take it
6453 * manually for init_net */
6454 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6455 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6456 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6457 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6458 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6459 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6460 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6461 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6462 #endif
6463 }
6464
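/* bpf_iter target "ipv6_route": lets BPF programs walk the FIB by
 * reusing the /proc/net/ipv6_route seq_file machinery; only
 * registered when IPv6 is built in.
 */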
6465 #if IS_BUILTIN(CONFIG_IPV6)
6466 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6467 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6468
6469 BTF_ID_LIST(btf_fib6_info_id)
6470 BTF_ID(struct, fib6_info)
6471
6472 static const struct bpf_iter_seq_info ipv6_route_seq_info = {
6473 .seq_ops = &ipv6_route_seq_ops,
6474 .init_seq_private = bpf_iter_init_seq_net,
6475 .fini_seq_private = bpf_iter_fini_seq_net,
6476 .seq_priv_size = sizeof(struct ipv6_route_iter),
6477 };
6478
6479 static struct bpf_iter_reg ipv6_route_reg_info = {
6480 .target = "ipv6_route",
6481 .ctx_arg_info_size = 1,
6482 .ctx_arg_info = {
6483 { offsetof(struct bpf_iter__ipv6_route, rt),
6484 PTR_TO_BTF_ID_OR_NULL },
6485 },
6486 .seq_info = &ipv6_route_seq_info,
6487 };
6488
6489 static int __init bpf_iter_register(void)
6490 {
6491 ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
6492 return bpf_iter_reg_target(&ipv6_route_reg_info);
6493 }
6494
6495 static void bpf_iter_unregister(void)
6496 {
6497 bpf_iter_unreg_target(&ipv6_route_reg_info);
6498 }
6499 #endif
6500 #endif
6501
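/* Module init: ordering matters. The dst caches and pernet state must
 * exist before fib6/xfrm6/fib6-rules are initialized, which in turn
 * must precede the rtnetlink handlers that can start serving requests
 * immediately. Each error label unwinds everything registered before
 * it.
 */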
6502 int __init ip6_route_init(void)
6503 {
6504 int ret;
6505 int cpu;
6506
6507 ret = -ENOMEM;
6508 ip6_dst_ops_template.kmem_cachep =
6509 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6510 SLAB_HWCACHE_ALIGN, NULL);
6511 if (!ip6_dst_ops_template.kmem_cachep)
6512 goto out;
6513
6514 ret = dst_entries_init(&ip6_dst_blackhole_ops);
6515 if (ret)
6516 goto out_kmem_cache;
6517
6518 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6519 if (ret)
6520 goto out_dst_entries;
6521
6522 ret = register_pernet_subsys(&ip6_route_net_ops);
6523 if (ret)
6524 goto out_register_inetpeer;
6525
6526 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6527
6528 ret = fib6_init();
6529 if (ret)
6530 goto out_register_subsys;
6531
6532 ret = xfrm6_init();
6533 if (ret)
6534 goto out_fib6_init;
6535
6536 ret = fib6_rules_init();
6537 if (ret)
6538 goto xfrm6_init;
6539
6540 ret = register_pernet_subsys(&ip6_route_net_late_ops);
6541 if (ret)
6542 goto fib6_rules_init;
6543
6544 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
6545 inet6_rtm_newroute, NULL, 0);
6546 if (ret < 0)
6547 goto out_register_late_subsys;
6548
6549 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
6550 inet6_rtm_delroute, NULL, 0);
6551 if (ret < 0)
6552 goto out_register_late_subsys;
6553
6554 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
6555 inet6_rtm_getroute, NULL,
6556 RTNL_FLAG_DOIT_UNLOCKED);
6557 if (ret < 0)
6558 goto out_register_late_subsys;
6559
6560 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6561 if (ret)
6562 goto out_register_late_subsys;
6563
6564 #if IS_BUILTIN(CONFIG_IPV6)
6565 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6566 ret = bpf_iter_register();
6567 if (ret)
6568 goto out_register_late_subsys;
6569 #endif
6570 #endif
6571
6572 for_each_possible_cpu(cpu) {
6573 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6574
6575 INIT_LIST_HEAD(&ul->head);
6576 spin_lock_init(&ul->lock);
6577 }
6578
6579 out:
6580 return ret;
6581
6582 out_register_late_subsys:
6583 rtnl_unregister_all(PF_INET6);
6584 unregister_pernet_subsys(&ip6_route_net_late_ops);
6585 fib6_rules_init:
6586 fib6_rules_cleanup();
6587 xfrm6_init:
6588 xfrm6_fini();
6589 out_fib6_init:
6590 fib6_gc_cleanup();
6591 out_register_subsys:
6592 unregister_pernet_subsys(&ip6_route_net_ops);
6593 out_register_inetpeer:
6594 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6595 out_dst_entries:
6596 dst_entries_destroy(&ip6_dst_blackhole_ops);
6597 out_kmem_cache:
6598 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6599 goto out;
6600 }
6601
6602 void ip6_route_cleanup(void)
6603 {
6604 #if IS_BUILTIN(CONFIG_IPV6)
6605 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6606 bpf_iter_unregister();
6607 #endif
6608 #endif
6609 unregister_netdevice_notifier(&ip6_route_dev_notifier);
6610 unregister_pernet_subsys(&ip6_route_net_late_ops);
6611 fib6_rules_cleanup();
6612 xfrm6_fini();
6613 fib6_gc_cleanup();
6614 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6615 unregister_pernet_subsys(&ip6_route_net_ops);
6616 dst_entries_destroy(&ip6_dst_blackhole_ops);
6617 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6618 }
6619