1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * GRE over IPv6 protocol decoder.
4 *
5 * Authors: Dmitry Kozlov (xeb@mail.ru)
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/uaccess.h>
16 #include <linux/skbuff.h>
17 #include <linux/netdevice.h>
18 #include <linux/in.h>
19 #include <linux/tcp.h>
20 #include <linux/udp.h>
21 #include <linux/if_arp.h>
22 #include <linux/init.h>
23 #include <linux/in6.h>
24 #include <linux/inetdevice.h>
25 #include <linux/igmp.h>
26 #include <linux/netfilter_ipv4.h>
27 #include <linux/etherdevice.h>
28 #include <linux/if_ether.h>
29 #include <linux/hash.h>
30 #include <linux/if_tunnel.h>
31 #include <linux/ip6_tunnel.h>
32
33 #include <net/sock.h>
34 #include <net/ip.h>
35 #include <net/ip_tunnels.h>
36 #include <net/icmp.h>
37 #include <net/protocol.h>
38 #include <net/addrconf.h>
39 #include <net/arp.h>
40 #include <net/checksum.h>
41 #include <net/dsfield.h>
42 #include <net/inet_ecn.h>
43 #include <net/xfrm.h>
44 #include <net/net_namespace.h>
45 #include <net/netns/generic.h>
46 #include <net/rtnetlink.h>
47
48 #include <net/ipv6.h>
49 #include <net/ip6_fib.h>
50 #include <net/ip6_route.h>
51 #include <net/ip6_tunnel.h>
52 #include <net/gre.h>
53 #include <net/erspan.h>
54 #include <net/dst_metadata.h>
55
56
57 static bool log_ecn_error = true;
58 module_param(log_ecn_error, bool, 0644);
59 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
60
61 #define IP6_GRE_HASH_SIZE_SHIFT 5
62 #define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
63
64 static unsigned int ip6gre_net_id __read_mostly;
65 struct ip6gre_net {
66 struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
67
68 struct ip6_tnl __rcu *collect_md_tun;
69 struct ip6_tnl __rcu *collect_md_tun_erspan;
70 struct net_device *fb_tunnel_dev;
71 };
72
73 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
74 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
75 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
76 static int ip6gre_tunnel_init(struct net_device *dev);
77 static void ip6gre_tunnel_setup(struct net_device *dev);
78 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
79 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
80 static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
81
82 /* Tunnel hash table */
83
/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
 */
99
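/* Fold the 32-bit GRE key into IP6_GRE_HASH_SIZE buckets; HASH_ADDR()
 * below does the same for an IPv6 address via ipv6_addr_hash().
 */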
100 #define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
static u32 HASH_ADDR(const struct in6_addr *addr)
102 {
103 u32 hash = ipv6_addr_hash(addr);
104
105 return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
106 }
107
108 #define tunnels_r_l tunnels[3]
109 #define tunnels_r tunnels[2]
110 #define tunnels_l tunnels[1]
111 #define tunnels_wc tunnels[0]
112
/* Given src, dst and key, find the appropriate tunnel for input. */
114
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
const struct in6_addr *remote, const struct in6_addr *local,
__be32 key, __be16 gre_proto)
118 {
119 struct net *net = dev_net(dev);
120 int link = dev->ifindex;
121 unsigned int h0 = HASH_ADDR(remote);
122 unsigned int h1 = HASH_KEY(key);
123 struct ip6_tnl *t, *cand = NULL;
124 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
125 int dev_type = (gre_proto == htons(ETH_P_TEB) ||
126 gre_proto == htons(ETH_P_ERSPAN) ||
127 gre_proto == htons(ETH_P_ERSPAN2)) ?
128 ARPHRD_ETHER : ARPHRD_IP6GRE;
129 int score, cand_score = 4;
130 struct net_device *ndev;
131
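/* Walk the tables from most to least specific. score bit 0 marks a
 * device-link mismatch and bit 1 a device-type mismatch, so an exact
 * match (score 0) returns immediately and otherwise the lowest-scoring
 * candidate wins.
 */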
132 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
133 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
134 !ipv6_addr_equal(remote, &t->parms.raddr) ||
135 key != t->parms.i_key ||
136 !(t->dev->flags & IFF_UP))
137 continue;
138
139 if (t->dev->type != ARPHRD_IP6GRE &&
140 t->dev->type != dev_type)
141 continue;
142
143 score = 0;
144 if (t->parms.link != link)
145 score |= 1;
146 if (t->dev->type != dev_type)
147 score |= 2;
148 if (score == 0)
149 return t;
150
151 if (score < cand_score) {
152 cand = t;
153 cand_score = score;
154 }
155 }
156
157 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
158 if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
159 key != t->parms.i_key ||
160 !(t->dev->flags & IFF_UP))
161 continue;
162
163 if (t->dev->type != ARPHRD_IP6GRE &&
164 t->dev->type != dev_type)
165 continue;
166
167 score = 0;
168 if (t->parms.link != link)
169 score |= 1;
170 if (t->dev->type != dev_type)
171 score |= 2;
172 if (score == 0)
173 return t;
174
175 if (score < cand_score) {
176 cand = t;
177 cand_score = score;
178 }
179 }
180
181 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
182 if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
183 (!ipv6_addr_equal(local, &t->parms.raddr) ||
184 !ipv6_addr_is_multicast(local))) ||
185 key != t->parms.i_key ||
186 !(t->dev->flags & IFF_UP))
187 continue;
188
189 if (t->dev->type != ARPHRD_IP6GRE &&
190 t->dev->type != dev_type)
191 continue;
192
193 score = 0;
194 if (t->parms.link != link)
195 score |= 1;
196 if (t->dev->type != dev_type)
197 score |= 2;
198 if (score == 0)
199 return t;
200
201 if (score < cand_score) {
202 cand = t;
203 cand_score = score;
204 }
205 }
206
207 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
208 if (t->parms.i_key != key ||
209 !(t->dev->flags & IFF_UP))
210 continue;
211
212 if (t->dev->type != ARPHRD_IP6GRE &&
213 t->dev->type != dev_type)
214 continue;
215
216 score = 0;
217 if (t->parms.link != link)
218 score |= 1;
219 if (t->dev->type != dev_type)
220 score |= 2;
221 if (score == 0)
222 return t;
223
224 if (score < cand_score) {
225 cand = t;
226 cand_score = score;
227 }
228 }
229
230 if (cand)
231 return cand;
232
233 if (gre_proto == htons(ETH_P_ERSPAN) ||
234 gre_proto == htons(ETH_P_ERSPAN2))
235 t = rcu_dereference(ign->collect_md_tun_erspan);
236 else
237 t = rcu_dereference(ign->collect_md_tun);
238
239 if (t && t->dev->flags & IFF_UP)
240 return t;
241
242 ndev = READ_ONCE(ign->fb_tunnel_dev);
243 if (ndev && ndev->flags & IFF_UP)
244 return netdev_priv(ndev);
245
246 return NULL;
247 }
248
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
const struct __ip6_tnl_parm *p)
251 {
252 const struct in6_addr *remote = &p->raddr;
253 const struct in6_addr *local = &p->laddr;
254 unsigned int h = HASH_KEY(p->i_key);
255 int prio = 0;
256
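/* prio picks the table: bit 0 is set when a local address is given,
 * bit 1 when a unicast remote is, matching the table layout above.
 */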
257 if (!ipv6_addr_any(local))
258 prio |= 1;
259 if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
260 prio |= 2;
261 h ^= HASH_ADDR(remote);
262 }
263
264 return &ign->tunnels[prio][h];
265 }
266
static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
268 {
269 if (t->parms.collect_md)
270 rcu_assign_pointer(ign->collect_md_tun, t);
271 }
272
static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
274 {
275 if (t->parms.collect_md)
276 rcu_assign_pointer(ign->collect_md_tun_erspan, t);
277 }
278
static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
280 {
281 if (t->parms.collect_md)
282 rcu_assign_pointer(ign->collect_md_tun, NULL);
283 }
284
static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
struct ip6_tnl *t)
287 {
288 if (t->parms.collect_md)
289 rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
290 }
291
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
const struct ip6_tnl *t)
294 {
295 return __ip6gre_bucket(ign, &t->parms);
296 }
297
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
299 {
300 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
301
302 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
303 rcu_assign_pointer(*tp, t);
304 }
305
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
307 {
308 struct ip6_tnl __rcu **tp;
309 struct ip6_tnl *iter;
310
311 for (tp = ip6gre_bucket(ign, t);
312 (iter = rtnl_dereference(*tp)) != NULL;
313 tp = &iter->next) {
314 if (t == iter) {
315 rcu_assign_pointer(*tp, t->next);
316 break;
317 }
318 }
319 }
320
static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
const struct __ip6_tnl_parm *parms,
int type)
324 {
325 const struct in6_addr *remote = &parms->raddr;
326 const struct in6_addr *local = &parms->laddr;
327 __be32 key = parms->i_key;
328 int link = parms->link;
329 struct ip6_tnl *t;
330 struct ip6_tnl __rcu **tp;
331 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
332
333 for (tp = __ip6gre_bucket(ign, parms);
334 (t = rtnl_dereference(*tp)) != NULL;
335 tp = &t->next)
336 if (ipv6_addr_equal(local, &t->parms.laddr) &&
337 ipv6_addr_equal(remote, &t->parms.raddr) &&
338 key == t->parms.i_key &&
339 link == t->parms.link &&
340 type == t->dev->type)
341 break;
342
343 return t;
344 }
345
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
const struct __ip6_tnl_parm *parms, int create)
348 {
349 struct ip6_tnl *t, *nt;
350 struct net_device *dev;
351 char name[IFNAMSIZ];
352 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
353
354 t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
355 if (t && create)
356 return NULL;
357 if (t || !create)
358 return t;
359
360 if (parms->name[0]) {
361 if (!dev_valid_name(parms->name))
362 return NULL;
363 strlcpy(name, parms->name, IFNAMSIZ);
364 } else {
365 strcpy(name, "ip6gre%d");
366 }
367 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
368 ip6gre_tunnel_setup);
369 if (!dev)
370 return NULL;
371
372 dev_net_set(dev, net);
373
374 nt = netdev_priv(dev);
375 nt->parms = *parms;
376 dev->rtnl_link_ops = &ip6gre_link_ops;
377
378 nt->dev = dev;
379 nt->net = dev_net(dev);
380
381 if (register_netdevice(dev) < 0)
382 goto failed_free;
383
384 ip6gre_tnl_link_config(nt, 1);
385
386 /* Can use a lockless transmit, unless we generate output sequences */
387 if (!(nt->parms.o_flags & TUNNEL_SEQ))
388 dev->features |= NETIF_F_LLTX;
389
390 ip6gre_tunnel_link(ign, nt);
391 return nt;
392
393 failed_free:
394 free_netdev(dev);
395 return NULL;
396 }
397
static void ip6erspan_tunnel_uninit(struct net_device *dev)
399 {
400 struct ip6_tnl *t = netdev_priv(dev);
401 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
402
403 ip6erspan_tunnel_unlink_md(ign, t);
404 ip6gre_tunnel_unlink(ign, t);
405 dst_cache_reset(&t->dst_cache);
406 dev_put(dev);
407 }
408
static void ip6gre_tunnel_uninit(struct net_device *dev)
410 {
411 struct ip6_tnl *t = netdev_priv(dev);
412 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
413
414 ip6gre_tunnel_unlink_md(ign, t);
415 ip6gre_tunnel_unlink(ign, t);
416 if (ign->fb_tunnel_dev == dev)
417 WRITE_ONCE(ign->fb_tunnel_dev, NULL);
418 dst_cache_reset(&t->dst_cache);
419 dev_put(dev);
420 }
421
422
static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
425 {
426 struct net *net = dev_net(skb->dev);
427 const struct ipv6hdr *ipv6h;
428 struct tnl_ptk_info tpi;
429 struct ip6_tnl *t;
430
431 if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
432 offset) < 0)
433 return -EINVAL;
434
435 ipv6h = (const struct ipv6hdr *)skb->data;
436 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
437 tpi.key, tpi.proto);
438 if (!t)
439 return -ENOENT;
440
441 switch (type) {
442 case ICMPV6_DEST_UNREACH:
443 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
444 t->parms.name);
445 if (code != ICMPV6_PORT_UNREACH)
446 break;
447 return 0;
448 case ICMPV6_TIME_EXCEED:
449 if (code == ICMPV6_EXC_HOPLIMIT) {
450 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
451 t->parms.name);
452 break;
453 }
454 return 0;
455 case ICMPV6_PARAMPROB: {
456 struct ipv6_tlv_tnl_enc_lim *tel;
457 __u32 teli;
458
459 teli = 0;
460 if (code == ICMPV6_HDR_FIELD)
461 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
462
463 if (teli && teli == be32_to_cpu(info) - 2) {
464 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
465 if (tel->encap_limit == 0) {
466 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
467 t->parms.name);
468 }
469 } else {
470 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
471 t->parms.name);
472 }
473 return 0;
474 }
475 case ICMPV6_PKT_TOOBIG:
476 ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
477 return 0;
478 case NDISC_REDIRECT:
479 ip6_redirect(skb, net, skb->dev->ifindex, 0,
480 sock_net_uid(net, NULL));
481 return 0;
482 }
483
484 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
485 t->err_count++;
486 else
487 t->err_count = 1;
488 t->err_time = jiffies;
489
490 return 0;
491 }
492
static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
494 {
495 const struct ipv6hdr *ipv6h;
496 struct ip6_tnl *tunnel;
497
498 ipv6h = ipv6_hdr(skb);
499 tunnel = ip6gre_tunnel_lookup(skb->dev,
500 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
501 tpi->proto);
502 if (tunnel) {
503 if (tunnel->parms.collect_md) {
504 struct metadata_dst *tun_dst;
505 __be64 tun_id;
506 __be16 flags;
507
508 flags = tpi->flags;
509 tun_id = key32_to_tunnel_id(tpi->key);
510
511 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
512 if (!tun_dst)
513 return PACKET_REJECT;
514
515 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
516 } else {
517 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
518 }
519
520 return PACKET_RCVD;
521 }
522
523 return PACKET_REJECT;
524 }
525
static int ip6erspan_rcv(struct sk_buff *skb,
struct tnl_ptk_info *tpi,
int gre_hdr_len)
529 {
530 struct erspan_base_hdr *ershdr;
531 const struct ipv6hdr *ipv6h;
532 struct erspan_md2 *md2;
533 struct ip6_tnl *tunnel;
534 u8 ver;
535
536 ipv6h = ipv6_hdr(skb);
537 ershdr = (struct erspan_base_hdr *)skb->data;
538 ver = ershdr->ver;
539
540 tunnel = ip6gre_tunnel_lookup(skb->dev,
541 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
542 tpi->proto);
543 if (tunnel) {
544 int len = erspan_hdr_len(ver);
545
546 if (unlikely(!pskb_may_pull(skb, len)))
547 return PACKET_REJECT;
548
549 if (__iptunnel_pull_header(skb, len,
550 htons(ETH_P_TEB),
551 false, false) < 0)
552 return PACKET_REJECT;
553
554 if (tunnel->parms.collect_md) {
555 struct erspan_metadata *pkt_md, *md;
556 struct metadata_dst *tun_dst;
557 struct ip_tunnel_info *info;
558 unsigned char *gh;
559 __be64 tun_id;
560 __be16 flags;
561
562 tpi->flags |= TUNNEL_KEY;
563 flags = tpi->flags;
564 tun_id = key32_to_tunnel_id(tpi->key);
565
566 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
567 sizeof(*md));
568 if (!tun_dst)
569 return PACKET_REJECT;
570
571 /* skb can be uncloned in __iptunnel_pull_header, so
572 * old pkt_md is no longer valid and we need to reset
573 * it
574 */
575 gh = skb_network_header(skb) +
576 skb_network_header_len(skb);
577 pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
578 sizeof(*ershdr));
579 info = &tun_dst->u.tun_info;
580 md = ip_tunnel_info_opts(info);
581 md->version = ver;
582 md2 = &md->u.md2;
583 memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
584 ERSPAN_V2_MDSIZE);
585 info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
586 info->options_len = sizeof(*md);
587
588 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
589
590 } else {
591 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
592 }
593
594 return PACKET_RCVD;
595 }
596
597 return PACKET_REJECT;
598 }
599
static int gre_rcv(struct sk_buff *skb)
601 {
602 struct tnl_ptk_info tpi;
603 bool csum_err = false;
604 int hdr_len;
605
606 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
607 if (hdr_len < 0)
608 goto drop;
609
610 if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
611 goto drop;
612
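/* ERSPAN is carried over GRE; divert it to the ERSPAN receive path
 * before the plain ip6gre path can consume it.
 */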
613 if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
614 tpi.proto == htons(ETH_P_ERSPAN2))) {
615 if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
616 return 0;
617 goto out;
618 }
619
620 if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
621 return 0;
622
623 out:
624 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
625 drop:
626 kfree_skb(skb);
627 return 0;
628 }
629
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
631 {
632 return iptunnel_handle_offloads(skb,
633 csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
634 }
635
static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
struct net_device *dev,
struct flowi6 *fl6, __u8 *dsfield,
int *encap_limit)
640 {
641 const struct iphdr *iph = ip_hdr(skb);
642 struct ip6_tnl *t = netdev_priv(dev);
643
644 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
645 *encap_limit = t->parms.encap_limit;
646
647 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
648
649 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
650 *dsfield = ipv4_get_dsfield(iph);
651 else
652 *dsfield = ip6_tclass(t->parms.flowinfo);
653
654 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
655 fl6->flowi6_mark = skb->mark;
656 else
657 fl6->flowi6_mark = t->parms.fwmark;
658
659 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
660 }
661
static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
struct net_device *dev,
struct flowi6 *fl6, __u8 *dsfield,
int *encap_limit)
666 {
667 struct ipv6hdr *ipv6h;
668 struct ip6_tnl *t = netdev_priv(dev);
669 __u16 offset;
670
671 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
672 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
673 ipv6h = ipv6_hdr(skb);
674
675 if (offset > 0) {
676 struct ipv6_tlv_tnl_enc_lim *tel;
677
678 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
679 if (tel->encap_limit == 0) {
680 icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
681 ICMPV6_HDR_FIELD, offset + 2);
682 return -1;
683 }
684 *encap_limit = tel->encap_limit - 1;
685 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
686 *encap_limit = t->parms.encap_limit;
687 }
688
689 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
690
691 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
692 *dsfield = ipv6_get_dsfield(ipv6h);
693 else
694 *dsfield = ip6_tclass(t->parms.flowinfo);
695
696 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
697 fl6->flowlabel |= ip6_flowlabel(ipv6h);
698
699 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
700 fl6->flowi6_mark = skb->mark;
701 else
702 fl6->flowi6_mark = t->parms.fwmark;
703
704 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
705
706 return 0;
707 }
708
static struct ip_tunnel_info *skb_tunnel_info_txcheck(struct sk_buff *skb)
710 {
711 struct ip_tunnel_info *tun_info;
712
713 tun_info = skb_tunnel_info(skb);
714 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX)))
715 return ERR_PTR(-EINVAL);
716
717 return tun_info;
718 }
719
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
struct net_device *dev, __u8 dsfield,
struct flowi6 *fl6, int encap_limit,
__u32 *pmtu, __be16 proto)
724 {
725 struct ip6_tnl *tunnel = netdev_priv(dev);
726 __be16 protocol;
727
728 if (dev->type == ARPHRD_ETHER)
729 IPCB(skb)->flags = 0;
730
731 if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
732 fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
733 else
734 fl6->daddr = tunnel->parms.raddr;
735
736 if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
737 return -ENOMEM;
738
739 /* Push GRE header. */
740 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
741
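/* In collect_md (external) mode the flow and the GRE header are built
 * from the per-skb tunnel metadata instead of the device configuration.
 */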
742 if (tunnel->parms.collect_md) {
743 struct ip_tunnel_info *tun_info;
744 const struct ip_tunnel_key *key;
745 __be16 flags;
746
747 tun_info = skb_tunnel_info_txcheck(skb);
748 if (IS_ERR(tun_info) ||
749 unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
750 return -EINVAL;
751
752 key = &tun_info->key;
753 memset(fl6, 0, sizeof(*fl6));
754 fl6->flowi6_proto = IPPROTO_GRE;
755 fl6->daddr = key->u.ipv6.dst;
756 fl6->flowlabel = key->label;
757 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
758
759 dsfield = key->tos;
760 flags = key->tun_flags &
761 (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
762 tunnel->tun_hlen = gre_calc_hlen(flags);
763
764 gre_build_header(skb, tunnel->tun_hlen,
765 flags, protocol,
766 tunnel_id_to_key32(tun_info->key.tun_id),
767 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
768 : 0);
769
770 } else {
771 if (tunnel->parms.o_flags & TUNNEL_SEQ)
772 tunnel->o_seqno++;
773
774 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
775 protocol, tunnel->parms.o_key,
776 htonl(tunnel->o_seqno));
777 }
778
779 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
780 NEXTHDR_GRE);
781 }
782
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
784 {
785 struct ip6_tnl *t = netdev_priv(dev);
786 int encap_limit = -1;
787 struct flowi6 fl6;
788 __u8 dsfield = 0;
789 __u32 mtu;
790 int err;
791
792 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
793
794 if (!t->parms.collect_md)
795 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
796 &dsfield, &encap_limit);
797
798 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
799 if (err)
800 return -1;
801
802 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
803 skb->protocol);
804 if (err != 0) {
805 /* XXX: send ICMP error even if DF is not set. */
806 if (err == -EMSGSIZE)
807 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
808 htonl(mtu));
809 return -1;
810 }
811
812 return 0;
813 }
814
static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
816 {
817 struct ip6_tnl *t = netdev_priv(dev);
818 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
819 int encap_limit = -1;
820 struct flowi6 fl6;
821 __u8 dsfield = 0;
822 __u32 mtu;
823 int err;
824
825 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
826 return -1;
827
828 if (!t->parms.collect_md &&
829 prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
830 return -1;
831
832 if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
833 return -1;
834
835 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
836 &mtu, skb->protocol);
837 if (err != 0) {
838 if (err == -EMSGSIZE)
839 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
840 return -1;
841 }
842
843 return 0;
844 }
845
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loops by checking that the tunnel exit-point
 *   doesn't match the source of the incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 otherwise
 **/
859
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
const struct ipv6hdr *hdr)
862 {
863 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
864 }
865
static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
867 {
868 struct ip6_tnl *t = netdev_priv(dev);
869 int encap_limit = -1;
870 struct flowi6 fl6;
871 __u32 mtu;
872 int err;
873
874 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
875 encap_limit = t->parms.encap_limit;
876
877 if (!t->parms.collect_md)
878 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
879
880 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
881 if (err)
882 return err;
883
884 err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);
885
886 return err;
887 }
888
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
891 {
892 struct ip6_tnl *t = netdev_priv(dev);
893 struct net_device_stats *stats = &t->dev->stats;
894 int ret;
895
896 if (!pskb_inet_may_pull(skb))
897 goto tx_err;
898
899 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
900 goto tx_err;
901
902 switch (skb->protocol) {
903 case htons(ETH_P_IP):
904 ret = ip6gre_xmit_ipv4(skb, dev);
905 break;
906 case htons(ETH_P_IPV6):
907 ret = ip6gre_xmit_ipv6(skb, dev);
908 break;
909 default:
910 ret = ip6gre_xmit_other(skb, dev);
911 break;
912 }
913
914 if (ret < 0)
915 goto tx_err;
916
917 return NETDEV_TX_OK;
918
919 tx_err:
920 if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb)))
921 stats->tx_errors++;
922 stats->tx_dropped++;
923 kfree_skb(skb);
924 return NETDEV_TX_OK;
925 }
926
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
929 {
930 struct ip_tunnel_info *tun_info = NULL;
931 struct ip6_tnl *t = netdev_priv(dev);
932 struct dst_entry *dst = skb_dst(skb);
933 struct net_device_stats *stats;
934 bool truncate = false;
935 int encap_limit = -1;
936 __u8 dsfield = false;
937 struct flowi6 fl6;
938 int err = -EINVAL;
939 __be16 proto;
940 __u32 mtu;
941 int nhoff;
942 int thoff;
943
944 if (!pskb_inet_may_pull(skb))
945 goto tx_err;
946
947 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
948 goto tx_err;
949
950 if (gre_handle_offloads(skb, false))
951 goto tx_err;
952
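/* ERSPAN cannot rely on fragmentation: oversized frames are trimmed
 * and flagged as truncated in the ERSPAN header instead.
 */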
953 if (skb->len > dev->mtu + dev->hard_header_len) {
954 pskb_trim(skb, dev->mtu + dev->hard_header_len);
955 truncate = true;
956 }
957
958 nhoff = skb_network_header(skb) - skb_mac_header(skb);
959 if (skb->protocol == htons(ETH_P_IP) &&
960 (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
961 truncate = true;
962
963 thoff = skb_transport_header(skb) - skb_mac_header(skb);
964 if (skb->protocol == htons(ETH_P_IPV6) &&
965 (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
966 truncate = true;
967
968 if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
969 goto tx_err;
970
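/* The session ID travels in the ERSPAN header itself, so the GRE key
 * flag must not be set on the outer header.
 */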
971 t->parms.o_flags &= ~TUNNEL_KEY;
972 IPCB(skb)->flags = 0;
973
974 /* For collect_md mode, derive fl6 from the tunnel key,
975 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
976 */
977 if (t->parms.collect_md) {
978 const struct ip_tunnel_key *key;
979 struct erspan_metadata *md;
980 __be32 tun_id;
981
982 tun_info = skb_tunnel_info_txcheck(skb);
983 if (IS_ERR(tun_info) ||
984 unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
985 goto tx_err;
986
987 key = &tun_info->key;
988 memset(&fl6, 0, sizeof(fl6));
989 fl6.flowi6_proto = IPPROTO_GRE;
990 fl6.daddr = key->u.ipv6.dst;
991 fl6.flowlabel = key->label;
992 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
993
994 dsfield = key->tos;
995 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
996 goto tx_err;
997 if (tun_info->options_len < sizeof(*md))
998 goto tx_err;
999 md = ip_tunnel_info_opts(tun_info);
1000
1001 tun_id = tunnel_id_to_key32(key->tun_id);
1002 if (md->version == 1) {
1003 erspan_build_header(skb,
1004 ntohl(tun_id),
1005 ntohl(md->u.index), truncate,
1006 false);
1007 } else if (md->version == 2) {
1008 erspan_build_header_v2(skb,
1009 ntohl(tun_id),
1010 md->u.md2.dir,
1011 get_hwid(&md->u.md2),
1012 truncate, false);
1013 } else {
1014 goto tx_err;
1015 }
1016 } else {
1017 switch (skb->protocol) {
1018 case htons(ETH_P_IP):
1019 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1020 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
1021 &dsfield, &encap_limit);
1022 break;
1023 case htons(ETH_P_IPV6):
1024 if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
1025 goto tx_err;
1026 if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
1027 &dsfield, &encap_limit))
1028 goto tx_err;
1029 break;
1030 default:
1031 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1032 break;
1033 }
1034
1035 if (t->parms.erspan_ver == 1)
1036 erspan_build_header(skb, ntohl(t->parms.o_key),
1037 t->parms.index,
1038 truncate, false);
1039 else if (t->parms.erspan_ver == 2)
1040 erspan_build_header_v2(skb, ntohl(t->parms.o_key),
1041 t->parms.dir,
1042 t->parms.hwid,
1043 truncate, false);
1044 else
1045 goto tx_err;
1046
1047 fl6.daddr = t->parms.raddr;
1048 }
1049
1050 /* Push GRE header. */
1051 proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
1052 : htons(ETH_P_ERSPAN2);
1053 gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
1054
1055 /* TooBig packet may have updated dst->dev's mtu */
1056 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
1057 dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
1058
1059 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1060 NEXTHDR_GRE);
1061 if (err != 0) {
1062 /* XXX: send ICMP error even if DF is not set. */
1063 if (err == -EMSGSIZE) {
1064 if (skb->protocol == htons(ETH_P_IP))
1065 icmp_ndo_send(skb, ICMP_DEST_UNREACH,
1066 ICMP_FRAG_NEEDED, htonl(mtu));
1067 else
1068 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1069 }
1070
1071 goto tx_err;
1072 }
1073 return NETDEV_TX_OK;
1074
1075 tx_err:
1076 stats = &t->dev->stats;
1077 if (!IS_ERR(tun_info))
1078 stats->tx_errors++;
1079 stats->tx_dropped++;
1080 kfree_skb(skb);
1081 return NETDEV_TX_OK;
1082 }
1083
static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
1085 {
1086 struct net_device *dev = t->dev;
1087 struct __ip6_tnl_parm *p = &t->parms;
1088 struct flowi6 *fl6 = &t->fl.u.ip6;
1089
1090 if (dev->type != ARPHRD_ETHER) {
1091 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1092 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1093 }
1094
1095 /* Set up flowi template */
1096 fl6->saddr = p->laddr;
1097 fl6->daddr = p->raddr;
1098 fl6->flowi6_oif = p->link;
1099 fl6->flowlabel = 0;
1100 fl6->flowi6_proto = IPPROTO_GRE;
1101
1102 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1103 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1104 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1105 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1106
1107 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1108 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1109
1110 if (p->flags&IP6_TNL_F_CAP_XMIT &&
1111 p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
1112 dev->flags |= IFF_POINTOPOINT;
1113 else
1114 dev->flags &= ~IFF_POINTOPOINT;
1115 }
1116
static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
int t_hlen)
1119 {
1120 const struct __ip6_tnl_parm *p = &t->parms;
1121 struct net_device *dev = t->dev;
1122
1123 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1124 int strict = (ipv6_addr_type(&p->raddr) &
1125 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1126
1127 struct rt6_info *rt = rt6_lookup(t->net,
1128 &p->raddr, &p->laddr,
1129 p->link, NULL, strict);
1130
1131 if (!rt)
1132 return;
1133
1134 if (rt->dst.dev) {
1135 unsigned short dst_len = rt->dst.dev->hard_header_len +
1136 t_hlen;
1137
1138 if (t->dev->header_ops)
1139 dev->hard_header_len = dst_len;
1140 else
1141 dev->needed_headroom = dst_len;
1142
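/* Tunnel MTU = lower-device MTU minus the tunnel headers, minus 8
 * bytes when an encapsulation-limit option is sent and minus the
 * Ethernet header for TAP devices, floored at IPV6_MIN_MTU.
 */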
1143 if (set_mtu) {
1144 dev->mtu = rt->dst.dev->mtu - t_hlen;
1145 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1146 dev->mtu -= 8;
1147 if (dev->type == ARPHRD_ETHER)
1148 dev->mtu -= ETH_HLEN;
1149
1150 if (dev->mtu < IPV6_MIN_MTU)
1151 dev->mtu = IPV6_MIN_MTU;
1152 }
1153 }
1154 ip6_rt_put(rt);
1155 }
1156 }
1157
static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
1159 {
1160 int t_hlen;
1161
1162 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
1163 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
1164
1165 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1166
1167 if (tunnel->dev->header_ops)
1168 tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1169 else
1170 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1171
1172 return t_hlen;
1173 }
1174
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1176 {
1177 ip6gre_tnl_link_config_common(t);
1178 ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
1179 }
1180
static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
const struct __ip6_tnl_parm *p)
1183 {
1184 t->parms.laddr = p->laddr;
1185 t->parms.raddr = p->raddr;
1186 t->parms.flags = p->flags;
1187 t->parms.hop_limit = p->hop_limit;
1188 t->parms.encap_limit = p->encap_limit;
1189 t->parms.flowinfo = p->flowinfo;
1190 t->parms.link = p->link;
1191 t->parms.proto = p->proto;
1192 t->parms.i_key = p->i_key;
1193 t->parms.o_key = p->o_key;
1194 t->parms.i_flags = p->i_flags;
1195 t->parms.o_flags = p->o_flags;
1196 t->parms.fwmark = p->fwmark;
1197 t->parms.erspan_ver = p->erspan_ver;
1198 t->parms.index = p->index;
1199 t->parms.dir = p->dir;
1200 t->parms.hwid = p->hwid;
1201 dst_cache_reset(&t->dst_cache);
1202 }
1203
static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
int set_mtu)
1206 {
1207 ip6gre_tnl_copy_tnl_parm(t, p);
1208 ip6gre_tnl_link_config(t, set_mtu);
1209 return 0;
1210 }
1211
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
const struct ip6_tnl_parm2 *u)
1214 {
1215 p->laddr = u->laddr;
1216 p->raddr = u->raddr;
1217 p->flags = u->flags;
1218 p->hop_limit = u->hop_limit;
1219 p->encap_limit = u->encap_limit;
1220 p->flowinfo = u->flowinfo;
1221 p->link = u->link;
1222 p->i_key = u->i_key;
1223 p->o_key = u->o_key;
1224 p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
1225 p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
1226 memcpy(p->name, u->name, sizeof(u->name));
1227 }
1228
static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
const struct __ip6_tnl_parm *p)
1231 {
1232 u->proto = IPPROTO_GRE;
1233 u->laddr = p->laddr;
1234 u->raddr = p->raddr;
1235 u->flags = p->flags;
1236 u->hop_limit = p->hop_limit;
1237 u->encap_limit = p->encap_limit;
1238 u->flowinfo = p->flowinfo;
1239 u->link = p->link;
1240 u->i_key = p->i_key;
1241 u->o_key = p->o_key;
1242 u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
1243 u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
1244 memcpy(u->name, p->name, sizeof(u->name));
1245 }
1246
static int ip6gre_tunnel_ioctl(struct net_device *dev,
struct ifreq *ifr, int cmd)
1249 {
1250 int err = 0;
1251 struct ip6_tnl_parm2 p;
1252 struct __ip6_tnl_parm p1;
1253 struct ip6_tnl *t = netdev_priv(dev);
1254 struct net *net = t->net;
1255 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1256
1257 memset(&p1, 0, sizeof(p1));
1258
1259 switch (cmd) {
1260 case SIOCGETTUNNEL:
1261 if (dev == ign->fb_tunnel_dev) {
1262 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1263 err = -EFAULT;
1264 break;
1265 }
1266 ip6gre_tnl_parm_from_user(&p1, &p);
1267 t = ip6gre_tunnel_locate(net, &p1, 0);
1268 if (!t)
1269 t = netdev_priv(dev);
1270 }
1271 memset(&p, 0, sizeof(p));
1272 ip6gre_tnl_parm_to_user(&p, &t->parms);
1273 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1274 err = -EFAULT;
1275 break;
1276
1277 case SIOCADDTUNNEL:
1278 case SIOCCHGTUNNEL:
1279 err = -EPERM;
1280 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1281 goto done;
1282
1283 err = -EFAULT;
1284 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1285 goto done;
1286
1287 err = -EINVAL;
1288 if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
1289 goto done;
1290
1291 if (!(p.i_flags&GRE_KEY))
1292 p.i_key = 0;
1293 if (!(p.o_flags&GRE_KEY))
1294 p.o_key = 0;
1295
1296 ip6gre_tnl_parm_from_user(&p1, &p);
1297 t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
1298
1299 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1300 if (t) {
1301 if (t->dev != dev) {
1302 err = -EEXIST;
1303 break;
1304 }
1305 } else {
1306 t = netdev_priv(dev);
1307
1308 ip6gre_tunnel_unlink(ign, t);
1309 synchronize_net();
1310 ip6gre_tnl_change(t, &p1, 1);
1311 ip6gre_tunnel_link(ign, t);
1312 netdev_state_change(dev);
1313 }
1314 }
1315
1316 if (t) {
1317 err = 0;
1318
1319 memset(&p, 0, sizeof(p));
1320 ip6gre_tnl_parm_to_user(&p, &t->parms);
1321 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1322 err = -EFAULT;
1323 } else
1324 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1325 break;
1326
1327 case SIOCDELTUNNEL:
1328 err = -EPERM;
1329 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1330 goto done;
1331
1332 if (dev == ign->fb_tunnel_dev) {
1333 err = -EFAULT;
1334 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1335 goto done;
1336 err = -ENOENT;
1337 ip6gre_tnl_parm_from_user(&p1, &p);
1338 t = ip6gre_tunnel_locate(net, &p1, 0);
1339 if (!t)
1340 goto done;
1341 err = -EPERM;
1342 if (t == netdev_priv(ign->fb_tunnel_dev))
1343 goto done;
1344 dev = t->dev;
1345 }
1346 unregister_netdevice(dev);
1347 err = 0;
1348 break;
1349
1350 default:
1351 err = -EINVAL;
1352 }
1353
1354 done:
1355 return err;
1356 }
1357
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len)
1361 {
1362 struct ip6_tnl *t = netdev_priv(dev);
1363 struct ipv6hdr *ipv6h;
1364 __be16 *p;
1365
1366 ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
1367 ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
1368 t->fl.u.ip6.flowlabel,
1369 true, &t->fl.u.ip6));
1370 ipv6h->hop_limit = t->parms.hop_limit;
1371 ipv6h->nexthdr = NEXTHDR_GRE;
1372 ipv6h->saddr = t->parms.laddr;
1373 ipv6h->daddr = t->parms.raddr;
1374
1375 p = (__be16 *)(ipv6h + 1);
1376 p[0] = t->parms.o_flags;
1377 p[1] = htons(type);
1378
1379 /*
1380 * Set the source hardware address.
1381 */
1382
1383 if (saddr)
1384 memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
1385 if (daddr)
1386 memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
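/* As with eth_header(), a negative return tells the caller that the
 * header could not be completed (destination still unset).
 */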
1387 if (!ipv6_addr_any(&ipv6h->daddr))
1388 return t->hlen;
1389
1390 return -t->hlen;
1391 }
1392
1393 static const struct header_ops ip6gre_header_ops = {
1394 .create = ip6gre_header,
1395 };
1396
1397 static const struct net_device_ops ip6gre_netdev_ops = {
1398 .ndo_init = ip6gre_tunnel_init,
1399 .ndo_uninit = ip6gre_tunnel_uninit,
1400 .ndo_start_xmit = ip6gre_tunnel_xmit,
1401 .ndo_do_ioctl = ip6gre_tunnel_ioctl,
1402 .ndo_change_mtu = ip6_tnl_change_mtu,
1403 .ndo_get_stats64 = dev_get_tstats64,
1404 .ndo_get_iflink = ip6_tnl_get_iflink,
1405 };
1406
static void ip6gre_dev_free(struct net_device *dev)
1408 {
1409 struct ip6_tnl *t = netdev_priv(dev);
1410
1411 gro_cells_destroy(&t->gro_cells);
1412 dst_cache_destroy(&t->dst_cache);
1413 free_percpu(dev->tstats);
1414 }
1415
static void ip6gre_tunnel_setup(struct net_device *dev)
1417 {
1418 dev->netdev_ops = &ip6gre_netdev_ops;
1419 dev->needs_free_netdev = true;
1420 dev->priv_destructor = ip6gre_dev_free;
1421
1422 dev->type = ARPHRD_IP6GRE;
1423
1424 dev->flags |= IFF_NOARP;
1425 dev->addr_len = sizeof(struct in6_addr);
1426 netif_keep_dst(dev);
1427 /* This perm addr will be used as interface identifier by IPv6 */
1428 dev->addr_assign_type = NET_ADDR_RANDOM;
1429 eth_random_addr(dev->perm_addr);
1430 }
1431
1432 #define GRE6_FEATURES (NETIF_F_SG | \
1433 NETIF_F_FRAGLIST | \
1434 NETIF_F_HIGHDMA | \
1435 NETIF_F_HW_CSUM)
1436
static void ip6gre_tnl_init_features(struct net_device *dev)
1438 {
1439 struct ip6_tnl *nt = netdev_priv(dev);
1440
1441 dev->features |= GRE6_FEATURES;
1442 dev->hw_features |= GRE6_FEATURES;
1443
1444 if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
1445 /* TCP offload with GRE SEQ is not supported, nor
1446 * can we support 2 levels of outer headers requiring
1447 * an update.
1448 */
1449 if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
1450 nt->encap.type == TUNNEL_ENCAP_NONE) {
1451 dev->features |= NETIF_F_GSO_SOFTWARE;
1452 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1453 }
1454
1455 /* Can use a lockless transmit, unless we generate
1456 * output sequences
1457 */
1458 dev->features |= NETIF_F_LLTX;
1459 }
1460 }
1461
static int ip6gre_tunnel_init_common(struct net_device *dev)
1463 {
1464 struct ip6_tnl *tunnel;
1465 int ret;
1466 int t_hlen;
1467
1468 tunnel = netdev_priv(dev);
1469
1470 tunnel->dev = dev;
1471 tunnel->net = dev_net(dev);
1472 strcpy(tunnel->parms.name, dev->name);
1473
1474 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1475 if (!dev->tstats)
1476 return -ENOMEM;
1477
1478 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1479 if (ret)
1480 goto cleanup_alloc_pcpu_stats;
1481
1482 ret = gro_cells_init(&tunnel->gro_cells, dev);
1483 if (ret)
1484 goto cleanup_dst_cache_init;
1485
1486 t_hlen = ip6gre_calc_hlen(tunnel);
1487 dev->mtu = ETH_DATA_LEN - t_hlen;
1488 if (dev->type == ARPHRD_ETHER)
1489 dev->mtu -= ETH_HLEN;
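/* Reserve 8 bytes for the destination options extension header that
 * carries the tunnel encapsulation limit.
 */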
1490 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1491 dev->mtu -= 8;
1492
1493 if (tunnel->parms.collect_md) {
1494 netif_keep_dst(dev);
1495 }
1496 ip6gre_tnl_init_features(dev);
1497
1498 dev_hold(dev);
1499 return 0;
1500
1501 cleanup_dst_cache_init:
1502 dst_cache_destroy(&tunnel->dst_cache);
1503 cleanup_alloc_pcpu_stats:
1504 free_percpu(dev->tstats);
1505 dev->tstats = NULL;
1506 return ret;
1507 }
1508
static int ip6gre_tunnel_init(struct net_device *dev)
1510 {
1511 struct ip6_tnl *tunnel;
1512 int ret;
1513
1514 ret = ip6gre_tunnel_init_common(dev);
1515 if (ret)
1516 return ret;
1517
1518 tunnel = netdev_priv(dev);
1519
1520 if (tunnel->parms.collect_md)
1521 return 0;
1522
1523 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
1524 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
1525
1526 if (ipv6_addr_any(&tunnel->parms.raddr))
1527 dev->header_ops = &ip6gre_header_ops;
1528
1529 return 0;
1530 }
1531
static void ip6gre_fb_tunnel_init(struct net_device *dev)
1533 {
1534 struct ip6_tnl *tunnel = netdev_priv(dev);
1535
1536 tunnel->dev = dev;
1537 tunnel->net = dev_net(dev);
1538 strcpy(tunnel->parms.name, dev->name);
1539
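/* The fallback device speaks plain GRE: only the 4-byte base GRE
 * header follows the IPv6 header.
 */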
1540 tunnel->hlen = sizeof(struct ipv6hdr) + 4;
1541 }
1542
1543 static struct inet6_protocol ip6gre_protocol __read_mostly = {
1544 .handler = gre_rcv,
1545 .err_handler = ip6gre_err,
1546 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1547 };
1548
static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
1550 {
1551 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1552 struct net_device *dev, *aux;
1553 int prio;
1554
1555 for_each_netdev_safe(net, dev, aux)
1556 if (dev->rtnl_link_ops == &ip6gre_link_ops ||
1557 dev->rtnl_link_ops == &ip6gre_tap_ops ||
1558 dev->rtnl_link_ops == &ip6erspan_tap_ops)
1559 unregister_netdevice_queue(dev, head);
1560
1561 for (prio = 0; prio < 4; prio++) {
1562 int h;
1563 for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
1564 struct ip6_tnl *t;
1565
1566 t = rtnl_dereference(ign->tunnels[prio][h]);
1567
1568 while (t) {
1569 /* If dev is in the same netns, it has already
1570 * been added to the list by the previous loop.
1571 */
1572 if (!net_eq(dev_net(t->dev), net))
1573 unregister_netdevice_queue(t->dev,
1574 head);
1575 t = rtnl_dereference(t->next);
1576 }
1577 }
1578 }
1579 }
1580
static int __net_init ip6gre_init_net(struct net *net)
1582 {
1583 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1584 struct net_device *ndev;
1585 int err;
1586
1587 if (!net_has_fallback_tunnels(net))
1588 return 0;
1589 ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
1590 NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
1591 if (!ndev) {
1592 err = -ENOMEM;
1593 goto err_alloc_dev;
1594 }
1595 ign->fb_tunnel_dev = ndev;
1596 dev_net_set(ign->fb_tunnel_dev, net);
/* The FB netdevice is special: there is one, and only one, per netns.
 * Allowing it to be moved to another netns is clearly unsafe.
 */
1600 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
1601
1602
1603 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1604 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
1605
1606 err = register_netdev(ign->fb_tunnel_dev);
1607 if (err)
1608 goto err_reg_dev;
1609
1610 rcu_assign_pointer(ign->tunnels_wc[0],
1611 netdev_priv(ign->fb_tunnel_dev));
1612 return 0;
1613
1614 err_reg_dev:
1615 free_netdev(ndev);
1616 err_alloc_dev:
1617 return err;
1618 }
1619
static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
1621 {
1622 struct net *net;
1623 LIST_HEAD(list);
1624
1625 rtnl_lock();
1626 list_for_each_entry(net, net_list, exit_list)
1627 ip6gre_destroy_tunnels(net, &list);
1628 unregister_netdevice_many(&list);
1629 rtnl_unlock();
1630 }
1631
1632 static struct pernet_operations ip6gre_net_ops = {
1633 .init = ip6gre_init_net,
1634 .exit_batch = ip6gre_exit_batch_net,
1635 .id = &ip6gre_net_id,
1636 .size = sizeof(struct ip6gre_net),
1637 };
1638
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
1641 {
1642 __be16 flags;
1643
1644 if (!data)
1645 return 0;
1646
1647 flags = 0;
1648 if (data[IFLA_GRE_IFLAGS])
1649 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1650 if (data[IFLA_GRE_OFLAGS])
1651 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1652 if (flags & (GRE_VERSION|GRE_ROUTING))
1653 return -EINVAL;
1654
1655 return 0;
1656 }
1657
static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
1660 {
1661 struct in6_addr daddr;
1662
1663 if (tb[IFLA_ADDRESS]) {
1664 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1665 return -EINVAL;
1666 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1667 return -EADDRNOTAVAIL;
1668 }
1669
1670 if (!data)
1671 goto out;
1672
1673 if (data[IFLA_GRE_REMOTE]) {
1674 daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1675 if (ipv6_addr_any(&daddr))
1676 return -EINVAL;
1677 }
1678
1679 out:
1680 return ip6gre_tunnel_validate(tb, data, extack);
1681 }
1682
static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
1685 {
1686 __be16 flags = 0;
1687 int ret, ver = 0;
1688
1689 if (!data)
1690 return 0;
1691
1692 ret = ip6gre_tap_validate(tb, data, extack);
1693 if (ret)
1694 return ret;
1695
/* ERSPAN should carry only the GRE sequence and key flags */
1697 if (data[IFLA_GRE_OFLAGS])
1698 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1699 if (data[IFLA_GRE_IFLAGS])
1700 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1701 if (!data[IFLA_GRE_COLLECT_METADATA] &&
1702 flags != (GRE_SEQ | GRE_KEY))
1703 return -EINVAL;
1704
/* The ERSPAN session ID is only 10 bits wide. Since we reuse the
 * 32-bit key field as the ID, check its range.
 */
1708 if (data[IFLA_GRE_IKEY] &&
1709 (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1710 return -EINVAL;
1711
1712 if (data[IFLA_GRE_OKEY] &&
1713 (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1714 return -EINVAL;
1715
1716 if (data[IFLA_GRE_ERSPAN_VER]) {
1717 ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1718 if (ver != 1 && ver != 2)
1719 return -EINVAL;
1720 }
1721
1722 if (ver == 1) {
1723 if (data[IFLA_GRE_ERSPAN_INDEX]) {
1724 u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1725
1726 if (index & ~INDEX_MASK)
1727 return -EINVAL;
1728 }
1729 } else if (ver == 2) {
1730 if (data[IFLA_GRE_ERSPAN_DIR]) {
1731 u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1732
1733 if (dir & ~(DIR_MASK >> DIR_OFFSET))
1734 return -EINVAL;
1735 }
1736
1737 if (data[IFLA_GRE_ERSPAN_HWID]) {
1738 u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1739
1740 if (hwid & ~(HWID_MASK >> HWID_OFFSET))
1741 return -EINVAL;
1742 }
1743 }
1744
1745 return 0;
1746 }
1747
static void ip6erspan_set_version(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
1750 {
1751 if (!data)
1752 return;
1753
1754 parms->erspan_ver = 1;
1755 if (data[IFLA_GRE_ERSPAN_VER])
1756 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1757
1758 if (parms->erspan_ver == 1) {
1759 if (data[IFLA_GRE_ERSPAN_INDEX])
1760 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1761 } else if (parms->erspan_ver == 2) {
1762 if (data[IFLA_GRE_ERSPAN_DIR])
1763 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1764 if (data[IFLA_GRE_ERSPAN_HWID])
1765 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1766 }
1767 }
1768
static void ip6gre_netlink_parms(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
1771 {
1772 memset(parms, 0, sizeof(*parms));
1773
1774 if (!data)
1775 return;
1776
1777 if (data[IFLA_GRE_LINK])
1778 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1779
1780 if (data[IFLA_GRE_IFLAGS])
1781 parms->i_flags = gre_flags_to_tnl_flags(
1782 nla_get_be16(data[IFLA_GRE_IFLAGS]));
1783
1784 if (data[IFLA_GRE_OFLAGS])
1785 parms->o_flags = gre_flags_to_tnl_flags(
1786 nla_get_be16(data[IFLA_GRE_OFLAGS]));
1787
1788 if (data[IFLA_GRE_IKEY])
1789 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1790
1791 if (data[IFLA_GRE_OKEY])
1792 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1793
1794 if (data[IFLA_GRE_LOCAL])
1795 parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);
1796
1797 if (data[IFLA_GRE_REMOTE])
1798 parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1799
1800 if (data[IFLA_GRE_TTL])
1801 parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
1802
1803 if (data[IFLA_GRE_ENCAP_LIMIT])
1804 parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
1805
1806 if (data[IFLA_GRE_FLOWINFO])
1807 parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);
1808
1809 if (data[IFLA_GRE_FLAGS])
1810 parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
1811
1812 if (data[IFLA_GRE_FWMARK])
1813 parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1814
1815 if (data[IFLA_GRE_COLLECT_METADATA])
1816 parms->collect_md = true;
1817 }
1818
static int ip6gre_tap_init(struct net_device *dev)
1820 {
1821 int ret;
1822
1823 ret = ip6gre_tunnel_init_common(dev);
1824 if (ret)
1825 return ret;
1826
1827 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1828
1829 return 0;
1830 }
1831
1832 static const struct net_device_ops ip6gre_tap_netdev_ops = {
1833 .ndo_init = ip6gre_tap_init,
1834 .ndo_uninit = ip6gre_tunnel_uninit,
1835 .ndo_start_xmit = ip6gre_tunnel_xmit,
1836 .ndo_set_mac_address = eth_mac_addr,
1837 .ndo_validate_addr = eth_validate_addr,
1838 .ndo_change_mtu = ip6_tnl_change_mtu,
1839 .ndo_get_stats64 = dev_get_tstats64,
1840 .ndo_get_iflink = ip6_tnl_get_iflink,
1841 };
1842
static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
1844 {
1845 int t_hlen;
1846
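/* 8 bytes of GRE header: the 4-byte base plus the sequence number
 * field, since ERSPAN is always sent with TUNNEL_SEQ.
 */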
1847 tunnel->tun_hlen = 8;
1848 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1849 erspan_hdr_len(tunnel->parms.erspan_ver);
1850
1851 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1852 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1853 return t_hlen;
1854 }
1855
static int ip6erspan_tap_init(struct net_device *dev)
1857 {
1858 struct ip6_tnl *tunnel;
1859 int t_hlen;
1860 int ret;
1861
1862 tunnel = netdev_priv(dev);
1863
1864 tunnel->dev = dev;
1865 tunnel->net = dev_net(dev);
1866 strcpy(tunnel->parms.name, dev->name);
1867
1868 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1869 if (!dev->tstats)
1870 return -ENOMEM;
1871
1872 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1873 if (ret)
1874 goto cleanup_alloc_pcpu_stats;
1875
1876 ret = gro_cells_init(&tunnel->gro_cells, dev);
1877 if (ret)
1878 goto cleanup_dst_cache_init;
1879
1880 t_hlen = ip6erspan_calc_hlen(tunnel);
1881 dev->mtu = ETH_DATA_LEN - t_hlen;
1882 if (dev->type == ARPHRD_ETHER)
1883 dev->mtu -= ETH_HLEN;
1884 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1885 dev->mtu -= 8;
1886
1887 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1888 ip6erspan_tnl_link_config(tunnel, 1);
1889
1890 dev_hold(dev);
1891 return 0;
1892
1893 cleanup_dst_cache_init:
1894 dst_cache_destroy(&tunnel->dst_cache);
1895 cleanup_alloc_pcpu_stats:
1896 free_percpu(dev->tstats);
1897 dev->tstats = NULL;
1898 return ret;
1899 }
1900
1901 static const struct net_device_ops ip6erspan_netdev_ops = {
1902 .ndo_init = ip6erspan_tap_init,
1903 .ndo_uninit = ip6erspan_tunnel_uninit,
1904 .ndo_start_xmit = ip6erspan_tunnel_xmit,
1905 .ndo_set_mac_address = eth_mac_addr,
1906 .ndo_validate_addr = eth_validate_addr,
1907 .ndo_change_mtu = ip6_tnl_change_mtu,
1908 .ndo_get_stats64 = dev_get_tstats64,
1909 .ndo_get_iflink = ip6_tnl_get_iflink,
1910 };
1911
static void ip6gre_tap_setup(struct net_device *dev)
1913 {
1914
1915 ether_setup(dev);
1916
1917 dev->max_mtu = 0;
1918 dev->netdev_ops = &ip6gre_tap_netdev_ops;
1919 dev->needs_free_netdev = true;
1920 dev->priv_destructor = ip6gre_dev_free;
1921
1922 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1923 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1924 netif_keep_dst(dev);
1925 }
1926
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
struct ip_tunnel_encap *ipencap)
1929 {
1930 bool ret = false;
1931
1932 memset(ipencap, 0, sizeof(*ipencap));
1933
1934 if (!data)
1935 return ret;
1936
1937 if (data[IFLA_GRE_ENCAP_TYPE]) {
1938 ret = true;
1939 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1940 }
1941
1942 if (data[IFLA_GRE_ENCAP_FLAGS]) {
1943 ret = true;
1944 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1945 }
1946
1947 if (data[IFLA_GRE_ENCAP_SPORT]) {
1948 ret = true;
1949 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1950 }
1951
1952 if (data[IFLA_GRE_ENCAP_DPORT]) {
1953 ret = true;
1954 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1955 }
1956
1957 return ret;
1958 }
1959
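/* Tail of newlink shared by ip6gre, ip6gretap and ip6erspan: apply any
 * UDP encap parameters, randomize the MAC of Ethernet-type devices when
 * none was supplied, register the netdevice, and honour an explicit
 * IFLA_MTU.
 */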
static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
				 struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt;
	struct ip_tunnel_encap ipencap;
	int err;

	nt = netdev_priv(dev);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return err;
	}

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	nt->net = dev_net(dev);

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

out:
	return err;
}

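/* newlink for ip6gre/ip6gretap: parse the parameters, reject duplicates
 * (a netns gets at most one collect_md tunnel, and otherwise at most
 * one tunnel per address/key tuple), then register and hash the new
 * tunnel.
 */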
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6gre_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

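/* Shared changelink validation: the fallback device cannot be
 * reconfigured, encap attributes are applied first, and the new
 * parameters must not collide with a different existing tunnel.
 * Returns the tunnel to modify, or an ERR_PTR on failure.
 */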
static struct ip6_tnl *
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
			 struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t, *nt = netdev_priv(dev);
	struct net *net = nt->net;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ign->fb_tunnel_dev)
		return ERR_PTR(-EINVAL);

	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(nt, &ipencap);

		if (err < 0)
			return ERR_PTR(err);
	}

	ip6gre_netlink_parms(data, p_p);

	t = ip6gre_tunnel_locate(net, p_p, 0);

	if (t) {
		if (t->dev != dev)
			return ERR_PTR(-EEXIST);
	} else {
		t = nt;
	}

	return t;
}

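/* changelink for ip6gre/ip6gretap: unhash the tunnel, apply the new
 * parameters, then rehash it under its (possibly changed) addresses
 * and key.
 */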
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6gre_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

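/* The fallback device lives and dies with its netns; it is never
 * removed via RTM_DELLINK.
 */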
static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	if (dev != ign->fb_tunnel_dev)
		unregister_netdevice_queue(dev, head);
}

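/* Upper bound on the netlink attribute payload produced by
 * ip6gre_fill_info(): one nla_total_size() term per attribute.
 */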
static size_t ip6gre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_GRE_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_GRE_FLAGS */
		nla_total_size(4) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		0;
}

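/* Dump the tunnel configuration. For non-collect_md ERSPAN v1/v2
 * tunnels, TUNNEL_KEY is ORed into the reported output flags because
 * ERSPAN always transmits a key even though o_flags does not record it.
 */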
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (p->erspan_ver == 1 || p->erspan_ver == 2) {
		if (!p->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
			goto nla_put_failure;

		if (p->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
				goto nla_put_failure;
		} else {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (p->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK] = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_IKEY] = { .type = NLA_U32 },
	[IFLA_GRE_OKEY] = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL] = { .len = sizeof_field(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE] = { .len = sizeof_field(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL] = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS] = { .type = NLA_U32 },
	[IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_GRE_FWMARK] = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};

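/* Like ip6gre_tap_setup(), but wired to the ERSPAN netdev ops. */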
static void ip6erspan_tap_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->max_mtu = 0;
	dev->netdev_ops = &ip6erspan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6gre_dev_free;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);
}

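/* newlink for ip6erspan: same flow as ip6gre_newlink() plus ERSPAN
 * version selection; the collect_md duplicate check uses the separate
 * per-netns collect_md_tun_erspan slot.
 */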
static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip6_tnl *nt = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign;
	int err;

	ip6gre_netlink_parms(data, &nt->parms);
	ip6erspan_set_version(data, &nt->parms);
	ign = net_generic(net, ip6gre_net_id);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ign->collect_md_tun_erspan))
			return -EEXIST;
	} else {
		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
			return -EEXIST;
	}

	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
	if (!err) {
		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
		ip6erspan_tunnel_link_md(ign, nt);
		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
	}
	return err;
}

static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	ip6gre_tnl_link_config_common(t);
	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
				const struct __ip6_tnl_parm *p, int set_mtu)
{
	ip6gre_tnl_copy_tnl_parm(t, p);
	ip6erspan_tnl_link_config(t, set_mtu);
	return 0;
}

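/* changelink for ip6erspan: as ip6gre_changelink(), but re-evaluates
 * the ERSPAN version before the tunnel is rehashed.
 */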
static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
	struct __ip6_tnl_parm p;
	struct ip6_tnl *t;

	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
	if (IS_ERR(t))
		return PTR_ERR(t);

	ip6erspan_set_version(data, &p);
	ip6gre_tunnel_unlink_md(ign, t);
	ip6gre_tunnel_unlink(ign, t);
	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
	ip6erspan_tunnel_link_md(ign, t);
	ip6gre_tunnel_link(ign, t);
	return 0;
}

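/* The three link types registered below are typically driven from
 * userspace via iproute2; illustrative invocations (addresses, device
 * names and the ERSPAN index are placeholders):
 *
 *   ip link add grt1 type ip6gre local 2001:db8::1 remote 2001:db8::2
 *   ip link add tap1 type ip6gretap local 2001:db8::1 remote 2001:db8::2
 *   ip link add ers1 type ip6erspan local 2001:db8::1 remote 2001:db8::2 \
 *           erspan_ver 1 erspan 123
 */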
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind = "ip6gre",
	.maxtype = IFLA_GRE_MAX,
	.policy = ip6gre_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6gre_tunnel_setup,
	.validate = ip6gre_tunnel_validate,
	.newlink = ip6gre_newlink,
	.changelink = ip6gre_changelink,
	.dellink = ip6gre_dellink,
	.get_size = ip6gre_get_size,
	.fill_info = ip6gre_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind = "ip6gretap",
	.maxtype = IFLA_GRE_MAX,
	.policy = ip6gre_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6gre_tap_setup,
	.validate = ip6gre_tap_validate,
	.newlink = ip6gre_newlink,
	.changelink = ip6gre_changelink,
	.get_size = ip6gre_get_size,
	.fill_info = ip6gre_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
	.kind = "ip6erspan",
	.maxtype = IFLA_GRE_MAX,
	.policy = ip6gre_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6erspan_tap_setup,
	.validate = ip6erspan_tap_validate,
	.newlink = ip6erspan_newlink,
	.changelink = ip6erspan_changelink,
	.get_size = ip6gre_get_size,
	.fill_info = ip6gre_fill_info,
	.get_link_net = ip6_tnl_get_link_net,
};

/*
 *	And now the module code and kernel interface.
 */

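/* Module init: register the per-netns state first, hook IPPROTO_GRE
 * into the IPv6 protocol table, then register the three rtnl link
 * types. Any failure unwinds the registrations already made.
 */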
static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&ip6erspan_tap_ops);
	if (err < 0)
		goto erspan_link_failed;

out:
	return err;

erspan_link_failed:
	rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}

static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	rtnl_link_unregister(&ip6erspan_tap_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");