1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _IPV6_FRAG_H
3 #define _IPV6_FRAG_H
4 #include <linux/kernel.h>
5 #include <net/addrconf.h>
6 #include <net/ipv6.h>
7 #include <net/inet_frag.h>
8 
/* Identifies which subsystem owns an IPv6 reassembly queue, so that
 * independent defragmentation contexts never share queues.
 */
enum ip6_defrag_users {
	IP6_DEFRAG_LOCAL_DELIVER,
	IP6_DEFRAG_CONNTRACK_IN,
	/* Each conntrack user reserves a USHRT_MAX-wide range: the 16-bit
	 * conntrack zone id is added to the base value, keeping queues in
	 * different zones distinct from one another.
	 */
	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_OUT,
	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};
18 
/*
 *	Per-datagram IPv6 reassembly queue state; the IPv6 counterpart of
 *	the IPv4 fragment queue (equivalent of ipv4 struct ip).
 */
struct frag_queue {
	struct inet_frag_queue	q;	/* generic reassembly state; must be first for container_of() */

	int			iif;	/* incoming interface index, used to look up the device on expiry */
	__u16			nhoffset;
	u8			ecn;	/* accumulated ECN bits of the fragments; cleared on init */
};
29 
30 #if IS_ENABLED(CONFIG_IPV6)
ip6frag_init(struct inet_frag_queue * q,const void * a)31 static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
32 {
33 	struct frag_queue *fq = container_of(q, struct frag_queue, q);
34 	const struct frag_v6_compare_key *key = a;
35 
36 	q->key.v6 = *key;
37 	fq->ecn = 0;
38 }
39 
/* rhashtable hash function for a bare lookup key (a frag_v6_compare_key).
 * @len is supplied by the rhashtable core but unused: the key size is fixed.
 */
static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
{
	u32 key_words = sizeof(struct frag_v6_compare_key) / sizeof(u32);

	return jhash2(data, key_words, seed);
}
45 
/* rhashtable hash function for a stored object (an inet_frag_queue):
 * hashes the embedded v6 key with the same jhash2 parameters as
 * ip6frag_key_hashfn() so object and key hashes agree.
 */
static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
{
	u32 key_words = sizeof(struct frag_v6_compare_key) / sizeof(u32);
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v6, key_words, seed);
}
53 
54 static inline int
ip6frag_obj_cmpfn(struct rhashtable_compare_arg * arg,const void * ptr)55 ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
56 {
57 	const struct frag_v6_compare_key *key = arg->key;
58 	const struct inet_frag_queue *fq = ptr;
59 
60 	return !!memcmp(&fq->key, key, sizeof(*key));
61 }
62 
/* Reassembly-timer expiry handler: kill the timed-out queue, bump the
 * REASMFAILS/REASMTIMEOUT MIB counters and, if the first fragment was
 * received, send an ICMPv6 "fragment reassembly time exceeded" error
 * back to the sender.  Always drops the caller's reference on @fq via
 * inet_frag_put() before returning.
 */
static inline void
ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
	struct net_device *dev = NULL;
	struct sk_buff *head;

	rcu_read_lock();
	/* fqdir->dead is set while the netns/fqdir is being torn down:
	 * skip all work and just release our reference. */
	if (fq->q.fqdir->dead)
		goto out_rcu_unlock;
	spin_lock(&fq->q.lock);

	/* Queue already finished (reassembled or killed) under another CPU. */
	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q);

	/* RCU read lock keeps the device alive for the ICMP send below. */
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out;

	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&fq->q);
	if (!head)
		goto out;

	head->dev = dev;
	/* The queue is dead and head is ours; release the queue lock
	 * before handing the skb to the ICMP layer. */
	spin_unlock(&fq->q.lock);

	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
	kfree_skb(head);
	goto out_rcu_unlock;

out:
	spin_unlock(&fq->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	inet_frag_put(&fq->q);
}
111 
112 /* Check if the upper layer header is truncated in the first fragment. */
113 static inline bool
ipv6frag_thdr_truncated(struct sk_buff * skb,int start,u8 * nexthdrp)114 ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
115 {
116 	u8 nexthdr = *nexthdrp;
117 	__be16 frag_off;
118 	int offset;
119 
120 	offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
121 	if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
122 		return false;
123 	switch (nexthdr) {
124 	case NEXTHDR_TCP:
125 		offset += sizeof(struct tcphdr);
126 		break;
127 	case NEXTHDR_UDP:
128 		offset += sizeof(struct udphdr);
129 		break;
130 	case NEXTHDR_ICMP:
131 		offset += sizeof(struct icmp6hdr);
132 		break;
133 	default:
134 		offset += 1;
135 	}
136 	if (offset > skb->len)
137 		return true;
138 	return false;
139 }
140 
141 #endif
142 #endif
143