// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/gro.h>

#include "ip6_offload.h"

/* All GRO functions are always built in, except UDP over IPv6, which lives
 * in the ipv6 module, as it depends on the UDPv6 lookup function, so we need
 * special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
})

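/* Walk the chain of IPv6 extension headers starting at skb->data, pulling
 * each one so that skb->data ends up at the upper-layer header.  Returns the
 * next-header value of the first header that is not a GSO extension header;
 * stops early if a header cannot be pulled.
 */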
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}

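/* Segment a GSO IPv6 packet: pull past the IPv6 header and any extension
 * headers, hand the payload to the upper-layer gso_segment callback, then
 * fix up payload_len (and the fragment header, for UDP fragmentation
 * offload) on every resulting segment.
 */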
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension headers, following the same
 * parsing logic as ipv6_gso_pull_exthdrs().  On return, *opps holds the
 * net_offload of the upper-layer protocol (or NULL if none is registered).
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}

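/* GRO receive handler for IPv6: validate and pull the IPv6 header (and any
 * extension headers), compare the flow key against the packets already held
 * on the GRO list, then hand the segment to the upper-layer gro_receive
 * callback (TCP or UDP via the indirect-call wrapper).
 */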
INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					 ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

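/* GRO receive wrappers for tunnelled traffic.  encap_mark ensures only one
 * level of IP-in-IP encapsulation is aggregated: if it is already set, the
 * packet is flushed instead of being merged.
 */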
static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IPv4 encapsulated in IPv6: hand the inner packet
	 * to the IPv4 GRO engine.
	 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

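/* GRO complete handler for IPv6: rewrite payload_len for the merged packet,
 * skip past the IPv6 and extension headers, and let the upper-layer
 * gro_complete callback finish the job.
 */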
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

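/* GRO complete wrappers for tunnelled traffic: mark the skb as encapsulated
 * and record the tunnel GSO type (SKB_GSO_IPXIP4 for SIT, SKB_GSO_IPXIP6 for
 * IPv6 tunnels) before completing the inner packet.
 */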
static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

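/* GSO segment wrappers for tunnelled traffic: check that the GSO type
 * matches the tunnel this handler was registered for, then segment the
 * inner packet with the handler for its address family.
 */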
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment	= sit_gso_segment,
		.gro_receive	= sit_ip6ip6_gro_receive,
		.gro_complete	= sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment	= ip4ip6_gso_segment,
		.gro_receive	= ip4ip6_gro_receive,
		.gro_complete	= ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment	= ip6ip6_gso_segment,
		.gro_receive	= sit_ip6ip6_gro_receive,
		.gro_complete	= ip6ip6_gro_complete,
	},
};
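
/* Register the ETH_P_IPV6 packet offload and the tunnel offload handlers
 * (SIT, IPv4-in-IPv6, IPv6-in-IPv6) at boot.  Failures from the TCP and
 * extension-header offload init are logged but not fatal.
 */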
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);