// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

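/* Reconstruct the VLAN ethernet header for a frame whose tag was
 * stripped into skb metadata (e.g. by rx tag offload): copy the
 * on-wire MAC addresses, then splice the offloaded TPID/TCI back in
 * front of the encapsulated protocol.
 */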
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}
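
/*
 * Link layer offsets are relative to the frame as it was on the wire.
 * When a tag has been offloaded, e.g. for an 802.1AD frame whose outer
 * tag lives in skb metadata:
 *
 *   on-wire: dst(6) src(6) 0x88a8 oTCI 0x8100 iTCI type payload
 *   in-skb:  dst(6) src(6)            0x8100 iTCI type payload
 *
 * so reads below VLAN_ETH_HLEN + vlan_hlen are served from the header
 * reassembled in veth, and the rest from the skb, shifted by the
 * VLAN_HLEN bytes missing from the linear data.
 */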
/* add the VLAN header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		/* Only the outer tag is missing from a double-tagged
		 * frame, so copy the in-skb header verbatim; otherwise
		 * rebuild the single-tagged header from the offloaded
		 * tag.  The branches must be exclusive: rebuilding after
		 * a successful copy would clobber the inner tag with the
		 * outer one.
		 */
		if (vlan_hlen) {
			if (skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
				return false;
		} else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth)) {
			return false;
		}

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

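/* Load priv->len bytes at priv->offset from the chosen header base into
 * the destination register, zero-padding the tail of the last 32-bit
 * register word.  An illustrative rule such as "tcp dport 22" becomes a
 * 2-byte load at offset 2 of the transport header.
 */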
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

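/* Build the mask for an offload match: all-ones when the expression
 * matches the full field; for a shorter match, whole 32-bit words stay
 * all-ones and the trailing bytes of the last word are cleared, e.g. a
 * priv_len of 2 on a 4-byte field yields ff:ff:00:00.  Matching beyond
 * the field width cannot be offloaded.
 */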
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}

static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

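/* Translate a payload match into flow dissector keys so the rule can be
 * programmed into hardware, dispatching on the header base.  Only the
 * well-known header fields handled above can be offloaded.
 */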
static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

/* Same callbacks as nft_payload_ops; the core interpreter recognizes
 * this ops pointer and inlines small, aligned loads via a fast path.
 */
const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

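/* Incremental checksum update in the style of RFC 1624: fold the old
 * bytes (fsum) out of the checksum and the new bytes (tsum) into it.
 * A zero result is mapped to CSUM_MANGLED_0, since a zero UDP checksum
 * means "no checksum".
 */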
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

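/* The UDP checksum is optional over IPv4; if the datagram carries none
 * (the field is zero), there is nothing to fix up after mangling.
 */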
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

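/* Map the transport protocol to the offset of its checksum field,
 * relative to skb->data; returns -1 if the protocol is unknown or the
 * packet carries no layer 4 checksum.
 */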
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}

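/* SCTP uses a CRC32c over the whole packet rather than an internet
 * checksum, so it cannot be patched incrementally and is recomputed
 * from scratch after mangling.
 */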
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine the layer 4 checksum offset or this packet
	 * doesn't require layer 4 checksum recalculation, skip it.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * the inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

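/* Write the source register into the packet.  Checksums are fixed up
 * first, while the bytes being overwritten are still available to
 * compute the old/new checksum delta.
 */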
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (nft_payload_csum_sctp(skb, pkt->xt.thoff))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

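/* Pick the expression flavour: SREG selects the mangle (set) variant,
 * DREG a load.  Small, power-of-two sized, naturally aligned loads
 * outside the link layer header get nft_payload_fast_ops.  Illustrative
 * nft rules (not part of this file):
 *
 *   ip daddr 192.0.2.1  - 4-byte aligned network header load, eligible
 *                         for the fast ops
 *   ether saddr ...     - link layer base, always the generic ops
 */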
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};