/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>

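/*
 * Private data of the "tunnel" expression: which tunnel key to fetch,
 * the destination register, the direction filter and the value length.
 */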
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;
	u8			dreg;
	enum nft_tunnel_mode	mode:8;
	u8			len;
};

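/*
 * Evaluate the "tunnel" expression: look up the tunnel metadata attached
 * to the packet and, depending on the configured key, store either a
 * boolean (NFT_TUNNEL_PATH) or the 32-bit tunnel id (NFT_TUNNEL_ID) in
 * the destination register. The mode restricts the match to rx or tx
 * metadata; without metadata, NFT_TUNNEL_ID breaks rule evaluation.
 */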
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			nft_reg_store8(dest, false);
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);
		regs->verdict.code = NFT_BREAK;
	}
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE]	= NLA_POLICY_MAX(NLA_BE32, 255),
};

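/*
 * Parse the NFTA_TUNNEL_* attributes: validate the requested key and
 * optional mode, derive the value length and bind the destination
 * register.
 */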
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	priv->len = len;
	return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
					NULL, NFT_DATA_VALUE, len);
}

static int nft_tunnel_get_dump(struct sk_buff *skb,
			       const struct nft_expr *expr, bool reset)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

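/*
 * Register tracking: if an identical tunnel expression already loaded
 * this register and no bitwise operation touched it since, the load can
 * be elided.
 */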
static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
				  const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	const struct nft_tunnel *tunnel;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->key != tunnel->key ||
	    priv->dreg != tunnel->dreg ||
	    priv->mode != tunnel->mode) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return false;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
	.reduce		= nft_tunnel_get_reduce,
};

static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};

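/*
 * The "tunnel" object side: a pre-built metadata dst describing the
 * encapsulation, plus a copy of the tunnel options (VXLAN, ERSPAN or
 * GENEVE) used to populate and dump it.
 */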
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
		u8	data[IP_TUNNEL_OPTS_MAX];
	} u;
	IP_TUNNEL_DECLARE_FLAGS(flags);
	u32	len;
};

struct nft_tunnel_obj {
	struct metadata_dst	*md;
	struct nft_tunnel_opts	opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST]	= { .type = NLA_U32 },
};

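/* Parse the nested IPv4 source/destination addresses; destination is mandatory. */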
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};

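/*
 * Parse the nested IPv6 addresses and optional flow label; destination is
 * mandatory and the tunnel is flagged as IPv6.
 */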
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};

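/* Parse the VXLAN Group Based Policy (GBP) option. */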
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
					  nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len	= sizeof(struct vxlan_metadata);
	ip_tunnel_flags_zero(opts->flags);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags);

	return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};

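/* Parse the ERSPAN metadata: v1 takes an index, v2 a direction and hardware id. */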
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len	= sizeof(struct erspan_metadata);
	ip_tunnel_flags_zero(opts->flags);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags);

	return 0;
}

static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};

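/*
 * Parse one GENEVE option TLV and append it at the current byte offset in
 * opts->u.data; the option data length must be a multiple of four and the
 * accumulated options must fit in IP_TUNNEL_OPTS_MAX.
 */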
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
	int err, data_len;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
			       nft_tunnel_opts_geneve_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
		return -EINVAL;

	attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
	data_len = nla_len(attr);
	if (data_len % 4)
		return -EINVAL;

	opts->len += sizeof(*opt) + data_len;
	if (opts->len > IP_TUNNEL_OPTS_MAX)
		return -EINVAL;

	memcpy(opt->opt_data, nla_data(attr), data_len);
	opt->length = data_len / 4;
	opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
	opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
	ip_tunnel_flags_zero(opts->flags);
	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags);

	return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC]	= {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE]	= { .type = NLA_NESTED, },
};

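/*
 * Walk the NFTA_TUNNEL_KEY_OPTS nest and dispatch to the per-type option
 * parsers. Only one option type may be used, but GENEVE options may be
 * given more than once.
 */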
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	struct nlattr *nla;
	int err, rem;
	u32 type = 0;

	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
					     nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_vxlan_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_VXLAN_OPT_BIT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_erspan_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_ERSPAN_OPT_BIT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
			if (type && type != IP_TUNNEL_GENEVE_OPT_BIT)
				return -EINVAL;
			err = nft_tunnel_obj_geneve_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_GENEVE_OPT_BIT;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};

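/*
 * Build the ip_tunnel_info from the netlink attributes (id, addresses,
 * ports, flags, tos, ttl and options), allocate the metadata dst and
 * attach the parsed options to it.
 */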
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode		= IP_TUNNEL_INFO_TX;
	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	__set_bit(IP_TUNNEL_KEY_BIT, info.key.tun_flags);
	__set_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
	__set_bit(IP_TUNNEL_NOCACHE_BIT, info.key.tun_flags);

	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			__clear_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
				  info.key.tun_flags);
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			__set_bit(IP_TUNNEL_SEQ_BIT, info.key.tun_flags);
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}

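/*
 * Runtime side of the tunnel object: replace the packet's dst with a
 * reference to the pre-built tunnel metadata dst so that the transmitting
 * tunnel device can pick up the encapsulation parameters.
 */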
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}

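/* Netlink dump helpers for the tunnel object: addresses, options, ports and flags. */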
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
				     &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
				     &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
				 info->key.label)) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
				    info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
				    info->key.u.ipv4.dst) < 0) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	}

	return 0;
}

static int nft_tunnel_opts_dump(struct sk_buff *skb,
				struct nft_tunnel_obj *priv)
{
	struct nft_tunnel_opts *opts = &priv->opts;
	struct nlattr *nest, *inner;

	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
	if (!nest)
		return -1;

	if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags)) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
				 htonl(opts->u.vxlan.gbp)))
			goto inner_failure;
		nla_nest_end(skb, inner);
	} else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags)) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
				 htonl(opts->u.erspan.version)))
			goto inner_failure;
		switch (opts->u.erspan.version) {
		case ERSPAN_VERSION:
			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
					 opts->u.erspan.u.index))
				goto inner_failure;
			break;
		case ERSPAN_VERSION2:
			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
				       get_hwid(&opts->u.erspan.u.md2)) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
				       opts->u.erspan.u.md2.dir))
				goto inner_failure;
			break;
		}
		nla_nest_end(skb, inner);
	} else if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags)) {
		struct geneve_opt *opt;
		int offset = 0;

		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
		if (!inner)
			goto failure;
		while (opts->len > offset) {
			opt = (struct geneve_opt *)(opts->u.data + offset);
			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
					 opt->opt_class) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
				       opt->type) ||
			    nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
				    opt->length * 4, opt->opt_data))
				goto inner_failure;
			offset += sizeof(*opt) + opt->length * 4;
		}
		nla_nest_end(skb, inner);
	}
	nla_nest_end(skb, nest);
	return 0;

inner_failure:
	nla_nest_cancel(skb, inner);
failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags))
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	if (!test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (test_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags))
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};

static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");