// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>
#include <linux/ppp_defs.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports_range tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
	struct flow_dissector_key_num_of_vlans num_of_vlans;
	struct flow_dissector_key_pppoe pppoe;
	struct flow_dissector_key_l2tpv3 l2tpv3;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
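
/* Worked example (editor's sketch, values assumed for illustration): on a
 * 64-bit machine, if the only non-zero mask bytes sit at offsets 10 and 13,
 * then first = 10 and last = 13, so the range becomes
 * start = rounddown(10, 8) = 8 and end = roundup(14, 8) = 16. Lookups then
 * hash and compare only bytes [8, 16) of each key, one long at a time,
 * which is why fl_flow_key is __aligned(BITS_PER_LONG / 8).
 */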

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
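
/* Illustration of the effect (editor's note, assumed values): with a /24
 * IPv4 destination mask, a dissected key of 10.1.2.3 ANDed with the mask
 * 255.255.255.0 yields the masked key 10.1.2.0. The AND runs long-by-long
 * over the active range computed above rather than byte-by-byte.
 */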

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}
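
/* The loop above rejects a mask that matches on any bit the template leaves
 * unmasked: per long, ~*ltmplt & *lmask is non-zero exactly when the filter
 * mask sets a bit the template mask does not. For example (illustrative
 * values), a template long of 0xff00 accepts a mask of 0x0f00 but rejects
 * 0xffff, because the low byte falls outside the template.
 */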

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}
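
/* Range matching note: a dissected skb carries only the actual port in
 * tp_range.tp, never tp_min/tp_max. Once the port is confirmed to lie in
 * [min_val, max_val], the filter's own masked min/max values are copied
 * into mkey so the subsequent rhashtable lookup, which hashes the whole
 * masked range, can still locate the filter.
 */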

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
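
/* Userspace view (editor's illustration, not from the original source):
 * a rule such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * ends up as one mask (ip_proto plus the TCP dst port bytes set) with one
 * filter hashed under it. fl_classify() walks each mask in the list,
 * dissects only the fields that mask covers, and looks the masked key up
 * in that mask's hash table.
 */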

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
				      cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
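
/* Offload semantics above (editor's note): with skip_sw, a failure to
 * translate the actions or to install the rule in hardware is fatal, since
 * the rule would otherwise match nothing, hence the -EINVAL when no driver
 * set TCA_CLS_FLAGS_IN_HW. Without skip_sw, offload errors are tolerated
 * and the software path still matches the filter.
 */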

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
				 cls_flower.stats.pkts,
				 cls_flower.stats.drops,
				 cls_flower.stats.lastused,
				 cls_flower.stats.used_hw_stats,
				 cls_flower.stats.used_hw_stats_valid);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_PPPOE_SID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_PPP_PROTO]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_L2TPV3_SID]	= { .type = NLA_U32 },
};
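
/* Example of how this table is enforced (editor's note): an attribute such
 * as TCA_FLOWER_KEY_IPV4_SRC_MASK is declared NLA_U32, so a malformed
 * netlink message carrying, say, a 2-byte payload there is rejected during
 * attribute parsing before fl_set_key() ever runs. MAC and IPv6 address
 * attributes have no type and are instead checked against the minimum
 * payload length given by .len.
 */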

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	   = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	   = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
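
/* Convention used throughout fl_set_key() (editor's illustration): for
 * "flower dst_mac 02:00:00:00:00:01" with no mask attribute, the value is
 * copied and the mask defaults to ff:ff:ff:ff:ff:ff, i.e. an exact match.
 * Passing TCA_FLOWER_UNSPEC as mask_type forces that all-ones default.
 */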

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}
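
/* Example (editor's sketch): a rule like "flower ip_proto tcp
 * dst_port 1000-2000" sets tp_min.dst = 1000 and tp_max.dst = 2000 with
 * all-ones masks. The checks above reject inverted or empty ranges such as
 * 2000-1000; comparisons are done on host-order values via ntohs().
 */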

static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	if (ethertype) {
		key_val->vlan_tpid = ethertype;
		key_mask->vlan_tpid = cpu_to_be16(~0);
	}
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}
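
/* QinQ illustration (editor's sketch, assumed values): for an 802.1ad rule
 * "protocol 802.1ad flower vlan_id 100 cvlan_id 200", the outer key gets
 * vlan_tpid = ETH_P_8021AD with vlan_id = 100, and the inner (cvlan) key
 * gets vlan_tpid = ETH_P_8021Q with vlan_id = 200. VLAN_VID_MASK (0xfff)
 * and VLAN_PRIORITY_MASK (0x7) keep the 12-bit id and 3-bit PCP in range.
 */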

static void fl_set_key_pppoe(struct nlattr **tb,
			     struct flow_dissector_key_pppoe *key_val,
			     struct flow_dissector_key_pppoe *key_mask,
			     struct fl_flow_key *key,
			     struct fl_flow_key *mask)
{
	/* key_val::type must be set to ETH_P_PPP_SES
	 * because ETH_P_PPP_SES was stored in basic.n_proto,
	 * which might get overwritten by ppp_proto
	 * or might be set to 0. The role of key_val::type
	 * is similar to vlan_key::tpid.
	 */
	key_val->type = htons(ETH_P_PPP_SES);
	key_mask->type = cpu_to_be16(~0);

	if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
		key_val->session_id =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
		key_mask->session_id = cpu_to_be16(~0);
	}
	if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
		key_val->ppp_proto =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
		key_mask->ppp_proto = cpu_to_be16(~0);

		if (key_val->ppp_proto == htons(PPP_IP)) {
			key->basic.n_proto = htons(ETH_P_IP);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
			key->basic.n_proto = htons(ETH_P_IPV6);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_UC);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_MC);
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	} else {
		key->basic.n_proto = 0;
		mask->basic.n_proto = cpu_to_be16(0);
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}
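
/* Example (editor's note): "flower ip_flags frag" arrives with
 * TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT set in both key and mask, and is
 * translated bit-by-bit into FLOW_DIS_IS_FRAGMENT in the dissector flags.
 * A mask bit set without the corresponding key bit means "must not be a
 * fragment".
 */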

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 len = key->enc_opts.len;
	int err;

	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Duplicate type for gtp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_key, key,
						    key_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
						    msk_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask are misaligned");
				return -EINVAL;
			}
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}

		if (!msk_depth)
			continue;

		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
			return -EINVAL;
		}
		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
	}

	return 0;
}

static int fl_validate_ct_state(u16 state, struct nlattr *tb,
				struct netlink_ext_ack *extack)
{
	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "no trk, so no other flag can be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and est are mutually exclusive");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "when inv is set, only trk may be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and rpl are mutually exclusive");
		return -EINVAL;
	}

	return 0;
}
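
/* Examples of the rules above (editor's illustration): "+trk+est" and
 * "+trk+inv" are accepted, while "+est" alone (no +trk), "+trk+new+est"
 * and "+trk+new+rpl" are rejected. Only the bits present in the mask are
 * validated, hence the key & mask at the call site below.
 */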

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		int err;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));

		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
		if (err)
			return err;
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}
1629 
1630 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1631 			struct fl_flow_key *key, struct fl_flow_key *mask,
1632 			int vthresh)
1633 {
1634 	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1635 
1636 	if (!tb) {
1637 		*ethertype = 0;
1638 		return good_num_of_vlans;
1639 	}
1640 
1641 	*ethertype = nla_get_be16(tb);
1642 	if (good_num_of_vlans || eth_type_vlan(*ethertype))
1643 		return true;
1644 
1645 	key->basic.n_proto = *ethertype;
1646 	mask->basic.n_proto = cpu_to_be16(~0);
1647 	return false;
1648 }
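
/* Example: an ethertype of 0x8100 (802.1Q) or 0x88A8 (802.1AD) makes
 * the helper above report a VLAN key, so the caller goes on to parse
 * VLAN attributes; a non-VLAN ethertype such as 0x0800 (IPv4), seen
 * before any VLAN tag, is instead recorded directly as an exact
 * basic.n_proto match.
 */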
1649 
1650 static int fl_set_key(struct net *net, struct nlattr **tb,
1651 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1652 		      struct netlink_ext_ack *extack)
1653 {
1654 	__be16 ethertype;
1655 	int ret = 0;
1656 
1657 	if (tb[TCA_FLOWER_INDEV]) {
1658 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1659 		if (err < 0)
1660 			return err;
1661 		key->meta.ingress_ifindex = err;
1662 		mask->meta.ingress_ifindex = 0xffffffff;
1663 	}
1664 
1665 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1666 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1667 		       sizeof(key->eth.dst));
1668 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1669 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1670 		       sizeof(key->eth.src));
1671 	fl_set_key_val(tb, &key->num_of_vlans,
1672 		       TCA_FLOWER_KEY_NUM_OF_VLANS,
1673 		       &mask->num_of_vlans,
1674 		       TCA_FLOWER_UNSPEC,
1675 		       sizeof(key->num_of_vlans));
1676 
1677 	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1678 		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1679 				TCA_FLOWER_KEY_VLAN_PRIO,
1680 				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1681 				&key->vlan, &mask->vlan);
1682 
1683 		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1684 				&ethertype, key, mask, 1)) {
1685 			fl_set_key_vlan(tb, ethertype,
1686 					TCA_FLOWER_KEY_CVLAN_ID,
1687 					TCA_FLOWER_KEY_CVLAN_PRIO,
1688 					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1689 					&key->cvlan, &mask->cvlan);
1690 			fl_set_key_val(tb, &key->basic.n_proto,
1691 				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1692 				       &mask->basic.n_proto,
1693 				       TCA_FLOWER_UNSPEC,
1694 				       sizeof(key->basic.n_proto));
1695 		}
1696 	}
1697 
1698 	if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1699 		fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1700 
1701 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1702 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1703 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1704 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1705 			       sizeof(key->basic.ip_proto));
1706 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1707 	}
1708 
1709 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1710 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1711 		mask->control.addr_type = ~0;
1712 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1713 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1714 			       sizeof(key->ipv4.src));
1715 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1716 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1717 			       sizeof(key->ipv4.dst));
1718 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1719 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1720 		mask->control.addr_type = ~0;
1721 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1722 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1723 			       sizeof(key->ipv6.src));
1724 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1725 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1726 			       sizeof(key->ipv6.dst));
1727 	}
1728 
1729 	if (key->basic.ip_proto == IPPROTO_TCP) {
1730 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1731 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1732 			       sizeof(key->tp.src));
1733 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1734 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1735 			       sizeof(key->tp.dst));
1736 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1737 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1738 			       sizeof(key->tcp.flags));
1739 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1740 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1741 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1742 			       sizeof(key->tp.src));
1743 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1744 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1745 			       sizeof(key->tp.dst));
1746 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1747 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1748 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1749 			       sizeof(key->tp.src));
1750 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1751 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1752 			       sizeof(key->tp.dst));
1753 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1754 		   key->basic.ip_proto == IPPROTO_ICMP) {
1755 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1756 			       &mask->icmp.type,
1757 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1758 			       sizeof(key->icmp.type));
1759 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1760 			       &mask->icmp.code,
1761 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1762 			       sizeof(key->icmp.code));
1763 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1764 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1765 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1766 			       &mask->icmp.type,
1767 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1768 			       sizeof(key->icmp.type));
1769 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1770 			       &mask->icmp.code,
1771 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1772 			       sizeof(key->icmp.code));
1773 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1774 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1775 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1776 		if (ret)
1777 			return ret;
1778 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1779 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1780 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1781 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1782 			       sizeof(key->arp.sip));
1783 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1784 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1785 			       sizeof(key->arp.tip));
1786 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1787 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1788 			       sizeof(key->arp.op));
1789 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1790 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1791 			       sizeof(key->arp.sha));
1792 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1793 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1794 			       sizeof(key->arp.tha));
1795 	} else if (key->basic.ip_proto == IPPROTO_L2TP) {
1796 		fl_set_key_val(tb, &key->l2tpv3.session_id,
1797 			       TCA_FLOWER_KEY_L2TPV3_SID,
1798 			       &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
1799 			       sizeof(key->l2tpv3.session_id));
1800 	}
1801 
1802 	if (key->basic.ip_proto == IPPROTO_TCP ||
1803 	    key->basic.ip_proto == IPPROTO_UDP ||
1804 	    key->basic.ip_proto == IPPROTO_SCTP) {
1805 		ret = fl_set_key_port_range(tb, key, mask, extack);
1806 		if (ret)
1807 			return ret;
1808 	}
1809 
1810 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1811 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1812 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1813 		mask->enc_control.addr_type = ~0;
1814 		fl_set_key_val(tb, &key->enc_ipv4.src,
1815 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1816 			       &mask->enc_ipv4.src,
1817 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1818 			       sizeof(key->enc_ipv4.src));
1819 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1820 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1821 			       &mask->enc_ipv4.dst,
1822 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1823 			       sizeof(key->enc_ipv4.dst));
1824 	}
1825 
1826 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1827 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1828 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1829 		mask->enc_control.addr_type = ~0;
1830 		fl_set_key_val(tb, &key->enc_ipv6.src,
1831 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1832 			       &mask->enc_ipv6.src,
1833 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1834 			       sizeof(key->enc_ipv6.src));
1835 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1836 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1837 			       &mask->enc_ipv6.dst,
1838 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1839 			       sizeof(key->enc_ipv6.dst));
1840 	}
1841 
1842 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1843 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1844 		       sizeof(key->enc_key_id.keyid));
1845 
1846 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1847 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1848 		       sizeof(key->enc_tp.src));
1849 
1850 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1851 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1852 		       sizeof(key->enc_tp.dst));
1853 
1854 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1855 
1856 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1857 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1858 		       sizeof(key->hash.hash));
1859 
1860 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1861 		ret = fl_set_enc_opt(tb, key, mask, extack);
1862 		if (ret)
1863 			return ret;
1864 	}
1865 
1866 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1867 	if (ret)
1868 		return ret;
1869 
1870 	if (tb[TCA_FLOWER_KEY_FLAGS])
1871 		ret = fl_set_key_flags(tb, &key->control.flags,
1872 				       &mask->control.flags, extack);
1873 
1874 	return ret;
1875 }
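
/* For illustration: a match such as "dst_ip 192.0.2.0/24" (hypothetical
 * command fragment) arrives as TCA_FLOWER_KEY_IPV4_DST = 192.0.2.0 plus
 * TCA_FLOWER_KEY_IPV4_DST_MASK = 255.255.255.0, which fl_set_key()
 * above stores in key->ipv4.dst and mask->ipv4.dst respectively.
 */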
1876 
1877 static void fl_mask_copy(struct fl_flow_mask *dst,
1878 			 struct fl_flow_mask *src)
1879 {
1880 	const void *psrc = fl_key_get_start(&src->key, src);
1881 	void *pdst = fl_key_get_start(&dst->key, src);
1882 
1883 	memcpy(pdst, psrc, fl_mask_range(src));
1884 	dst->range = src->range;
1885 }
1886 
1887 static const struct rhashtable_params fl_ht_params = {
1888 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1889 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1890 	.automatic_shrinking = true,
1891 };
1892 
1893 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1894 {
1895 	mask->filter_ht_params = fl_ht_params;
1896 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1897 	mask->filter_ht_params.key_offset += mask->range.start;
1898 
1899 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1900 }
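
/* Sketch of the effect: fl_mask_range() is the length of the byte span
 * that carries non-zero mask bits, so key_len covers only that span and
 * key_offset is shifted into cls_fl_filter.mkey by the same start
 * offset; bytes outside the span never enter the hash or the compare.
 */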
1901 
1902 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1903 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1904 
1905 #define FL_KEY_IS_MASKED(mask, member)						\
1906 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1907 		   0, FL_KEY_MEMBER_SIZE(member))				\
1908 
1909 #define FL_KEY_SET(keys, cnt, id, member)					\
1910 	do {									\
1911 		keys[cnt].key_id = id;						\
1912 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1913 		cnt++;								\
1914 	} while (0)
1915 
1916 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1917 	do {									\
1918 		if (FL_KEY_IS_MASKED(mask, member))				\
1919 			FL_KEY_SET(keys, cnt, id, member);			\
1920 	} while (0)
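
/* For illustration, FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 * FLOW_DISSECTOR_KEY_ETH_ADDRS, eth) appends an entry to keys[] only if
 * memchr_inv() finds a non-zero byte in mask->eth, i.e. only if the
 * filter actually matches on MAC addresses.
 */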
1921 
1922 static void fl_init_dissector(struct flow_dissector *dissector,
1923 			      struct fl_flow_key *mask)
1924 {
1925 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1926 	size_t cnt = 0;
1927 
1928 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1929 			     FLOW_DISSECTOR_KEY_META, meta);
1930 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1931 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1932 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1933 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1934 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1935 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1936 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1937 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1938 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1939 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1940 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1941 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1942 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1943 			     FLOW_DISSECTOR_KEY_IP, ip);
1944 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1945 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1946 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1947 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1948 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1949 			     FLOW_DISSECTOR_KEY_ARP, arp);
1950 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1951 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1952 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1953 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1954 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1955 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1956 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1957 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1958 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1959 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1960 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1961 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1962 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1963 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1964 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1965 			   enc_control);
1966 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1967 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1968 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1969 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1970 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1971 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1972 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1973 			     FLOW_DISSECTOR_KEY_CT, ct);
1974 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1975 			     FLOW_DISSECTOR_KEY_HASH, hash);
1976 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1977 			     FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
1978 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1979 			     FLOW_DISSECTOR_KEY_PPPOE, pppoe);
1980 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1981 			     FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
1982 
1983 	skb_flow_dissector_init(dissector, keys, cnt);
1984 }
1985 
1986 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1987 					       struct fl_flow_mask *mask)
1988 {
1989 	struct fl_flow_mask *newmask;
1990 	int err;
1991 
1992 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1993 	if (!newmask)
1994 		return ERR_PTR(-ENOMEM);
1995 
1996 	fl_mask_copy(newmask, mask);
1997 
1998 	if ((newmask->key.tp_range.tp_min.dst &&
1999 	     newmask->key.tp_range.tp_max.dst) ||
2000 	    (newmask->key.tp_range.tp_min.src &&
2001 	     newmask->key.tp_range.tp_max.src))
2002 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2003 
2004 	err = fl_init_mask_hashtable(newmask);
2005 	if (err)
2006 		goto errout_free;
2007 
2008 	fl_init_dissector(&newmask->dissector, &newmask->key);
2009 
2010 	INIT_LIST_HEAD_RCU(&newmask->filters);
2011 
2012 	refcount_set(&newmask->refcnt, 1);
2013 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2014 				      &newmask->ht_node, mask_ht_params);
2015 	if (err)
2016 		goto errout_destroy;
2017 
2018 	spin_lock(&head->masks_lock);
2019 	list_add_tail_rcu(&newmask->list, &head->masks);
2020 	spin_unlock(&head->masks_lock);
2021 
2022 	return newmask;
2023 
2024 errout_destroy:
2025 	rhashtable_destroy(&newmask->ht);
2026 errout_free:
2027 	kfree(newmask);
2028 
2029 	return ERR_PTR(err);
2030 }
2031 
2032 static int fl_check_assign_mask(struct cls_fl_head *head,
2033 				struct cls_fl_filter *fnew,
2034 				struct cls_fl_filter *fold,
2035 				struct fl_flow_mask *mask)
2036 {
2037 	struct fl_flow_mask *newmask;
2038 	int ret = 0;
2039 
2040 	rcu_read_lock();
2041 
2042 	/* Insert the mask as a temporary node to prevent concurrent creation
2043 	 * of a mask with the same key. Any concurrent lookup with the same key
2044 	 * will return -EAGAIN because the mask's refcnt is zero.
2045 	 */
2046 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2047 						       &mask->ht_node,
2048 						       mask_ht_params);
2049 	if (!fnew->mask) {
2050 		rcu_read_unlock();
2051 
2052 		if (fold) {
2053 			ret = -EINVAL;
2054 			goto errout_cleanup;
2055 		}
2056 
2057 		newmask = fl_create_new_mask(head, mask);
2058 		if (IS_ERR(newmask)) {
2059 			ret = PTR_ERR(newmask);
2060 			goto errout_cleanup;
2061 		}
2062 
2063 		fnew->mask = newmask;
2064 		return 0;
2065 	} else if (IS_ERR(fnew->mask)) {
2066 		ret = PTR_ERR(fnew->mask);
2067 	} else if (fold && fold->mask != fnew->mask) {
2068 		ret = -EINVAL;
2069 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2070 		/* Mask was deleted concurrently, try again */
2071 		ret = -EAGAIN;
2072 	}
2073 	rcu_read_unlock();
2074 	return ret;
2075 
2076 errout_cleanup:
2077 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
2078 			       mask_ht_params);
2079 	return ret;
2080 }
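
/* Net effect: filters whose masks are byte-identical share one
 * refcounted fl_flow_mask (and with it one rhashtable); the first such
 * filter creates the mask via fl_create_new_mask() and later ones only
 * take a reference, retrying on -EAGAIN if they race with its deletion.
 */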
2081 
2082 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2083 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
2084 			unsigned long base, struct nlattr **tb,
2085 			struct nlattr *est,
2086 			struct fl_flow_tmplt *tmplt,
2087 			u32 flags, u32 fl_flags,
2088 			struct netlink_ext_ack *extack)
2089 {
2090 	int err;
2091 
2092 	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2093 				   fl_flags, extack);
2094 	if (err < 0)
2095 		return err;
2096 
2097 	if (tb[TCA_FLOWER_CLASSID]) {
2098 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2099 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2100 			rtnl_lock();
2101 		tcf_bind_filter(tp, &f->res, base);
2102 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2103 			rtnl_unlock();
2104 	}
2105 
2106 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2107 	if (err)
2108 		return err;
2109 
2110 	fl_mask_update_range(mask);
2111 	fl_set_masked_key(&f->mkey, &f->key, mask);
2112 
2113 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
2114 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2115 		return -EINVAL;
2116 	}
2117 
2118 	return 0;
2119 }
2120 
2121 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2122 			       struct cls_fl_filter *fold,
2123 			       bool *in_ht)
2124 {
2125 	struct fl_flow_mask *mask = fnew->mask;
2126 	int err;
2127 
2128 	err = rhashtable_lookup_insert_fast(&mask->ht,
2129 					    &fnew->ht_node,
2130 					    mask->filter_ht_params);
2131 	if (err) {
2132 		*in_ht = false;
2133 		/* It is okay if a filter with the same key already exists
2134 		 * when we are overwriting an old filter (fold != NULL).
2135 		 */
2136 		return fold && err == -EEXIST ? 0 : err;
2137 	}
2138 
2139 	*in_ht = true;
2140 	return 0;
2141 }
2142 
2143 static int fl_change(struct net *net, struct sk_buff *in_skb,
2144 		     struct tcf_proto *tp, unsigned long base,
2145 		     u32 handle, struct nlattr **tca,
2146 		     void **arg, u32 flags,
2147 		     struct netlink_ext_ack *extack)
2148 {
2149 	struct cls_fl_head *head = fl_head_dereference(tp);
2150 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2151 	struct cls_fl_filter *fold = *arg;
2152 	struct cls_fl_filter *fnew;
2153 	struct fl_flow_mask *mask;
2154 	struct nlattr **tb;
2155 	bool in_ht;
2156 	int err;
2157 
2158 	if (!tca[TCA_OPTIONS]) {
2159 		err = -EINVAL;
2160 		goto errout_fold;
2161 	}
2162 
2163 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2164 	if (!mask) {
2165 		err = -ENOBUFS;
2166 		goto errout_fold;
2167 	}
2168 
2169 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2170 	if (!tb) {
2171 		err = -ENOBUFS;
2172 		goto errout_mask_alloc;
2173 	}
2174 
2175 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2176 					  tca[TCA_OPTIONS], fl_policy, NULL);
2177 	if (err < 0)
2178 		goto errout_tb;
2179 
2180 	if (fold && handle && fold->handle != handle) {
2181 		err = -EINVAL;
2182 		goto errout_tb;
2183 	}
2184 
2185 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2186 	if (!fnew) {
2187 		err = -ENOBUFS;
2188 		goto errout_tb;
2189 	}
2190 	INIT_LIST_HEAD(&fnew->hw_list);
2191 	refcount_set(&fnew->refcnt, 1);
2192 
2193 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2194 	if (err < 0)
2195 		goto errout;
2196 
2197 	if (tb[TCA_FLOWER_FLAGS]) {
2198 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2199 
2200 		if (!tc_flags_valid(fnew->flags)) {
2201 			err = -EINVAL;
2202 			goto errout;
2203 		}
2204 	}
2205 
2206 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2207 			   tp->chain->tmplt_priv, flags, fnew->flags,
2208 			   extack);
2209 	if (err)
2210 		goto errout;
2211 
2212 	err = fl_check_assign_mask(head, fnew, fold, mask);
2213 	if (err)
2214 		goto errout;
2215 
2216 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2217 	if (err)
2218 		goto errout_mask;
2219 
2220 	if (!tc_skip_hw(fnew->flags)) {
2221 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2222 		if (err)
2223 			goto errout_ht;
2224 	}
2225 
2226 	if (!tc_in_hw(fnew->flags))
2227 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2228 
2229 	spin_lock(&tp->lock);
2230 
2231 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2232 	 * up the proto again or create a new one, if necessary.
2233 	 */
2234 	if (tp->deleting) {
2235 		err = -EAGAIN;
2236 		goto errout_hw;
2237 	}
2238 
2239 	if (fold) {
2240 		/* Fold filter was deleted concurrently. Retry lookup. */
2241 		if (fold->deleted) {
2242 			err = -EAGAIN;
2243 			goto errout_hw;
2244 		}
2245 
2246 		fnew->handle = handle;
2247 
2248 		if (!in_ht) {
2249 			struct rhashtable_params params =
2250 				fnew->mask->filter_ht_params;
2251 
2252 			err = rhashtable_insert_fast(&fnew->mask->ht,
2253 						     &fnew->ht_node,
2254 						     params);
2255 			if (err)
2256 				goto errout_hw;
2257 			in_ht = true;
2258 		}
2259 
2260 		refcount_inc(&fnew->refcnt);
2261 		rhashtable_remove_fast(&fold->mask->ht,
2262 				       &fold->ht_node,
2263 				       fold->mask->filter_ht_params);
2264 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2265 		list_replace_rcu(&fold->list, &fnew->list);
2266 		fold->deleted = true;
2267 
2268 		spin_unlock(&tp->lock);
2269 
2270 		fl_mask_put(head, fold->mask);
2271 		if (!tc_skip_hw(fold->flags))
2272 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2273 		tcf_unbind_filter(tp, &fold->res);
2274 		/* Caller holds reference to fold, so refcnt is always > 0
2275 		 * after this.
2276 		 */
2277 		refcount_dec(&fold->refcnt);
2278 		__fl_put(fold);
2279 	} else {
2280 		if (handle) {
2281 			/* The user specified a handle that did not exist at lookup time. */
2282 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2283 					    handle, GFP_ATOMIC);
2284 
2285 			/* A filter with the specified handle was concurrently
2286 			 * inserted after the initial check in cls_api. This is
2287 			 * not necessarily an error if NLM_F_EXCL is not set in
2288 			 * the message flags. Returning -EAGAIN will cause
2289 			 * cls_api to try to update the concurrently inserted rule.
2290 			 */
2291 			if (err == -ENOSPC)
2292 				err = -EAGAIN;
2293 		} else {
2294 			handle = 1;
2295 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2296 					    INT_MAX, GFP_ATOMIC);
2297 		}
2298 		if (err)
2299 			goto errout_hw;
2300 
2301 		refcount_inc(&fnew->refcnt);
2302 		fnew->handle = handle;
2303 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2304 		spin_unlock(&tp->lock);
2305 	}
2306 
2307 	*arg = fnew;
2308 
2309 	kfree(tb);
2310 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2311 	return 0;
2312 
2313 errout_ht:
2314 	spin_lock(&tp->lock);
2315 errout_hw:
2316 	fnew->deleted = true;
2317 	spin_unlock(&tp->lock);
2318 	if (!tc_skip_hw(fnew->flags))
2319 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2320 	if (in_ht)
2321 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2322 				       fnew->mask->filter_ht_params);
2323 errout_mask:
2324 	fl_mask_put(head, fnew->mask);
2325 errout:
2326 	__fl_put(fnew);
2327 errout_tb:
2328 	kfree(tb);
2329 errout_mask_alloc:
2330 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2331 errout_fold:
2332 	if (fold)
2333 		__fl_put(fold);
2334 	return err;
2335 }
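
/* For illustration (hypothetical command): "tc filter add dev eth0
 * ingress flower dst_ip 192.0.2.1 action drop" enters fl_change() with
 * the flower attributes nested in tca[TCA_OPTIONS]; a later replace of
 * the same filter arrives with *arg (fold) pointing at the existing
 * instance.
 */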
2336 
2337 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2338 		     bool rtnl_held, struct netlink_ext_ack *extack)
2339 {
2340 	struct cls_fl_head *head = fl_head_dereference(tp);
2341 	struct cls_fl_filter *f = arg;
2342 	bool last_on_mask;
2343 	int err = 0;
2344 
2345 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2346 	*last = list_empty(&head->masks);
2347 	__fl_put(f);
2348 
2349 	return err;
2350 }
2351 
2352 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2353 		    bool rtnl_held)
2354 {
2355 	struct cls_fl_head *head = fl_head_dereference(tp);
2356 	unsigned long id = arg->cookie, tmp;
2357 	struct cls_fl_filter *f;
2358 
2359 	arg->count = arg->skip;
2360 
2361 	rcu_read_lock();
2362 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2363 		/* don't return filters that are being deleted */
2364 		if (!refcount_inc_not_zero(&f->refcnt))
2365 			continue;
2366 		rcu_read_unlock();
2367 
2368 		if (arg->fn(tp, f, arg) < 0) {
2369 			__fl_put(f);
2370 			arg->stop = 1;
2371 			rcu_read_lock();
2372 			break;
2373 		}
2374 		__fl_put(f);
2375 		arg->count++;
2376 		rcu_read_lock();
2377 	}
2378 	rcu_read_unlock();
2379 	arg->cookie = id;
2380 }
2381 
2382 static struct cls_fl_filter *
2383 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2384 {
2385 	struct cls_fl_head *head = fl_head_dereference(tp);
2386 
2387 	spin_lock(&tp->lock);
2388 	if (list_empty(&head->hw_filters)) {
2389 		spin_unlock(&tp->lock);
2390 		return NULL;
2391 	}
2392 
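	/* When starting a fresh walk, list_entry() on the list head yields a
	 * bogus filter whose ->hw_list aliases the head itself, so
	 * list_for_each_entry_continue() begins at the first real element
	 * without the bogus entry being dereferenced beyond that member.
	 */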
2393 	if (!f)
2394 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2395 			       hw_list);
2396 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2397 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2398 			spin_unlock(&tp->lock);
2399 			return f;
2400 		}
2401 	}
2402 
2403 	spin_unlock(&tp->lock);
2404 	return NULL;
2405 }
2406 
2407 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2408 			void *cb_priv, struct netlink_ext_ack *extack)
2409 {
2410 	struct tcf_block *block = tp->chain->block;
2411 	struct flow_cls_offload cls_flower = {};
2412 	struct cls_fl_filter *f = NULL;
2413 	int err;
2414 
2415 	/* The hw_filters list can only be changed by hw offload functions
2416 	 * after obtaining the rtnl lock. Make sure it is not changed while
2417 	 * reoffload is iterating it.
2418 	 */
2419 	ASSERT_RTNL();
2420 
2421 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2422 		cls_flower.rule =
2423 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2424 		if (!cls_flower.rule) {
2425 			__fl_put(f);
2426 			return -ENOMEM;
2427 		}
2428 
2429 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2430 					   extack);
2431 		cls_flower.command = add ?
2432 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2433 		cls_flower.cookie = (unsigned long)f;
2434 		cls_flower.rule->match.dissector = &f->mask->dissector;
2435 		cls_flower.rule->match.mask = &f->mask->key;
2436 		cls_flower.rule->match.key = &f->mkey;
2437 
2438 		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2439 					      cls_flower.common.extack);
2440 		if (err) {
2441 			kfree(cls_flower.rule);
2442 			if (tc_skip_sw(f->flags)) {
2443 				__fl_put(f);
2444 				return err;
2445 			}
2446 			goto next_flow;
2447 		}
2448 
2449 		cls_flower.classid = f->res.classid;
2450 
2451 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2452 					    TC_SETUP_CLSFLOWER, &cls_flower,
2453 					    cb_priv, &f->flags,
2454 					    &f->in_hw_count);
2455 		tc_cleanup_offload_action(&cls_flower.rule->action);
2456 		kfree(cls_flower.rule);
2457 
2458 		if (err) {
2459 			__fl_put(f);
2460 			return err;
2461 		}
2462 next_flow:
2463 		__fl_put(f);
2464 	}
2465 
2466 	return 0;
2467 }
2468 
2469 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2470 {
2471 	struct flow_cls_offload *cls_flower = type_data;
2472 	struct cls_fl_filter *f =
2473 		(struct cls_fl_filter *) cls_flower->cookie;
2474 	struct cls_fl_head *head = fl_head_dereference(tp);
2475 
2476 	spin_lock(&tp->lock);
2477 	list_add(&f->hw_list, &head->hw_filters);
2478 	spin_unlock(&tp->lock);
2479 }
2480 
2481 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2482 {
2483 	struct flow_cls_offload *cls_flower = type_data;
2484 	struct cls_fl_filter *f =
2485 		(struct cls_fl_filter *) cls_flower->cookie;
2486 
2487 	spin_lock(&tp->lock);
2488 	if (!list_empty(&f->hw_list))
2489 		list_del_init(&f->hw_list);
2490 	spin_unlock(&tp->lock);
2491 }
2492 
2493 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2494 			      struct fl_flow_tmplt *tmplt)
2495 {
2496 	struct flow_cls_offload cls_flower = {};
2497 	struct tcf_block *block = chain->block;
2498 
2499 	cls_flower.rule = flow_rule_alloc(0);
2500 	if (!cls_flower.rule)
2501 		return -ENOMEM;
2502 
2503 	cls_flower.common.chain_index = chain->index;
2504 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2505 	cls_flower.cookie = (unsigned long) tmplt;
2506 	cls_flower.rule->match.dissector = &tmplt->dissector;
2507 	cls_flower.rule->match.mask = &tmplt->mask;
2508 	cls_flower.rule->match.key = &tmplt->dummy_key;
2509 
2510 	/* We don't care if any of the drivers fails to handle this call;
2511 	 * the template serves merely as a hint to them.
2512 	 */
2513 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2514 	kfree(cls_flower.rule);
2515 
2516 	return 0;
2517 }
2518 
2519 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2520 				struct fl_flow_tmplt *tmplt)
2521 {
2522 	struct flow_cls_offload cls_flower = {};
2523 	struct tcf_block *block = chain->block;
2524 
2525 	cls_flower.common.chain_index = chain->index;
2526 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2527 	cls_flower.cookie = (unsigned long) tmplt;
2528 
2529 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2530 }
2531 
2532 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2533 			     struct nlattr **tca,
2534 			     struct netlink_ext_ack *extack)
2535 {
2536 	struct fl_flow_tmplt *tmplt;
2537 	struct nlattr **tb;
2538 	int err;
2539 
2540 	if (!tca[TCA_OPTIONS])
2541 		return ERR_PTR(-EINVAL);
2542 
2543 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2544 	if (!tb)
2545 		return ERR_PTR(-ENOBUFS);
2546 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2547 					  tca[TCA_OPTIONS], fl_policy, NULL);
2548 	if (err)
2549 		goto errout_tb;
2550 
2551 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2552 	if (!tmplt) {
2553 		err = -ENOMEM;
2554 		goto errout_tb;
2555 	}
2556 	tmplt->chain = chain;
2557 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2558 	if (err)
2559 		goto errout_tmplt;
2560 
2561 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2562 
2563 	err = fl_hw_create_tmplt(chain, tmplt);
2564 	if (err)
2565 		goto errout_tmplt;
2566 
2567 	kfree(tb);
2568 	return tmplt;
2569 
2570 errout_tmplt:
2571 	kfree(tmplt);
2572 errout_tb:
2573 	kfree(tb);
2574 	return ERR_PTR(err);
2575 }
2576 
2577 static void fl_tmplt_destroy(void *tmplt_priv)
2578 {
2579 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2580 
2581 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2582 	kfree(tmplt);
2583 }
2584 
2585 static int fl_dump_key_val(struct sk_buff *skb,
2586 			   void *val, int val_type,
2587 			   void *mask, int mask_type, int len)
2588 {
2589 	int err;
2590 
2591 	if (!memchr_inv(mask, 0, len))
2592 		return 0;
2593 	err = nla_put(skb, val_type, len, val);
2594 	if (err)
2595 		return err;
2596 	if (mask_type != TCA_FLOWER_UNSPEC) {
2597 		err = nla_put(skb, mask_type, len, mask);
2598 		if (err)
2599 			return err;
2600 	}
2601 	return 0;
2602 }
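
/* Note: an all-zero mask means the field is not matched at all, so the
 * attribute and its mask are simply omitted from the dump rather than
 * being emitted as zeros.
 */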
2603 
2604 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2605 				  struct fl_flow_key *mask)
2606 {
2607 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2608 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2609 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2610 			    sizeof(key->tp_range.tp_min.dst)) ||
2611 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2612 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2613 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2614 			    sizeof(key->tp_range.tp_max.dst)) ||
2615 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2616 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2617 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2618 			    sizeof(key->tp_range.tp_min.src)) ||
2619 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2620 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2621 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2622 			    sizeof(key->tp_range.tp_max.src)))
2623 		return -1;
2624 
2625 	return 0;
2626 }
2627 
2628 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2629 				    struct flow_dissector_key_mpls *mpls_key,
2630 				    struct flow_dissector_key_mpls *mpls_mask,
2631 				    u8 lse_index)
2632 {
2633 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2634 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2635 	int err;
2636 
2637 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2638 			 lse_index + 1);
2639 	if (err)
2640 		return err;
2641 
2642 	if (lse_mask->mpls_ttl) {
2643 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2644 				 lse_key->mpls_ttl);
2645 		if (err)
2646 			return err;
2647 	}
2648 	if (lse_mask->mpls_bos) {
2649 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2650 				 lse_key->mpls_bos);
2651 		if (err)
2652 			return err;
2653 	}
2654 	if (lse_mask->mpls_tc) {
2655 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2656 				 lse_key->mpls_tc);
2657 		if (err)
2658 			return err;
2659 	}
2660 	if (lse_mask->mpls_label) {
2661 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2662 				  lse_key->mpls_label);
2663 		if (err)
2664 			return err;
2665 	}
2666 
2667 	return 0;
2668 }
2669 
2670 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2671 				 struct flow_dissector_key_mpls *mpls_key,
2672 				 struct flow_dissector_key_mpls *mpls_mask)
2673 {
2674 	struct nlattr *opts;
2675 	struct nlattr *lse;
2676 	u8 lse_index;
2677 	int err;
2678 
2679 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2680 	if (!opts)
2681 		return -EMSGSIZE;
2682 
2683 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2684 		if (!(mpls_mask->used_lses & 1 << lse_index))
2685 			continue;
2686 
2687 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2688 		if (!lse) {
2689 			err = -EMSGSIZE;
2690 			goto err_opts;
2691 		}
2692 
2693 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2694 					       lse_index);
2695 		if (err)
2696 			goto err_opts_lse;
2697 		nla_nest_end(skb, lse);
2698 	}
2699 	nla_nest_end(skb, opts);
2700 
2701 	return 0;
2702 
2703 err_opts_lse:
2704 	nla_nest_cancel(skb, lse);
2705 err_opts:
2706 	nla_nest_cancel(skb, opts);
2707 
2708 	return err;
2709 }
2710 
2711 static int fl_dump_key_mpls(struct sk_buff *skb,
2712 			    struct flow_dissector_key_mpls *mpls_key,
2713 			    struct flow_dissector_key_mpls *mpls_mask)
2714 {
2715 	struct flow_dissector_mpls_lse *lse_mask;
2716 	struct flow_dissector_mpls_lse *lse_key;
2717 	int err;
2718 
2719 	if (!mpls_mask->used_lses)
2720 		return 0;
2721 
2722 	lse_mask = &mpls_mask->ls[0];
2723 	lse_key = &mpls_key->ls[0];
2724 
2725 	/* For backward compatibility, don't use the MPLS nested attributes if
2726 	 * the rule can be expressed using the old attributes.
2727 	 */
2728 	if (mpls_mask->used_lses & ~1 ||
2729 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2730 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2731 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2732 
2733 	if (lse_mask->mpls_ttl) {
2734 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2735 				 lse_key->mpls_ttl);
2736 		if (err)
2737 			return err;
2738 	}
2739 	if (lse_mask->mpls_tc) {
2740 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2741 				 lse_key->mpls_tc);
2742 		if (err)
2743 			return err;
2744 	}
2745 	if (lse_mask->mpls_label) {
2746 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2747 				  lse_key->mpls_label);
2748 		if (err)
2749 			return err;
2750 	}
2751 	if (lse_mask->mpls_bos) {
2752 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2753 				 lse_key->mpls_bos);
2754 		if (err)
2755 			return err;
2756 	}
2757 	return 0;
2758 }
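
/* Example of the compatibility split above: a filter that masks only
 * the TTL of LSE 0 is dumped via the old flat TCA_FLOWER_KEY_MPLS_TTL
 * attribute, while any filter touching LSE 1 or deeper (used_lses & ~1)
 * must use the nested TCA_FLOWER_KEY_MPLS_OPTS form.
 */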
2759 
2760 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2761 			  struct flow_dissector_key_ip *key,
2762 			  struct flow_dissector_key_ip *mask)
2763 {
2764 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2765 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2766 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2767 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2768 
2769 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2770 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2771 		return -1;
2772 
2773 	return 0;
2774 }
2775 
2776 static int fl_dump_key_vlan(struct sk_buff *skb,
2777 			    int vlan_id_key, int vlan_prio_key,
2778 			    struct flow_dissector_key_vlan *vlan_key,
2779 			    struct flow_dissector_key_vlan *vlan_mask)
2780 {
2781 	int err;
2782 
2783 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2784 		return 0;
2785 	if (vlan_mask->vlan_id) {
2786 		err = nla_put_u16(skb, vlan_id_key,
2787 				  vlan_key->vlan_id);
2788 		if (err)
2789 			return err;
2790 	}
2791 	if (vlan_mask->vlan_priority) {
2792 		err = nla_put_u8(skb, vlan_prio_key,
2793 				 vlan_key->vlan_priority);
2794 		if (err)
2795 			return err;
2796 	}
2797 	return 0;
2798 }
2799 
2800 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2801 			    u32 *flower_key, u32 *flower_mask,
2802 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2803 {
2804 	if (dissector_mask & dissector_flag_bit) {
2805 		*flower_mask |= flower_flag_bit;
2806 		if (dissector_key & dissector_flag_bit)
2807 			*flower_key |= flower_flag_bit;
2808 	}
2809 }
2810 
2811 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2812 {
2813 	u32 key, mask;
2814 	__be32 _key, _mask;
2815 	int err;
2816 
2817 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2818 		return 0;
2819 
2820 	key = 0;
2821 	mask = 0;
2822 
2823 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2824 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2825 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2826 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2827 			FLOW_DIS_FIRST_FRAG);
2828 
2829 	_key = cpu_to_be32(key);
2830 	_mask = cpu_to_be32(mask);
2831 
2832 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2833 	if (err)
2834 		return err;
2835 
2836 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2837 }
2838 
2839 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2840 				  struct flow_dissector_key_enc_opts *enc_opts)
2841 {
2842 	struct geneve_opt *opt;
2843 	struct nlattr *nest;
2844 	int opt_off = 0;
2845 
2846 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2847 	if (!nest)
2848 		goto nla_put_failure;
2849 
2850 	while (enc_opts->len > opt_off) {
2851 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2852 
2853 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2854 				 opt->opt_class))
2855 			goto nla_put_failure;
2856 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2857 			       opt->type))
2858 			goto nla_put_failure;
2859 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2860 			    opt->length * 4, opt->opt_data))
2861 			goto nla_put_failure;
2862 
2863 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2864 	}
2865 	nla_nest_end(skb, nest);
2866 	return 0;
2867 
2868 nla_put_failure:
2869 	nla_nest_cancel(skb, nest);
2870 	return -EMSGSIZE;
2871 }
2872 
2873 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2874 				 struct flow_dissector_key_enc_opts *enc_opts)
2875 {
2876 	struct vxlan_metadata *md;
2877 	struct nlattr *nest;
2878 
2879 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2880 	if (!nest)
2881 		goto nla_put_failure;
2882 
2883 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2884 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2885 		goto nla_put_failure;
2886 
2887 	nla_nest_end(skb, nest);
2888 	return 0;
2889 
2890 nla_put_failure:
2891 	nla_nest_cancel(skb, nest);
2892 	return -EMSGSIZE;
2893 }
2894 
2895 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2896 				  struct flow_dissector_key_enc_opts *enc_opts)
2897 {
2898 	struct erspan_metadata *md;
2899 	struct nlattr *nest;
2900 
2901 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2902 	if (!nest)
2903 		goto nla_put_failure;
2904 
2905 	md = (struct erspan_metadata *)&enc_opts->data[0];
2906 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2907 		goto nla_put_failure;
2908 
2909 	if (md->version == 1 &&
2910 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2911 		goto nla_put_failure;
2912 
2913 	if (md->version == 2 &&
2914 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2915 			md->u.md2.dir) ||
2916 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2917 			get_hwid(&md->u.md2))))
2918 		goto nla_put_failure;
2919 
2920 	nla_nest_end(skb, nest);
2921 	return 0;
2922 
2923 nla_put_failure:
2924 	nla_nest_cancel(skb, nest);
2925 	return -EMSGSIZE;
2926 }
2927 
2928 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2929 			       struct flow_dissector_key_enc_opts *enc_opts)
2931 {
2932 	struct gtp_pdu_session_info *session_info;
2933 	struct nlattr *nest;
2934 
2935 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2936 	if (!nest)
2937 		goto nla_put_failure;
2938 
2939 	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2940 
2941 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2942 		       session_info->pdu_type))
2943 		goto nla_put_failure;
2944 
2945 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2946 		goto nla_put_failure;
2947 
2948 	nla_nest_end(skb, nest);
2949 	return 0;
2950 
2951 nla_put_failure:
2952 	nla_nest_cancel(skb, nest);
2953 	return -EMSGSIZE;
2954 }
2955 
2956 static int fl_dump_key_ct(struct sk_buff *skb,
2957 			  struct flow_dissector_key_ct *key,
2958 			  struct flow_dissector_key_ct *mask)
2959 {
2960 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2961 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2962 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2963 			    sizeof(key->ct_state)))
2964 		goto nla_put_failure;
2965 
2966 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2967 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2968 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2969 			    sizeof(key->ct_zone)))
2970 		goto nla_put_failure;
2971 
2972 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2973 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2974 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2975 			    sizeof(key->ct_mark)))
2976 		goto nla_put_failure;
2977 
2978 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2979 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2980 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2981 			    sizeof(key->ct_labels)))
2982 		goto nla_put_failure;
2983 
2984 	return 0;
2985 
2986 nla_put_failure:
2987 	return -EMSGSIZE;
2988 }
2989 
2990 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2991 			       struct flow_dissector_key_enc_opts *enc_opts)
2992 {
2993 	struct nlattr *nest;
2994 	int err;
2995 
2996 	if (!enc_opts->len)
2997 		return 0;
2998 
2999 	nest = nla_nest_start_noflag(skb, enc_opt_type);
3000 	if (!nest)
3001 		goto nla_put_failure;
3002 
3003 	switch (enc_opts->dst_opt_type) {
3004 	case TUNNEL_GENEVE_OPT:
3005 		err = fl_dump_key_geneve_opt(skb, enc_opts);
3006 		if (err)
3007 			goto nla_put_failure;
3008 		break;
3009 	case TUNNEL_VXLAN_OPT:
3010 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
3011 		if (err)
3012 			goto nla_put_failure;
3013 		break;
3014 	case TUNNEL_ERSPAN_OPT:
3015 		err = fl_dump_key_erspan_opt(skb, enc_opts);
3016 		if (err)
3017 			goto nla_put_failure;
3018 		break;
3019 	case TUNNEL_GTP_OPT:
3020 		err = fl_dump_key_gtp_opt(skb, enc_opts);
3021 		if (err)
3022 			goto nla_put_failure;
3023 		break;
3024 	default:
3025 		goto nla_put_failure;
3026 	}
3027 	nla_nest_end(skb, nest);
3028 	return 0;
3029 
3030 nla_put_failure:
3031 	nla_nest_cancel(skb, nest);
3032 	return -EMSGSIZE;
3033 }
3034 
3035 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3036 			       struct flow_dissector_key_enc_opts *key_opts,
3037 			       struct flow_dissector_key_enc_opts *msk_opts)
3038 {
3039 	int err;
3040 
3041 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3042 	if (err)
3043 		return err;
3044 
3045 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3046 }
3047 
3048 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3049 		       struct fl_flow_key *key, struct fl_flow_key *mask)
3050 {
3051 	if (mask->meta.ingress_ifindex) {
3052 		struct net_device *dev;
3053 
3054 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3055 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3056 			goto nla_put_failure;
3057 	}
3058 
3059 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3060 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3061 			    sizeof(key->eth.dst)) ||
3062 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3063 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3064 			    sizeof(key->eth.src)) ||
3065 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3066 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3067 			    sizeof(key->basic.n_proto)))
3068 		goto nla_put_failure;
3069 
3070 	if (mask->num_of_vlans.num_of_vlans) {
3071 		if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3072 			goto nla_put_failure;
3073 	}
3074 
3075 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3076 		goto nla_put_failure;
3077 
3078 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3079 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3080 		goto nla_put_failure;
3081 
3082 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3083 			     TCA_FLOWER_KEY_CVLAN_PRIO,
3084 			     &key->cvlan, &mask->cvlan) ||
3085 	    (mask->cvlan.vlan_tpid &&
3086 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3087 			  key->cvlan.vlan_tpid)))
3088 		goto nla_put_failure;
3089 
3090 	if (mask->basic.n_proto) {
3091 		if (mask->cvlan.vlan_eth_type) {
3092 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3093 					 key->basic.n_proto))
3094 				goto nla_put_failure;
3095 		} else if (mask->vlan.vlan_eth_type) {
3096 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3097 					 key->vlan.vlan_eth_type))
3098 				goto nla_put_failure;
3099 		}
3100 	}
3101 
3102 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
3103 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
3104 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3105 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3106 			    sizeof(key->basic.ip_proto)) ||
3107 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3108 		goto nla_put_failure;
3109 
3110 	if (mask->pppoe.session_id) {
3111 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3112 				 key->pppoe.session_id))
3113 			goto nla_put_failure;
3114 	}
3115 	if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3116 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3117 				 key->pppoe.ppp_proto))
3118 			goto nla_put_failure;
3119 	}
3120 
3121 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3122 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3123 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3124 			     sizeof(key->ipv4.src)) ||
3125 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3126 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3127 			     sizeof(key->ipv4.dst))))
3128 		goto nla_put_failure;
3129 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3130 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3131 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3132 				  sizeof(key->ipv6.src)) ||
3133 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3134 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3135 				  sizeof(key->ipv6.dst))))
3136 		goto nla_put_failure;
3137 
3138 	if (key->basic.ip_proto == IPPROTO_TCP &&
3139 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3140 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3141 			     sizeof(key->tp.src)) ||
3142 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3143 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3144 			     sizeof(key->tp.dst)) ||
3145 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3146 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3147 			     sizeof(key->tcp.flags))))
3148 		goto nla_put_failure;
3149 	else if (key->basic.ip_proto == IPPROTO_UDP &&
3150 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3151 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3152 				  sizeof(key->tp.src)) ||
3153 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3154 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3155 				  sizeof(key->tp.dst))))
3156 		goto nla_put_failure;
3157 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
3158 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3159 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3160 				  sizeof(key->tp.src)) ||
3161 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3162 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3163 				  sizeof(key->tp.dst))))
3164 		goto nla_put_failure;
3165 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
3166 		 key->basic.ip_proto == IPPROTO_ICMP &&
3167 		 (fl_dump_key_val(skb, &key->icmp.type,
3168 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3169 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3170 				  sizeof(key->icmp.type)) ||
3171 		  fl_dump_key_val(skb, &key->icmp.code,
3172 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3173 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3174 				  sizeof(key->icmp.code))))
3175 		goto nla_put_failure;
3176 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3177 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3178 		 (fl_dump_key_val(skb, &key->icmp.type,
3179 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3180 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3181 				  sizeof(key->icmp.type)) ||
3182 		  fl_dump_key_val(skb, &key->icmp.code,
3183 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3184 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3185 				  sizeof(key->icmp.code))))
3186 		goto nla_put_failure;
3187 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3188 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
3189 		 (fl_dump_key_val(skb, &key->arp.sip,
3190 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3191 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
3192 				  sizeof(key->arp.sip)) ||
3193 		  fl_dump_key_val(skb, &key->arp.tip,
3194 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3195 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3196 				  sizeof(key->arp.tip)) ||
3197 		  fl_dump_key_val(skb, &key->arp.op,
3198 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3199 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3200 				  sizeof(key->arp.op)) ||
3201 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3202 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3203 				  sizeof(key->arp.sha)) ||
3204 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3205 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3206 				  sizeof(key->arp.tha))))
3207 		goto nla_put_failure;
3208 	else if (key->basic.ip_proto == IPPROTO_L2TP &&
3209 		 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3210 				 TCA_FLOWER_KEY_L2TPV3_SID,
3211 				 &mask->l2tpv3.session_id,
3212 				 TCA_FLOWER_UNSPEC,
3213 				 sizeof(key->l2tpv3.session_id)))
3214 		goto nla_put_failure;
3215 
3216 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3217 	     key->basic.ip_proto == IPPROTO_UDP ||
3218 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3219 	     fl_dump_key_port_range(skb, key, mask))
3220 		goto nla_put_failure;
3221 
3222 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3223 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3224 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3225 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3226 			    sizeof(key->enc_ipv4.src)) ||
3227 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3228 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3229 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3230 			     sizeof(key->enc_ipv4.dst))))
3231 		goto nla_put_failure;
3232 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3233 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3234 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3235 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3236 			    sizeof(key->enc_ipv6.src)) ||
3237 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3238 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3239 				 &mask->enc_ipv6.dst,
3240 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3241 			    sizeof(key->enc_ipv6.dst))))
3242 		goto nla_put_failure;
3243 
3244 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3245 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3246 			    sizeof(key->enc_key_id)) ||
3247 	    fl_dump_key_val(skb, &key->enc_tp.src,
3248 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3249 			    &mask->enc_tp.src,
3250 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3251 			    sizeof(key->enc_tp.src)) ||
3252 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3253 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3254 			    &mask->enc_tp.dst,
3255 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3256 			    sizeof(key->enc_tp.dst)) ||
3257 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3258 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3259 		goto nla_put_failure;
3260 
3261 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3262 		goto nla_put_failure;
3263 
3264 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3265 		goto nla_put_failure;
3266 
3267 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3268 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3269 			     sizeof(key->hash.hash)))
3270 		goto nla_put_failure;
3271 
3272 	return 0;
3273 
3274 nla_put_failure:
3275 	return -EMSGSIZE;
3276 }
3277 
3278 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3279 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3280 {
3281 	struct cls_fl_filter *f = fh;
3282 	struct nlattr *nest;
3283 	struct fl_flow_key *key, *mask;
3284 	bool skip_hw;
3285 
3286 	if (!f)
3287 		return skb->len;
3288 
3289 	t->tcm_handle = f->handle;
3290 
3291 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3292 	if (!nest)
3293 		goto nla_put_failure;
3294 
3295 	spin_lock(&tp->lock);
3296 
3297 	if (f->res.classid &&
3298 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3299 		goto nla_put_failure_locked;
3300 
3301 	key = &f->key;
3302 	mask = &f->mask->key;
3303 	skip_hw = tc_skip_hw(f->flags);
3304 
3305 	if (fl_dump_key(skb, net, key, mask))
3306 		goto nla_put_failure_locked;
3307 
3308 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3309 		goto nla_put_failure_locked;
3310 
3311 	spin_unlock(&tp->lock);
3312 
3313 	if (!skip_hw)
3314 		fl_hw_update_stats(tp, f, rtnl_held);
3315 
3316 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3317 		goto nla_put_failure;
3318 
3319 	if (tcf_exts_dump(skb, &f->exts))
3320 		goto nla_put_failure;
3321 
3322 	nla_nest_end(skb, nest);
3323 
3324 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3325 		goto nla_put_failure;
3326 
3327 	return skb->len;
3328 
3329 nla_put_failure_locked:
3330 	spin_unlock(&tp->lock);
3331 nla_put_failure:
3332 	nla_nest_cancel(skb, nest);
3333 	return -1;
3334 }
3335 
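/* Terse variant of fl_dump(): skips the key/mask dump and emits only the
 * handle, the flags and a terse view of the attached actions. The core
 * selects this op when user space requests a terse dump.
 */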
3336 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3337 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3338 {
3339 	struct cls_fl_filter *f = fh;
3340 	struct nlattr *nest;
3341 	bool skip_hw;
3342 
3343 	if (!f)
3344 		return skb->len;
3345 
3346 	t->tcm_handle = f->handle;
3347 
3348 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3349 	if (!nest)
3350 		goto nla_put_failure;
3351 
3352 	spin_lock(&tp->lock);
3353 
3354 	skip_hw = tc_skip_hw(f->flags);
3355 
3356 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3357 		goto nla_put_failure_locked;
3358 
3359 	spin_unlock(&tp->lock);
3360 
3361 	if (!skip_hw)
3362 		fl_hw_update_stats(tp, f, rtnl_held);
3363 
3364 	if (tcf_exts_terse_dump(skb, &f->exts))
3365 		goto nla_put_failure;
3366 
3367 	nla_nest_end(skb, nest);
3368 
3369 	return skb->len;
3370 
3371 nla_put_failure_locked:
3372 	spin_unlock(&tp->lock);
3373 nla_put_failure:
3374 	nla_nest_cancel(skb, nest);
3375 	return -1;
3376 }
3377 
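/* Dump a chain template. A template carries no filter state of its own;
 * only the key/mask pair recorded at template creation time is emitted.
 */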
3378 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3379 {
3380 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3381 	struct fl_flow_key *key, *mask;
3382 	struct nlattr *nest;
3383 
3384 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3385 	if (!nest)
3386 		goto nla_put_failure;
3387 
3388 	key = &tmplt->dummy_key;
3389 	mask = &tmplt->mask;
3390 
3391 	if (fl_dump_key(skb, net, key, mask))
3392 		goto nla_put_failure;
3393 
3394 	nla_nest_end(skb, nest);
3395 
3396 	return skb->len;
3397 
3398 nla_put_failure:
3399 	nla_nest_cancel(skb, nest);
3400 	return -EMSGSIZE;
3401 }
3402 
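/* Rebind the filter's class reference when its classid is (un)bound from
 * a class of the owning qdisc; the common helper does the actual work.
 */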
3403 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3404 			  unsigned long base)
3405 {
3406 	struct cls_fl_filter *f = fh;
3407 
3408 	tc_cls_bind_class(classid, cl, q, &f->res, base);
3409 }
3410 
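/* Report whether this instance holds no filters so the tc core can delete
 * it. tp->deleting is set under tp->lock, so a concurrent fl_change()
 * either observes the flag (and fails with -EAGAIN) or has already
 * inserted a new handle into handle_idr.
 */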
3411 static bool fl_delete_empty(struct tcf_proto *tp)
3412 {
3413 	struct cls_fl_head *head = fl_head_dereference(tp);
3414 
3415 	spin_lock(&tp->lock);
3416 	tp->deleting = idr_is_empty(&head->handle_idr);
3417 	spin_unlock(&tp->lock);
3418 
3419 	return tp->deleting;
3420 }
3421 
3422 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3423 	.kind		= "flower",
3424 	.classify	= fl_classify,
3425 	.init		= fl_init,
3426 	.destroy	= fl_destroy,
3427 	.get		= fl_get,
3428 	.put		= fl_put,
3429 	.change		= fl_change,
3430 	.delete		= fl_delete,
3431 	.delete_empty	= fl_delete_empty,
3432 	.walk		= fl_walk,
3433 	.reoffload	= fl_reoffload,
3434 	.hw_add		= fl_hw_add,
3435 	.hw_del		= fl_hw_del,
3436 	.dump		= fl_dump,
3437 	.terse_dump	= fl_terse_dump,
3438 	.bind_class	= fl_bind_class,
3439 	.tmplt_create	= fl_tmplt_create,
3440 	.tmplt_destroy	= fl_tmplt_destroy,
3441 	.tmplt_dump	= fl_tmplt_dump,
3442 	.owner		= THIS_MODULE,
3443 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3444 };
3445 
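/* TCF_PROTO_OPS_DOIT_UNLOCKED above marks flower as safe to run its
 * change/delete/get/dump paths without holding rtnl; module init simply
 * registers the ops with the classifier core.
 */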
3446 static int __init cls_fl_init(void)
3447 {
3448 	return register_tcf_proto_ops(&cls_fl_ops);
3449 }
3450 
3451 static void __exit cls_fl_exit(void)
3452 {
3453 	unregister_tcf_proto_ops(&cls_fl_ops);
3454 }
3455 
3456 module_init(cls_fl_init);
3457 module_exit(cls_fl_exit);
3458 
3459 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3460 MODULE_DESCRIPTION("Flower classifier");
3461 MODULE_LICENSE("GPL v2");
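/* Illustrative usage (annotation, not part of the source): a filter that
 * exercises the tunnel-key dump path above; the tc-flower options map onto
 * the TCA_FLOWER_KEY_ENC_* attributes emitted by fl_dump_key():
 *
 *   tc filter add dev vxlan0 ingress protocol ip flower \
 *       enc_key_id 42 enc_src_ip 10.0.0.1 enc_dst_ip 10.0.0.2 \
 *       enc_dst_port 4789 action drop
 *   tc -s filter show dev vxlan0 ingress
 */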
3462