// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
        struct rhash_head node; /* In zones tables */

        struct rcu_work rwork;
        struct nf_flowtable nf_ft;
        refcount_t ref;
        u16 zone;

        bool dying;
};

static const struct rhashtable_params zones_params = {
        .head_offset = offsetof(struct tcf_ct_flow_table, node),
        .key_offset = offsetof(struct tcf_ct_flow_table, zone),
        .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
        .automatic_shrinking = true,
};
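
/* zones_ht above maps a conntrack zone id to its tcf_ct_flow_table, so every
 * ct action configured for the same zone shares one flow table; lookups key
 * directly on the u16 zone member (see zones_params).
 */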

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
        int i = flow_action->num_entries++;

        return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
                                     enum flow_action_mangle_base htype,
                                     u32 offset,
                                     u32 mask,
                                     u32 val)
{
        struct flow_action_entry *entry;

        entry = tcf_ct_flow_table_flow_action_get_next(action);
        entry->id = FLOW_ACTION_MANGLE;
        entry->mangle.htype = htype;
        entry->mangle.mask = ~mask;
        entry->mangle.offset = offset;
        entry->mangle.val = val;
}

/* The following nat helper functions check if the inverted reverse tuple
 * (target) is different than the current dir tuple - meaning nat for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
                                      struct nf_conntrack_tuple target,
                                      struct flow_action *action)
{
        if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
                tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
                                         offsetof(struct iphdr, saddr),
                                         0xFFFFFFFF,
                                         be32_to_cpu(target.src.u3.ip));
        if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
                tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
                                         offsetof(struct iphdr, daddr),
                                         0xFFFFFFFF,
                                         be32_to_cpu(target.dst.u3.ip));
}
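
/* Worked example for tcf_ct_flow_table_add_action_nat_ipv4() above
 * (illustrative addresses): with SNAT mapping 10.0.0.1 -> 192.0.2.1, the
 * inverted reply tuple's source (192.0.2.1) differs from the original
 * direction tuple's source (10.0.0.1), so one FLOW_ACTION_MANGLE entry is
 * emitted to rewrite iphdr->saddr; daddr is left alone because those fields
 * match.
 */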

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
                                   union nf_inet_addr *addr,
                                   u32 offset)
{
        int i;

        for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
                tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
                                         i * sizeof(u32) + offset,
                                         0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
                                      struct nf_conntrack_tuple target,
                                      struct flow_action *action)
{
        if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
                tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
                                                   offsetof(struct ipv6hdr,
                                                            saddr));
        if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
                tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
                                                   offsetof(struct ipv6hdr,
                                                            daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
                                     struct nf_conntrack_tuple target,
                                     struct flow_action *action)
{
        __be16 target_src = target.src.u.tcp.port;
        __be16 target_dst = target.dst.u.tcp.port;

        if (target_src != tuple->src.u.tcp.port)
                tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
                                         offsetof(struct tcphdr, source),
                                         0xFFFF, be16_to_cpu(target_src));
        if (target_dst != tuple->dst.u.tcp.port)
                tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
                                         offsetof(struct tcphdr, dest),
                                         0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
                                     struct nf_conntrack_tuple target,
                                     struct flow_action *action)
{
        __be16 target_src = target.src.u.udp.port;
        __be16 target_dst = target.dst.u.udp.port;

        if (target_src != tuple->src.u.udp.port)
                tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
                                         offsetof(struct udphdr, source),
                                         0xFFFF, be16_to_cpu(target_src));
        if (target_dst != tuple->dst.u.udp.port)
                tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
                                         offsetof(struct udphdr, dest),
                                         0xFFFF, be16_to_cpu(target_dst));
}

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
                                              enum ip_conntrack_dir dir,
                                              struct flow_action *action)
{
        struct nf_conn_labels *ct_labels;
        struct flow_action_entry *entry;
        enum ip_conntrack_info ctinfo;
        u32 *act_ct_labels;

        entry = tcf_ct_flow_table_flow_action_get_next(action);
        entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
        entry->ct_metadata.mark = ct->mark;
#endif
        ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
                                             IP_CT_ESTABLISHED_REPLY;
        /* aligns with the CT reference on the SKB nf_ct_set */
        entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
        entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

        act_ct_labels = entry->ct_metadata.labels;
        ct_labels = nf_ct_labels_find(ct);
        if (ct_labels)
                memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
        else
                memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}
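
/* The cookie built in tcf_ct_flow_table_add_action_meta() above works
 * because struct nf_conn is at least pointer aligned, leaving the low bits
 * of the ct address free to carry ctinfo - the same packing nf_ct_set()
 * uses for skb->_nfct, so consumers can recover both ct and ctinfo from it.
 */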

static int tcf_ct_flow_table_add_action_nat(struct net *net,
                                            struct nf_conn *ct,
                                            enum ip_conntrack_dir dir,
                                            struct flow_action *action)
{
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
        struct nf_conntrack_tuple target;

        if (!(ct->status & IPS_NAT_MASK))
                return 0;

        nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

        switch (tuple->src.l3num) {
        case NFPROTO_IPV4:
                tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
                                                      action);
                break;
        case NFPROTO_IPV6:
                tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
                                                      action);
                break;
        default:
                return -EOPNOTSUPP;
        }

        switch (nf_ct_protonum(ct)) {
        case IPPROTO_TCP:
                tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
                break;
        case IPPROTO_UDP:
                tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
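
/* Per direction, tcf_ct_flow_table_add_action_nat() emits at most two
 * address mangles for IPv4 (or eight for IPv6, four u32 words per address)
 * plus at most two port mangles; connections without NAT contribute no
 * entries at all since IPS_NAT_MASK is clear.
 */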

static int tcf_ct_flow_table_fill_actions(struct net *net,
                                          const struct flow_offload *flow,
                                          enum flow_offload_tuple_dir tdir,
                                          struct nf_flow_rule *flow_rule)
{
        struct flow_action *action = &flow_rule->rule->action;
        int num_entries = action->num_entries;
        struct nf_conn *ct = flow->ct;
        enum ip_conntrack_dir dir;
        int i, err;

        switch (tdir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                dir = IP_CT_DIR_ORIGINAL;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                dir = IP_CT_DIR_REPLY;
                break;
        default:
                return -EOPNOTSUPP;
        }

        err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
        if (err)
                goto err_nat;

        tcf_ct_flow_table_add_action_meta(ct, dir, action);
        return 0;

err_nat:
        /* Clear filled actions */
        for (i = num_entries; i < action->num_entries; i++)
                memset(&action->entries[i], 0, sizeof(action->entries[i]));
        action->num_entries = num_entries;

        return err;
}

static struct nf_flowtable_type flowtable_ct = {
        .action = tcf_ct_flow_table_fill_actions,
        .owner = THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
        struct tcf_ct_flow_table *ct_ft;
        int err = -ENOMEM;

        mutex_lock(&zones_mutex);
        ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
        if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
                goto out_unlock;

        ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
        if (!ct_ft)
                goto err_alloc;
        refcount_set(&ct_ft->ref, 1);

        ct_ft->zone = params->zone;
        err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
        if (err)
                goto err_insert;

        ct_ft->nf_ft.type = &flowtable_ct;
        ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
                              NF_FLOWTABLE_COUNTER;
        err = nf_flow_table_init(&ct_ft->nf_ft);
        if (err)
                goto err_init;

        __module_get(THIS_MODULE);
out_unlock:
        params->ct_ft = ct_ft;
        params->nf_ft = &ct_ft->nf_ft;
        mutex_unlock(&zones_mutex);

        return 0;

err_init:
        rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
        kfree(ct_ft);
err_alloc:
        mutex_unlock(&zones_mutex);
        return err;
}
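
/* tcf_ct_flow_table_get() pairs with tcf_ct_flow_table_put() below: the
 * first ct action in a zone allocates and registers the zone's nf_flowtable,
 * later actions only take a reference, and the final put defers teardown to
 * act_ct_wq after an RCU grace period.
 */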

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
        struct tcf_ct_flow_table *ct_ft;

        ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
                             rwork);
        nf_flow_table_free(&ct_ft->nf_ft);
        kfree(ct_ft);

        module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
        struct tcf_ct_flow_table *ct_ft = params->ct_ft;

        if (refcount_dec_and_test(&params->ct_ft->ref)) {
                rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
                INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
                queue_rcu_work(act_ct_wq, &ct_ft->rwork);
        }
}

static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
                                  struct nf_conn *ct,
                                  bool tcp)
{
        struct flow_offload *entry;
        int err;

        if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
                return;

        entry = flow_offload_alloc(ct);
        if (!entry) {
                WARN_ON_ONCE(1);
                goto err_alloc;
        }

        if (tcp) {
                ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
                ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
        }

        err = flow_offload_add(&ct_ft->nf_ft, entry);
        if (err)
                goto err_add;

        return;

err_add:
        flow_offload_free(entry);
err_alloc:
        clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
                                           struct nf_conn *ct,
                                           enum ip_conntrack_info ctinfo)
{
        bool tcp = false;

        if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
                return;

        switch (nf_ct_protonum(ct)) {
        case IPPROTO_TCP:
                tcp = true;
                if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
                        return;
                break;
        case IPPROTO_UDP:
                break;
        default:
                return;
        }

        if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
            ct->status & IPS_SEQ_ADJUST)
                return;

        tcf_ct_flow_table_add(ct_ft, ct, tcp);
}
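
/* Offload eligibility in tcf_ct_flow_table_process_conn() above, in short:
 * only established TCP and UDP connections qualify, and never ones using a
 * conntrack helper or TCP sequence adjustment, since those must keep seeing
 * every packet in the software path.
 */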

static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
                                  struct flow_offload_tuple *tuple,
                                  struct tcphdr **tcph)
{
        struct flow_ports *ports;
        unsigned int thoff;
        struct iphdr *iph;

        if (!pskb_network_may_pull(skb, sizeof(*iph)))
                return false;

        iph = ip_hdr(skb);
        thoff = iph->ihl * 4;

        if (ip_is_fragment(iph) ||
            unlikely(thoff != sizeof(struct iphdr)))
                return false;

        if (iph->protocol != IPPROTO_TCP &&
            iph->protocol != IPPROTO_UDP)
                return false;

        if (iph->ttl <= 1)
                return false;

        if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
                                        thoff + sizeof(struct tcphdr) :
                                        thoff + sizeof(*ports)))
                return false;

        iph = ip_hdr(skb);
        if (iph->protocol == IPPROTO_TCP)
                *tcph = (void *)(skb_network_header(skb) + thoff);

        ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
        tuple->src_v4.s_addr = iph->saddr;
        tuple->dst_v4.s_addr = iph->daddr;
        tuple->src_port = ports->source;
        tuple->dst_port = ports->dest;
        tuple->l3proto = AF_INET;
        tuple->l4proto = iph->protocol;

        return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
                                  struct flow_offload_tuple *tuple,
                                  struct tcphdr **tcph)
{
        struct flow_ports *ports;
        struct ipv6hdr *ip6h;
        unsigned int thoff;

        if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
                return false;

        ip6h = ipv6_hdr(skb);

        if (ip6h->nexthdr != IPPROTO_TCP &&
            ip6h->nexthdr != IPPROTO_UDP)
                return false;

        if (ip6h->hop_limit <= 1)
                return false;

        thoff = sizeof(*ip6h);
        if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
                                        thoff + sizeof(struct tcphdr) :
                                        thoff + sizeof(*ports)))
                return false;

        ip6h = ipv6_hdr(skb);
        if (ip6h->nexthdr == IPPROTO_TCP)
                *tcph = (void *)(skb_network_header(skb) + thoff);

        ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
        tuple->src_v6 = ip6h->saddr;
        tuple->dst_v6 = ip6h->daddr;
        tuple->src_port = ports->source;
        tuple->dst_port = ports->dest;
        tuple->l3proto = AF_INET6;
        tuple->l4proto = ip6h->nexthdr;

        return true;
}

static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
                                     struct sk_buff *skb,
                                     u8 family)
{
        struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
        struct flow_offload_tuple_rhash *tuplehash;
        struct flow_offload_tuple tuple = {};
        enum ip_conntrack_info ctinfo;
        struct tcphdr *tcph = NULL;
        struct flow_offload *flow;
        struct nf_conn *ct;
        u8 dir;

        /* Previously seen or loopback */
        ct = nf_ct_get(skb, &ctinfo);
        if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
                return false;

        switch (family) {
        case NFPROTO_IPV4:
                if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
                        return false;
                break;
        case NFPROTO_IPV6:
                if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
                        return false;
                break;
        default:
                return false;
        }

        tuplehash = flow_offload_lookup(nf_ft, &tuple);
        if (!tuplehash)
                return false;

        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
        ct = flow->ct;

        if (tcph && (unlikely(tcph->fin || tcph->rst))) {
                flow_offload_teardown(flow);
                return false;
        }

        ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
                                                    IP_CT_ESTABLISHED_REPLY;

        flow_offload_refresh(nf_ft, flow);
        nf_conntrack_get(&ct->ct_general);
        nf_ct_set(skb, ct, ctinfo);
        if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
                nf_ct_acct_update(ct, dir, skb->len);

        return true;
}
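
/* A hit in tcf_ct_flow_table_lookup() above is the software fast path: the
 * skb leaves with the same ct reference and ctinfo a full nf_conntrack_in()
 * pass would have set, the flow timeout is refreshed and accounting updated,
 * while TCP FIN/RST instead tears the flow down and falls back to conntrack.
 */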

static int tcf_ct_flow_tables_init(void)
{
        return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
        rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
        struct tc_action_net tn; /* Must be first */
        bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
                                   u16 zone_id, bool force)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct)
                return false;
        if (!net_eq(net, read_pnet(&ct->ct_net)))
                return false;
        if (nf_ct_zone(ct)->id != zone_id)
                return false;

        /* Force conntrack entry direction. */
        if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
                if (nf_ct_is_confirmed(ct))
                        nf_ct_kill(ct);

                nf_conntrack_put(&ct->ct_general);
                nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

                return false;
        }

        return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
        unsigned int len;
        int err;

        switch (family) {
        case NFPROTO_IPV4:
                len = ntohs(ip_hdr(skb)->tot_len);
                break;
        case NFPROTO_IPV6:
                len = sizeof(struct ipv6hdr)
                        + ntohs(ipv6_hdr(skb)->payload_len);
                break;
        default:
                len = skb->len;
        }

        err = pskb_trim_rcsum(skb, len);

        return err;
}
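
/* Illustrative arithmetic for tcf_ct_skb_network_trim(): Ethernet pads
 * frames to a 46-byte minimum payload, so a 40-byte IPv4 datagram (20-byte
 * IP header plus 20-byte TCP header, no data) arrives with skb->len == 46
 * at L3 while tot_len says 40; pskb_trim_rcsum() drops the 6 padding bytes
 * before conntrack sees the packet.
 */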

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
        u8 family = NFPROTO_UNSPEC;

        switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                family = NFPROTO_IPV4;
                break;
        case htons(ETH_P_IPV6):
                family = NFPROTO_IPV6;
                break;
        default:
                break;
        }

        return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
        unsigned int len;

        len = skb_network_offset(skb) + sizeof(struct iphdr);
        if (unlikely(skb->len < len))
                return -EINVAL;
        if (unlikely(!pskb_may_pull(skb, len)))
                return -ENOMEM;

        *frag = ip_is_fragment(ip_hdr(skb));
        return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
        unsigned int flags = 0, len, payload_ofs = 0;
        unsigned short frag_off;
        int nexthdr;

        len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
        if (unlikely(skb->len < len))
                return -EINVAL;
        if (unlikely(!pskb_may_pull(skb, len)))
                return -ENOMEM;

        nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
        if (unlikely(nexthdr < 0))
                return -EPROTO;

        *frag = flags & IP6_FH_F_FRAG;
        return 0;
}

static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
                                   u8 family, u16 zone, bool *defrag)
{
        enum ip_conntrack_info ctinfo;
        struct qdisc_skb_cb cb;
        struct nf_conn *ct;
        int err = 0;
        bool frag;

        /* Previously seen (loopback)? Ignore. */
        ct = nf_ct_get(skb, &ctinfo);
        if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
                return 0;

        if (family == NFPROTO_IPV4)
                err = tcf_ct_ipv4_is_fragment(skb, &frag);
        else
                err = tcf_ct_ipv6_is_fragment(skb, &frag);
        if (err || !frag)
                return err;

        skb_get(skb);
        cb = *qdisc_skb_cb(skb);

        if (family == NFPROTO_IPV4) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                local_bh_disable();
                err = ip_defrag(net, skb, user);
                local_bh_enable();
                if (err && err != -EINPROGRESS)
                        return err;

                if (!err) {
                        *defrag = true;
                        cb.mru = IPCB(skb)->frag_max_size;
                }
        } else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                err = nf_ct_frag6_gather(net, skb, user);
                if (err && err != -EINPROGRESS)
                        goto out_free;

                if (!err) {
                        *defrag = true;
                        cb.mru = IP6CB(skb)->frag_max_size;
                }
#else
                err = -EOPNOTSUPP;
                goto out_free;
#endif
        }

        if (err != -EINPROGRESS)
                *qdisc_skb_cb(skb) = cb;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        return err;

out_free:
        kfree_skb(skb);
        return err;
}
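
/* In tcf_ct_handle_fragments() above, -EINPROGRESS from the defrag engines
 * means this fragment was queued while reassembly waits for more; the caller
 * maps that to TC_ACT_STOLEN. On success (0) the skb now holds the fully
 * reassembled packet and the saved qdisc cb is restored with mru set to the
 * largest fragment size seen.
 */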

static void tcf_ct_params_free(struct rcu_head *head)
{
        struct tcf_ct_params *params = container_of(head,
                                                    struct tcf_ct_params, rcu);

        tcf_ct_flow_table_put(params);

        if (params->tmpl)
                nf_conntrack_put(&params->tmpl->ct_general);
        kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct nf_nat_range2 *range,
                          enum nf_nat_manip_type maniptype)
{
        __be16 proto = skb_protocol(skb, true);
        int hooknum, err = NF_ACCEPT;

        /* See HOOK2MANIP(). */
        if (maniptype == NF_NAT_MANIP_SRC)
                hooknum = NF_INET_LOCAL_IN; /* Source NAT */
        else
                hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
                if (proto == htons(ETH_P_IP) &&
                    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
                                                           hooknum))
                                err = NF_DROP;
                        goto out;
                } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
                        __be16 frag_off;
                        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
                        int hdrlen = ipv6_skip_exthdr(skb,
                                                      sizeof(struct ipv6hdr),
                                                      &nexthdr, &frag_off);

                        if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
                                if (!nf_nat_icmpv6_reply_translation(skb, ct,
                                                                     ctinfo,
                                                                     hooknum,
                                                                     hdrlen))
                                        err = NF_DROP;
                                goto out;
                        }
                }
                /* Non-ICMP, fall thru to initialize if needed. */
                fallthrough;
        case IP_CT_NEW:
                /* Seen it before? This can happen for loopback, retrans,
                 * or local packets.
                 */
                if (!nf_nat_initialized(ct, maniptype)) {
                        /* Initialize according to the NAT action. */
                        err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
                                /* Action is set up to establish a new
                                 * mapping.
                                 */
                                ? nf_nat_setup_info(ct, range, maniptype)
                                : nf_nat_alloc_null_binding(ct, hooknum);
                        if (err != NF_ACCEPT)
                                goto out;
                }
                break;

        case IP_CT_ESTABLISHED:
        case IP_CT_ESTABLISHED_REPLY:
                break;

        default:
                err = NF_DROP;
                goto out;
        }

        err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
        return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
        u32 new_mark;

        if (!mask)
                return;

        new_mark = mark | (ct->mark & ~(mask));
        if (ct->mark != new_mark) {
                ct->mark = new_mark;
                if (nf_ct_is_confirmed(ct))
                        nf_conntrack_event_cache(IPCT_MARK, ct);
        }
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
                                  u32 *labels,
                                  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
        size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

        if (!memchr_inv(labels_m, 0, labels_sz))
                return;

        nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
                          struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          int ct_action,
                          struct nf_nat_range2 *range,
                          bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
        int err;
        enum nf_nat_manip_type maniptype;

        if (!(ct_action & TCA_CT_ACT_NAT))
                return NF_ACCEPT;

        /* Add NAT extension if not confirmed yet. */
        if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
                return NF_DROP; /* Can't NAT. */

        if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
            (ctinfo != IP_CT_RELATED || commit)) {
                /* NAT an established or related connection like before. */
                if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
                        /* This is the REPLY direction for a connection
                         * for which NAT was applied in the forward
                         * direction. Do the reverse NAT.
                         */
                        maniptype = ct->status & IPS_SRC_NAT
                                ? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
                else
                        maniptype = ct->status & IPS_SRC_NAT
                                ? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
        } else if (ct_action & TCA_CT_ACT_NAT_SRC) {
                maniptype = NF_NAT_MANIP_SRC;
        } else if (ct_action & TCA_CT_ACT_NAT_DST) {
                maniptype = NF_NAT_MANIP_DST;
        } else {
                return NF_ACCEPT;
        }

        err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
        if (err == NF_ACCEPT &&
            ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
                if (maniptype == NF_NAT_MANIP_SRC)
                        maniptype = NF_NAT_MANIP_DST;
                else
                        maniptype = NF_NAT_MANIP_SRC;

                err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
        }
        return err;
#else
        return NF_ACCEPT;
#endif
}
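
/* When a connection carries both IPS_SRC_NAT and IPS_DST_NAT (e.g. snat and
 * dnat committed on the same flow), the second ct_nat_execute() call in
 * tcf_ct_act_nat() runs the complementary manipulation, so one pass through
 * the action rewrites both the source and the destination.
 */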

static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
                      struct tcf_result *res)
{
        struct net *net = dev_net(skb->dev);
        bool cached, commit, clear, force;
        enum ip_conntrack_info ctinfo;
        struct tcf_ct *c = to_ct(a);
        struct nf_conn *tmpl = NULL;
        struct nf_hook_state state;
        int nh_ofs, err, retval;
        struct tcf_ct_params *p;
        bool skip_add = false;
        bool defrag = false;
        struct nf_conn *ct;
        u8 family;

        p = rcu_dereference_bh(c->params);

        retval = READ_ONCE(c->tcf_action);
        commit = p->ct_action & TCA_CT_ACT_COMMIT;
        clear = p->ct_action & TCA_CT_ACT_CLEAR;
        force = p->ct_action & TCA_CT_ACT_FORCE;
        tmpl = p->tmpl;

        tcf_lastuse_update(&c->tcf_tm);

        if (clear) {
                qdisc_skb_cb(skb)->post_ct = false;
                ct = nf_ct_get(skb, &ctinfo);
                if (ct) {
                        nf_conntrack_put(&ct->ct_general);
                        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
                }

                goto out_clear;
        }

        family = tcf_ct_skb_nf_family(skb);
        if (family == NFPROTO_UNSPEC)
                goto drop;

        /* The conntrack module expects to be working at L3.
         * We also try to pull the IPv4/6 header to linear area
         */
        nh_ofs = skb_network_offset(skb);
        skb_pull_rcsum(skb, nh_ofs);
        err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
        if (err == -EINPROGRESS) {
                retval = TC_ACT_STOLEN;
                goto out_clear;
        }
        if (err)
                goto drop;

        err = tcf_ct_skb_network_trim(skb, family);
        if (err)
                goto drop;

        /* If we are recirculating packets to match on ct fields and
         * committing with a separate ct action, then we don't need to
         * actually run the packet through conntrack twice unless it's for a
         * different zone.
         */
        cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
        if (!cached) {
                if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
                        skip_add = true;
                        goto do_nat;
                }

                /* Associate skb with specified zone. */
                if (tmpl) {
                        nf_conntrack_put(skb_nfct(skb));
                        nf_conntrack_get(&tmpl->ct_general);
                        nf_ct_set(skb, tmpl, IP_CT_NEW);
                }

                state.hook = NF_INET_PRE_ROUTING;
                state.net = net;
                state.pf = family;
                err = nf_conntrack_in(skb, &state);
                if (err != NF_ACCEPT)
                        goto out_push;
        }

do_nat:
        ct = nf_ct_get(skb, &ctinfo);
        if (!ct)
                goto out_push;
        nf_ct_deliver_cached_events(ct);

        err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
        if (err != NF_ACCEPT)
                goto drop;

        if (commit) {
                tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
                tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

                /* This will take care of sending queued events
                 * even if the connection is already confirmed.
                 */
                nf_conntrack_confirm(skb);
        } else if (!skip_add) {
                tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
        }

out_push:
        skb_push_rcsum(skb, nh_ofs);

        qdisc_skb_cb(skb)->post_ct = true;
out_clear:
        tcf_action_update_bstats(&c->common, skb);
        if (defrag)
                qdisc_skb_cb(skb)->pkt_len = skb->len;
        return retval;

drop:
        tcf_action_inc_drop_qstats(&c->common);
        return TC_ACT_SHOT;
}
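
/* For illustration only, a hypothetical two-pass tc setup exercising this
 * action (iproute2 syntax, device names are placeholders):
 *
 *   tc filter add dev eth0 ingress prio 1 chain 0 proto ip flower \
 *           ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress prio 1 chain 1 proto ip flower \
 *           ct_state +trk+new action ct commit zone 1 pipe \
 *           action mirred egress redirect dev eth1
 *
 * The first rule sends untracked packets through conntrack (the clear/force/
 * commit keywords map to the TCA_CT_ACT_* flags above); the second matches
 * the resulting ct state and commits new connections.
 */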

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
        [TCA_CT_ACTION] = { .type = NLA_U16 },
        [TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
        [TCA_CT_ZONE] = { .type = NLA_U16 },
        [TCA_CT_MARK] = { .type = NLA_U32 },
        [TCA_CT_MARK_MASK] = { .type = NLA_U32 },
        [TCA_CT_LABELS] = { .type = NLA_BINARY,
                            .len = 128 / BITS_PER_BYTE },
        [TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
                                 .len = 128 / BITS_PER_BYTE },
        [TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
        [TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
        [TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
        [TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
        [TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
        [TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};
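
/* The label attributes in ct_policy are capped at 128 / BITS_PER_BYTE == 16
 * bytes, matching the 128-bit conntrack label space that
 * tcf_ct_act_set_labels() and the metadata action copy around.
 */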

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
                                  struct tc_ct *parm,
                                  struct nlattr **tb,
                                  struct netlink_ext_ack *extack)
{
        struct nf_nat_range2 *range;

        if (!(p->ct_action & TCA_CT_ACT_NAT))
                return 0;

        if (!IS_ENABLED(CONFIG_NF_NAT)) {
                NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
                return -EOPNOTSUPP;
        }

        if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
                return 0;

        if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
            (p->ct_action & TCA_CT_ACT_NAT_DST)) {
                NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
                return -EOPNOTSUPP;
        }

        range = &p->range;
        if (tb[TCA_CT_NAT_IPV4_MIN]) {
                struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

                p->ipv4_range = true;
                range->flags |= NF_NAT_RANGE_MAP_IPS;
                range->min_addr.ip =
                        nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

                range->max_addr.ip = max_attr ?
                                     nla_get_in_addr(max_attr) :
                                     range->min_addr.ip;
        } else if (tb[TCA_CT_NAT_IPV6_MIN]) {
                struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

                p->ipv4_range = false;
                range->flags |= NF_NAT_RANGE_MAP_IPS;
                range->min_addr.in6 =
                        nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

                range->max_addr.in6 = max_attr ?
                                      nla_get_in6_addr(max_attr) :
                                      range->min_addr.in6;
        }

        if (tb[TCA_CT_NAT_PORT_MIN]) {
                range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
                range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

                range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
                                       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
                                       range->min_proto.all;
        }

        return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
                               void *val, int val_type,
                               void *mask, int mask_type,
                               int len)
{
        if (!tb[val_type])
                return;
        nla_memcpy(val, tb[val_type], len);

        if (!mask)
                return;

        if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
                memset(mask, 0xff, len);
        else
                nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
                              struct tcf_ct_params *p,
                              struct tc_ct *parm,
                              struct nlattr **tb,
                              struct netlink_ext_ack *extack)
{
        struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
        struct nf_conntrack_zone zone;
        struct nf_conn *tmpl;
        int err;

        p->zone = NF_CT_DEFAULT_ZONE_ID;

        tcf_ct_set_key_val(tb,
                           &p->ct_action, TCA_CT_ACTION,
                           NULL, TCA_CT_UNSPEC,
                           sizeof(p->ct_action));

        if (p->ct_action & TCA_CT_ACT_CLEAR)
                return 0;

        err = tcf_ct_fill_params_nat(p, parm, tb, extack);
        if (err)
                return err;

        if (tb[TCA_CT_MARK]) {
                if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
                        NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
                        return -EOPNOTSUPP;
                }
                tcf_ct_set_key_val(tb,
                                   &p->mark, TCA_CT_MARK,
                                   &p->mark_mask, TCA_CT_MARK_MASK,
                                   sizeof(p->mark));
        }

        if (tb[TCA_CT_LABELS]) {
                if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
                        NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
                        return -EOPNOTSUPP;
                }

                if (!tn->labels) {
                        NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
                        return -EOPNOTSUPP;
                }
                tcf_ct_set_key_val(tb,
                                   p->labels, TCA_CT_LABELS,
                                   p->labels_mask, TCA_CT_LABELS_MASK,
                                   sizeof(p->labels));
        }

        if (tb[TCA_CT_ZONE]) {
                if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
                        NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
                        return -EOPNOTSUPP;
                }

                tcf_ct_set_key_val(tb,
                                   &p->zone, TCA_CT_ZONE,
                                   NULL, TCA_CT_UNSPEC,
                                   sizeof(p->zone));
        }

        if (p->zone == NF_CT_DEFAULT_ZONE_ID)
                return 0;

        nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
        tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
        if (!tmpl) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
                return -ENOMEM;
        }
        __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
        nf_conntrack_get(&tmpl->ct_general);
        p->tmpl = tmpl;

        return 0;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a,
                       int replace, int bind, bool rtnl_held,
                       struct tcf_proto *tp, u32 flags,
                       struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, ct_net_id);
        struct tcf_ct_params *params = NULL;
        struct nlattr *tb[TCA_CT_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tc_ct *parm;
        struct tcf_ct *c;
        int err, res = 0;
        u32 index;

        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_CT_PARMS]) {
                NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
                return -EINVAL;
        }
        parm = nla_data(tb[TCA_CT_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;

        if (!err) {
                err = tcf_idr_create_from_flags(tn, index, est, a,
                                                &act_ct_ops, bind, flags);
                if (err) {
                        tcf_idr_cleanup(tn, index);
                        return err;
                }
                res = ACT_P_CREATED;
        } else {
                if (bind)
                        return 0;

                if (!replace) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        }
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto cleanup;

        c = to_ct(*a);

        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (unlikely(!params)) {
                err = -ENOMEM;
                goto cleanup;
        }

        err = tcf_ct_fill_params(net, params, parm, tb, extack);
        if (err)
                goto cleanup;

        err = tcf_ct_flow_table_get(params);
        if (err)
                goto cleanup;

        spin_lock_bh(&c->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        params = rcu_replace_pointer(c->params, params,
                                     lockdep_is_held(&c->tcf_lock));
        spin_unlock_bh(&c->tcf_lock);

        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        if (params)
                call_rcu(&params->rcu, tcf_ct_params_free);

        return res;

cleanup:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        kfree(params);
        tcf_idr_release(*a, bind);
        return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
        struct tcf_ct_params *params;
        struct tcf_ct *c = to_ct(a);

        params = rcu_dereference_protected(c->params, 1);
        if (params)
                call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
                               void *val, int val_type,
                               void *mask, int mask_type,
                               int len)
{
        int err;

        if (mask && !memchr_inv(mask, 0, len))
                return 0;

        err = nla_put(skb, val_type, len, val);
        if (err)
                return err;

        if (mask_type != TCA_CT_UNSPEC) {
                err = nla_put(skb, mask_type, len, mask);
                if (err)
                        return err;
        }

        return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
        struct nf_nat_range2 *range = &p->range;

        if (!(p->ct_action & TCA_CT_ACT_NAT))
                return 0;

        if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
                return 0;

        if (range->flags & NF_NAT_RANGE_MAP_IPS) {
                if (p->ipv4_range) {
                        if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
                                            range->min_addr.ip))
                                return -1;
                        if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
                                            range->max_addr.ip))
                                return -1;
                } else {
                        if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
                                             &range->min_addr.in6))
                                return -1;
                        if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
                                             &range->max_addr.in6))
                                return -1;
                }
        }

        if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
                if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
                                 range->min_proto.all))
                        return -1;
                if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
                                 range->max_proto.all))
                        return -1;
        }

        return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
                              int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_ct *c = to_ct(a);
        struct tcf_ct_params *p;

        struct tc_ct opt = {
                .index = c->tcf_index,
                .refcnt = refcount_read(&c->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
        };
        struct tcf_t t;

        spin_lock_bh(&c->tcf_lock);
        p = rcu_dereference_protected(c->params,
                                      lockdep_is_held(&c->tcf_lock));
        opt.action = c->tcf_action;

        if (tcf_ct_dump_key_val(skb,
                                &p->ct_action, TCA_CT_ACTION,
                                NULL, TCA_CT_UNSPEC,
                                sizeof(p->ct_action)))
                goto nla_put_failure;

        if (p->ct_action & TCA_CT_ACT_CLEAR)
                goto skip_dump;

        if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
            tcf_ct_dump_key_val(skb,
                                &p->mark, TCA_CT_MARK,
                                &p->mark_mask, TCA_CT_MARK_MASK,
                                sizeof(p->mark)))
                goto nla_put_failure;

        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
            tcf_ct_dump_key_val(skb,
                                p->labels, TCA_CT_LABELS,
                                p->labels_mask, TCA_CT_LABELS_MASK,
                                sizeof(p->labels)))
                goto nla_put_failure;

        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
            tcf_ct_dump_key_val(skb,
                                &p->zone, TCA_CT_ZONE,
                                NULL, TCA_CT_UNSPEC,
                                sizeof(p->zone)))
                goto nla_put_failure;

        if (tcf_ct_dump_nat(skb, p))
                goto nla_put_failure;

skip_dump:
        if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &c->tcf_tm);
        if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&c->tcf_lock);

        return skb->len;
nla_put_failure:
        spin_unlock_bh(&c->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
                         struct netlink_callback *cb, int type,
                         const struct tc_action_ops *ops,
                         struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, ct_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, ct_net_id);

        return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
                             u64 drops, u64 lastuse, bool hw)
{
        struct tcf_ct *c = to_ct(a);

        tcf_action_update_stats(a, bytes, packets, drops, hw);
        c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
        .kind = "ct",
        .id = TCA_ID_CT,
        .owner = THIS_MODULE,
        .act = tcf_ct_act,
        .dump = tcf_ct_dump,
        .init = tcf_ct_init,
        .cleanup = tcf_ct_cleanup,
        .walk = tcf_ct_walker,
        .lookup = tcf_ct_search,
        .stats_update = tcf_stats_update,
        .size = sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
        unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
        struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

        if (nf_connlabels_get(net, n_bits - 1)) {
                tn->labels = false;
                pr_err("act_ct: Failed to set connlabels length");
        } else {
                tn->labels = true;
        }

        return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
        struct net *net;

        rtnl_lock();
        list_for_each_entry(net, net_list, exit_list) {
                struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

                if (tn->labels)
                        nf_connlabels_put(net);
        }
        rtnl_unlock();

        tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
        .init = ct_init_net,
        .exit_batch = ct_exit_net,
        .id = &ct_net_id,
        .size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
        int err;

        act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
        if (!act_ct_wq)
                return -ENOMEM;

        err = tcf_ct_flow_tables_init();
        if (err)
                goto err_tbl_init;

        err = tcf_register_action(&act_ct_ops, &ct_net_ops);
        if (err)
                goto err_register;

        static_branch_inc(&tcf_frag_xmit_count);

        return 0;

err_register:
        tcf_ct_flow_tables_uninit();
err_tbl_init:
        destroy_workqueue(act_ct_wq);
        return err;
}

static void __exit ct_cleanup_module(void)
{
        static_branch_dec(&tcf_frag_xmit_count);
        tcf_unregister_action(&act_ct_ops, &ct_net_ops);
        tcf_ct_flow_tables_uninit();
        destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");