1 /* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general troublemaking from user space.
3  *
4  * (C) 2001 by Jay Schulist <jschlst@samba.org>
5  * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6  * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7  * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
8  *
9  * Initial connection tracking via netlink development funded and
10  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
11  *
12  * Further development of this code funded by Astaro AG (http://www.astaro.com)
13  *
14  * This software may be used and distributed according to the terms
15  * of the GNU General Public License, incorporated herein by reference.
16  */
17 
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32 #include <linux/siphash.h>
33 
34 #include <linux/netfilter.h>
35 #include <net/netlink.h>
36 #include <net/sock.h>
37 #include <net/netfilter/nf_conntrack.h>
38 #include <net/netfilter/nf_conntrack_core.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_seqadj.h>
42 #include <net/netfilter/nf_conntrack_l4proto.h>
43 #include <net/netfilter/nf_conntrack_tuple.h>
44 #include <net/netfilter/nf_conntrack_acct.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_conntrack_labels.h>
48 #include <net/netfilter/nf_conntrack_synproxy.h>
49 #if IS_ENABLED(CONFIG_NF_NAT)
50 #include <net/netfilter/nf_nat.h>
51 #include <net/netfilter/nf_nat_helper.h>
52 #endif
53 
54 #include <linux/netfilter/nfnetlink.h>
55 #include <linux/netfilter/nfnetlink_conntrack.h>
56 
57 MODULE_LICENSE("GPL");
58 
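/* The ctnetlink_dump_*() helpers below append attributes to the message
 * being built and return 0 on success or a negative value once the skb
 * runs out of tailroom; the caller then cancels the partial message.
 */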
59 static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
60 				const struct nf_conntrack_tuple *tuple,
61 				const struct nf_conntrack_l4proto *l4proto)
62 {
63 	int ret = 0;
64 	struct nlattr *nest_parms;
65 
66 	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
67 	if (!nest_parms)
68 		goto nla_put_failure;
69 	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
70 		goto nla_put_failure;
71 
72 	if (likely(l4proto->tuple_to_nlattr))
73 		ret = l4proto->tuple_to_nlattr(skb, tuple);
74 
75 	nla_nest_end(skb, nest_parms);
76 
77 	return ret;
78 
79 nla_put_failure:
80 	return -1;
81 }
82 
83 static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
84 				const struct nf_conntrack_tuple *tuple)
85 {
86 	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
87 	    nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
88 		return -EMSGSIZE;
89 	return 0;
90 }
91 
92 static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
93 				const struct nf_conntrack_tuple *tuple)
94 {
95 	if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
96 	    nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
97 		return -EMSGSIZE;
98 	return 0;
99 }
100 
101 static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
102 				    const struct nf_conntrack_tuple *tuple)
103 {
104 	int ret = 0;
105 	struct nlattr *nest_parms;
106 
107 	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
108 	if (!nest_parms)
109 		goto nla_put_failure;
110 
111 	switch (tuple->src.l3num) {
112 	case NFPROTO_IPV4:
113 		ret = ipv4_tuple_to_nlattr(skb, tuple);
114 		break;
115 	case NFPROTO_IPV6:
116 		ret = ipv6_tuple_to_nlattr(skb, tuple);
117 		break;
118 	}
119 
120 	nla_nest_end(skb, nest_parms);
121 
122 	return ret;
123 
124 nla_put_failure:
125 	return -1;
126 }
127 
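/* Dump the IP and layer 4 parts of a tuple.  The l4proto lookup is
 * RCU protected, hence the read-side critical section around it.
 */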
128 static int ctnetlink_dump_tuples(struct sk_buff *skb,
129 				 const struct nf_conntrack_tuple *tuple)
130 {
131 	const struct nf_conntrack_l4proto *l4proto;
132 	int ret;
133 
134 	rcu_read_lock();
135 	ret = ctnetlink_dump_tuples_ip(skb, tuple);
136 
137 	if (ret >= 0) {
138 		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
139 		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
140 	}
141 	rcu_read_unlock();
142 	return ret;
143 }
144 
145 static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
146 				  const struct nf_conntrack_zone *zone, int dir)
147 {
148 	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
149 		return 0;
150 	if (nla_put_be16(skb, attrtype, htons(zone->id)))
151 		goto nla_put_failure;
152 	return 0;
153 
154 nla_put_failure:
155 	return -1;
156 }
157 
158 static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
159 {
160 	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
161 		goto nla_put_failure;
162 	return 0;
163 
164 nla_put_failure:
165 	return -1;
166 }
167 
168 static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
169 {
170 	long timeout = nf_ct_expires(ct) / HZ;
171 
172 	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
173 		goto nla_put_failure;
174 	return 0;
175 
176 nla_put_failure:
177 	return -1;
178 }
179 
180 static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
181 {
182 	const struct nf_conntrack_l4proto *l4proto;
183 	struct nlattr *nest_proto;
184 	int ret;
185 
186 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
187 	if (!l4proto->to_nlattr)
188 		return 0;
189 
190 	nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
191 	if (!nest_proto)
192 		goto nla_put_failure;
193 
194 	ret = l4proto->to_nlattr(skb, nest_proto, ct);
195 
196 	nla_nest_end(skb, nest_proto);
197 
198 	return ret;
199 
200 nla_put_failure:
201 	return -1;
202 }
203 
204 static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
205 				   const struct nf_conn *ct)
206 {
207 	struct nlattr *nest_helper;
208 	const struct nf_conn_help *help = nfct_help(ct);
209 	struct nf_conntrack_helper *helper;
210 
211 	if (!help)
212 		return 0;
213 
214 	helper = rcu_dereference(help->helper);
215 	if (!helper)
216 		goto out;
217 
218 	nest_helper = nla_nest_start(skb, CTA_HELP);
219 	if (!nest_helper)
220 		goto nla_put_failure;
221 	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
222 		goto nla_put_failure;
223 
224 	if (helper->to_nlattr)
225 		helper->to_nlattr(skb, ct);
226 
227 	nla_nest_end(skb, nest_helper);
228 out:
229 	return 0;
230 
231 nla_put_failure:
232 	return -1;
233 }
234 
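/* Dump the packet/byte counters of one direction.  A CTRZERO request
 * reads and resets the counters atomically via atomic64_xchg(), a plain
 * GET only reads them.
 */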
235 static int
236 dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
237 	      enum ip_conntrack_dir dir, int type)
238 {
	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
240 	struct nf_conn_counter *counter = acct->counter;
241 	struct nlattr *nest_count;
242 	u64 pkts, bytes;
243 
244 	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
245 		pkts = atomic64_xchg(&counter[dir].packets, 0);
246 		bytes = atomic64_xchg(&counter[dir].bytes, 0);
247 	} else {
248 		pkts = atomic64_read(&counter[dir].packets);
249 		bytes = atomic64_read(&counter[dir].bytes);
250 	}
251 
252 	nest_count = nla_nest_start(skb, attr);
253 	if (!nest_count)
254 		goto nla_put_failure;
255 
256 	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
257 			 CTA_COUNTERS_PAD) ||
258 	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
259 			 CTA_COUNTERS_PAD))
260 		goto nla_put_failure;
261 
262 	nla_nest_end(skb, nest_count);
263 
264 	return 0;
265 
266 nla_put_failure:
267 	return -1;
268 }
269 
270 static int
271 ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
272 {
273 	struct nf_conn_acct *acct = nf_conn_acct_find(ct);
274 
275 	if (!acct)
276 		return 0;
277 
278 	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
279 		return -1;
280 	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
281 		return -1;
282 
283 	return 0;
284 }
285 
286 static int
287 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
288 {
289 	struct nlattr *nest_count;
290 	const struct nf_conn_tstamp *tstamp;
291 
292 	tstamp = nf_conn_tstamp_find(ct);
293 	if (!tstamp)
294 		return 0;
295 
296 	nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
297 	if (!nest_count)
298 		goto nla_put_failure;
299 
300 	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
301 			 CTA_TIMESTAMP_PAD) ||
302 	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
303 					       cpu_to_be64(tstamp->stop),
304 					       CTA_TIMESTAMP_PAD)))
305 		goto nla_put_failure;
306 	nla_nest_end(skb, nest_count);
307 
308 	return 0;
309 
310 nla_put_failure:
311 	return -1;
312 }
313 
314 #ifdef CONFIG_NF_CONNTRACK_MARK
315 static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
316 {
317 	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
318 		goto nla_put_failure;
319 	return 0;
320 
321 nla_put_failure:
322 	return -1;
323 }
324 #else
325 #define ctnetlink_dump_mark(a, b) (0)
326 #endif
327 
328 #ifdef CONFIG_NF_CONNTRACK_SECMARK
329 static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
330 {
331 	struct nlattr *nest_secctx;
332 	int len, ret;
333 	char *secctx;
334 
335 	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
336 	if (ret)
337 		return 0;
338 
339 	ret = -1;
340 	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
341 	if (!nest_secctx)
342 		goto nla_put_failure;
343 
344 	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
345 		goto nla_put_failure;
346 	nla_nest_end(skb, nest_secctx);
347 
348 	ret = 0;
349 nla_put_failure:
350 	security_release_secctx(secctx, len);
351 	return ret;
352 }
353 #else
354 #define ctnetlink_dump_secctx(a, b) (0)
355 #endif
356 
357 #ifdef CONFIG_NF_CONNTRACK_LABELS
358 static inline int ctnetlink_label_size(const struct nf_conn *ct)
359 {
360 	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
361 
362 	if (!labels)
363 		return 0;
364 	return nla_total_size(sizeof(labels->bits));
365 }
366 
367 static int
368 ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
369 {
370 	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
371 	unsigned int i;
372 
373 	if (!labels)
374 		return 0;
375 
376 	i = 0;
377 	do {
378 		if (labels->bits[i] != 0)
379 			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
380 				       labels->bits);
381 		i++;
382 	} while (i < ARRAY_SIZE(labels->bits));
383 
384 	return 0;
385 }
386 #else
387 #define ctnetlink_dump_labels(a, b) (0)
388 #define ctnetlink_label_size(a)	(0)
389 #endif
390 
391 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
392 
393 static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
394 {
395 	struct nlattr *nest_parms;
396 
397 	if (!(ct->status & IPS_EXPECTED))
398 		return 0;
399 
400 	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
401 	if (!nest_parms)
402 		goto nla_put_failure;
403 	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
404 		goto nla_put_failure;
405 	nla_nest_end(skb, nest_parms);
406 
407 	return 0;
408 
409 nla_put_failure:
410 	return -1;
411 }
412 
413 static int
414 dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
415 {
416 	struct nlattr *nest_parms;
417 
418 	nest_parms = nla_nest_start(skb, type);
419 	if (!nest_parms)
420 		goto nla_put_failure;
421 
422 	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
423 			 htonl(seq->correction_pos)) ||
424 	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
425 			 htonl(seq->offset_before)) ||
426 	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
427 			 htonl(seq->offset_after)))
428 		goto nla_put_failure;
429 
430 	nla_nest_end(skb, nest_parms);
431 
432 	return 0;
433 
434 nla_put_failure:
435 	return -1;
436 }
437 
438 static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
439 {
440 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
441 	struct nf_ct_seqadj *seq;
442 
443 	if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
444 		return 0;
445 
446 	spin_lock_bh(&ct->lock);
447 	seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
448 	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
449 		goto err;
450 
451 	seq = &seqadj->seq[IP_CT_DIR_REPLY];
452 	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
453 		goto err;
454 
455 	spin_unlock_bh(&ct->lock);
456 	return 0;
457 err:
458 	spin_unlock_bh(&ct->lock);
459 	return -1;
460 }
461 
462 static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
463 {
464 	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
465 	struct nlattr *nest_parms;
466 
467 	if (!synproxy)
468 		return 0;
469 
470 	nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
471 	if (!nest_parms)
472 		goto nla_put_failure;
473 
474 	if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
475 	    nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
476 	    nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
477 		goto nla_put_failure;
478 
479 	nla_nest_end(skb, nest_parms);
480 
481 	return 0;
482 
483 nla_put_failure:
484 	return -1;
485 }
486 
487 static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
488 {
489 	__be32 id = (__force __be32)nf_ct_get_id(ct);
490 
491 	if (nla_put_be32(skb, CTA_ID, id))
492 		goto nla_put_failure;
493 	return 0;
494 
495 nla_put_failure:
496 	return -1;
497 }
498 
499 static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
500 {
501 	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
502 		goto nla_put_failure;
503 	return 0;
504 
505 nla_put_failure:
506 	return -1;
507 }
508 
509 /* all these functions access ct->ext. Caller must either hold a reference
510  * on ct or prevent its deletion by holding either the bucket spinlock or
511  * pcpu dying list lock.
512  */
513 static int ctnetlink_dump_extinfo(struct sk_buff *skb,
514 				  struct nf_conn *ct, u32 type)
515 {
516 	if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
517 	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
518 	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
519 	    ctnetlink_dump_labels(skb, ct) < 0 ||
520 	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
521 	    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
522 		return -1;
523 
524 	return 0;
525 }
526 
527 static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
528 {
529 	if (ctnetlink_dump_status(skb, ct) < 0 ||
530 	    ctnetlink_dump_mark(skb, ct) < 0 ||
531 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
532 	    ctnetlink_dump_id(skb, ct) < 0 ||
533 	    ctnetlink_dump_use(skb, ct) < 0 ||
534 	    ctnetlink_dump_master(skb, ct) < 0)
535 		return -1;
536 
537 	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
538 	    (ctnetlink_dump_timeout(skb, ct) < 0 ||
539 	     ctnetlink_dump_protoinfo(skb, ct) < 0))
540 		return -1;
541 
542 	return 0;
543 }
544 
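/* Build one complete conntrack message: both tuples with their zones,
 * the generic state (status, timeout, mark, id, ...) and, if @extinfo
 * is true, the extension data (accounting, timestamps, helper, labels,
 * seqadj, synproxy).  Returns the new skb length, or -1 on failure.
 */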
545 static int
546 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
547 		    struct nf_conn *ct, bool extinfo)
548 {
549 	const struct nf_conntrack_zone *zone;
550 	struct nlmsghdr *nlh;
551 	struct nfgenmsg *nfmsg;
552 	struct nlattr *nest_parms;
553 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
554 
555 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
556 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
557 	if (nlh == NULL)
558 		goto nlmsg_failure;
559 
560 	nfmsg = nlmsg_data(nlh);
561 	nfmsg->nfgen_family = nf_ct_l3num(ct);
562 	nfmsg->version      = NFNETLINK_V0;
563 	nfmsg->res_id	    = 0;
564 
565 	zone = nf_ct_zone(ct);
566 
567 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
568 	if (!nest_parms)
569 		goto nla_put_failure;
570 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
571 		goto nla_put_failure;
572 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
573 				   NF_CT_ZONE_DIR_ORIG) < 0)
574 		goto nla_put_failure;
575 	nla_nest_end(skb, nest_parms);
576 
577 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
578 	if (!nest_parms)
579 		goto nla_put_failure;
580 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
581 		goto nla_put_failure;
582 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
583 				   NF_CT_ZONE_DIR_REPL) < 0)
584 		goto nla_put_failure;
585 	nla_nest_end(skb, nest_parms);
586 
587 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
588 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
589 		goto nla_put_failure;
590 
591 	if (ctnetlink_dump_info(skb, ct) < 0)
592 		goto nla_put_failure;
593 	if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
594 		goto nla_put_failure;
595 
596 	nlmsg_end(skb, nlh);
597 	return skb->len;
598 
599 nlmsg_failure:
600 nla_put_failure:
601 	nlmsg_cancel(skb, nlh);
602 	return -1;
603 }
604 
605 static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
606 	[CTA_IP_V4_SRC]	= { .type = NLA_U32 },
607 	[CTA_IP_V4_DST]	= { .type = NLA_U32 },
608 	[CTA_IP_V6_SRC]	= { .len = sizeof(__be32) * 4 },
609 	[CTA_IP_V6_DST]	= { .len = sizeof(__be32) * 4 },
610 };
611 
612 #if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
613 static size_t ctnetlink_proto_size(const struct nf_conn *ct)
614 {
615 	const struct nf_conntrack_l4proto *l4proto;
616 	size_t len, len4 = 0;
617 
618 	len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
619 	len *= 3u; /* ORIG, REPLY, MASTER */
620 
621 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
622 	len += l4proto->nlattr_size;
623 	if (l4proto->nlattr_tuple_size) {
624 		len4 = l4proto->nlattr_tuple_size();
625 		len4 *= 3u; /* ORIG, REPLY, MASTER */
626 	}
627 
628 	return len + len4;
629 }
630 #endif
631 
632 static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
633 {
634 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
635 		return 0;
636 	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
637 	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
638 	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
639 	       ;
640 }
641 
642 static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
643 {
644 #ifdef CONFIG_NF_CONNTRACK_SECMARK
645 	int len, ret;
646 
647 	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
648 	if (ret)
649 		return 0;
650 
651 	return nla_total_size(0) /* CTA_SECCTX */
652 	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
653 #else
654 	return 0;
655 #endif
656 }
657 
658 static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
659 {
660 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
661 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
662 		return 0;
663 	return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
664 #else
665 	return 0;
666 #endif
667 }
668 
669 #ifdef CONFIG_NF_CONNTRACK_EVENTS
670 static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
671 {
672 	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
673 	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
674 	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
675 	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
676 	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
677 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
678 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
679 	       + ctnetlink_acct_size(ct)
680 	       + ctnetlink_timestamp_size(ct)
681 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
682 	       + nla_total_size(0) /* CTA_PROTOINFO */
683 	       + nla_total_size(0) /* CTA_HELP */
684 	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
685 	       + ctnetlink_secctx_size(ct)
686 #if IS_ENABLED(CONFIG_NF_NAT)
687 	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
688 	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
689 #endif
690 #ifdef CONFIG_NF_CONNTRACK_MARK
691 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
692 #endif
693 #ifdef CONFIG_NF_CONNTRACK_ZONES
694 	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
695 #endif
696 	       + ctnetlink_proto_size(ct)
697 	       + ctnetlink_label_size(ct)
698 	       ;
699 }
700 
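/* Event notifier: map the IPCT_* event bits to a message type and
 * multicast group, build the notification and send it.  A negative
 * return value signals delivery failure so the event can be
 * redelivered later.
 */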
701 static int
702 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
703 {
704 	const struct nf_conntrack_zone *zone;
705 	struct net *net;
706 	struct nlmsghdr *nlh;
707 	struct nfgenmsg *nfmsg;
708 	struct nlattr *nest_parms;
709 	struct nf_conn *ct = item->ct;
710 	struct sk_buff *skb;
711 	unsigned int type;
712 	unsigned int flags = 0, group;
713 	int err;
714 
715 	if (events & (1 << IPCT_DESTROY)) {
716 		type = IPCTNL_MSG_CT_DELETE;
717 		group = NFNLGRP_CONNTRACK_DESTROY;
718 	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
719 		type = IPCTNL_MSG_CT_NEW;
720 		flags = NLM_F_CREATE|NLM_F_EXCL;
721 		group = NFNLGRP_CONNTRACK_NEW;
722 	} else if (events) {
723 		type = IPCTNL_MSG_CT_NEW;
724 		group = NFNLGRP_CONNTRACK_UPDATE;
725 	} else
726 		return 0;
727 
728 	net = nf_ct_net(ct);
729 	if (!item->report && !nfnetlink_has_listeners(net, group))
730 		return 0;
731 
732 	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
733 	if (skb == NULL)
734 		goto errout;
735 
736 	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
737 	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
738 	if (nlh == NULL)
739 		goto nlmsg_failure;
740 
741 	nfmsg = nlmsg_data(nlh);
742 	nfmsg->nfgen_family = nf_ct_l3num(ct);
743 	nfmsg->version	= NFNETLINK_V0;
744 	nfmsg->res_id	= 0;
745 
746 	zone = nf_ct_zone(ct);
747 
748 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
749 	if (!nest_parms)
750 		goto nla_put_failure;
751 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
752 		goto nla_put_failure;
753 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
754 				   NF_CT_ZONE_DIR_ORIG) < 0)
755 		goto nla_put_failure;
756 	nla_nest_end(skb, nest_parms);
757 
758 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
759 	if (!nest_parms)
760 		goto nla_put_failure;
761 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
762 		goto nla_put_failure;
763 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
764 				   NF_CT_ZONE_DIR_REPL) < 0)
765 		goto nla_put_failure;
766 	nla_nest_end(skb, nest_parms);
767 
768 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
769 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
770 		goto nla_put_failure;
771 
772 	if (ctnetlink_dump_id(skb, ct) < 0)
773 		goto nla_put_failure;
774 
775 	if (ctnetlink_dump_status(skb, ct) < 0)
776 		goto nla_put_failure;
777 
778 	if (events & (1 << IPCT_DESTROY)) {
779 		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
780 		    ctnetlink_dump_timestamp(skb, ct) < 0)
781 			goto nla_put_failure;
782 	} else {
783 		if (ctnetlink_dump_timeout(skb, ct) < 0)
784 			goto nla_put_failure;
785 
786 		if (events & (1 << IPCT_PROTOINFO)
787 		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
788 			goto nla_put_failure;
789 
790 		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
791 		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
792 			goto nla_put_failure;
793 
794 #ifdef CONFIG_NF_CONNTRACK_SECMARK
795 		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
796 		    && ctnetlink_dump_secctx(skb, ct) < 0)
797 			goto nla_put_failure;
798 #endif
799 		if (events & (1 << IPCT_LABEL) &&
800 		     ctnetlink_dump_labels(skb, ct) < 0)
801 			goto nla_put_failure;
802 
803 		if (events & (1 << IPCT_RELATED) &&
804 		    ctnetlink_dump_master(skb, ct) < 0)
805 			goto nla_put_failure;
806 
807 		if (events & (1 << IPCT_SEQADJ) &&
808 		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
809 			goto nla_put_failure;
810 
811 		if (events & (1 << IPCT_SYNPROXY) &&
812 		    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
813 			goto nla_put_failure;
814 	}
815 
816 #ifdef CONFIG_NF_CONNTRACK_MARK
817 	if ((events & (1 << IPCT_MARK) || ct->mark)
818 	    && ctnetlink_dump_mark(skb, ct) < 0)
819 		goto nla_put_failure;
820 #endif
821 	nlmsg_end(skb, nlh);
822 	err = nfnetlink_send(skb, net, item->portid, group, item->report,
823 			     GFP_ATOMIC);
824 	if (err == -ENOBUFS || err == -EAGAIN)
825 		return -ENOBUFS;
826 
827 	return 0;
828 
829 nla_put_failure:
830 	nlmsg_cancel(skb, nlh);
831 nlmsg_failure:
832 	kfree_skb(skb);
833 errout:
834 	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
835 		return -ENOBUFS;
836 
837 	return 0;
838 }
839 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
840 
841 static int ctnetlink_done(struct netlink_callback *cb)
842 {
843 	if (cb->args[1])
844 		nf_ct_put((struct nf_conn *)cb->args[1]);
845 	kfree(cb->data);
846 	return 0;
847 }
848 
849 struct ctnetlink_filter {
850 	u8 family;
851 	struct {
852 		u_int32_t val;
853 		u_int32_t mask;
854 	} mark;
855 };
856 
857 static struct ctnetlink_filter *
858 ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
859 {
860 	struct ctnetlink_filter *filter;
861 
862 #ifndef CONFIG_NF_CONNTRACK_MARK
863 	if (cda[CTA_MARK] && cda[CTA_MARK_MASK])
864 		return ERR_PTR(-EOPNOTSUPP);
865 #endif
866 
867 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
868 	if (filter == NULL)
869 		return ERR_PTR(-ENOMEM);
870 
871 	filter->family = family;
872 
873 #ifdef CONFIG_NF_CONNTRACK_MARK
874 	if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
875 		filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
876 		filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
877 	}
878 #endif
879 	return filter;
880 }
881 
882 static int ctnetlink_start(struct netlink_callback *cb)
883 {
884 	const struct nlattr * const *cda = cb->data;
885 	struct ctnetlink_filter *filter = NULL;
886 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
887 	u8 family = nfmsg->nfgen_family;
888 
889 	if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
890 		filter = ctnetlink_alloc_filter(cda, family);
891 		if (IS_ERR(filter))
892 			return PTR_ERR(filter);
893 	}
894 
895 	cb->data = filter;
896 	return 0;
897 }
898 
899 static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
900 {
901 	struct ctnetlink_filter *filter = data;
902 
903 	if (filter == NULL)
904 		goto out;
905 
906 	/* Match entries of a given L3 protocol number.
	 * If it is not specified, i.e. l3proto == 0,
908 	 * then match everything.
909 	 */
910 	if (filter->family && nf_ct_l3num(ct) != filter->family)
911 		goto ignore_entry;
912 
913 #ifdef CONFIG_NF_CONNTRACK_MARK
914 	if ((ct->mark & filter->mark.mask) != filter->mark.val)
915 		goto ignore_entry;
916 #endif
917 
918 out:
919 	return 1;
920 
921 ignore_entry:
922 	return 0;
923 }
924 
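/* Walk the conntrack hash table and dump every entry that passes the
 * optional filter.  The dump is resumable: cb->args[0] holds the next
 * bucket, cb->args[1] a reference to the last entry that did not fit in
 * the skb.  Expired entries seen on the way are collected and reaped
 * once the bucket lock has been dropped.
 */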
925 static int
926 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
927 {
928 	struct net *net = sock_net(skb->sk);
929 	struct nf_conn *ct, *last;
930 	struct nf_conntrack_tuple_hash *h;
931 	struct hlist_nulls_node *n;
932 	struct nf_conn *nf_ct_evict[8];
933 	int res, i;
934 	spinlock_t *lockp;
935 
936 	last = (struct nf_conn *)cb->args[1];
937 	i = 0;
938 
939 	local_bh_disable();
940 	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
941 restart:
942 		while (i) {
943 			i--;
944 			if (nf_ct_should_gc(nf_ct_evict[i]))
945 				nf_ct_kill(nf_ct_evict[i]);
946 			nf_ct_put(nf_ct_evict[i]);
947 		}
948 
949 		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
950 		nf_conntrack_lock(lockp);
951 		if (cb->args[0] >= nf_conntrack_htable_size) {
952 			spin_unlock(lockp);
953 			goto out;
954 		}
955 		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
956 					   hnnode) {
957 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
958 				continue;
959 			ct = nf_ct_tuplehash_to_ctrack(h);
960 			if (nf_ct_is_expired(ct)) {
961 				if (i < ARRAY_SIZE(nf_ct_evict) &&
962 				    atomic_inc_not_zero(&ct->ct_general.use))
963 					nf_ct_evict[i++] = ct;
964 				continue;
965 			}
966 
967 			if (!net_eq(net, nf_ct_net(ct)))
968 				continue;
969 
970 			if (cb->args[1]) {
971 				if (ct != last)
972 					continue;
973 				cb->args[1] = 0;
974 			}
975 			if (!ctnetlink_filter_match(ct, cb->data))
976 				continue;
977 
978 			res =
979 			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
980 					    cb->nlh->nlmsg_seq,
981 					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
982 					    ct, true);
983 			if (res < 0) {
984 				nf_conntrack_get(&ct->ct_general);
985 				cb->args[1] = (unsigned long)ct;
986 				spin_unlock(lockp);
987 				goto out;
988 			}
989 		}
990 		spin_unlock(lockp);
991 		if (cb->args[1]) {
992 			cb->args[1] = 0;
993 			goto restart;
994 		}
995 	}
996 out:
997 	local_bh_enable();
998 	if (last) {
		/* the conntrack hash was resized, clear the stale resume pointer. */
1000 		if ((struct nf_conn *)cb->args[1] == last)
1001 			cb->args[1] = 0;
1002 
1003 		nf_ct_put(last);
1004 	}
1005 
1006 	while (i) {
1007 		i--;
1008 		if (nf_ct_should_gc(nf_ct_evict[i]))
1009 			nf_ct_kill(nf_ct_evict[i]);
1010 		nf_ct_put(nf_ct_evict[i]);
1011 	}
1012 
1013 	return skb->len;
1014 }
1015 
1016 static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
1017 				struct nf_conntrack_tuple *t)
1018 {
1019 	if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST])
1020 		return -EINVAL;
1021 
1022 	t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
1023 	t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
1024 
1025 	return 0;
1026 }
1027 
1028 static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
1029 				struct nf_conntrack_tuple *t)
1030 {
1031 	if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST])
1032 		return -EINVAL;
1033 
1034 	t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
1035 	t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
1036 
1037 	return 0;
1038 }
1039 
1040 static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
1041 				    struct nf_conntrack_tuple *tuple)
1042 {
1043 	struct nlattr *tb[CTA_IP_MAX+1];
1044 	int ret = 0;
1045 
1046 	ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr, NULL, NULL);
1047 	if (ret < 0)
1048 		return ret;
1049 
1050 	ret = nla_validate_nested_deprecated(attr, CTA_IP_MAX,
1051 					     cta_ip_nla_policy, NULL);
1052 	if (ret)
1053 		return ret;
1054 
1055 	switch (tuple->src.l3num) {
1056 	case NFPROTO_IPV4:
1057 		ret = ipv4_nlattr_to_tuple(tb, tuple);
1058 		break;
1059 	case NFPROTO_IPV6:
1060 		ret = ipv6_nlattr_to_tuple(tb, tuple);
1061 		break;
1062 	}
1063 
1064 	return ret;
1065 }
1066 
1067 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
1068 	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
1069 };
1070 
1071 static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
1072 				       struct nf_conntrack_tuple *tuple)
1073 {
1074 	const struct nf_conntrack_l4proto *l4proto;
1075 	struct nlattr *tb[CTA_PROTO_MAX+1];
1076 	int ret = 0;
1077 
1078 	ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
1079 					  proto_nla_policy, NULL);
1080 	if (ret < 0)
1081 		return ret;
1082 
1083 	if (!tb[CTA_PROTO_NUM])
1084 		return -EINVAL;
1085 	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
1086 
1087 	rcu_read_lock();
1088 	l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
1089 
1090 	if (likely(l4proto->nlattr_to_tuple)) {
1091 		ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
1092 						     l4proto->nla_policy,
1093 						     NULL);
1094 		if (ret == 0)
1095 			ret = l4proto->nlattr_to_tuple(tb, tuple);
1096 	}
1097 
1098 	rcu_read_unlock();
1099 
1100 	return ret;
1101 }
1102 
1103 static int
1104 ctnetlink_parse_zone(const struct nlattr *attr,
1105 		     struct nf_conntrack_zone *zone)
1106 {
1107 	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
1108 			NF_CT_DEFAULT_ZONE_DIR, 0);
1109 #ifdef CONFIG_NF_CONNTRACK_ZONES
1110 	if (attr)
1111 		zone->id = ntohs(nla_get_be16(attr));
1112 #else
1113 	if (attr)
1114 		return -EOPNOTSUPP;
1115 #endif
1116 	return 0;
1117 }
1118 
1119 static int
1120 ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
1121 			   struct nf_conntrack_zone *zone)
1122 {
1123 	int ret;
1124 
1125 	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
1126 		return -EINVAL;
1127 
1128 	ret = ctnetlink_parse_zone(attr, zone);
1129 	if (ret < 0)
1130 		return ret;
1131 
1132 	if (type == CTA_TUPLE_REPLY)
1133 		zone->dir = NF_CT_ZONE_DIR_REPL;
1134 	else
1135 		zone->dir = NF_CT_ZONE_DIR_ORIG;
1136 
1137 	return 0;
1138 }
1139 
1140 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
1141 	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
1142 	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
1143 	[CTA_TUPLE_ZONE]	= { .type = NLA_U16 },
1144 };
1145 
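/* Parse a CTA_TUPLE_ORIG/REPLY/MASTER nested attribute into a tuple.
 * The IP and protocol parts are mandatory; an optional CTA_TUPLE_ZONE
 * sets the zone for this direction.
 */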
1146 static int
1147 ctnetlink_parse_tuple(const struct nlattr * const cda[],
1148 		      struct nf_conntrack_tuple *tuple, u32 type,
1149 		      u_int8_t l3num, struct nf_conntrack_zone *zone)
1150 {
1151 	struct nlattr *tb[CTA_TUPLE_MAX+1];
1152 	int err;
1153 
1154 	memset(tuple, 0, sizeof(*tuple));
1155 
1156 	err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
1157 					  tuple_nla_policy, NULL);
1158 	if (err < 0)
1159 		return err;
1160 
1161 	if (!tb[CTA_TUPLE_IP])
1162 		return -EINVAL;
1163 
1164 	tuple->src.l3num = l3num;
1165 
1166 	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
1167 	if (err < 0)
1168 		return err;
1169 
1170 	if (!tb[CTA_TUPLE_PROTO])
1171 		return -EINVAL;
1172 
1173 	err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
1174 	if (err < 0)
1175 		return err;
1176 
1177 	if (tb[CTA_TUPLE_ZONE]) {
1178 		if (!zone)
1179 			return -EINVAL;
1180 
1181 		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
1182 						 type, zone);
1183 		if (err < 0)
1184 			return err;
1185 	}
1186 
1187 	/* orig and expect tuples get DIR_ORIGINAL */
1188 	if (type == CTA_TUPLE_REPLY)
1189 		tuple->dst.dir = IP_CT_DIR_REPLY;
1190 	else
1191 		tuple->dst.dir = IP_CT_DIR_ORIGINAL;
1192 
1193 	return 0;
1194 }
1195 
1196 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
1197 	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
1198 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
1199 };
1200 
1201 static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
1202 				struct nlattr **helpinfo)
1203 {
1204 	int err;
1205 	struct nlattr *tb[CTA_HELP_MAX+1];
1206 
1207 	err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
1208 					  help_nla_policy, NULL);
1209 	if (err < 0)
1210 		return err;
1211 
1212 	if (!tb[CTA_HELP_NAME])
1213 		return -EINVAL;
1214 
1215 	*helper_name = nla_data(tb[CTA_HELP_NAME]);
1216 
1217 	if (tb[CTA_HELP_INFO])
1218 		*helpinfo = tb[CTA_HELP_INFO];
1219 
1220 	return 0;
1221 }
1222 
1223 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
1224 	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
1225 	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
1226 	[CTA_STATUS] 		= { .type = NLA_U32 },
1227 	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
1228 	[CTA_HELP]		= { .type = NLA_NESTED },
1229 	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
1230 	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
1231 	[CTA_MARK]		= { .type = NLA_U32 },
1232 	[CTA_ID]		= { .type = NLA_U32 },
1233 	[CTA_NAT_DST]		= { .type = NLA_NESTED },
1234 	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
1235 	[CTA_NAT_SEQ_ADJ_ORIG]  = { .type = NLA_NESTED },
1236 	[CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
1237 	[CTA_ZONE]		= { .type = NLA_U16 },
1238 	[CTA_MARK_MASK]		= { .type = NLA_U32 },
1239 	[CTA_LABELS]		= { .type = NLA_BINARY,
1240 				    .len = NF_CT_LABELS_MAX_SIZE },
1241 	[CTA_LABELS_MASK]	= { .type = NLA_BINARY,
1242 				    .len = NF_CT_LABELS_MAX_SIZE },
1243 };
1244 
1245 static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
1246 {
1247 	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
1248 		return 0;
1249 
1250 	return ctnetlink_filter_match(ct, data);
1251 }
1252 
1253 static int ctnetlink_flush_conntrack(struct net *net,
1254 				     const struct nlattr * const cda[],
1255 				     u32 portid, int report, u8 family)
1256 {
1257 	struct ctnetlink_filter *filter = NULL;
1258 
1259 	if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
1260 		filter = ctnetlink_alloc_filter(cda, family);
1261 		if (IS_ERR(filter))
1262 			return PTR_ERR(filter);
1263 	}
1264 
1265 	nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
1266 				  portid, report);
1267 	kfree(filter);
1268 
1269 	return 0;
1270 }
1271 
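/* IPCTNL_MSG_CT_DELETE handler.  Without a tuple the whole table is
 * flushed (honouring the optional family/mark filter); with a tuple a
 * single entry is looked up, optionally checked against CTA_ID and
 * deleted.  Entries owned by flow offload (IPS_OFFLOAD) return -EBUSY.
 */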
1272 static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
1273 				   struct sk_buff *skb,
1274 				   const struct nlmsghdr *nlh,
1275 				   const struct nlattr * const cda[],
1276 				   struct netlink_ext_ack *extack)
1277 {
1278 	struct nf_conntrack_tuple_hash *h;
1279 	struct nf_conntrack_tuple tuple;
1280 	struct nf_conn *ct;
1281 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1282 	struct nf_conntrack_zone zone;
1283 	int err;
1284 
1285 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1286 	if (err < 0)
1287 		return err;
1288 
1289 	if (cda[CTA_TUPLE_ORIG])
1290 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1291 					    nfmsg->nfgen_family, &zone);
1292 	else if (cda[CTA_TUPLE_REPLY])
1293 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1294 					    nfmsg->nfgen_family, &zone);
1295 	else {
1296 		u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
1297 
1298 		return ctnetlink_flush_conntrack(net, cda,
1299 						 NETLINK_CB(skb).portid,
1300 						 nlmsg_report(nlh), u3);
1301 	}
1302 
1303 	if (err < 0)
1304 		return err;
1305 
1306 	h = nf_conntrack_find_get(net, &zone, &tuple);
1307 	if (!h)
1308 		return -ENOENT;
1309 
1310 	ct = nf_ct_tuplehash_to_ctrack(h);
1311 
1312 	if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
1313 		nf_ct_put(ct);
1314 		return -EBUSY;
1315 	}
1316 
1317 	if (cda[CTA_ID]) {
1318 		__be32 id = nla_get_be32(cda[CTA_ID]);
1319 
1320 		if (id != (__force __be32)nf_ct_get_id(ct)) {
1321 			nf_ct_put(ct);
1322 			return -ENOENT;
1323 		}
1324 	}
1325 
1326 	nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
1327 	nf_ct_put(ct);
1328 
1329 	return 0;
1330 }
1331 
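/* IPCTNL_MSG_CT_GET handler.  NLM_F_DUMP starts a (possibly filtered)
 * table dump; otherwise a single entry is looked up by tuple and sent
 * back as a unicast reply.
 */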
1332 static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
1333 				   struct sk_buff *skb,
1334 				   const struct nlmsghdr *nlh,
1335 				   const struct nlattr * const cda[],
1336 				   struct netlink_ext_ack *extack)
1337 {
1338 	struct nf_conntrack_tuple_hash *h;
1339 	struct nf_conntrack_tuple tuple;
1340 	struct nf_conn *ct;
1341 	struct sk_buff *skb2 = NULL;
1342 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1343 	u_int8_t u3 = nfmsg->nfgen_family;
1344 	struct nf_conntrack_zone zone;
1345 	int err;
1346 
1347 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1348 		struct netlink_dump_control c = {
1349 			.start = ctnetlink_start,
1350 			.dump = ctnetlink_dump_table,
1351 			.done = ctnetlink_done,
1352 			.data = (void *)cda,
1353 		};
1354 
1355 		return netlink_dump_start(ctnl, skb, nlh, &c);
1356 	}
1357 
1358 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1359 	if (err < 0)
1360 		return err;
1361 
1362 	if (cda[CTA_TUPLE_ORIG])
1363 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
1364 					    u3, &zone);
1365 	else if (cda[CTA_TUPLE_REPLY])
1366 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
1367 					    u3, &zone);
1368 	else
1369 		return -EINVAL;
1370 
1371 	if (err < 0)
1372 		return err;
1373 
1374 	h = nf_conntrack_find_get(net, &zone, &tuple);
1375 	if (!h)
1376 		return -ENOENT;
1377 
1378 	ct = nf_ct_tuplehash_to_ctrack(h);
1379 
1380 	err = -ENOMEM;
1381 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1382 	if (skb2 == NULL) {
1383 		nf_ct_put(ct);
1384 		return -ENOMEM;
1385 	}
1386 
1387 	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1388 				  NFNL_MSG_TYPE(nlh->nlmsg_type), ct, true);
1389 	nf_ct_put(ct);
1390 	if (err <= 0)
1391 		goto free;
1392 
1393 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1394 	if (err < 0)
1395 		goto out;
1396 
1397 	return 0;
1398 
1399 free:
1400 	kfree_skb(skb2);
1401 out:
	/* map -EAGAIN to -ENOBUFS to avoid a retry loop in nfnetlink. */
1403 	return err == -EAGAIN ? -ENOBUFS : err;
1404 }
1405 
1406 static int ctnetlink_done_list(struct netlink_callback *cb)
1407 {
1408 	if (cb->args[1])
1409 		nf_ct_put((struct nf_conn *)cb->args[1]);
1410 	return 0;
1411 }
1412 
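/* Dump the per-cpu dying or unconfirmed lists.  cb->args[0] tracks the
 * cpu, cb->args[1] the last dumped entry and cb->args[2] marks the walk
 * as finished.
 */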
1413 static int
1414 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1415 {
1416 	struct nf_conn *ct, *last;
1417 	struct nf_conntrack_tuple_hash *h;
1418 	struct hlist_nulls_node *n;
1419 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1420 	u_int8_t l3proto = nfmsg->nfgen_family;
1421 	int res;
1422 	int cpu;
1423 	struct hlist_nulls_head *list;
1424 	struct net *net = sock_net(skb->sk);
1425 
1426 	if (cb->args[2])
1427 		return 0;
1428 
1429 	last = (struct nf_conn *)cb->args[1];
1430 
1431 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1432 		struct ct_pcpu *pcpu;
1433 
1434 		if (!cpu_possible(cpu))
1435 			continue;
1436 
1437 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1438 		spin_lock_bh(&pcpu->lock);
1439 		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1440 restart:
1441 		hlist_nulls_for_each_entry(h, n, list, hnnode) {
1442 			ct = nf_ct_tuplehash_to_ctrack(h);
1443 			if (l3proto && nf_ct_l3num(ct) != l3proto)
1444 				continue;
1445 			if (cb->args[1]) {
1446 				if (ct != last)
1447 					continue;
1448 				cb->args[1] = 0;
1449 			}
1450 
1451 			/* We can't dump extension info for the unconfirmed
1452 			 * list because unconfirmed conntracks can have
1453 			 * ct->ext reallocated (and thus freed).
1454 			 *
			 * In the dying list case ct->ext can't be freed
1456 			 * until after we drop pcpu->lock.
1457 			 */
1458 			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1459 						  cb->nlh->nlmsg_seq,
1460 						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1461 						  ct, dying ? true : false);
1462 			if (res < 0) {
1463 				if (!atomic_inc_not_zero(&ct->ct_general.use))
1464 					continue;
1465 				cb->args[0] = cpu;
1466 				cb->args[1] = (unsigned long)ct;
1467 				spin_unlock_bh(&pcpu->lock);
1468 				goto out;
1469 			}
1470 		}
1471 		if (cb->args[1]) {
1472 			cb->args[1] = 0;
1473 			goto restart;
1474 		}
1475 		spin_unlock_bh(&pcpu->lock);
1476 	}
1477 	cb->args[2] = 1;
1478 out:
1479 	if (last)
1480 		nf_ct_put(last);
1481 
1482 	return skb->len;
1483 }
1484 
1485 static int
1486 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1487 {
1488 	return ctnetlink_dump_list(skb, cb, true);
1489 }
1490 
1491 static int ctnetlink_get_ct_dying(struct net *net, struct sock *ctnl,
1492 				  struct sk_buff *skb,
1493 				  const struct nlmsghdr *nlh,
1494 				  const struct nlattr * const cda[],
1495 				  struct netlink_ext_ack *extack)
1496 {
1497 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1498 		struct netlink_dump_control c = {
1499 			.dump = ctnetlink_dump_dying,
1500 			.done = ctnetlink_done_list,
1501 		};
1502 		return netlink_dump_start(ctnl, skb, nlh, &c);
1503 	}
1504 
1505 	return -EOPNOTSUPP;
1506 }
1507 
1508 static int
1509 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1510 {
1511 	return ctnetlink_dump_list(skb, cb, false);
1512 }
1513 
1514 static int ctnetlink_get_ct_unconfirmed(struct net *net, struct sock *ctnl,
1515 					struct sk_buff *skb,
1516 					const struct nlmsghdr *nlh,
1517 					const struct nlattr * const cda[],
1518 					struct netlink_ext_ack *extack)
1519 {
1520 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1521 		struct netlink_dump_control c = {
1522 			.dump = ctnetlink_dump_unconfirmed,
1523 			.done = ctnetlink_done_list,
1524 		};
1525 		return netlink_dump_start(ctnl, skb, nlh, &c);
1526 	}
1527 
1528 	return -EOPNOTSUPP;
1529 }
1530 
1531 #if IS_ENABLED(CONFIG_NF_NAT)
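/* Hand a CTA_NAT_SRC/DST attribute to the NAT core.  If the NAT hook or
 * the per-family NAT module is missing, the RCU read lock and the nfnl
 * mutex are dropped temporarily to load it via request_module(), and
 * -EAGAIN asks the caller to retry.
 */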
1532 static int
1533 ctnetlink_parse_nat_setup(struct nf_conn *ct,
1534 			  enum nf_nat_manip_type manip,
1535 			  const struct nlattr *attr)
1536 {
1537 	struct nf_nat_hook *nat_hook;
1538 	int err;
1539 
1540 	nat_hook = rcu_dereference(nf_nat_hook);
1541 	if (!nat_hook) {
1542 #ifdef CONFIG_MODULES
1543 		rcu_read_unlock();
1544 		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1545 		if (request_module("nf-nat") < 0) {
1546 			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1547 			rcu_read_lock();
1548 			return -EOPNOTSUPP;
1549 		}
1550 		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1551 		rcu_read_lock();
1552 		nat_hook = rcu_dereference(nf_nat_hook);
1553 		if (nat_hook)
1554 			return -EAGAIN;
1555 #endif
1556 		return -EOPNOTSUPP;
1557 	}
1558 
1559 	err = nat_hook->parse_nat_setup(ct, manip, attr);
1560 	if (err == -EAGAIN) {
1561 #ifdef CONFIG_MODULES
1562 		rcu_read_unlock();
1563 		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1564 		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1565 			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1566 			rcu_read_lock();
1567 			return -EOPNOTSUPP;
1568 		}
1569 		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1570 		rcu_read_lock();
1571 #else
1572 		err = -EOPNOTSUPP;
1573 #endif
1574 	}
1575 	return err;
1576 }
1577 #endif
1578 
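/* Apply the @on/@off masks to ct->status one bit at a time, using atomic
 * bit ops so concurrent changes to other bits are not lost.  Bits in
 * IPS_UNCHANGEABLE_MASK are never touched from here.
 */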
1579 static void
1580 __ctnetlink_change_status(struct nf_conn *ct, unsigned long on,
1581 			  unsigned long off)
1582 {
1583 	unsigned int bit;
1584 
	/* Ignore these unchangeable bits */
1586 	on &= ~IPS_UNCHANGEABLE_MASK;
1587 	off &= ~IPS_UNCHANGEABLE_MASK;
1588 
1589 	for (bit = 0; bit < __IPS_MAX_BIT; bit++) {
1590 		if (on & (1 << bit))
1591 			set_bit(bit, &ct->status);
1592 		else if (off & (1 << bit))
1593 			clear_bit(bit, &ct->status);
1594 	}
1595 }
1596 
1597 static int
1598 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1599 {
1600 	unsigned long d;
1601 	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1602 	d = ct->status ^ status;
1603 
1604 	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1605 		/* unchangeable */
1606 		return -EBUSY;
1607 
1608 	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1609 		/* SEEN_REPLY bit can only be set */
1610 		return -EBUSY;
1611 
1612 	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1613 		/* ASSURED bit can only be set */
1614 		return -EBUSY;
1615 
1616 	__ctnetlink_change_status(ct, status, 0);
1617 	return 0;
1618 }
1619 
1620 static int
1621 ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1622 {
1623 #if IS_ENABLED(CONFIG_NF_NAT)
1624 	int ret;
1625 
1626 	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1627 		return 0;
1628 
1629 	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
1630 					cda[CTA_NAT_DST]);
1631 	if (ret < 0)
1632 		return ret;
1633 
1634 	return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
1635 					 cda[CTA_NAT_SRC]);
1636 #else
1637 	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1638 		return 0;
1639 	return -EOPNOTSUPP;
1640 #endif
1641 }
1642 
1643 static int ctnetlink_change_helper(struct nf_conn *ct,
1644 				   const struct nlattr * const cda[])
1645 {
1646 	struct nf_conntrack_helper *helper;
1647 	struct nf_conn_help *help = nfct_help(ct);
1648 	char *helpname = NULL;
1649 	struct nlattr *helpinfo = NULL;
1650 	int err;
1651 
1652 	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1653 	if (err < 0)
1654 		return err;
1655 
1656 	/* don't change helper of sibling connections */
1657 	if (ct->master) {
1658 		/* If we try to change the helper to the same thing twice,
1659 		 * treat the second attempt as a no-op instead of returning
1660 		 * an error.
1661 		 */
1662 		err = -EBUSY;
1663 		if (help) {
1664 			rcu_read_lock();
1665 			helper = rcu_dereference(help->helper);
1666 			if (helper && !strcmp(helper->name, helpname))
1667 				err = 0;
1668 			rcu_read_unlock();
1669 		}
1670 
1671 		return err;
1672 	}
1673 
1674 	if (!strcmp(helpname, "")) {
1675 		if (help && help->helper) {
1676 			/* we had a helper before ... */
1677 			nf_ct_remove_expectations(ct);
1678 			RCU_INIT_POINTER(help->helper, NULL);
1679 		}
1680 
1681 		return 0;
1682 	}
1683 
1684 	rcu_read_lock();
1685 	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1686 					    nf_ct_protonum(ct));
1687 	if (helper == NULL) {
1688 		rcu_read_unlock();
1689 		return -EOPNOTSUPP;
1690 	}
1691 
1692 	if (help) {
1693 		if (help->helper == helper) {
1694 			/* update private helper data if allowed. */
1695 			if (helper->from_nlattr)
1696 				helper->from_nlattr(helpinfo, ct);
1697 			err = 0;
1698 		} else
1699 			err = -EBUSY;
1700 	} else {
1701 		/* we cannot set a helper for an existing conntrack */
1702 		err = -EOPNOTSUPP;
1703 	}
1704 
1705 	rcu_read_unlock();
1706 	return err;
1707 }
1708 
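/* Set the timeout from CTA_TIMEOUT (in seconds).  The value is converted
 * to jiffies and clamped to INT_MAX; if the entry is already dying the
 * update is too late and -ETIME is returned.
 */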
1709 static int ctnetlink_change_timeout(struct nf_conn *ct,
1710 				    const struct nlattr * const cda[])
1711 {
1712 	u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1713 
1714 	if (timeout > INT_MAX)
1715 		timeout = INT_MAX;
1716 	ct->timeout = nfct_time_stamp + (u32)timeout;
1717 
1718 	if (test_bit(IPS_DYING_BIT, &ct->status))
1719 		return -ETIME;
1720 
1721 	return 0;
1722 }
1723 
1724 #if defined(CONFIG_NF_CONNTRACK_MARK)
1725 static void ctnetlink_change_mark(struct nf_conn *ct,
1726 				    const struct nlattr * const cda[])
1727 {
1728 	u32 mark, newmark, mask = 0;
1729 
1730 	if (cda[CTA_MARK_MASK])
1731 		mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1732 
1733 	mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1734 	newmark = (ct->mark & mask) ^ mark;
1735 	if (newmark != ct->mark)
1736 		ct->mark = newmark;
1737 }
1738 #endif
1739 
1740 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1741 	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
1742 	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
1743 	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
1744 };
1745 
1746 static int ctnetlink_change_protoinfo(struct nf_conn *ct,
1747 				      const struct nlattr * const cda[])
1748 {
1749 	const struct nlattr *attr = cda[CTA_PROTOINFO];
1750 	const struct nf_conntrack_l4proto *l4proto;
1751 	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1752 	int err = 0;
1753 
1754 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
1755 					  protoinfo_policy, NULL);
1756 	if (err < 0)
1757 		return err;
1758 
1759 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
1760 	if (l4proto->from_nlattr)
1761 		err = l4proto->from_nlattr(tb, ct);
1762 
1763 	return err;
1764 }
1765 
1766 static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
1767 	[CTA_SEQADJ_CORRECTION_POS]	= { .type = NLA_U32 },
1768 	[CTA_SEQADJ_OFFSET_BEFORE]	= { .type = NLA_U32 },
1769 	[CTA_SEQADJ_OFFSET_AFTER]	= { .type = NLA_U32 },
1770 };
1771 
1772 static int change_seq_adj(struct nf_ct_seqadj *seq,
1773 			  const struct nlattr * const attr)
1774 {
1775 	int err;
1776 	struct nlattr *cda[CTA_SEQADJ_MAX+1];
1777 
1778 	err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
1779 					  seqadj_policy, NULL);
1780 	if (err < 0)
1781 		return err;
1782 
1783 	if (!cda[CTA_SEQADJ_CORRECTION_POS])
1784 		return -EINVAL;
1785 
1786 	seq->correction_pos =
1787 		ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
1788 
1789 	if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
1790 		return -EINVAL;
1791 
1792 	seq->offset_before =
1793 		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
1794 
1795 	if (!cda[CTA_SEQADJ_OFFSET_AFTER])
1796 		return -EINVAL;
1797 
1798 	seq->offset_after =
1799 		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
1800 
1801 	return 0;
1802 }
1803 
1804 static int
1805 ctnetlink_change_seq_adj(struct nf_conn *ct,
1806 			 const struct nlattr * const cda[])
1807 {
1808 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
1809 	int ret = 0;
1810 
1811 	if (!seqadj)
1812 		return 0;
1813 
1814 	spin_lock_bh(&ct->lock);
1815 	if (cda[CTA_SEQ_ADJ_ORIG]) {
1816 		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
1817 				     cda[CTA_SEQ_ADJ_ORIG]);
1818 		if (ret < 0)
1819 			goto err;
1820 
1821 		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
1822 	}
1823 
1824 	if (cda[CTA_SEQ_ADJ_REPLY]) {
1825 		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
1826 				     cda[CTA_SEQ_ADJ_REPLY]);
1827 		if (ret < 0)
1828 			goto err;
1829 
1830 		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
1831 	}
1832 
1833 	spin_unlock_bh(&ct->lock);
1834 	return 0;
1835 err:
1836 	spin_unlock_bh(&ct->lock);
1837 	return ret;
1838 }
1839 
1840 static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = {
1841 	[CTA_SYNPROXY_ISN]	= { .type = NLA_U32 },
1842 	[CTA_SYNPROXY_ITS]	= { .type = NLA_U32 },
1843 	[CTA_SYNPROXY_TSOFF]	= { .type = NLA_U32 },
1844 };
1845 
1846 static int ctnetlink_change_synproxy(struct nf_conn *ct,
1847 				     const struct nlattr * const cda[])
1848 {
1849 	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
1850 	struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
1851 	int err;
1852 
1853 	if (!synproxy)
1854 		return 0;
1855 
1856 	err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
1857 					  cda[CTA_SYNPROXY], synproxy_policy,
1858 					  NULL);
1859 	if (err < 0)
1860 		return err;
1861 
1862 	if (!tb[CTA_SYNPROXY_ISN] ||
1863 	    !tb[CTA_SYNPROXY_ITS] ||
1864 	    !tb[CTA_SYNPROXY_TSOFF])
1865 		return -EINVAL;
1866 
1867 	synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
1868 	synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
1869 	synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
1870 
1871 	return 0;
1872 }
1873 
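/* Replace the connection labels, or update only the bits selected by
 * CTA_LABELS_MASK.  The label bitmap must be a multiple of 32 bits and
 * the mask, if given, must have the same length.
 */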
1874 static int
1875 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
1876 {
1877 #ifdef CONFIG_NF_CONNTRACK_LABELS
1878 	size_t len = nla_len(cda[CTA_LABELS]);
1879 	const void *mask = cda[CTA_LABELS_MASK];
1880 
1881 	if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
1882 		return -EINVAL;
1883 
1884 	if (mask) {
1885 		if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
1886 		    nla_len(cda[CTA_LABELS_MASK]) != len)
1887 			return -EINVAL;
1888 		mask = nla_data(cda[CTA_LABELS_MASK]);
1889 	}
1890 
1891 	len /= sizeof(u32);
1892 
1893 	return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
1894 #else
1895 	return -EOPNOTSUPP;
1896 #endif
1897 }
1898 
1899 static int
1900 ctnetlink_change_conntrack(struct nf_conn *ct,
1901 			   const struct nlattr * const cda[])
1902 {
1903 	int err;
1904 
	/* only allow NAT changes and master assignment for new conntracks */
1906 	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1907 		return -EOPNOTSUPP;
1908 
1909 	if (cda[CTA_HELP]) {
1910 		err = ctnetlink_change_helper(ct, cda);
1911 		if (err < 0)
1912 			return err;
1913 	}
1914 
1915 	if (cda[CTA_TIMEOUT]) {
1916 		err = ctnetlink_change_timeout(ct, cda);
1917 		if (err < 0)
1918 			return err;
1919 	}
1920 
1921 	if (cda[CTA_STATUS]) {
1922 		err = ctnetlink_change_status(ct, cda);
1923 		if (err < 0)
1924 			return err;
1925 	}
1926 
1927 	if (cda[CTA_PROTOINFO]) {
1928 		err = ctnetlink_change_protoinfo(ct, cda);
1929 		if (err < 0)
1930 			return err;
1931 	}
1932 
1933 #if defined(CONFIG_NF_CONNTRACK_MARK)
1934 	if (cda[CTA_MARK])
1935 		ctnetlink_change_mark(ct, cda);
1936 #endif
1937 
1938 	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
1939 		err = ctnetlink_change_seq_adj(ct, cda);
1940 		if (err < 0)
1941 			return err;
1942 	}
1943 
1944 	if (cda[CTA_SYNPROXY]) {
1945 		err = ctnetlink_change_synproxy(ct, cda);
1946 		if (err < 0)
1947 			return err;
1948 	}
1949 
1950 	if (cda[CTA_LABELS]) {
1951 		err = ctnetlink_attach_labels(ct, cda);
1952 		if (err < 0)
1953 			return err;
1954 	}
1955 
1956 	return 0;
1957 }
1958 
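/* Create a new conntrack entry from netlink attributes.  CTA_TIMEOUT is
 * mandatory.  All extensions are attached before the entry is inserted
 * into the hash table, and IPS_CONFIRMED is set because entries created
 * through ctnetlink never go through the normal confirmation path.
 * Returns the new entry or an ERR_PTR().
 */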
1959 static struct nf_conn *
1960 ctnetlink_create_conntrack(struct net *net,
1961 			   const struct nf_conntrack_zone *zone,
1962 			   const struct nlattr * const cda[],
1963 			   struct nf_conntrack_tuple *otuple,
1964 			   struct nf_conntrack_tuple *rtuple,
1965 			   u8 u3)
1966 {
1967 	struct nf_conn *ct;
1968 	int err = -EINVAL;
1969 	struct nf_conntrack_helper *helper;
1970 	struct nf_conn_tstamp *tstamp;
1971 	u64 timeout;
1972 
1973 	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1974 	if (IS_ERR(ct))
1975 		return ERR_PTR(-ENOMEM);
1976 
1977 	if (!cda[CTA_TIMEOUT])
1978 		goto err1;
1979 
1980 	timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1981 	if (timeout > INT_MAX)
1982 		timeout = INT_MAX;
1983 	ct->timeout = (u32)timeout + nfct_time_stamp;
1984 
1985 	rcu_read_lock();
	if (cda[CTA_HELP]) {
1987 		char *helpname = NULL;
1988 		struct nlattr *helpinfo = NULL;
1989 
1990 		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
		if (err < 0)
1992 			goto err2;
1993 
1994 		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1995 						    nf_ct_protonum(ct));
1996 		if (helper == NULL) {
1997 			rcu_read_unlock();
1998 #ifdef CONFIG_MODULES
1999 			if (request_module("nfct-helper-%s", helpname) < 0) {
2000 				err = -EOPNOTSUPP;
2001 				goto err1;
2002 			}
2003 
2004 			rcu_read_lock();
2005 			helper = __nf_conntrack_helper_find(helpname,
2006 							    nf_ct_l3num(ct),
2007 							    nf_ct_protonum(ct));
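			/* the helper showed up after the module load, but the
			 * RCU lock was dropped meanwhile; return -EAGAIN so
			 * that nfnetlink replays this request.
			 */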
2008 			if (helper) {
2009 				err = -EAGAIN;
2010 				goto err2;
2011 			}
2012 			rcu_read_unlock();
2013 #endif
2014 			err = -EOPNOTSUPP;
2015 			goto err1;
2016 		} else {
2017 			struct nf_conn_help *help;
2018 
2019 			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
2020 			if (help == NULL) {
2021 				err = -ENOMEM;
2022 				goto err2;
2023 			}
2024 			/* set private helper data if allowed. */
2025 			if (helper->from_nlattr)
2026 				helper->from_nlattr(helpinfo, ct);
2027 
2028 			/* not in hash table yet so not strictly necessary */
2029 			RCU_INIT_POINTER(help->helper, helper);
2030 		}
2031 	} else {
2032 		/* try an implicit helper assignment */
2033 		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
2034 		if (err < 0)
2035 			goto err2;
2036 	}
2037 
2038 	err = ctnetlink_setup_nat(ct, cda);
2039 	if (err < 0)
2040 		goto err2;
2041 
2042 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
2043 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
2044 	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
2045 	nf_ct_labels_ext_add(ct);
2046 	nfct_seqadj_ext_add(ct);
2047 	nfct_synproxy_ext_add(ct);
2048 
2049 	/* we must add conntrack extensions before confirmation. */
2050 	ct->status |= IPS_CONFIRMED;
2051 
2052 	if (cda[CTA_STATUS]) {
2053 		err = ctnetlink_change_status(ct, cda);
2054 		if (err < 0)
2055 			goto err2;
2056 	}
2057 
2058 	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2059 		err = ctnetlink_change_seq_adj(ct, cda);
2060 		if (err < 0)
2061 			goto err2;
2062 	}
2063 
2064 	memset(&ct->proto, 0, sizeof(ct->proto));
2065 	if (cda[CTA_PROTOINFO]) {
2066 		err = ctnetlink_change_protoinfo(ct, cda);
2067 		if (err < 0)
2068 			goto err2;
2069 	}
2070 
2071 	if (cda[CTA_SYNPROXY]) {
2072 		err = ctnetlink_change_synproxy(ct, cda);
2073 		if (err < 0)
2074 			goto err2;
2075 	}
2076 
2077 #if defined(CONFIG_NF_CONNTRACK_MARK)
2078 	if (cda[CTA_MARK])
2079 		ctnetlink_change_mark(ct, cda);
2080 #endif
2081 
2082 	/* setup master conntrack: this is a confirmed expectation */
2083 	if (cda[CTA_TUPLE_MASTER]) {
2084 		struct nf_conntrack_tuple master;
2085 		struct nf_conntrack_tuple_hash *master_h;
2086 		struct nf_conn *master_ct;
2087 
2088 		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
2089 					    u3, NULL);
2090 		if (err < 0)
2091 			goto err2;
2092 
2093 		master_h = nf_conntrack_find_get(net, zone, &master);
2094 		if (master_h == NULL) {
2095 			err = -ENOENT;
2096 			goto err2;
2097 		}
2098 		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
2099 		__set_bit(IPS_EXPECTED_BIT, &ct->status);
2100 		ct->master = master_ct;
2101 	}
2102 	tstamp = nf_conn_tstamp_find(ct);
2103 	if (tstamp)
2104 		tstamp->start = ktime_get_real_ns();
2105 
2106 	err = nf_conntrack_hash_check_insert(ct);
2107 	if (err < 0)
2108 		goto err2;
2109 
2110 	rcu_read_unlock();
2111 
2112 	return ct;
2113 
2114 err2:
2115 	rcu_read_unlock();
2116 err1:
2117 	nf_conntrack_free(ct);
2118 	return ERR_PTR(err);
2119 }
2120 
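/* IPCTNL_MSG_CT_NEW handler: create the conntrack described by the
 * message if no matching entry exists (and NLM_F_CREATE is set),
 * otherwise update the existing entry unless NLM_F_EXCL is given.
 */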
2121 static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
2122 				   struct sk_buff *skb,
2123 				   const struct nlmsghdr *nlh,
2124 				   const struct nlattr * const cda[],
2125 				   struct netlink_ext_ack *extack)
2126 {
2127 	struct nf_conntrack_tuple otuple, rtuple;
2128 	struct nf_conntrack_tuple_hash *h = NULL;
2129 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2130 	struct nf_conn *ct;
2131 	u_int8_t u3 = nfmsg->nfgen_family;
2132 	struct nf_conntrack_zone zone;
2133 	int err;
2134 
2135 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
2136 	if (err < 0)
2137 		return err;
2138 
2139 	if (cda[CTA_TUPLE_ORIG]) {
2140 		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
2141 					    u3, &zone);
2142 		if (err < 0)
2143 			return err;
2144 	}
2145 
2146 	if (cda[CTA_TUPLE_REPLY]) {
2147 		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
2148 					    u3, &zone);
2149 		if (err < 0)
2150 			return err;
2151 	}
2152 
2153 	if (cda[CTA_TUPLE_ORIG])
2154 		h = nf_conntrack_find_get(net, &zone, &otuple);
2155 	else if (cda[CTA_TUPLE_REPLY])
2156 		h = nf_conntrack_find_get(net, &zone, &rtuple);
2157 
2158 	if (h == NULL) {
2159 		err = -ENOENT;
2160 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
2161 			enum ip_conntrack_events events;
2162 
2163 			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
2164 				return -EINVAL;
2165 			if (otuple.dst.protonum != rtuple.dst.protonum)
2166 				return -EINVAL;
2167 
2168 			ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
2169 							&rtuple, u3);
2170 			if (IS_ERR(ct))
2171 				return PTR_ERR(ct);
2172 
2173 			err = 0;
2174 			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
2175 				events = 1 << IPCT_RELATED;
2176 			else
2177 				events = 1 << IPCT_NEW;
2178 
2179 			if (cda[CTA_LABELS] &&
2180 			    ctnetlink_attach_labels(ct, cda) == 0)
2181 				events |= (1 << IPCT_LABEL);
2182 
2183 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2184 						      (1 << IPCT_ASSURED) |
2185 						      (1 << IPCT_HELPER) |
2186 						      (1 << IPCT_PROTOINFO) |
2187 						      (1 << IPCT_SEQADJ) |
2188 						      (1 << IPCT_MARK) |
2189 						      (1 << IPCT_SYNPROXY) |
2190 						      events,
2191 						      ct, NETLINK_CB(skb).portid,
2192 						      nlmsg_report(nlh));
2193 			nf_ct_put(ct);
2194 		}
2195 
2196 		return err;
2197 	}
2198 	/* implicit 'else' */
2199 
2200 	err = -EEXIST;
2201 	ct = nf_ct_tuplehash_to_ctrack(h);
2202 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
2203 		err = ctnetlink_change_conntrack(ct, cda);
2204 		if (err == 0) {
2205 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
2206 						      (1 << IPCT_ASSURED) |
2207 						      (1 << IPCT_HELPER) |
2208 						      (1 << IPCT_LABEL) |
2209 						      (1 << IPCT_PROTOINFO) |
2210 						      (1 << IPCT_SEQADJ) |
2211 						      (1 << IPCT_MARK) |
2212 						      (1 << IPCT_SYNPROXY),
2213 						      ct, NETLINK_CB(skb).portid,
2214 						      nlmsg_report(nlh));
2215 		}
2216 	}
2217 
2218 	nf_ct_put(ct);
2219 	return err;
2220 }
2221 
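/* Fill one per-CPU conntrack statistics message; res_id carries the
 * CPU number.
 */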
2222 static int
2223 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2224 				__u16 cpu, const struct ip_conntrack_stat *st)
2225 {
2226 	struct nlmsghdr *nlh;
2227 	struct nfgenmsg *nfmsg;
2228 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2229 
2230 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
2231 			      IPCTNL_MSG_CT_GET_STATS_CPU);
2232 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2233 	if (nlh == NULL)
2234 		goto nlmsg_failure;
2235 
2236 	nfmsg = nlmsg_data(nlh);
2237 	nfmsg->nfgen_family = AF_UNSPEC;
2238 	nfmsg->version      = NFNETLINK_V0;
2239 	nfmsg->res_id	    = htons(cpu);
2240 
2241 	if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
2242 	    nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
2243 	    nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
2244 	    nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
2245 	    nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
2246 				htonl(st->insert_failed)) ||
2247 	    nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
2248 	    nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
2249 	    nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
2250 	    nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
2251 				htonl(st->search_restart)))
2252 		goto nla_put_failure;
2253 
2254 	nlmsg_end(skb, nlh);
2255 	return skb->len;
2256 
2257 nla_put_failure:
2258 nlmsg_failure:
2259 	nlmsg_cancel(skb, nlh);
2260 	return -1;
2261 }
2262 
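/* Dump callback for IPCTNL_MSG_CT_GET_STATS_CPU: walk the possible
 * CPUs, resuming at cb->args[0] across dump invocations.
 */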
2263 static int
2264 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2265 {
2266 	int cpu;
2267 	struct net *net = sock_net(skb->sk);
2268 
2269 	if (cb->args[0] == nr_cpu_ids)
2270 		return 0;
2271 
2272 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2273 		const struct ip_conntrack_stat *st;
2274 
2275 		if (!cpu_possible(cpu))
2276 			continue;
2277 
2278 		st = per_cpu_ptr(net->ct.stat, cpu);
2279 		if (ctnetlink_ct_stat_cpu_fill_info(skb,
2280 						    NETLINK_CB(cb->skb).portid,
2281 						    cb->nlh->nlmsg_seq,
2282 						    cpu, st) < 0)
2283 				break;
2284 	}
2285 	cb->args[0] = cpu;
2286 
2287 	return skb->len;
2288 }
2289 
2290 static int ctnetlink_stat_ct_cpu(struct net *net, struct sock *ctnl,
2291 				 struct sk_buff *skb,
2292 				 const struct nlmsghdr *nlh,
2293 				 const struct nlattr * const cda[],
2294 				 struct netlink_ext_ack *extack)
2295 {
2296 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
2297 		struct netlink_dump_control c = {
2298 			.dump = ctnetlink_ct_stat_cpu_dump,
2299 		};
2300 		return netlink_dump_start(ctnl, skb, nlh, &c);
2301 	}
2302 
2303 	return 0;
2304 }
2305 
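/* Fill the global statistics message: current number of conntrack
 * entries and the configured maximum.
 */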
2306 static int
2307 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
2308 			    struct net *net)
2309 {
2310 	struct nlmsghdr *nlh;
2311 	struct nfgenmsg *nfmsg;
2312 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2313 	unsigned int nr_conntracks = atomic_read(&net->ct.count);
2314 
2315 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS);
2316 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2317 	if (nlh == NULL)
2318 		goto nlmsg_failure;
2319 
2320 	nfmsg = nlmsg_data(nlh);
2321 	nfmsg->nfgen_family = AF_UNSPEC;
2322 	nfmsg->version      = NFNETLINK_V0;
2323 	nfmsg->res_id	    = 0;
2324 
2325 	if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
2326 		goto nla_put_failure;
2327 
2328 	if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
2329 		goto nla_put_failure;
2330 
2331 	nlmsg_end(skb, nlh);
2332 	return skb->len;
2333 
2334 nla_put_failure:
2335 nlmsg_failure:
2336 	nlmsg_cancel(skb, nlh);
2337 	return -1;
2338 }
2339 
2340 static int ctnetlink_stat_ct(struct net *net, struct sock *ctnl,
2341 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
2342 			     const struct nlattr * const cda[],
2343 			     struct netlink_ext_ack *extack)
2344 {
2345 	struct sk_buff *skb2;
2346 	int err;
2347 
2348 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2349 	if (skb2 == NULL)
2350 		return -ENOMEM;
2351 
2352 	err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
2353 					  nlh->nlmsg_seq,
2354 					  NFNL_MSG_TYPE(nlh->nlmsg_type),
2355 					  sock_net(skb->sk));
2356 	if (err <= 0)
2357 		goto free;
2358 
2359 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2360 	if (err < 0)
2361 		goto out;
2362 
2363 	return 0;
2364 
2365 free:
2366 	kfree_skb(skb2);
2367 out:
2368 	/* this avoids a loop in nfnetlink. */
2369 	return err == -EAGAIN ? -ENOBUFS : err;
2370 }
2371 
2372 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2373 	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
2374 	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
2375 	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
2376 	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
2377 	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
2378 	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING,
2379 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
2380 	[CTA_EXPECT_ZONE]	= { .type = NLA_U16 },
2381 	[CTA_EXPECT_FLAGS]	= { .type = NLA_U32 },
2382 	[CTA_EXPECT_CLASS]	= { .type = NLA_U32 },
2383 	[CTA_EXPECT_NAT]	= { .type = NLA_NESTED },
2384 	[CTA_EXPECT_FN]		= { .type = NLA_NUL_STRING },
2385 };
2386 
2387 static struct nf_conntrack_expect *
2388 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2389 		       struct nf_conntrack_helper *helper,
2390 		       struct nf_conntrack_tuple *tuple,
2391 		       struct nf_conntrack_tuple *mask);
2392 
2393 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
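/* Glue layer used by nfqueue and nflog (NFQA_CT and friends): size,
 * build and parse the conntrack state attached to packet verdicts
 * and log messages.
 */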
2394 static size_t
2395 ctnetlink_glue_build_size(const struct nf_conn *ct)
2396 {
2397 	return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2398 	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2399 	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2400 	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2401 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
2402 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2403 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2404 	       + nla_total_size(0) /* CTA_PROTOINFO */
2405 	       + nla_total_size(0) /* CTA_HELP */
2406 	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2407 	       + ctnetlink_secctx_size(ct)
2408 #if IS_ENABLED(CONFIG_NF_NAT)
2409 	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2410 	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2411 #endif
2412 #ifdef CONFIG_NF_CONNTRACK_MARK
2413 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2414 #endif
2415 #ifdef CONFIG_NF_CONNTRACK_ZONES
2416 	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
2417 #endif
2418 	       + ctnetlink_proto_size(ct)
2419 	       ;
2420 }
2421 
2422 static struct nf_conn *ctnetlink_glue_get_ct(const struct sk_buff *skb,
2423 					     enum ip_conntrack_info *ctinfo)
2424 {
2425 	return nf_ct_get(skb, ctinfo);
2426 }
2427 
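/* Build the conntrack attributes placed inside the nested CT
 * attribute of nfqueue/nflog messages.
 */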
2428 static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
2429 {
2430 	const struct nf_conntrack_zone *zone;
2431 	struct nlattr *nest_parms;
2432 
2433 	zone = nf_ct_zone(ct);
2434 
2435 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
2436 	if (!nest_parms)
2437 		goto nla_put_failure;
2438 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2439 		goto nla_put_failure;
2440 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2441 				   NF_CT_ZONE_DIR_ORIG) < 0)
2442 		goto nla_put_failure;
2443 	nla_nest_end(skb, nest_parms);
2444 
2445 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
2446 	if (!nest_parms)
2447 		goto nla_put_failure;
2448 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2449 		goto nla_put_failure;
2450 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
2451 				   NF_CT_ZONE_DIR_REPL) < 0)
2452 		goto nla_put_failure;
2453 	nla_nest_end(skb, nest_parms);
2454 
2455 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
2456 				   NF_CT_DEFAULT_ZONE_DIR) < 0)
2457 		goto nla_put_failure;
2458 
2459 	if (ctnetlink_dump_id(skb, ct) < 0)
2460 		goto nla_put_failure;
2461 
2462 	if (ctnetlink_dump_status(skb, ct) < 0)
2463 		goto nla_put_failure;
2464 
2465 	if (ctnetlink_dump_timeout(skb, ct) < 0)
2466 		goto nla_put_failure;
2467 
2468 	if (ctnetlink_dump_protoinfo(skb, ct) < 0)
2469 		goto nla_put_failure;
2470 
2471 	if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2472 		goto nla_put_failure;
2473 
2474 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2475 	if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2476 		goto nla_put_failure;
2477 #endif
2478 	if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2479 		goto nla_put_failure;
2480 
2481 	if ((ct->status & IPS_SEQ_ADJUST) &&
2482 	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
2483 		goto nla_put_failure;
2484 
2485 	if (ctnetlink_dump_ct_synproxy(skb, ct) < 0)
2486 		goto nla_put_failure;
2487 
2488 #ifdef CONFIG_NF_CONNTRACK_MARK
2489 	if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
2490 		goto nla_put_failure;
2491 #endif
2492 	if (ctnetlink_dump_labels(skb, ct) < 0)
2493 		goto nla_put_failure;
2494 	return 0;
2495 
2496 nla_put_failure:
2497 	return -ENOSPC;
2498 }
2499 
2500 static int
2501 ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
2502 		     enum ip_conntrack_info ctinfo,
2503 		     u_int16_t ct_attr, u_int16_t ct_info_attr)
2504 {
2505 	struct nlattr *nest_parms;
2506 
2507 	nest_parms = nla_nest_start(skb, ct_attr);
2508 	if (!nest_parms)
2509 		goto nla_put_failure;
2510 
2511 	if (__ctnetlink_glue_build(skb, ct) < 0)
2512 		goto nla_put_failure;
2513 
2514 	nla_nest_end(skb, nest_parms);
2515 
2516 	if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
2517 		goto nla_put_failure;
2518 
2519 	return 0;
2520 
2521 nla_put_failure:
2522 	return -ENOSPC;
2523 }
2524 
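/* Update ct->status from an NFQA_CT attribute; see the comment below
 * on why this is more lenient than ctnetlink_change_status().
 */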
2525 static int
2526 ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[])
2527 {
2528 	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
2529 	unsigned long d = ct->status ^ status;
2530 
2531 	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
2532 		/* the SEEN_REPLY bit can only be set, never cleared */
2533 		return -EBUSY;
2534 
2535 	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
2536 		/* the ASSURED bit can only be set, never cleared */
2537 		return -EBUSY;
2538 
2539 	/* This check is less strict than ctnetlink_change_status()
2540 	 * because callers often flip IPS_EXPECTED bits when sending
2541 	 * an NFQA_CT attribute to the kernel: ignore the unchangeable
2542 	 * bits, but do not error out. User programs are also allowed
2543 	 * to clear any bits that they are permitted to change.
2544 	 */
2545 	__ctnetlink_change_status(ct, status, ~status);
2546 	return 0;
2547 }
2548 
2549 static int
2550 ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2551 {
2552 	int err;
2553 
2554 	if (cda[CTA_TIMEOUT]) {
2555 		err = ctnetlink_change_timeout(ct, cda);
2556 		if (err < 0)
2557 			return err;
2558 	}
2559 	if (cda[CTA_STATUS]) {
2560 		err = ctnetlink_update_status(ct, cda);
2561 		if (err < 0)
2562 			return err;
2563 	}
2564 	if (cda[CTA_HELP]) {
2565 		err = ctnetlink_change_helper(ct, cda);
2566 		if (err < 0)
2567 			return err;
2568 	}
2569 	if (cda[CTA_LABELS]) {
2570 		err = ctnetlink_attach_labels(ct, cda);
2571 		if (err < 0)
2572 			return err;
2573 	}
2574 #if defined(CONFIG_NF_CONNTRACK_MARK)
2575 	if (cda[CTA_MARK]) {
2576 		ctnetlink_change_mark(ct, cda);
2577 	}
2578 #endif
2579 	return 0;
2580 }
2581 
2582 static int
2583 ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
2584 {
2585 	struct nlattr *cda[CTA_MAX+1];
2586 	int ret;
2587 
2588 	ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy,
2589 					  NULL);
2590 	if (ret < 0)
2591 		return ret;
2592 
2593 	return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
2594 }
2595 
2596 static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
2597 				    const struct nf_conn *ct,
2598 				    struct nf_conntrack_tuple *tuple,
2599 				    struct nf_conntrack_tuple *mask)
2600 {
2601 	int err;
2602 
2603 	err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
2604 				    nf_ct_l3num(ct), NULL);
2605 	if (err < 0)
2606 		return err;
2607 
2608 	return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
2609 				     nf_ct_l3num(ct), NULL);
2610 }
2611 
2612 static int
2613 ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2614 			     u32 portid, u32 report)
2615 {
2616 	struct nlattr *cda[CTA_EXPECT_MAX+1];
2617 	struct nf_conntrack_tuple tuple, mask;
2618 	struct nf_conntrack_helper *helper = NULL;
2619 	struct nf_conntrack_expect *exp;
2620 	int err;
2621 
2622 	err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr,
2623 					  exp_nla_policy, NULL);
2624 	if (err < 0)
2625 		return err;
2626 
2627 	err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
2628 				       ct, &tuple, &mask);
2629 	if (err < 0)
2630 		return err;
2631 
2632 	if (cda[CTA_EXPECT_HELP_NAME]) {
2633 		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2634 
2635 		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2636 						    nf_ct_protonum(ct));
2637 		if (helper == NULL)
2638 			return -EOPNOTSUPP;
2639 	}
2640 
2641 	exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
2642 				     helper, &tuple, &mask);
2643 	if (IS_ERR(exp))
2644 		return PTR_ERR(exp);
2645 
2646 	err = nf_ct_expect_related_report(exp, portid, report, 0);
2647 	nf_ct_expect_put(exp);
2648 	return err;
2649 }
2650 
2651 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
2652 				  enum ip_conntrack_info ctinfo, int diff)
2653 {
2654 	if (!(ct->status & IPS_NAT_MASK))
2655 		return;
2656 
2657 	nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
2658 }
2659 
2660 static struct nfnl_ct_hook ctnetlink_glue_hook = {
2661 	.get_ct		= ctnetlink_glue_get_ct,
2662 	.build_size	= ctnetlink_glue_build_size,
2663 	.build		= ctnetlink_glue_build,
2664 	.parse		= ctnetlink_glue_parse,
2665 	.attach_expect	= ctnetlink_glue_attach_expect,
2666 	.seq_adjust	= ctnetlink_glue_seqadj,
2667 };
2668 #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
2669 
2670 /***********************************************************************
2671  * EXPECT
2672  ***********************************************************************/
2673 
2674 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2675 				    const struct nf_conntrack_tuple *tuple,
2676 				    u32 type)
2677 {
2678 	struct nlattr *nest_parms;
2679 
2680 	nest_parms = nla_nest_start(skb, type);
2681 	if (!nest_parms)
2682 		goto nla_put_failure;
2683 	if (ctnetlink_dump_tuples(skb, tuple) < 0)
2684 		goto nla_put_failure;
2685 	nla_nest_end(skb, nest_parms);
2686 
2687 	return 0;
2688 
2689 nla_put_failure:
2690 	return -1;
2691 }
2692 
2693 static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
2694 				   const struct nf_conntrack_tuple *tuple,
2695 				   const struct nf_conntrack_tuple_mask *mask)
2696 {
2697 	const struct nf_conntrack_l4proto *l4proto;
2698 	struct nf_conntrack_tuple m;
2699 	struct nlattr *nest_parms;
2700 	int ret;
2701 
2702 	memset(&m, 0xFF, sizeof(m));
2703 	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2704 	m.src.u.all = mask->src.u.all;
2705 	m.dst.protonum = tuple->dst.protonum;
2706 
2707 	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
2708 	if (!nest_parms)
2709 		goto nla_put_failure;
2710 
2711 	rcu_read_lock();
2712 	ret = ctnetlink_dump_tuples_ip(skb, &m);
2713 	if (ret >= 0) {
2714 		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
2715 		ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2716 	}
2717 	rcu_read_unlock();
2718 
2719 	if (unlikely(ret < 0))
2720 		goto nla_put_failure;
2721 
2722 	nla_nest_end(skb, nest_parms);
2723 
2724 	return 0;
2725 
2726 nla_put_failure:
2727 	return -1;
2728 }
2729 
2730 static const union nf_inet_addr any_addr;
2731 
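/* Derive an opaque expectation ID by hashing kernel pointers with
 * siphash, so the addresses themselves are never disclosed to
 * userspace.
 */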
2732 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
2733 {
2734 	static __read_mostly siphash_key_t exp_id_seed;
2735 	unsigned long a, b, c, d;
2736 
2737 	net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
2738 
2739 	a = (unsigned long)exp;
2740 	b = (unsigned long)exp->helper;
2741 	c = (unsigned long)exp->master;
2742 	d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
2743 
2744 #ifdef CONFIG_64BIT
2745 	return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
2746 #else
2747 	return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
2748 #endif
2749 }
2750 
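/* Dump a single expectation: tuple, mask, master tuple, optional NAT
 * information, timeout, ID, flags, class, helper name and expectfn.
 */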
2751 static int
2752 ctnetlink_exp_dump_expect(struct sk_buff *skb,
2753 			  const struct nf_conntrack_expect *exp)
2754 {
2755 	struct nf_conn *master = exp->master;
2756 	long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
2757 	struct nf_conn_help *help;
2758 #if IS_ENABLED(CONFIG_NF_NAT)
2759 	struct nlattr *nest_parms;
2760 	struct nf_conntrack_tuple nat_tuple = {};
2761 #endif
2762 	struct nf_ct_helper_expectfn *expfn;
2763 
2764 	if (timeout < 0)
2765 		timeout = 0;
2766 
2767 	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
2768 		goto nla_put_failure;
2769 	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
2770 		goto nla_put_failure;
2771 	if (ctnetlink_exp_dump_tuple(skb,
2772 				 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2773 				 CTA_EXPECT_MASTER) < 0)
2774 		goto nla_put_failure;
2775 
2776 #if IS_ENABLED(CONFIG_NF_NAT)
2777 	if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
2778 	    exp->saved_proto.all) {
2779 		nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT);
2780 		if (!nest_parms)
2781 			goto nla_put_failure;
2782 
2783 		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
2784 			goto nla_put_failure;
2785 
2786 		nat_tuple.src.l3num = nf_ct_l3num(master);
2787 		nat_tuple.src.u3 = exp->saved_addr;
2788 		nat_tuple.dst.protonum = nf_ct_protonum(master);
2789 		nat_tuple.src.u = exp->saved_proto;
2790 
2791 		if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
2792 						CTA_EXPECT_NAT_TUPLE) < 0)
2793 			goto nla_put_failure;
2794 		nla_nest_end(skb, nest_parms);
2795 	}
2796 #endif
2797 	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
2798 	    nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
2799 	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
2800 	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
2801 		goto nla_put_failure;
2802 	help = nfct_help(master);
2803 	if (help) {
2804 		struct nf_conntrack_helper *helper;
2805 
2806 		helper = rcu_dereference(help->helper);
2807 		if (helper &&
2808 		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
2809 			goto nla_put_failure;
2810 	}
2811 	expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
2812 	if (expfn != NULL &&
2813 	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
2814 		goto nla_put_failure;
2815 
2816 	return 0;
2817 
2818 nla_put_failure:
2819 	return -1;
2820 }
2821 
2822 static int
2823 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2824 			int event, const struct nf_conntrack_expect *exp)
2825 {
2826 	struct nlmsghdr *nlh;
2827 	struct nfgenmsg *nfmsg;
2828 	unsigned int flags = portid ? NLM_F_MULTI : 0;
2829 
2830 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event);
2831 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2832 	if (nlh == NULL)
2833 		goto nlmsg_failure;
2834 
2835 	nfmsg = nlmsg_data(nlh);
2836 	nfmsg->nfgen_family = exp->tuple.src.l3num;
2837 	nfmsg->version	    = NFNETLINK_V0;
2838 	nfmsg->res_id	    = 0;
2839 
2840 	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2841 		goto nla_put_failure;
2842 
2843 	nlmsg_end(skb, nlh);
2844 	return skb->len;
2845 
2846 nlmsg_failure:
2847 nla_put_failure:
2848 	nlmsg_cancel(skb, nlh);
2849 	return -1;
2850 }
2851 
2852 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2853 static int
2854 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2855 {
2856 	struct nf_conntrack_expect *exp = item->exp;
2857 	struct net *net = nf_ct_exp_net(exp);
2858 	struct nlmsghdr *nlh;
2859 	struct nfgenmsg *nfmsg;
2860 	struct sk_buff *skb;
2861 	unsigned int type, group;
2862 	int flags = 0;
2863 
2864 	if (events & (1 << IPEXP_DESTROY)) {
2865 		type = IPCTNL_MSG_EXP_DELETE;
2866 		group = NFNLGRP_CONNTRACK_EXP_DESTROY;
2867 	} else if (events & (1 << IPEXP_NEW)) {
2868 		type = IPCTNL_MSG_EXP_NEW;
2869 		flags = NLM_F_CREATE|NLM_F_EXCL;
2870 		group = NFNLGRP_CONNTRACK_EXP_NEW;
2871 	} else
2872 		return 0;
2873 
2874 	if (!item->report && !nfnetlink_has_listeners(net, group))
2875 		return 0;
2876 
2877 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2878 	if (skb == NULL)
2879 		goto errout;
2880 
2881 	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type);
2882 	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
2883 	if (nlh == NULL)
2884 		goto nlmsg_failure;
2885 
2886 	nfmsg = nlmsg_data(nlh);
2887 	nfmsg->nfgen_family = exp->tuple.src.l3num;
2888 	nfmsg->version	    = NFNETLINK_V0;
2889 	nfmsg->res_id	    = 0;
2890 
2891 	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2892 		goto nla_put_failure;
2893 
2894 	nlmsg_end(skb, nlh);
2895 	nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
2896 	return 0;
2897 
2898 nla_put_failure:
2899 	nlmsg_cancel(skb, nlh);
2900 nlmsg_failure:
2901 	kfree_skb(skb);
2902 errout:
2903 	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
2904 	return 0;
2905 }
2906 #endif

2907 static int ctnetlink_exp_done(struct netlink_callback *cb)
2908 {
2909 	if (cb->args[1])
2910 		nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
2911 	return 0;
2912 }
2913 
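/* Dump all expectations in the global hash table. cb->args[0] is the
 * current bucket; cb->args[1] holds a reference to the last entry
 * that did not fit into the previous skb.
 */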
2914 static int
2915 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2916 {
2917 	struct net *net = sock_net(skb->sk);
2918 	struct nf_conntrack_expect *exp, *last;
2919 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2920 	u_int8_t l3proto = nfmsg->nfgen_family;
2921 
2922 	rcu_read_lock();
2923 	last = (struct nf_conntrack_expect *)cb->args[1];
2924 	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2925 restart:
2926 		hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
2927 					 hnode) {
2928 			if (l3proto && exp->tuple.src.l3num != l3proto)
2929 				continue;
2930 
2931 			if (!net_eq(nf_ct_net(exp->master), net))
2932 				continue;
2933 
2934 			if (cb->args[1]) {
2935 				if (exp != last)
2936 					continue;
2937 				cb->args[1] = 0;
2938 			}
2939 			if (ctnetlink_exp_fill_info(skb,
2940 						    NETLINK_CB(cb->skb).portid,
2941 						    cb->nlh->nlmsg_seq,
2942 						    IPCTNL_MSG_EXP_NEW,
2943 						    exp) < 0) {
2944 				if (!refcount_inc_not_zero(&exp->use))
2945 					continue;
2946 				cb->args[1] = (unsigned long)exp;
2947 				goto out;
2948 			}
2949 		}
2950 		if (cb->args[1]) {
2951 			cb->args[1] = 0;
2952 			goto restart;
2953 		}
2954 	}
2955 out:
2956 	rcu_read_unlock();
2957 	if (last)
2958 		nf_ct_expect_put(last);
2959 
2960 	return skb->len;
2961 }
2962 
2963 static int
2964 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2965 {
2966 	struct nf_conntrack_expect *exp, *last;
2967 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2968 	struct nf_conn *ct = cb->data;
2969 	struct nf_conn_help *help = nfct_help(ct);
2970 	u_int8_t l3proto = nfmsg->nfgen_family;
2971 
2972 	if (cb->args[0])
2973 		return 0;
2974 
2975 	rcu_read_lock();
2976 	last = (struct nf_conntrack_expect *)cb->args[1];
2977 restart:
2978 	hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
2979 		if (l3proto && exp->tuple.src.l3num != l3proto)
2980 			continue;
2981 		if (cb->args[1]) {
2982 			if (exp != last)
2983 				continue;
2984 			cb->args[1] = 0;
2985 		}
2986 		if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
2987 					    cb->nlh->nlmsg_seq,
2988 					    IPCTNL_MSG_EXP_NEW,
2989 					    exp) < 0) {
2990 			if (!refcount_inc_not_zero(&exp->use))
2991 				continue;
2992 			cb->args[1] = (unsigned long)exp;
2993 			goto out;
2994 		}
2995 	}
2996 	if (cb->args[1]) {
2997 		cb->args[1] = 0;
2998 		goto restart;
2999 	}
3000 	cb->args[0] = 1;
3001 out:
3002 	rcu_read_unlock();
3003 	if (last)
3004 		nf_ct_expect_put(last);
3005 
3006 	return skb->len;
3007 }
3008 
3009 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
3010 				 struct sk_buff *skb,
3011 				 const struct nlmsghdr *nlh,
3012 				 const struct nlattr * const cda[],
3013 				 struct netlink_ext_ack *extack)
3014 {
3015 	int err;
3016 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3017 	u_int8_t u3 = nfmsg->nfgen_family;
3018 	struct nf_conntrack_tuple tuple;
3019 	struct nf_conntrack_tuple_hash *h;
3020 	struct nf_conn *ct;
3021 	struct nf_conntrack_zone zone;
3022 	struct netlink_dump_control c = {
3023 		.dump = ctnetlink_exp_ct_dump_table,
3024 		.done = ctnetlink_exp_done,
3025 	};
3026 
3027 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3028 				    u3, NULL);
3029 	if (err < 0)
3030 		return err;
3031 
3032 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3033 	if (err < 0)
3034 		return err;
3035 
3036 	h = nf_conntrack_find_get(net, &zone, &tuple);
3037 	if (!h)
3038 		return -ENOENT;
3039 
3040 	ct = nf_ct_tuplehash_to_ctrack(h);
3041 	/* No expectations are linked to this conntrack entry. */
3042 	if (!nfct_help(ct)) {
3043 		nf_ct_put(ct);
3044 		return 0;
3045 	}
3046 
3047 	c.data = ct;
3048 
3049 	err = netlink_dump_start(ctnl, skb, nlh, &c);
3050 	nf_ct_put(ct);
3051 
3052 	return err;
3053 }
3054 
3055 static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
3056 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3057 				const struct nlattr * const cda[],
3058 				struct netlink_ext_ack *extack)
3059 {
3060 	struct nf_conntrack_tuple tuple;
3061 	struct nf_conntrack_expect *exp;
3062 	struct sk_buff *skb2;
3063 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3064 	u_int8_t u3 = nfmsg->nfgen_family;
3065 	struct nf_conntrack_zone zone;
3066 	int err;
3067 
3068 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
3069 		if (cda[CTA_EXPECT_MASTER])
3070 			return ctnetlink_dump_exp_ct(net, ctnl, skb, nlh, cda,
3071 						     extack);
3072 		else {
3073 			struct netlink_dump_control c = {
3074 				.dump = ctnetlink_exp_dump_table,
3075 				.done = ctnetlink_exp_done,
3076 			};
3077 			return netlink_dump_start(ctnl, skb, nlh, &c);
3078 		}
3079 	}
3080 
3081 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3082 	if (err < 0)
3083 		return err;
3084 
3085 	if (cda[CTA_EXPECT_TUPLE])
3086 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3087 					    u3, NULL);
3088 	else if (cda[CTA_EXPECT_MASTER])
3089 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
3090 					    u3, NULL);
3091 	else
3092 		return -EINVAL;
3093 
3094 	if (err < 0)
3095 		return err;
3096 
3097 	exp = nf_ct_expect_find_get(net, &zone, &tuple);
3098 	if (!exp)
3099 		return -ENOENT;
3100 
3101 	if (cda[CTA_EXPECT_ID]) {
3102 		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3103 
3104 		if (id != nf_expect_get_id(exp)) {
3105 			nf_ct_expect_put(exp);
3106 			return -ENOENT;
3107 		}
3108 	}
3109 
3110 	err = -ENOMEM;
3111 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3112 	if (skb2 == NULL) {
3113 		nf_ct_expect_put(exp);
3114 		goto out;
3115 	}
3116 
3117 	rcu_read_lock();
3118 	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
3119 				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
3120 	rcu_read_unlock();
3121 	nf_ct_expect_put(exp);
3122 	if (err <= 0)
3123 		goto free;
3124 
3125 	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
3126 	if (err < 0)
3127 		goto out;
3128 
3129 	return 0;
3130 
3131 free:
3132 	kfree_skb(skb2);
3133 out:
3134 	/* this avoids a loop in nfnetlink. */
3135 	return err == -EAGAIN ? -ENOBUFS : err;
3136 }
3137 
3138 static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
3139 {
3140 	const struct nf_conn_help *m_help;
3141 	const char *name = data;
3142 
3143 	m_help = nfct_help(exp->master);
3144 
3145 	return strcmp(m_help->helper->name, name) == 0;
3146 }
3147 
3148 static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
3149 {
3150 	return true;
3151 }
3152 
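/* IPCTNL_MSG_EXP_DELETE handler: remove a single expectation by
 * tuple, all expectations of a given helper, or flush them all.
 */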
3153 static int ctnetlink_del_expect(struct net *net, struct sock *ctnl,
3154 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3155 				const struct nlattr * const cda[],
3156 				struct netlink_ext_ack *extack)
3157 {
3158 	struct nf_conntrack_expect *exp;
3159 	struct nf_conntrack_tuple tuple;
3160 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3161 	u_int8_t u3 = nfmsg->nfgen_family;
3162 	struct nf_conntrack_zone zone;
3163 	int err;
3164 
3165 	if (cda[CTA_EXPECT_TUPLE]) {
3166 		/* delete a single expect by tuple */
3167 		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3168 		if (err < 0)
3169 			return err;
3170 
3171 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3172 					    u3, NULL);
3173 		if (err < 0)
3174 			return err;
3175 
3176 		/* bump usage count to 2 */
3177 		exp = nf_ct_expect_find_get(net, &zone, &tuple);
3178 		if (!exp)
3179 			return -ENOENT;
3180 
3181 		if (cda[CTA_EXPECT_ID]) {
3182 			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3183 			if (ntohl(id) != (u32)(unsigned long)exp) {
3184 				nf_ct_expect_put(exp);
3185 				return -ENOENT;
3186 			}
3187 		}
3188 
3189 		/* after list removal, usage count == 1 */
3190 		spin_lock_bh(&nf_conntrack_expect_lock);
3191 		if (del_timer(&exp->timeout)) {
3192 			nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
3193 						   nlmsg_report(nlh));
3194 			nf_ct_expect_put(exp);
3195 		}
3196 		spin_unlock_bh(&nf_conntrack_expect_lock);
3197 		/* we still have to put the reference taken by the 'get' above;
3198 		 * after this put the usage count drops to 0. */
3199 		nf_ct_expect_put(exp);
3200 	} else if (cda[CTA_EXPECT_HELP_NAME]) {
3201 		char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3202 
3203 		nf_ct_expect_iterate_net(net, expect_iter_name, name,
3204 					 NETLINK_CB(skb).portid,
3205 					 nlmsg_report(nlh));
3206 	} else {
3207 		/* This basically means we have to flush everything. */
3208 		nf_ct_expect_iterate_net(net, expect_iter_all, NULL,
3209 					 NETLINK_CB(skb).portid,
3210 					 nlmsg_report(nlh));
3211 	}
3212 
3213 	return 0;
3214 }

3215 static int
3216 ctnetlink_change_expect(struct nf_conntrack_expect *x,
3217 			const struct nlattr * const cda[])
3218 {
3219 	if (cda[CTA_EXPECT_TIMEOUT]) {
3220 		if (!del_timer(&x->timeout))
3221 			return -ETIME;
3222 
3223 		x->timeout.expires = jiffies +
3224 			ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
3225 		add_timer(&x->timeout);
3226 	}
3227 	return 0;
3228 }
3229 
3230 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
3231 	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
3232 	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
3233 };
3234 
3235 static int
3236 ctnetlink_parse_expect_nat(const struct nlattr *attr,
3237 			   struct nf_conntrack_expect *exp,
3238 			   u_int8_t u3)
3239 {
3240 #if IS_ENABLED(CONFIG_NF_NAT)
3241 	struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
3242 	struct nf_conntrack_tuple nat_tuple = {};
3243 	int err;
3244 
3245 	err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr,
3246 					  exp_nat_nla_policy, NULL);
3247 	if (err < 0)
3248 		return err;
3249 
3250 	if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
3251 		return -EINVAL;
3252 
3253 	err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
3254 				    &nat_tuple, CTA_EXPECT_NAT_TUPLE,
3255 				    u3, NULL);
3256 	if (err < 0)
3257 		return err;
3258 
3259 	exp->saved_addr = nat_tuple.src.u3;
3260 	exp->saved_proto = nat_tuple.src.u;
3261 	exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
3262 
3263 	return 0;
3264 #else
3265 	return -EOPNOTSUPP;
3266 #endif
3267 }
3268 
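/* Allocate an expectation from netlink attributes on behalf of the
 * master conntrack ct; helper may be NULL.
 */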
3269 static struct nf_conntrack_expect *
3270 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
3271 		       struct nf_conntrack_helper *helper,
3272 		       struct nf_conntrack_tuple *tuple,
3273 		       struct nf_conntrack_tuple *mask)
3274 {
3275 	u_int32_t class = 0;
3276 	struct nf_conntrack_expect *exp;
3277 	struct nf_conn_help *help;
3278 	int err;
3279 
3280 	help = nfct_help(ct);
3281 	if (!help)
3282 		return ERR_PTR(-EOPNOTSUPP);
3283 
3284 	if (cda[CTA_EXPECT_CLASS] && helper) {
3285 		class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
3286 		if (class > helper->expect_class_max)
3287 			return ERR_PTR(-EINVAL);
3288 	}
3289 	exp = nf_ct_expect_alloc(ct);
3290 	if (!exp)
3291 		return ERR_PTR(-ENOMEM);
3292 
3293 	if (cda[CTA_EXPECT_FLAGS]) {
3294 		exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
3295 		exp->flags &= ~NF_CT_EXPECT_USERSPACE;
3296 	} else {
3297 		exp->flags = 0;
3298 	}
3299 	if (cda[CTA_EXPECT_FN]) {
3300 		const char *name = nla_data(cda[CTA_EXPECT_FN]);
3301 		struct nf_ct_helper_expectfn *expfn;
3302 
3303 		expfn = nf_ct_helper_expectfn_find_by_name(name);
3304 		if (expfn == NULL) {
3305 			err = -EINVAL;
3306 			goto err_out;
3307 		}
3308 		exp->expectfn = expfn->expectfn;
3309 	} else
3310 		exp->expectfn = NULL;
3311 
3312 	exp->class = class;
3313 	exp->master = ct;
3314 	exp->helper = helper;
3315 	exp->tuple = *tuple;
3316 	exp->mask.src.u3 = mask->src.u3;
3317 	exp->mask.src.u.all = mask->src.u.all;
3318 
3319 	if (cda[CTA_EXPECT_NAT]) {
3320 		err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
3321 						 exp, nf_ct_l3num(ct));
3322 		if (err < 0)
3323 			goto err_out;
3324 	}
3325 	return exp;
3326 err_out:
3327 	nf_ct_expect_put(exp);
3328 	return ERR_PTR(err);
3329 }
3330 
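/* Create a new expectation: look up the master conntrack, resolve the
 * helper (loading its module if necessary), then register the
 * expectation.
 */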
3331 static int
3332 ctnetlink_create_expect(struct net *net,
3333 			const struct nf_conntrack_zone *zone,
3334 			const struct nlattr * const cda[],
3335 			u_int8_t u3, u32 portid, int report)
3336 {
3337 	struct nf_conntrack_tuple tuple, mask, master_tuple;
3338 	struct nf_conntrack_tuple_hash *h = NULL;
3339 	struct nf_conntrack_helper *helper = NULL;
3340 	struct nf_conntrack_expect *exp;
3341 	struct nf_conn *ct;
3342 	int err;
3343 
3344 	/* the caller guarantees that these three CTA_EXPECT_* attributes exist */
3345 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3346 				    u3, NULL);
3347 	if (err < 0)
3348 		return err;
3349 	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
3350 				    u3, NULL);
3351 	if (err < 0)
3352 		return err;
3353 	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
3354 				    u3, NULL);
3355 	if (err < 0)
3356 		return err;
3357 
3358 	/* Look for master conntrack of this expectation */
3359 	h = nf_conntrack_find_get(net, zone, &master_tuple);
3360 	if (!h)
3361 		return -ENOENT;
3362 	ct = nf_ct_tuplehash_to_ctrack(h);
3363 
3364 	rcu_read_lock();
3365 	if (cda[CTA_EXPECT_HELP_NAME]) {
3366 		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
3367 
3368 		helper = __nf_conntrack_helper_find(helpname, u3,
3369 						    nf_ct_protonum(ct));
3370 		if (helper == NULL) {
3371 			rcu_read_unlock();
3372 #ifdef CONFIG_MODULES
3373 			if (request_module("nfct-helper-%s", helpname) < 0) {
3374 				err = -EOPNOTSUPP;
3375 				goto err_ct;
3376 			}
3377 			rcu_read_lock();
3378 			helper = __nf_conntrack_helper_find(helpname, u3,
3379 							    nf_ct_protonum(ct));
3380 			if (helper) {
3381 				err = -EAGAIN;
3382 				goto err_rcu;
3383 			}
3384 			rcu_read_unlock();
3385 #endif
3386 			err = -EOPNOTSUPP;
3387 			goto err_ct;
3388 		}
3389 	}
3390 
3391 	exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
3392 	if (IS_ERR(exp)) {
3393 		err = PTR_ERR(exp);
3394 		goto err_rcu;
3395 	}
3396 
3397 	err = nf_ct_expect_related_report(exp, portid, report, 0);
3398 	nf_ct_expect_put(exp);
3399 err_rcu:
3400 	rcu_read_unlock();
3401 err_ct:
3402 	nf_ct_put(ct);
3403 	return err;
3404 }
3405 
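/* IPCTNL_MSG_EXP_NEW handler: create the expectation if it does not
 * exist (and NLM_F_CREATE is set), otherwise update its timeout
 * unless NLM_F_EXCL is given.
 */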
3406 static int ctnetlink_new_expect(struct net *net, struct sock *ctnl,
3407 				struct sk_buff *skb, const struct nlmsghdr *nlh,
3408 				const struct nlattr * const cda[],
3409 				struct netlink_ext_ack *extack)
3410 {
3411 	struct nf_conntrack_tuple tuple;
3412 	struct nf_conntrack_expect *exp;
3413 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3414 	u_int8_t u3 = nfmsg->nfgen_family;
3415 	struct nf_conntrack_zone zone;
3416 	int err;
3417 
3418 	if (!cda[CTA_EXPECT_TUPLE]
3419 	    || !cda[CTA_EXPECT_MASK]
3420 	    || !cda[CTA_EXPECT_MASTER])
3421 		return -EINVAL;
3422 
3423 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3424 	if (err < 0)
3425 		return err;
3426 
3427 	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
3428 				    u3, NULL);
3429 	if (err < 0)
3430 		return err;
3431 
3432 	spin_lock_bh(&nf_conntrack_expect_lock);
3433 	exp = __nf_ct_expect_find(net, &zone, &tuple);
3434 	if (!exp) {
3435 		spin_unlock_bh(&nf_conntrack_expect_lock);
3436 		err = -ENOENT;
3437 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
3438 			err = ctnetlink_create_expect(net, &zone, cda, u3,
3439 						      NETLINK_CB(skb).portid,
3440 						      nlmsg_report(nlh));
3441 		}
3442 		return err;
3443 	}
3444 
3445 	err = -EEXIST;
3446 	if (!(nlh->nlmsg_flags & NLM_F_EXCL))
3447 		err = ctnetlink_change_expect(exp, cda);
3448 	spin_unlock_bh(&nf_conntrack_expect_lock);
3449 
3450 	return err;
3451 }
3452 
3453 static int
3454 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
3455 			     const struct ip_conntrack_stat *st)
3456 {
3457 	struct nlmsghdr *nlh;
3458 	struct nfgenmsg *nfmsg;
3459 	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
3460 
3461 	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
3462 			      IPCTNL_MSG_EXP_GET_STATS_CPU);
3463 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
3464 	if (nlh == NULL)
3465 		goto nlmsg_failure;
3466 
3467 	nfmsg = nlmsg_data(nlh);
3468 	nfmsg->nfgen_family = AF_UNSPEC;
3469 	nfmsg->version      = NFNETLINK_V0;
3470 	nfmsg->res_id	    = htons(cpu);
3471 
3472 	if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
3473 	    nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
3474 	    nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
3475 		goto nla_put_failure;
3476 
3477 	nlmsg_end(skb, nlh);
3478 	return skb->len;
3479 
3480 nla_put_failure:
3481 nlmsg_failure:
3482 	nlmsg_cancel(skb, nlh);
3483 	return -1;
3484 }
3485 
3486 static int
3487 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
3488 {
3489 	int cpu;
3490 	struct net *net = sock_net(skb->sk);
3491 
3492 	if (cb->args[0] == nr_cpu_ids)
3493 		return 0;
3494 
3495 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
3496 		const struct ip_conntrack_stat *st;
3497 
3498 		if (!cpu_possible(cpu))
3499 			continue;
3500 
3501 		st = per_cpu_ptr(net->ct.stat, cpu);
3502 		if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
3503 						 cb->nlh->nlmsg_seq,
3504 						 cpu, st) < 0)
3505 			break;
3506 	}
3507 	cb->args[0] = cpu;
3508 
3509 	return skb->len;
3510 }
3511 
3512 static int ctnetlink_stat_exp_cpu(struct net *net, struct sock *ctnl,
3513 				  struct sk_buff *skb,
3514 				  const struct nlmsghdr *nlh,
3515 				  const struct nlattr * const cda[],
3516 				  struct netlink_ext_ack *extack)
3517 {
3518 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
3519 		struct netlink_dump_control c = {
3520 			.dump = ctnetlink_exp_stat_cpu_dump,
3521 		};
3522 		return netlink_dump_start(ctnl, skb, nlh, &c);
3523 	}
3524 
3525 	return 0;
3526 }
3527 
3528 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3529 static struct nf_ct_event_notifier ctnl_notifier = {
3530 	.fcn = ctnetlink_conntrack_event,
3531 };
3532 
3533 static struct nf_exp_event_notifier ctnl_notifier_exp = {
3534 	.fcn = ctnetlink_expect_event,
3535 };
3536 #endif
3537 
3538 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
3539 	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
3540 					    .attr_count = CTA_MAX,
3541 					    .policy = ct_nla_policy },
3542 	[IPCTNL_MSG_CT_GET] 		= { .call = ctnetlink_get_conntrack,
3543 					    .attr_count = CTA_MAX,
3544 					    .policy = ct_nla_policy },
3545 	[IPCTNL_MSG_CT_DELETE]  	= { .call = ctnetlink_del_conntrack,
3546 					    .attr_count = CTA_MAX,
3547 					    .policy = ct_nla_policy },
3548 	[IPCTNL_MSG_CT_GET_CTRZERO] 	= { .call = ctnetlink_get_conntrack,
3549 					    .attr_count = CTA_MAX,
3550 					    .policy = ct_nla_policy },
3551 	[IPCTNL_MSG_CT_GET_STATS_CPU]	= { .call = ctnetlink_stat_ct_cpu },
3552 	[IPCTNL_MSG_CT_GET_STATS]	= { .call = ctnetlink_stat_ct },
3553 	[IPCTNL_MSG_CT_GET_DYING]	= { .call = ctnetlink_get_ct_dying },
3554 	[IPCTNL_MSG_CT_GET_UNCONFIRMED]	= { .call = ctnetlink_get_ct_unconfirmed },
3555 };
3556 
3557 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
3558 	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
3559 					    .attr_count = CTA_EXPECT_MAX,
3560 					    .policy = exp_nla_policy },
3561 	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
3562 					    .attr_count = CTA_EXPECT_MAX,
3563 					    .policy = exp_nla_policy },
3564 	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
3565 					    .attr_count = CTA_EXPECT_MAX,
3566 					    .policy = exp_nla_policy },
3567 	[IPCTNL_MSG_EXP_GET_STATS_CPU]	= { .call = ctnetlink_stat_exp_cpu },
3568 };
3569 
3570 static const struct nfnetlink_subsystem ctnl_subsys = {
3571 	.name				= "conntrack",
3572 	.subsys_id			= NFNL_SUBSYS_CTNETLINK,
3573 	.cb_count			= IPCTNL_MSG_MAX,
3574 	.cb				= ctnl_cb,
3575 };
3576 
3577 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
3578 	.name				= "conntrack_expect",
3579 	.subsys_id			= NFNL_SUBSYS_CTNETLINK_EXP,
3580 	.cb_count			= IPCTNL_MSG_EXP_MAX,
3581 	.cb				= ctnl_exp_cb,
3582 };
3583 
3584 MODULE_ALIAS("ip_conntrack_netlink");
3585 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
3586 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
3587 
3588 static int __net_init ctnetlink_net_init(struct net *net)
3589 {
3590 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3591 	int ret;
3592 
3593 	ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
3594 	if (ret < 0) {
3595 		pr_err("ctnetlink_init: cannot register notifier.\n");
3596 		goto err_out;
3597 	}
3598 
3599 	ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
3600 	if (ret < 0) {
3601 		pr_err("ctnetlink_init: cannot register expect notifier.\n");
3602 		goto err_unreg_notifier;
3603 	}
3604 #endif
3605 	return 0;
3606 
3607 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3608 err_unreg_notifier:
3609 	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3610 err_out:
3611 	return ret;
3612 #endif
3613 }
3614 
3615 static void ctnetlink_net_exit(struct net *net)
3616 {
3617 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3618 	nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
3619 	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3620 #endif
3621 }
3622 
3623 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
3624 {
3625 	struct net *net;
3626 
3627 	list_for_each_entry(net, net_exit_list, exit_list)
3628 		ctnetlink_net_exit(net);
3629 
3630 	/* wait until other CPUs are done with the ctnl notifiers */
3631 	synchronize_rcu();
3632 }
3633 
3634 static struct pernet_operations ctnetlink_net_ops = {
3635 	.init		= ctnetlink_net_init,
3636 	.exit_batch	= ctnetlink_net_exit_batch,
3637 };
3638 
3639 static int __init ctnetlink_init(void)
3640 {
3641 	int ret;
3642 
3643 	ret = nfnetlink_subsys_register(&ctnl_subsys);
3644 	if (ret < 0) {
3645 		pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3646 		goto err_out;
3647 	}
3648 
3649 	ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
3650 	if (ret < 0) {
3651 		pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3652 		goto err_unreg_subsys;
3653 	}
3654 
3655 	ret = register_pernet_subsys(&ctnetlink_net_ops);
3656 	if (ret < 0) {
3657 		pr_err("ctnetlink_init: cannot register pernet operations\n");
3658 		goto err_unreg_exp_subsys;
3659 	}
3660 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3661 	/* setup interaction between nf_queue and nf_conntrack_netlink. */
3662 	RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
3663 #endif
3664 	return 0;
3665 
3666 err_unreg_exp_subsys:
3667 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3668 err_unreg_subsys:
3669 	nfnetlink_subsys_unregister(&ctnl_subsys);
3670 err_out:
3671 	return ret;
3672 }
3673 
3674 static void __exit ctnetlink_exit(void)
3675 {
3676 	unregister_pernet_subsys(&ctnetlink_net_ops);
3677 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3678 	nfnetlink_subsys_unregister(&ctnl_subsys);
3679 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3680 	RCU_INIT_POINTER(nfnl_ct_hook, NULL);
3681 #endif
3682 	synchronize_rcu();
3683 }
3684 
3685 module_init(ctnetlink_init);
3686 module_exit(ctnetlink_exit);
3687