xref: /linux/include/net/xfrm.h (revision b8469721)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4 
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 #include <linux/sockptr.h>
19 
20 #include <net/sock.h>
21 #include <net/dst.h>
22 #include <net/ip.h>
23 #include <net/route.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_fib.h>
26 #include <net/flow.h>
27 #include <net/gro_cells.h>
28 
29 #include <linux/interrupt.h>
30 
31 #ifdef CONFIG_XFRM_STATISTICS
32 #include <net/snmp.h>
33 #endif
34 
35 #define XFRM_PROTO_ESP		50
36 #define XFRM_PROTO_AH		51
37 #define XFRM_PROTO_COMP		108
38 #define XFRM_PROTO_IPIP		4
39 #define XFRM_PROTO_IPV6		41
40 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
41 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
42 
43 #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
44 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
45 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
46 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
47 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
48 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
49 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
50 	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
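/*
 * Example (editor's note, not part of the original header): protocol modules
 * typically advertise themselves with these aliases so the xfrm core can load
 * them on demand by family/protocol; an IPv4 ESP implementation would usually
 * carry something like:
 *
 *	MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
 *	MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
 */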
51 
52 #ifdef CONFIG_XFRM_STATISTICS
53 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
54 #define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
55 #else
56 #define XFRM_INC_STATS(net, field)	((void)(net))
57 #define XFRM_ADD_STATS(net, field, val) ((void)(net))
58 #endif
59 
60 
61 /* Organization of SPD aka "XFRM rules"
62    ------------------------------------
63 
64    Basic objects:
65    - policy rule, struct xfrm_policy (=SPD entry)
66    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
67    - instance of a transformer, struct xfrm_state (=SA)
68    - template to clone xfrm_state, struct xfrm_tmpl
69 
70    SPD is organized as a hash table (for policies that meet the minimum address
71    prefix length setting, net->xfrm.policy_hthresh).  Other policies are stored
72    in lists, sorted into an rbtree ordered by destination and source address networks.
73    See net/xfrm/xfrm_policy.c for details.
74 
75    (To be compatible with existing pfkeyv2 implementations,
76    many rules with priority of 0x7fffffff are allowed to exist and
77    such rules are ordered in an unpredictable way, thanks to bsd folks.)
78 
79    If "action" is "block", then we prohibit the flow, otherwise:
80    if "xfrms_nr" is zero, the flow passes untransformed. Otherwise, the
81    policy entry has a list of up to XFRM_MAX_DEPTH transformations,
82    described by templates xfrm_tmpl. Each template is resolved
83    to a complete xfrm_state (see below), and we pack the bundle of
84    transformations into a dst_entry returned to the requester.
85 
86    dst -. xfrm  .-> xfrm_state #1
87     |---. child .-> dst -. xfrm .-> xfrm_state #2
88                      |---. child .-> dst -. xfrm .-> xfrm_state #3
89                                       |---. child .-> NULL
90 
91 
92    Resolution of xfrm_tmpl
93    -----------------------
94    Template contains:
95    1. ->mode		Mode: transport or tunnel
96    2. ->id.proto	Protocol: AH/ESP/IPCOMP
97    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
98       Q: allow to resolve security gateway?
99    4. ->id.spi          If not zero, static SPI.
100    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
101    6. ->algos		List of allowed algos. Plain bitmask now.
102       Q: ealgos, aalgos, calgos. What a mess...
103    7. ->share		Sharing mode.
104       Q: how to implement private sharing mode? To add struct sock* to
105       flow id?
106 
107    Having this template, we search the SAD for entries with an
108    appropriate mode/proto/algo, permitted by the selector.
109    If no appropriate entry is found, one is requested from the key manager.
110 
111    PROBLEMS:
112    Q: How to find all the bundles referring to a physical path for
113       PMTU discovery? Seems, dst should contain list of all parents...
114       and enter to infinite locking hierarchy disaster.
115       No! It is easier, we will not search for them, let them find us.
116       We add genid to each dst plus pointer to genid of raw IP route,
117       pmtu disc will update pmtu on raw IP route and increase its genid.
118       dst_check() will see this for top level and trigger resyncing
119       metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
120  */
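/*
 * Editor's illustrative sketch (not part of the original header): walking the
 * bundle pictured above.  Each level is an xfrm_dst whose dst->xfrm points to
 * the xfrm_state applied at that level; xfrm_dst_child() (defined later in
 * this file) steps to the next level and returns NULL past the last one.
 *
 *	static void walk_bundle(struct dst_entry *dst)
 *	{
 *		while (dst) {
 *			struct xfrm_state *x = dst->xfrm;
 *
 *			if (x)
 *				pr_debug("proto %u spi 0x%08x\n",
 *					 x->id.proto, be32_to_cpu(x->id.spi));
 *			dst = xfrm_dst_child(dst);
 *		}
 *	}
 */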
121 
122 struct xfrm_state_walk {
123 	struct list_head	all;
124 	u8			state;
125 	u8			dying;
126 	u8			proto;
127 	u32			seq;
128 	struct xfrm_address_filter *filter;
129 };
130 
131 enum {
132 	XFRM_DEV_OFFLOAD_IN = 1,
133 	XFRM_DEV_OFFLOAD_OUT,
134 	XFRM_DEV_OFFLOAD_FWD,
135 };
136 
137 enum {
138 	XFRM_DEV_OFFLOAD_UNSPECIFIED,
139 	XFRM_DEV_OFFLOAD_CRYPTO,
140 	XFRM_DEV_OFFLOAD_PACKET,
141 };
142 
143 enum {
144 	XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
145 };
146 
147 struct xfrm_dev_offload {
148 	struct net_device	*dev;
149 	netdevice_tracker	dev_tracker;
150 	struct net_device	*real_dev;
151 	unsigned long		offload_handle;
152 	u8			dir : 2;
153 	u8			type : 2;
154 	u8			flags : 2;
155 };
156 
157 struct xfrm_mode {
158 	u8 encap;
159 	u8 family;
160 	u8 flags;
161 };
162 
163 /* Flags for xfrm_mode. */
164 enum {
165 	XFRM_MODE_FLAG_TUNNEL = 1,
166 };
167 
168 enum xfrm_replay_mode {
169 	XFRM_REPLAY_MODE_LEGACY,
170 	XFRM_REPLAY_MODE_BMP,
171 	XFRM_REPLAY_MODE_ESN,
172 };
173 
174 /* Full description of state of transformer. */
175 struct xfrm_state {
176 	possible_net_t		xs_net;
177 	union {
178 		struct hlist_node	gclist;
179 		struct hlist_node	bydst;
180 	};
181 	union {
182 		struct hlist_node	dev_gclist;
183 		struct hlist_node	bysrc;
184 	};
185 	struct hlist_node	byspi;
186 	struct hlist_node	byseq;
187 
188 	refcount_t		refcnt;
189 	spinlock_t		lock;
190 
191 	struct xfrm_id		id;
192 	struct xfrm_selector	sel;
193 	struct xfrm_mark	mark;
194 	u32			if_id;
195 	u32			tfcpad;
196 
197 	u32			genid;
198 
199 	/* Key manager bits */
200 	struct xfrm_state_walk	km;
201 
202 	/* Parameters of this state. */
203 	struct {
204 		u32		reqid;
205 		u8		mode;
206 		u8		replay_window;
207 		u8		aalgo, ealgo, calgo;
208 		u8		flags;
209 		u16		family;
210 		xfrm_address_t	saddr;
211 		int		header_len;
212 		int		trailer_len;
213 		u32		extra_flags;
214 		struct xfrm_mark	smark;
215 	} props;
216 
217 	struct xfrm_lifetime_cfg lft;
218 
219 	/* Data for transformer */
220 	struct xfrm_algo_auth	*aalg;
221 	struct xfrm_algo	*ealg;
222 	struct xfrm_algo	*calg;
223 	struct xfrm_algo_aead	*aead;
224 	const char		*geniv;
225 
226 	/* mapping change rate limiting */
227 	__be16 new_mapping_sport;
228 	u32 new_mapping;	/* seconds */
229 	u32 mapping_maxage;	/* seconds for input SA */
230 
231 	/* Data for encapsulator */
232 	struct xfrm_encap_tmpl	*encap;
233 	struct sock __rcu	*encap_sk;
234 
235 	/* NAT keepalive */
236 	u32			nat_keepalive_interval; /* seconds */
237 	time64_t		nat_keepalive_expiration;
238 
239 	/* Data for care-of address */
240 	xfrm_address_t	*coaddr;
241 
242 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
243 	struct xfrm_state	*tunnel;
244 
245 	/* If a tunnel, number of users + 1 */
246 	atomic_t		tunnel_users;
247 
248 	/* State for replay detection */
249 	struct xfrm_replay_state replay;
250 	struct xfrm_replay_state_esn *replay_esn;
251 
252 	/* Replay detection state at the time we sent the last notification */
253 	struct xfrm_replay_state preplay;
254 	struct xfrm_replay_state_esn *preplay_esn;
255 
256 	/* replay detection mode */
257 	enum xfrm_replay_mode    repl_mode;
258 	/* internal flag that only holds state for delayed aevent at the
259 	 * moment
260 	*/
261 	u32			xflags;
262 
263 	/* Replay detection notification settings */
264 	u32			replay_maxage;
265 	u32			replay_maxdiff;
266 
267 	/* Replay detection notification timer */
268 	struct timer_list	rtimer;
269 
270 	/* Statistics */
271 	struct xfrm_stats	stats;
272 
273 	struct xfrm_lifetime_cur curlft;
274 	struct hrtimer		mtimer;
275 
276 	struct xfrm_dev_offload xso;
277 
278 	/* used to fix curlft->add_time when changing date */
279 	long		saved_tmo;
280 
281 	/* Last used time */
282 	time64_t		lastused;
283 
284 	struct page_frag xfrag;
285 
286 	/* Reference to data common to all the instances of this
287 	 * transformer. */
288 	const struct xfrm_type	*type;
289 	struct xfrm_mode	inner_mode;
290 	struct xfrm_mode	inner_mode_iaf;
291 	struct xfrm_mode	outer_mode;
292 
293 	const struct xfrm_type_offload	*type_offload;
294 
295 	/* Security context */
296 	struct xfrm_sec_ctx	*security;
297 
298 	/* Private data of this transformer, format is opaque,
299 	 * interpreted by xfrm_type methods. */
300 	void			*data;
301 	u8			dir;
302 };
303 
304 static inline struct net *xs_net(struct xfrm_state *x)
305 {
306 	return read_pnet(&x->xs_net);
307 }
308 
309 /* xflags - make enum if more show up */
310 #define XFRM_TIME_DEFER	1
311 #define XFRM_SOFT_EXPIRE 2
312 
313 enum {
314 	XFRM_STATE_VOID,
315 	XFRM_STATE_ACQ,
316 	XFRM_STATE_VALID,
317 	XFRM_STATE_ERROR,
318 	XFRM_STATE_EXPIRED,
319 	XFRM_STATE_DEAD
320 };
321 
322 /* callback structure passed from either netlink or pfkey */
323 struct km_event {
324 	union {
325 		u32 hard;
326 		u32 proto;
327 		u32 byid;
328 		u32 aevent;
329 		u32 type;
330 	} data;
331 
332 	u32	seq;
333 	u32	portid;
334 	u32	event;
335 	struct net *net;
336 };
337 
338 struct xfrm_if_decode_session_result {
339 	struct net *net;
340 	u32 if_id;
341 };
342 
343 struct xfrm_if_cb {
344 	bool (*decode_session)(struct sk_buff *skb,
345 			       unsigned short family,
346 			       struct xfrm_if_decode_session_result *res);
347 };
348 
349 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
350 void xfrm_if_unregister_cb(void);
351 
352 struct xfrm_dst_lookup_params {
353 	struct net *net;
354 	int tos;
355 	int oif;
356 	xfrm_address_t *saddr;
357 	xfrm_address_t *daddr;
358 	u32 mark;
359 	__u8 ipproto;
360 	union flowi_uli uli;
361 };
362 
363 struct net_device;
364 struct xfrm_type;
365 struct xfrm_dst;
366 struct xfrm_policy_afinfo {
367 	struct dst_ops		*dst_ops;
368 	struct dst_entry	*(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
369 	int			(*get_saddr)(xfrm_address_t *saddr,
370 					     const struct xfrm_dst_lookup_params *params);
371 	int			(*fill_dst)(struct xfrm_dst *xdst,
372 					    struct net_device *dev,
373 					    const struct flowi *fl);
374 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
375 };
376 
377 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
378 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
379 void km_policy_notify(struct xfrm_policy *xp, int dir,
380 		      const struct km_event *c);
381 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
382 
383 struct xfrm_tmpl;
384 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
385 	     struct xfrm_policy *pol);
386 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
387 int __xfrm_state_delete(struct xfrm_state *x);
388 
389 struct xfrm_state_afinfo {
390 	u8				family;
391 	u8				proto;
392 
393 	const struct xfrm_type_offload *type_offload_esp;
394 
395 	const struct xfrm_type		*type_esp;
396 	const struct xfrm_type		*type_ipip;
397 	const struct xfrm_type		*type_ipip6;
398 	const struct xfrm_type		*type_comp;
399 	const struct xfrm_type		*type_ah;
400 	const struct xfrm_type		*type_routing;
401 	const struct xfrm_type		*type_dstopts;
402 
403 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
404 	int			(*transport_finish)(struct sk_buff *skb,
405 						    int async);
406 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
407 };
408 
409 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
410 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
411 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
412 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
413 
414 struct xfrm_input_afinfo {
415 	u8			family;
416 	bool			is_ipip;
417 	int			(*callback)(struct sk_buff *skb, u8 protocol,
418 					    int err);
419 };
420 
421 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
422 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
423 
424 void xfrm_flush_gc(void);
425 void xfrm_state_delete_tunnel(struct xfrm_state *x);
426 
427 struct xfrm_type {
428 	struct module		*owner;
429 	u8			proto;
430 	u8			flags;
431 #define XFRM_TYPE_NON_FRAGMENT	1
432 #define XFRM_TYPE_REPLAY_PROT	2
433 #define XFRM_TYPE_LOCAL_COADDR	4
434 #define XFRM_TYPE_REMOTE_COADDR	8
435 
436 	int			(*init_state)(struct xfrm_state *x,
437 					      struct netlink_ext_ack *extack);
438 	void			(*destructor)(struct xfrm_state *);
439 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
440 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
441 	int			(*reject)(struct xfrm_state *, struct sk_buff *,
442 					  const struct flowi *);
443 };
444 
445 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
446 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
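/*
 * Editor's illustrative sketch (hypothetical names, not part of the original
 * header): the rough shape of an xfrm_type implementation; the real ones are
 * ESP, AH and IPcomp under net/ipv4 and net/ipv6.
 *
 *	static int foo_init_state(struct xfrm_state *x,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_input(struct xfrm_state *x, struct sk_buff *skb)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_output(struct xfrm_state *x, struct sk_buff *skb)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct xfrm_type foo_type = {
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,
 *		.init_state	= foo_init_state,
 *		.input		= foo_input,
 *		.output		= foo_output,
 *	};
 *
 * registered with xfrm_register_type(&foo_type, AF_INET) from module init and
 * removed with xfrm_unregister_type(&foo_type, AF_INET) on exit.
 */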
447 
448 struct xfrm_type_offload {
449 	struct module	*owner;
450 	u8		proto;
451 	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
452 	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
453 	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
454 };
455 
456 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
457 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
458 
459 static inline int xfrm_af2proto(unsigned int family)
460 {
461 	switch(family) {
462 	case AF_INET:
463 		return IPPROTO_IPIP;
464 	case AF_INET6:
465 		return IPPROTO_IPV6;
466 	default:
467 		return 0;
468 	}
469 }
470 
471 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
472 {
473 	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
474 	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
475 		return &x->inner_mode;
476 	else
477 		return &x->inner_mode_iaf;
478 }
479 
480 struct xfrm_tmpl {
481 /* id in template is interpreted as:
482  * daddr - destination of tunnel, may be zero for transport mode.
483  * spi   - zero to acquire spi. Not zero if spi is static, then
484  *	   daddr must be fixed too.
485  * proto - AH/ESP/IPCOMP
486  */
487 	struct xfrm_id		id;
488 
489 /* Source address of tunnel. Ignored, if it is not a tunnel. */
490 	xfrm_address_t		saddr;
491 
492 	unsigned short		encap_family;
493 
494 	u32			reqid;
495 
496 /* Mode: transport, tunnel etc. */
497 	u8			mode;
498 
499 /* Sharing mode: unique, this session only, this user only etc. */
500 	u8			share;
501 
502 /* May skip this transformation if no SA is found */
503 	u8			optional;
504 
505 /* Skip aalgos/ealgos/calgos checks. */
506 	u8			allalgs;
507 
508 /* Bit mask of algos allowed for acquisition */
509 	u32			aalgos;
510 	u32			ealgos;
511 	u32			calgos;
512 };
513 
514 #define XFRM_MAX_DEPTH		6
515 #define XFRM_MAX_OFFLOAD_DEPTH	1
516 
517 struct xfrm_policy_walk_entry {
518 	struct list_head	all;
519 	u8			dead;
520 };
521 
522 struct xfrm_policy_walk {
523 	struct xfrm_policy_walk_entry walk;
524 	u8 type;
525 	u32 seq;
526 };
527 
528 struct xfrm_policy_queue {
529 	struct sk_buff_head	hold_queue;
530 	struct timer_list	hold_timer;
531 	unsigned long		timeout;
532 };
533 
534 /**
535  *	struct xfrm_policy - xfrm policy
536  *	@xp_net: network namespace the policy lives in
537  *	@bydst: hlist node for SPD hash table or rbtree list
538  *	@byidx: hlist node for index hash table
539  *	@lock: serialize changes to policy structure members
540  *	@refcnt: reference count, freed once it reaches 0
541  *	@pos: kernel internal tie-breaker to determine age of policy
542  *	@timer: timer
543  *	@genid: generation, used to invalidate old policies
544  *	@priority: priority, set by userspace
545  *	@index:  policy index (autogenerated)
546  *	@if_id: virtual xfrm interface id
547  *	@mark: packet mark
548  *	@selector: selector
549  *	@lft: lifetime configuration data
550  *	@curlft: lifetime state
551  *	@walk: list head on pernet policy list
552  *	@polq: queue to hold packets while an acquire operation is in progress
553  *	@bydst_reinsert: policy tree node needs to be merged
554  *	@type: XFRM_POLICY_TYPE_MAIN or _SUB
555  *	@action: XFRM_POLICY_ALLOW or _BLOCK
556  *	@flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
557  *	@xfrm_nr: number of used templates in @xfrm_vec
558  *	@family: protocol family
559  *	@security: SELinux security label
560  *	@xfrm_vec: array of templates to resolve state
561  *	@rcu: rcu head, used to defer memory release
562  *	@xdo: hardware offload state
563  */
564 struct xfrm_policy {
565 	possible_net_t		xp_net;
566 	struct hlist_node	bydst;
567 	struct hlist_node	byidx;
568 
569 	/* This lock only affects elements except for entry. */
570 	rwlock_t		lock;
571 	refcount_t		refcnt;
572 	u32			pos;
573 	struct timer_list	timer;
574 
575 	atomic_t		genid;
576 	u32			priority;
577 	u32			index;
578 	u32			if_id;
579 	struct xfrm_mark	mark;
580 	struct xfrm_selector	selector;
581 	struct xfrm_lifetime_cfg lft;
582 	struct xfrm_lifetime_cur curlft;
583 	struct xfrm_policy_walk_entry walk;
584 	struct xfrm_policy_queue polq;
585 	bool                    bydst_reinsert;
586 	u8			type;
587 	u8			action;
588 	u8			flags;
589 	u8			xfrm_nr;
590 	u16			family;
591 	struct xfrm_sec_ctx	*security;
592 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
593 	struct rcu_head		rcu;
594 
595 	struct xfrm_dev_offload xdo;
596 };
597 
598 static inline struct net *xp_net(const struct xfrm_policy *xp)
599 {
600 	return read_pnet(&xp->xp_net);
601 }
602 
603 struct xfrm_kmaddress {
604 	xfrm_address_t          local;
605 	xfrm_address_t          remote;
606 	u32			reserved;
607 	u16			family;
608 };
609 
610 struct xfrm_migrate {
611 	xfrm_address_t		old_daddr;
612 	xfrm_address_t		old_saddr;
613 	xfrm_address_t		new_daddr;
614 	xfrm_address_t		new_saddr;
615 	u8			proto;
616 	u8			mode;
617 	u16			reserved;
618 	u32			reqid;
619 	u16			old_family;
620 	u16			new_family;
621 };
622 
623 #define XFRM_KM_TIMEOUT                30
624 /* what happened */
625 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
626 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
627 
628 /* default aevent timeout in units of 100ms */
629 #define XFRM_AE_ETIME			10
630 /* Async Event timer multiplier */
631 #define XFRM_AE_ETH_M			10
632 /* default seq threshold size */
633 #define XFRM_AE_SEQT_SIZE		2
634 
635 struct xfrm_mgr {
636 	struct list_head	list;
637 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
638 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
639 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
640 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
641 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
642 	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
643 	int			(*migrate)(const struct xfrm_selector *sel,
644 					   u8 dir, u8 type,
645 					   const struct xfrm_migrate *m,
646 					   int num_bundles,
647 					   const struct xfrm_kmaddress *k,
648 					   const struct xfrm_encap_tmpl *encap);
649 	bool			(*is_alive)(const struct km_event *c);
650 };
651 
652 void xfrm_register_km(struct xfrm_mgr *km);
653 void xfrm_unregister_km(struct xfrm_mgr *km);
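/*
 * Editor's illustrative sketch (hypothetical names): a key manager such as
 * af_key or xfrm_user plugs into the state machine by registering an
 * xfrm_mgr along these lines.
 *
 *	static int foo_km_notify(struct xfrm_state *x, const struct km_event *c)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_km_acquire(struct xfrm_state *x, struct xfrm_tmpl *t,
 *				  struct xfrm_policy *xp)
 *	{
 *		return -EOPNOTSUPP;
 *	}
 *
 *	static struct xfrm_mgr foo_km = {
 *		.notify		= foo_km_notify,
 *		.acquire	= foo_km_acquire,
 *	};
 *
 * paired with xfrm_register_km(&foo_km) / xfrm_unregister_km(&foo_km).
 */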
654 
655 struct xfrm_tunnel_skb_cb {
656 	union {
657 		struct inet_skb_parm h4;
658 		struct inet6_skb_parm h6;
659 	} header;
660 
661 	union {
662 		struct ip_tunnel *ip4;
663 		struct ip6_tnl *ip6;
664 	} tunnel;
665 };
666 
667 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
668 
669 /*
670  * This structure is used for the duration where packets are being
671  * transformed by IPsec.  As soon as the packet leaves IPsec the
672  * area beyond the generic IP part may be overwritten.
673  */
674 struct xfrm_skb_cb {
675 	struct xfrm_tunnel_skb_cb header;
676 
677         /* Sequence number for replay protection. */
678 	union {
679 		struct {
680 			__u32 low;
681 			__u32 hi;
682 		} output;
683 		struct {
684 			__be32 low;
685 			__be32 hi;
686 		} input;
687 	} seq;
688 };
689 
690 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
691 
692 /*
693  * This structure is used by the afinfo prepare_input/prepare_output functions
694  * to transmit header information to the mode input/output functions.
695  */
696 struct xfrm_mode_skb_cb {
697 	struct xfrm_tunnel_skb_cb header;
698 
699 	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
700 	__be16 id;
701 	__be16 frag_off;
702 
703 	/* IP header length (excluding options or extension headers). */
704 	u8 ihl;
705 
706 	/* TOS for IPv4, class for IPv6. */
707 	u8 tos;
708 
709 	/* TTL for IPv4, hop limit for IPv6. */
710 	u8 ttl;
711 
712 	/* Protocol for IPv4, NH for IPv6. */
713 	u8 protocol;
714 
715 	/* Option length for IPv4, zero for IPv6. */
716 	u8 optlen;
717 
718 	/* Used by IPv6 only, zero for IPv4. */
719 	u8 flow_lbl[3];
720 };
721 
722 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
723 
724 /*
725  * This structure is used by the input processing to locate the SPI and
726  * related information.
727  */
728 struct xfrm_spi_skb_cb {
729 	struct xfrm_tunnel_skb_cb header;
730 
731 	unsigned int daddroff;
732 	unsigned int family;
733 	__be32 seq;
734 };
735 
736 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
737 
738 #ifdef CONFIG_AUDITSYSCALL
739 static inline struct audit_buffer *xfrm_audit_start(const char *op)
740 {
741 	struct audit_buffer *audit_buf = NULL;
742 
743 	if (audit_enabled == AUDIT_OFF)
744 		return NULL;
745 	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
746 				    AUDIT_MAC_IPSEC_EVENT);
747 	if (audit_buf == NULL)
748 		return NULL;
749 	audit_log_format(audit_buf, "op=%s", op);
750 	return audit_buf;
751 }
752 
753 static inline void xfrm_audit_helper_usrinfo(bool task_valid,
754 					     struct audit_buffer *audit_buf)
755 {
756 	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
757 					    audit_get_loginuid(current) :
758 					    INVALID_UID);
759 	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
760 		AUDIT_SID_UNSET;
761 
762 	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
763 	audit_log_task_context(audit_buf);
764 }
765 
766 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
767 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
768 			      bool task_valid);
769 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
770 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
771 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
772 				      struct sk_buff *skb);
773 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
774 			     __be32 net_seq);
775 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
776 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
777 			       __be32 net_seq);
778 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
779 			      u8 proto);
780 #else
781 
782 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
783 					 bool task_valid)
784 {
785 }
786 
787 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
788 					    bool task_valid)
789 {
790 }
791 
792 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
793 					bool task_valid)
794 {
795 }
796 
797 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
798 					   bool task_valid)
799 {
800 }
801 
802 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
803 					     struct sk_buff *skb)
804 {
805 }
806 
807 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
808 					   struct sk_buff *skb, __be32 net_seq)
809 {
810 }
811 
812 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
813 				      u16 family)
814 {
815 }
816 
817 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
818 				      __be32 net_spi, __be32 net_seq)
819 {
820 }
821 
822 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
823 				     struct sk_buff *skb, u8 proto)
824 {
825 }
826 #endif /* CONFIG_AUDITSYSCALL */
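/*
 * Editor's illustrative sketch: how the helpers above are typically combined
 * when emitting an audit record (the op string and extra field are examples
 * only).
 *
 *	struct audit_buffer *audit_buf;
 *
 *	audit_buf = xfrm_audit_start("SPD-add");
 *	if (audit_buf == NULL)
 *		return;
 *	audit_log_format(audit_buf, " res=%u", result);
 *	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
 *	audit_log_end(audit_buf);
 */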
827 
828 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
829 {
830 	if (likely(policy != NULL))
831 		refcount_inc(&policy->refcnt);
832 }
833 
834 void xfrm_policy_destroy(struct xfrm_policy *policy);
835 
836 static inline void xfrm_pol_put(struct xfrm_policy *policy)
837 {
838 	if (refcount_dec_and_test(&policy->refcnt))
839 		xfrm_policy_destroy(policy);
840 }
841 
842 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
843 {
844 	int i;
845 	for (i = npols - 1; i >= 0; --i)
846 		xfrm_pol_put(pols[i]);
847 }
848 
849 void __xfrm_state_destroy(struct xfrm_state *, bool);
850 
851 static inline void __xfrm_state_put(struct xfrm_state *x)
852 {
853 	refcount_dec(&x->refcnt);
854 }
855 
856 static inline void xfrm_state_put(struct xfrm_state *x)
857 {
858 	if (refcount_dec_and_test(&x->refcnt))
859 		__xfrm_state_destroy(x, false);
860 }
861 
862 static inline void xfrm_state_put_sync(struct xfrm_state *x)
863 {
864 	if (refcount_dec_and_test(&x->refcnt))
865 		__xfrm_state_destroy(x, true);
866 }
867 
868 static inline void xfrm_state_hold(struct xfrm_state *x)
869 {
870 	refcount_inc(&x->refcnt);
871 }
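/*
 * Editor's note (illustrative): lookup helpers such as xfrm_state_lookup()
 * (declared later in this file) return a state with a reference already held,
 * so callers pair the lookup with xfrm_state_put() once they are done:
 *
 *	struct xfrm_state *x;
 *
 *	x = xfrm_state_lookup(net, 0, daddr, spi, IPPROTO_ESP, AF_INET);
 *	if (x) {
 *		// ... use x ...
 *		xfrm_state_put(x);
 *	}
 */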
872 
873 static inline bool addr_match(const void *token1, const void *token2,
874 			      unsigned int prefixlen)
875 {
876 	const __be32 *a1 = token1;
877 	const __be32 *a2 = token2;
878 	unsigned int pdw;
879 	unsigned int pbi;
880 
881 	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
882 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
883 
884 	if (pdw)
885 		if (memcmp(a1, a2, pdw << 2))
886 			return false;
887 
888 	if (pbi) {
889 		__be32 mask;
890 
891 		mask = htonl((0xffffffff) << (32 - pbi));
892 
893 		if ((a1[pdw] ^ a2[pdw]) & mask)
894 			return false;
895 	}
896 
897 	return true;
898 }
899 
900 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
901 {
902 	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
903 	if (sizeof(long) == 4 && prefixlen == 0)
904 		return true;
905 	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
906 }
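/*
 * Worked example (editor's note): with prefixlen 24 the mask is
 * htonl(0xffffff00), so 10.0.0.1 and 10.0.0.200 compare equal, while the same
 * pair differs under prefixlen 32; prefixlen 0 matches any two addresses.
 *
 *	addr4_match(htonl(0x0a000001), htonl(0x0a0000c8), 24);	// true
 *	addr4_match(htonl(0x0a000001), htonl(0x0a0000c8), 32);	// false
 */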
907 
908 static __inline__
909 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
910 {
911 	__be16 port;
912 	switch(fl->flowi_proto) {
913 	case IPPROTO_TCP:
914 	case IPPROTO_UDP:
915 	case IPPROTO_UDPLITE:
916 	case IPPROTO_SCTP:
917 		port = uli->ports.sport;
918 		break;
919 	case IPPROTO_ICMP:
920 	case IPPROTO_ICMPV6:
921 		port = htons(uli->icmpt.type);
922 		break;
923 	case IPPROTO_MH:
924 		port = htons(uli->mht.type);
925 		break;
926 	case IPPROTO_GRE:
927 		port = htons(ntohl(uli->gre_key) >> 16);
928 		break;
929 	default:
930 		port = 0;	/*XXX*/
931 	}
932 	return port;
933 }
934 
935 static __inline__
936 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
937 {
938 	__be16 port;
939 	switch(fl->flowi_proto) {
940 	case IPPROTO_TCP:
941 	case IPPROTO_UDP:
942 	case IPPROTO_UDPLITE:
943 	case IPPROTO_SCTP:
944 		port = uli->ports.dport;
945 		break;
946 	case IPPROTO_ICMP:
947 	case IPPROTO_ICMPV6:
948 		port = htons(uli->icmpt.code);
949 		break;
950 	case IPPROTO_GRE:
951 		port = htons(ntohl(uli->gre_key) & 0xffff);
952 		break;
953 	default:
954 		port = 0;	/*XXX*/
955 	}
956 	return port;
957 }
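/*
 * Editor's note (illustrative): for protocols without real ports, the helpers
 * above overload the selector "ports".  For an ICMP echo request (type 8,
 * code 0), assuming fl describes that flow:
 *
 *	xfrm_flowi_sport(fl, &fl->u.ip4.uli) == htons(8)
 *	xfrm_flowi_dport(fl, &fl->u.ip4.uli) == htons(0)
 *
 * and for GRE the upper/lower 16 bits of the key land in sport/dport.
 */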
958 
959 bool xfrm_selector_match(const struct xfrm_selector *sel,
960 			 const struct flowi *fl, unsigned short family);
961 
962 #ifdef CONFIG_SECURITY_NETWORK_XFRM
963 /*	If neither has a context --> match
964  * 	Otherwise, both must have a context and the sids, doi, alg must match
965  */
966 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
967 {
968 	return ((!s1 && !s2) ||
969 		(s1 && s2 &&
970 		 (s1->ctx_sid == s2->ctx_sid) &&
971 		 (s1->ctx_doi == s2->ctx_doi) &&
972 		 (s1->ctx_alg == s2->ctx_alg)));
973 }
974 #else
975 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
976 {
977 	return true;
978 }
979 #endif
980 
981 /* A struct encoding a bundle of transformations to apply to some set of flows.
982  *
983  * xdst->child points to the next element of the bundle.
984  * dst->xfrm   points to an instance of a transformer.
985  *
986  * Due to unfortunate limitations of the current routing cache, which we
987  * have no time to fix, it mirrors struct rtable and is bound to the same
988  * routing key, including saddr and daddr. However, we can have many
989  * bundles differing by session id. All the bundles grow from a parent
990  * policy rule.
991  */
992 struct xfrm_dst {
993 	union {
994 		struct dst_entry	dst;
995 		struct rtable		rt;
996 		struct rt6_info		rt6;
997 	} u;
998 	struct dst_entry *route;
999 	struct dst_entry *child;
1000 	struct dst_entry *path;
1001 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1002 	int num_pols, num_xfrms;
1003 	u32 xfrm_genid;
1004 	u32 policy_genid;
1005 	u32 route_mtu_cached;
1006 	u32 child_mtu_cached;
1007 	u32 route_cookie;
1008 	u32 path_cookie;
1009 };
1010 
1011 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
1012 {
1013 #ifdef CONFIG_XFRM
1014 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1015 		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
1016 
1017 		return xdst->path;
1018 	}
1019 #endif
1020 	return (struct dst_entry *) dst;
1021 }
1022 
1023 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
1024 {
1025 #ifdef CONFIG_XFRM
1026 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1027 		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1028 		return xdst->child;
1029 	}
1030 #endif
1031 	return NULL;
1032 }
1033 
1034 #ifdef CONFIG_XFRM
1035 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
1036 {
1037 	xdst->child = child;
1038 }
1039 
1040 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
1041 {
1042 	xfrm_pols_put(xdst->pols, xdst->num_pols);
1043 	dst_release(xdst->route);
1044 	if (likely(xdst->u.dst.xfrm))
1045 		xfrm_state_put(xdst->u.dst.xfrm);
1046 }
1047 #endif
1048 
1049 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1050 
1051 struct xfrm_if_parms {
1052 	int link;		/* ifindex of underlying L2 interface */
1053 	u32 if_id;		/* interface identifier */
1054 	bool collect_md;
1055 };
1056 
1057 struct xfrm_if {
1058 	struct xfrm_if __rcu *next;	/* next interface in list */
1059 	struct net_device *dev;		/* virtual device associated with interface */
1060 	struct net *net;		/* netns for packet i/o */
1061 	struct xfrm_if_parms p;		/* interface parms */
1062 
1063 	struct gro_cells gro_cells;
1064 };
1065 
1066 struct xfrm_offload {
1067 	/* Output sequence number for replay protection on offloading. */
1068 	struct {
1069 		__u32 low;
1070 		__u32 hi;
1071 	} seq;
1072 
1073 	__u32			flags;
1074 #define	SA_DELETE_REQ		1
1075 #define	CRYPTO_DONE		2
1076 #define	CRYPTO_NEXT_DONE	4
1077 #define	CRYPTO_FALLBACK		8
1078 #define	XFRM_GSO_SEGMENT	16
1079 #define	XFRM_GRO		32
1080 /* 64 is free */
1081 #define	XFRM_DEV_RESUME		128
1082 #define	XFRM_XMIT		256
1083 
1084 	__u32			status;
1085 #define CRYPTO_SUCCESS				1
1086 #define CRYPTO_GENERIC_ERROR			2
1087 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
1088 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
1089 #define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
1090 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
1091 #define CRYPTO_INVALID_PACKET_SYNTAX		64
1092 #define CRYPTO_INVALID_PROTOCOL			128
1093 
1094 	/* Used to keep whole l2 header for transport mode GRO */
1095 	__u32			orig_mac_len;
1096 
1097 	__u8			proto;
1098 	__u8			inner_ipproto;
1099 };
1100 
1101 struct sec_path {
1102 	int			len;
1103 	int			olen;
1104 	int			verified_cnt;
1105 
1106 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
1107 	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
1108 };
1109 
1110 struct sec_path *secpath_set(struct sk_buff *skb);
1111 
1112 static inline void
1113 secpath_reset(struct sk_buff *skb)
1114 {
1115 #ifdef CONFIG_XFRM
1116 	skb_ext_del(skb, SKB_EXT_SEC_PATH);
1117 #endif
1118 }
1119 
1120 static inline int
1121 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1122 {
1123 	switch (family) {
1124 	case AF_INET:
1125 		return addr->a4 == 0;
1126 	case AF_INET6:
1127 		return ipv6_addr_any(&addr->in6);
1128 	}
1129 	return 0;
1130 }
1131 
1132 static inline int
1133 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1134 {
1135 	return	(tmpl->saddr.a4 &&
1136 		 tmpl->saddr.a4 != x->props.saddr.a4);
1137 }
1138 
1139 static inline int
1140 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1141 {
1142 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1143 		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1144 }
1145 
1146 static inline int
1147 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1148 {
1149 	switch (family) {
1150 	case AF_INET:
1151 		return __xfrm4_state_addr_cmp(tmpl, x);
1152 	case AF_INET6:
1153 		return __xfrm6_state_addr_cmp(tmpl, x);
1154 	}
1155 	return !0;
1156 }
1157 
1158 #ifdef CONFIG_XFRM
1159 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1160 {
1161 	struct sec_path *sp = skb_sec_path(skb);
1162 
1163 	return sp->xvec[sp->len - 1];
1164 }
1165 #endif
1166 
1167 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1168 {
1169 #ifdef CONFIG_XFRM
1170 	struct sec_path *sp = skb_sec_path(skb);
1171 
1172 	if (!sp || !sp->olen || sp->len != sp->olen)
1173 		return NULL;
1174 
1175 	return &sp->ovec[sp->olen - 1];
1176 #else
1177 	return NULL;
1178 #endif
1179 }
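/*
 * Editor's illustrative sketch: the usual test for "the device already did
 * the crypto" on receive, mirroring the pattern __xfrm_policy_check2() uses
 * below for packet offload:
 *
 *	struct xfrm_offload *xo = xfrm_offload(skb);
 *
 *	if (xo && (xo->flags & CRYPTO_DONE) && (xo->status & CRYPTO_SUCCESS))
 *		; // packet was already authenticated/decrypted in hardware
 */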
1180 
1181 #ifdef CONFIG_XFRM
1182 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1183 			unsigned short family);
1184 
1185 static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
1186 					 int dir)
1187 {
1188 	if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
1189 		return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
1190 
1191 	return false;
1192 }
1193 
1194 static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
1195 					     int dir, unsigned short family)
1196 {
1197 	if (dir != XFRM_POLICY_OUT && family == AF_INET) {
1198 		/* same dst may be used for traffic originating from
1199 		 * devices with different policy settings.
1200 		 */
1201 		return IPCB(skb)->flags & IPSKB_NOPOLICY;
1202 	}
1203 	return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
1204 }
1205 
1206 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1207 				       struct sk_buff *skb,
1208 				       unsigned int family, int reverse)
1209 {
1210 	struct net *net = dev_net(skb->dev);
1211 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1212 	struct xfrm_offload *xo = xfrm_offload(skb);
1213 	struct xfrm_state *x;
1214 
1215 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1216 		return __xfrm_policy_check(sk, ndir, skb, family);
1217 
1218 	if (xo) {
1219 		x = xfrm_input_state(skb);
1220 		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
1221 			return (xo->flags & CRYPTO_DONE) &&
1222 			       (xo->status & CRYPTO_SUCCESS);
1223 	}
1224 
1225 	return __xfrm_check_nopolicy(net, skb, dir) ||
1226 	       __xfrm_check_dev_nopolicy(skb, dir, family) ||
1227 	       __xfrm_policy_check(sk, ndir, skb, family);
1228 }
1229 
1230 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1231 {
1232 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1233 }
1234 
1235 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1236 {
1237 	return xfrm_policy_check(sk, dir, skb, AF_INET);
1238 }
1239 
1240 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1241 {
1242 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1243 }
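/*
 * Editor's note (illustrative): protocol receive paths gate delivery on the
 * inbound policy with these helpers, along the lines of:
 *
 *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 *		goto drop;
 */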
1244 
1245 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1246 					     struct sk_buff *skb)
1247 {
1248 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1249 }
1250 
1251 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1252 					     struct sk_buff *skb)
1253 {
1254 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1255 }
1256 
1257 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1258 			  unsigned int family, int reverse);
1259 
1260 static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1261 				      unsigned int family)
1262 {
1263 	return __xfrm_decode_session(net, skb, fl, family, 0);
1264 }
1265 
1266 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1267 					      struct flowi *fl,
1268 					      unsigned int family)
1269 {
1270 	return __xfrm_decode_session(net, skb, fl, family, 1);
1271 }
1272 
1273 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1274 
1275 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1276 {
1277 	struct net *net = dev_net(skb->dev);
1278 
1279 	if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
1280 	    net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
1281 		return true;
1282 
1283 	return (skb_dst(skb)->flags & DST_NOXFRM) ||
1284 	       __xfrm_route_forward(skb, family);
1285 }
1286 
1287 static inline int xfrm4_route_forward(struct sk_buff *skb)
1288 {
1289 	return xfrm_route_forward(skb, AF_INET);
1290 }
1291 
1292 static inline int xfrm6_route_forward(struct sk_buff *skb)
1293 {
1294 	return xfrm_route_forward(skb, AF_INET6);
1295 }
1296 
1297 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1298 
1299 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1300 {
1301 	if (!sk_fullsock(osk))
1302 		return 0;
1303 	sk->sk_policy[0] = NULL;
1304 	sk->sk_policy[1] = NULL;
1305 	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1306 		return __xfrm_sk_clone_policy(sk, osk);
1307 	return 0;
1308 }
1309 
1310 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1311 
1312 static inline void xfrm_sk_free_policy(struct sock *sk)
1313 {
1314 	struct xfrm_policy *pol;
1315 
1316 	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1317 	if (unlikely(pol != NULL)) {
1318 		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1319 		sk->sk_policy[0] = NULL;
1320 	}
1321 	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1322 	if (unlikely(pol != NULL)) {
1323 		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1324 		sk->sk_policy[1] = NULL;
1325 	}
1326 }
1327 
1328 #else
1329 
1330 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1331 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1332 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1333 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1334 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1335 {
1336 	return 1;
1337 }
1338 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1339 {
1340 	return 1;
1341 }
1342 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1343 {
1344 	return 1;
1345 }
1346 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1347 					      struct flowi *fl,
1348 					      unsigned int family)
1349 {
1350 	return -ENOSYS;
1351 }
1352 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1353 					     struct sk_buff *skb)
1354 {
1355 	return 1;
1356 }
1357 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1358 					     struct sk_buff *skb)
1359 {
1360 	return 1;
1361 }
1362 #endif
1363 
1364 static __inline__
1365 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1366 {
1367 	switch (family){
1368 	case AF_INET:
1369 		return (xfrm_address_t *)&fl->u.ip4.daddr;
1370 	case AF_INET6:
1371 		return (xfrm_address_t *)&fl->u.ip6.daddr;
1372 	}
1373 	return NULL;
1374 }
1375 
1376 static __inline__
1377 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1378 {
1379 	switch (family){
1380 	case AF_INET:
1381 		return (xfrm_address_t *)&fl->u.ip4.saddr;
1382 	case AF_INET6:
1383 		return (xfrm_address_t *)&fl->u.ip6.saddr;
1384 	}
1385 	return NULL;
1386 }
1387 
1388 static __inline__
1389 void xfrm_flowi_addr_get(const struct flowi *fl,
1390 			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1391 			 unsigned short family)
1392 {
1393 	switch(family) {
1394 	case AF_INET:
1395 		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1396 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1397 		break;
1398 	case AF_INET6:
1399 		saddr->in6 = fl->u.ip6.saddr;
1400 		daddr->in6 = fl->u.ip6.daddr;
1401 		break;
1402 	}
1403 }
1404 
1405 static __inline__ int
1406 __xfrm4_state_addr_check(const struct xfrm_state *x,
1407 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1408 {
1409 	if (daddr->a4 == x->id.daddr.a4 &&
1410 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1411 		return 1;
1412 	return 0;
1413 }
1414 
1415 static __inline__ int
1416 __xfrm6_state_addr_check(const struct xfrm_state *x,
1417 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1418 {
1419 	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1420 	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1421 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1422 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1423 		return 1;
1424 	return 0;
1425 }
1426 
1427 static __inline__ int
1428 xfrm_state_addr_check(const struct xfrm_state *x,
1429 		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1430 		      unsigned short family)
1431 {
1432 	switch (family) {
1433 	case AF_INET:
1434 		return __xfrm4_state_addr_check(x, daddr, saddr);
1435 	case AF_INET6:
1436 		return __xfrm6_state_addr_check(x, daddr, saddr);
1437 	}
1438 	return 0;
1439 }
1440 
1441 static __inline__ int
1442 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1443 			   unsigned short family)
1444 {
1445 	switch (family) {
1446 	case AF_INET:
1447 		return __xfrm4_state_addr_check(x,
1448 						(const xfrm_address_t *)&fl->u.ip4.daddr,
1449 						(const xfrm_address_t *)&fl->u.ip4.saddr);
1450 	case AF_INET6:
1451 		return __xfrm6_state_addr_check(x,
1452 						(const xfrm_address_t *)&fl->u.ip6.daddr,
1453 						(const xfrm_address_t *)&fl->u.ip6.saddr);
1454 	}
1455 	return 0;
1456 }
1457 
1458 static inline int xfrm_state_kern(const struct xfrm_state *x)
1459 {
1460 	return atomic_read(&x->tunnel_users);
1461 }
1462 
1463 static inline bool xfrm_id_proto_valid(u8 proto)
1464 {
1465 	switch (proto) {
1466 	case IPPROTO_AH:
1467 	case IPPROTO_ESP:
1468 	case IPPROTO_COMP:
1469 #if IS_ENABLED(CONFIG_IPV6)
1470 	case IPPROTO_ROUTING:
1471 	case IPPROTO_DSTOPTS:
1472 #endif
1473 		return true;
1474 	default:
1475 		return false;
1476 	}
1477 }
1478 
1479 /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
1480 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1481 {
1482 	return (!userproto || proto == userproto ||
1483 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1484 						  proto == IPPROTO_ESP ||
1485 						  proto == IPPROTO_COMP)));
1486 }
1487 
1488 /*
1489  * xfrm algorithm information
1490  */
1491 struct xfrm_algo_aead_info {
1492 	char *geniv;
1493 	u16 icv_truncbits;
1494 };
1495 
1496 struct xfrm_algo_auth_info {
1497 	u16 icv_truncbits;
1498 	u16 icv_fullbits;
1499 };
1500 
1501 struct xfrm_algo_encr_info {
1502 	char *geniv;
1503 	u16 blockbits;
1504 	u16 defkeybits;
1505 };
1506 
1507 struct xfrm_algo_comp_info {
1508 	u16 threshold;
1509 };
1510 
1511 struct xfrm_algo_desc {
1512 	char *name;
1513 	char *compat;
1514 	u8 available:1;
1515 	u8 pfkey_supported:1;
1516 	union {
1517 		struct xfrm_algo_aead_info aead;
1518 		struct xfrm_algo_auth_info auth;
1519 		struct xfrm_algo_encr_info encr;
1520 		struct xfrm_algo_comp_info comp;
1521 	} uinfo;
1522 	struct sadb_alg desc;
1523 };
1524 
1525 /* XFRM protocol handlers.  */
1526 struct xfrm4_protocol {
1527 	int (*handler)(struct sk_buff *skb);
1528 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1529 			     int encap_type);
1530 	int (*cb_handler)(struct sk_buff *skb, int err);
1531 	int (*err_handler)(struct sk_buff *skb, u32 info);
1532 
1533 	struct xfrm4_protocol __rcu *next;
1534 	int priority;
1535 };
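/*
 * Editor's illustrative sketch (hypothetical names): how an IPsec protocol
 * hooks its IPv4 input path up through this structure; the registration
 * helpers are declared near the end of this file.
 *
 *	static struct xfrm4_protocol foo_esp4_protocol = {
 *		.handler	= xfrm4_rcv,
 *		.input_handler	= xfrm_input,
 *		.priority	= 0,
 *	};
 *
 *	// xfrm4_protocol_register(&foo_esp4_protocol, IPPROTO_ESP);
 */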
1536 
1537 struct xfrm6_protocol {
1538 	int (*handler)(struct sk_buff *skb);
1539 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1540 			     int encap_type);
1541 	int (*cb_handler)(struct sk_buff *skb, int err);
1542 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1543 			   u8 type, u8 code, int offset, __be32 info);
1544 
1545 	struct xfrm6_protocol __rcu *next;
1546 	int priority;
1547 };
1548 
1549 /* XFRM tunnel handlers.  */
1550 struct xfrm_tunnel {
1551 	int (*handler)(struct sk_buff *skb);
1552 	int (*cb_handler)(struct sk_buff *skb, int err);
1553 	int (*err_handler)(struct sk_buff *skb, u32 info);
1554 
1555 	struct xfrm_tunnel __rcu *next;
1556 	int priority;
1557 };
1558 
1559 struct xfrm6_tunnel {
1560 	int (*handler)(struct sk_buff *skb);
1561 	int (*cb_handler)(struct sk_buff *skb, int err);
1562 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1563 			   u8 type, u8 code, int offset, __be32 info);
1564 	struct xfrm6_tunnel __rcu *next;
1565 	int priority;
1566 };
1567 
1568 void xfrm_init(void);
1569 void xfrm4_init(void);
1570 int xfrm_state_init(struct net *net);
1571 void xfrm_state_fini(struct net *net);
1572 void xfrm4_state_init(void);
1573 void xfrm4_protocol_init(void);
1574 #ifdef CONFIG_XFRM
1575 int xfrm6_init(void);
1576 void xfrm6_fini(void);
1577 int xfrm6_state_init(void);
1578 void xfrm6_state_fini(void);
1579 int xfrm6_protocol_init(void);
1580 void xfrm6_protocol_fini(void);
1581 #else
1582 static inline int xfrm6_init(void)
1583 {
1584 	return 0;
1585 }
1586 static inline void xfrm6_fini(void)
1587 {
1588 	;
1589 }
1590 #endif
1591 
1592 #ifdef CONFIG_XFRM_STATISTICS
1593 int xfrm_proc_init(struct net *net);
1594 void xfrm_proc_fini(struct net *net);
1595 #endif
1596 
1597 int xfrm_sysctl_init(struct net *net);
1598 #ifdef CONFIG_SYSCTL
1599 void xfrm_sysctl_fini(struct net *net);
1600 #else
1601 static inline void xfrm_sysctl_fini(struct net *net)
1602 {
1603 }
1604 #endif
1605 
1606 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1607 			  struct xfrm_address_filter *filter);
1608 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1609 		    int (*func)(struct xfrm_state *, int, void*), void *);
1610 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1611 struct xfrm_state *xfrm_state_alloc(struct net *net);
1612 void xfrm_state_free(struct xfrm_state *x);
1613 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1614 				   const xfrm_address_t *saddr,
1615 				   const struct flowi *fl,
1616 				   struct xfrm_tmpl *tmpl,
1617 				   struct xfrm_policy *pol, int *err,
1618 				   unsigned short family, u32 if_id);
1619 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1620 				       xfrm_address_t *daddr,
1621 				       xfrm_address_t *saddr,
1622 				       unsigned short family,
1623 				       u8 mode, u8 proto, u32 reqid);
1624 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1625 					      unsigned short family);
1626 int xfrm_state_check_expire(struct xfrm_state *x);
1627 void xfrm_state_update_stats(struct net *net);
1628 #ifdef CONFIG_XFRM_OFFLOAD
1629 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
1630 {
1631 	struct xfrm_dev_offload *xdo = &x->xso;
1632 	struct net_device *dev = READ_ONCE(xdo->dev);
1633 
1634 	if (dev && dev->xfrmdev_ops &&
1635 	    dev->xfrmdev_ops->xdo_dev_state_update_stats)
1636 		dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
1637 
1638 }
1639 #else
1640 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
1641 #endif
1642 void xfrm_state_insert(struct xfrm_state *x);
1643 int xfrm_state_add(struct xfrm_state *x);
1644 int xfrm_state_update(struct xfrm_state *x);
1645 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1646 				     const xfrm_address_t *daddr, __be32 spi,
1647 				     u8 proto, unsigned short family);
1648 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1649 					    const xfrm_address_t *daddr,
1650 					    const xfrm_address_t *saddr,
1651 					    u8 proto,
1652 					    unsigned short family);
1653 #ifdef CONFIG_XFRM_SUB_POLICY
1654 void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1655 		    unsigned short family);
1656 void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1657 		     unsigned short family);
1658 #else
1659 static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
1660 				  int n, unsigned short family)
1661 {
1662 }
1663 
1664 static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
1665 				   int n, unsigned short family)
1666 {
1667 }
1668 #endif
1669 
1670 struct xfrmk_sadinfo {
1671 	u32 sadhcnt; /* current hash bkts */
1672 	u32 sadhmcnt; /* max allowed hash bkts */
1673 	u32 sadcnt; /* current running count */
1674 };
1675 
1676 struct xfrmk_spdinfo {
1677 	u32 incnt;
1678 	u32 outcnt;
1679 	u32 fwdcnt;
1680 	u32 inscnt;
1681 	u32 outscnt;
1682 	u32 fwdscnt;
1683 	u32 spdhcnt;
1684 	u32 spdhmcnt;
1685 };
1686 
1687 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1688 int xfrm_state_delete(struct xfrm_state *x);
1689 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1690 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1691 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1692 			  bool task_valid);
1693 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1694 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1695 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1696 int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
1697 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1698 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
1699 		      struct netlink_ext_ack *extack);
1700 int xfrm_init_state(struct xfrm_state *x);
1701 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1702 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1703 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1704 			 int (*finish)(struct net *, struct sock *,
1705 				       struct sk_buff *));
1706 int xfrm_trans_queue(struct sk_buff *skb,
1707 		     int (*finish)(struct net *, struct sock *,
1708 				   struct sk_buff *));
1709 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
1710 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1711 
1712 #if IS_ENABLED(CONFIG_NET_PKTGEN)
1713 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1714 #endif
1715 
1716 void xfrm_local_error(struct sk_buff *skb, int mtu);
1717 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1718 		    int encap_type);
1719 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1720 int xfrm4_rcv(struct sk_buff *skb);
1721 
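/*
 * Receive an IPv4 IPsec packet whose SPI and next header have already
 * been parsed: clear the tunnel control block, record the address
 * family and the offset of the destination address, then hand the
 * packet to xfrm_input().
 */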
1722 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1723 {
1724 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1725 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1726 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1727 	return xfrm_input(skb, nexthdr, spi, 0);
1728 }
1729 
1730 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1731 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1732 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1733 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1734 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1735 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1736 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1737 		  struct ip6_tnl *t);
1738 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1739 		    int encap_type);
1740 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1741 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1742 int xfrm6_rcv(struct sk_buff *skb);
1743 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1744 		     xfrm_address_t *saddr, u8 proto);
1745 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1746 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1747 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1748 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1749 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1750 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1751 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1752 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1753 
1754 #ifdef CONFIG_XFRM
1755 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1756 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1757 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1758 struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1759 					struct sk_buff *skb);
1760 struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1761 					struct sk_buff *skb);
1762 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
1763 		     int optlen);
1764 #else
1765 static inline int xfrm_user_policy(struct sock *sk, int optname,
1766 				   sockptr_t optval, int optlen)
1767 {
1768 	return -ENOPROTOOPT;
1769 }
1770 #endif
1771 
1772 struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
1773 
1774 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1775 
1776 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1777 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1778 		     int (*func)(struct xfrm_policy *, int, int, void*),
1779 		     void *);
1780 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1781 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1782 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1783 					  const struct xfrm_mark *mark,
1784 					  u32 if_id, u8 type, int dir,
1785 					  struct xfrm_selector *sel,
1786 					  struct xfrm_sec_ctx *ctx, int delete,
1787 					  int *err);
1788 struct xfrm_policy *xfrm_policy_byid(struct net *net,
1789 				     const struct xfrm_mark *mark, u32 if_id,
1790 				     u8 type, int dir, u32 id, int delete,
1791 				     int *err);
1792 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1793 void xfrm_policy_hash_rebuild(struct net *net);
1794 u32 xfrm_get_acqseq(void);
1795 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
1796 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
1797 		   struct netlink_ext_ack *extack);
1798 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1799 				 u8 mode, u32 reqid, u32 if_id, u8 proto,
1800 				 const xfrm_address_t *daddr,
1801 				 const xfrm_address_t *saddr, int create,
1802 				 unsigned short family);
1803 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1804 
1805 #ifdef CONFIG_XFRM_MIGRATE
1806 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1807 	       const struct xfrm_migrate *m, int num_bundles,
1808 	       const struct xfrm_kmaddress *k,
1809 	       const struct xfrm_encap_tmpl *encap);
1810 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1811 						u32 if_id);
1812 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1813 				      struct xfrm_migrate *m,
1814 				      struct xfrm_encap_tmpl *encap);
1815 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1816 		 struct xfrm_migrate *m, int num_bundles,
1817 		 struct xfrm_kmaddress *k, struct net *net,
1818 		 struct xfrm_encap_tmpl *encap, u32 if_id,
1819 		 struct netlink_ext_ack *extack);
1820 #endif
1821 
1822 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1823 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1824 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1825 	      xfrm_address_t *addr);
1826 
1827 void xfrm_input_init(void);
1828 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1829 
1830 void xfrm_probe_algs(void);
1831 int xfrm_count_pfkey_auth_supported(void);
1832 int xfrm_count_pfkey_enc_supported(void);
1833 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1834 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1835 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1836 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1837 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1838 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1839 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1840 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1841 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1842 					    int probe);
1843 
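/*
 * Address comparison helpers.  xfrm_addr_equal() dispatches on the
 * address family; unrecognised families fall through to the IPv4 case,
 * so only the first four bytes are compared.
 */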
1844 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1845 				    const xfrm_address_t *b)
1846 {
1847 	return ipv6_addr_equal((const struct in6_addr *)a,
1848 			       (const struct in6_addr *)b);
1849 }
1850 
1851 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1852 				   const xfrm_address_t *b,
1853 				   sa_family_t family)
1854 {
1855 	switch (family) {
1856 	default:
1857 	case AF_INET:
1858 		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1859 	case AF_INET6:
1860 		return xfrm6_addr_equal(a, b);
1861 	}
1862 }
1863 
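/* The low three bits of a policy index encode the policy direction. */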
1864 static inline int xfrm_policy_id2dir(u32 index)
1865 {
1866 	return index & 7;
1867 }
1868 
1869 #ifdef CONFIG_XFRM
1870 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
1871 int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1872 void xfrm_replay_notify(struct xfrm_state *x, int event);
1873 int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
1874 int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1875 
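/*
 * The two helpers below report whether anyone is subscribed to the
 * XFRMNLGRP_AEVENTS or XFRMNLGRP_ACQUIRE netlink multicast groups, so
 * callers can avoid building notifications nobody will receive.
 */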
1876 static inline int xfrm_aevent_is_on(struct net *net)
1877 {
1878 	struct sock *nlsk;
1879 	int ret = 0;
1880 
1881 	rcu_read_lock();
1882 	nlsk = rcu_dereference(net->xfrm.nlsk);
1883 	if (nlsk)
1884 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1885 	rcu_read_unlock();
1886 	return ret;
1887 }
1888 
1889 static inline int xfrm_acquire_is_on(struct net *net)
1890 {
1891 	struct sock *nlsk;
1892 	int ret = 0;
1893 
1894 	rcu_read_lock();
1895 	nlsk = rcu_dereference(net->xfrm.nlsk);
1896 	if (nlsk)
1897 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1898 	rcu_read_unlock();
1899 
1900 	return ret;
1901 }
1902 #endif
1903 
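/*
 * Sizing helpers for algorithm structures: the struct itself plus the
 * key material.  alg_key_len is expressed in bits and is rounded up to
 * whole bytes here.
 */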
1904 static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
1905 {
1906 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1907 }
1908 
1909 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
1910 {
1911 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1912 }
1913 
1914 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1915 {
1916 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1917 }
1918 
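/* Size of the ESN replay state including its bmp_len 32-bit bitmap words. */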
1919 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1920 {
1921 	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1922 }
1923 
1924 #ifdef CONFIG_XFRM_MIGRATE
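/*
 * Copy both ESN replay windows (replay_esn and preplay_esn) from @orig
 * to @x when migrating a state; returns -ENOMEM if either copy fails.
 */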
1925 static inline int xfrm_replay_clone(struct xfrm_state *x,
1926 				     struct xfrm_state *orig)
1927 {
1928 
1929 	x->replay_esn = kmemdup(orig->replay_esn,
1930 				xfrm_replay_state_esn_len(orig->replay_esn),
1931 				GFP_KERNEL);
1932 	if (!x->replay_esn)
1933 		return -ENOMEM;
1934 	x->preplay_esn = kmemdup(orig->preplay_esn,
1935 				 xfrm_replay_state_esn_len(orig->preplay_esn),
1936 				 GFP_KERNEL);
1937 	if (!x->preplay_esn)
1938 		return -ENOMEM;
1939 
1940 	return 0;
1941 }
1942 
1943 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
1944 {
1945 	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
1946 }
1947 
1948 
1949 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1950 {
1951 	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1952 }
1953 
1954 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1955 {
1956 	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1957 }
1958 
1959 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1960 {
1961 	int i;
1962 	for (i = 0; i < n; i++)
1963 		xfrm_state_put(*(states + i));
1964 }
1965 
1966 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1967 {
1968 	int i;
1969 	for (i = 0; i < n; i++)
1970 		xfrm_state_delete(*(states + i));
1971 }
1972 #endif
1973 
1974 void __init xfrm_dev_init(void);
1975 
1976 #ifdef CONFIG_XFRM_OFFLOAD
1977 void xfrm_dev_resume(struct sk_buff *skb);
1978 void xfrm_dev_backlog(struct softnet_data *sd);
1979 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
1980 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
1981 		       struct xfrm_user_offload *xuo,
1982 		       struct netlink_ext_ack *extack);
1983 int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
1984 			struct xfrm_user_offload *xuo, u8 dir,
1985 			struct netlink_ext_ack *extack);
1986 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
1987 void xfrm_dev_state_delete(struct xfrm_state *x);
1988 void xfrm_dev_state_free(struct xfrm_state *x);
1989 
1990 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1991 {
1992 	struct xfrm_dev_offload *xso = &x->xso;
1993 	struct net_device *dev = READ_ONCE(xso->dev);
1994 
1995 	if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
1996 		dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
1997 }
1998 
1999 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2000 {
2001 	struct xfrm_state *x = dst->xfrm;
2002 	struct xfrm_dst *xdst;
2003 
2004 	if (!x || !x->type_offload)
2005 		return false;
2006 
2007 	xdst = (struct xfrm_dst *) dst;
2008 	if (!x->xso.offload_handle && !xdst->child->xfrm)
2009 		return true;
2010 	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
2011 	    !xdst->child->xfrm)
2012 		return true;
2013 
2014 	return false;
2015 }
2016 
2017 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2018 {
2019 	struct xfrm_dev_offload *xdo = &x->xdo;
2020 	struct net_device *dev = xdo->dev;
2021 
2022 	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
2023 		dev->xfrmdev_ops->xdo_dev_policy_delete(x);
2024 }
2025 
2026 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2027 {
2028 	struct xfrm_dev_offload *xdo = &x->xdo;
2029 	struct net_device *dev = xdo->dev;
2030 
2031 	if (dev && dev->xfrmdev_ops) {
2032 		if (dev->xfrmdev_ops->xdo_dev_policy_free)
2033 			dev->xfrmdev_ops->xdo_dev_policy_free(x);
2034 		xdo->dev = NULL;
2035 		netdev_put(dev, &xdo->dev_tracker);
2036 	}
2037 }
2038 #else
2039 static inline void xfrm_dev_resume(struct sk_buff *skb)
2040 {
2041 }
2042 
2043 static inline void xfrm_dev_backlog(struct softnet_data *sd)
2044 {
2045 }
2046 
2047 static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
2048 {
2049 	return skb;
2050 }
2051 
2052 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
2053 {
2054 	return 0;
2055 }
2056 
2057 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
2058 {
2059 }
2060 
2061 static inline void xfrm_dev_state_free(struct xfrm_state *x)
2062 {
2063 }
2064 
2065 static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2066 				      struct xfrm_user_offload *xuo, u8 dir,
2067 				      struct netlink_ext_ack *extack)
2068 {
2069 	return 0;
2070 }
2071 
2072 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2073 {
2074 }
2075 
2076 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2077 {
2078 }
2079 
2080 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
2081 {
2082 	return false;
2083 }
2084 
2085 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2086 {
2087 }
2088 
2089 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2090 {
2091 	return false;
2092 }
2093 #endif
2094 
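/*
 * Netlink helpers for the xfrm mark: xfrm_mark_get() copies an optional
 * XFRMA_MARK attribute (zeroing the mark when it is absent) and returns
 * the masked value, while xfrm_mark_put() emits the attribute only when
 * the mark or mask is non-zero.
 */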
2095 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
2096 {
2097 	if (attrs[XFRMA_MARK])
2098 		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
2099 	else
2100 		m->v = m->m = 0;
2101 
2102 	return m->v & m->m;
2103 }
2104 
2105 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
2106 {
2107 	int ret = 0;
2108 
2109 	if (m->m | m->v)
2110 		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
2111 	return ret;
2112 }
2113 
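/*
 * Combine the state's output mark with the original mark: bits covered
 * by the smark mask come from the state, the rest are preserved.
 */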
2114 static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
2115 {
2116 	struct xfrm_mark *m = &x->props.smark;
2117 
2118 	return (m->v & m->m) | (mark & ~m->m);
2119 }
2120 
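/* Emit XFRMA_IF_ID only for a non-zero interface id. */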
2121 static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
2122 {
2123 	int ret = 0;
2124 
2125 	if (if_id)
2126 		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
2127 	return ret;
2128 }
2129 
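/*
 * Reject a packet that arrived through an IP tunnel when the matching
 * state is not a tunnel-mode state.
 */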
2130 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
2131 				    unsigned int family)
2132 {
2133 	bool tunnel = false;
2134 
2135 	switch (family) {
2136 	case AF_INET:
2137 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
2138 			tunnel = true;
2139 		break;
2140 	case AF_INET6:
2141 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
2142 			tunnel = true;
2143 		break;
2144 	}
2145 	if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
2146 		return -EINVAL;
2147 
2148 	return 0;
2149 }
2150 
2151 extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
2152 extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
2153 
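/*
 * Callbacks for translating between 32-bit (compat) and 64-bit xfrm
 * netlink messages.  The compat layer (CONFIG_XFRM_USER_COMPAT)
 * implements these hooks and registers them with
 * xfrm_register_translator(); users look the translator up with
 * xfrm_get_translator(), which pins the owning module, and release it
 * with xfrm_put_translator().
 */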
2154 struct xfrm_translator {
2155 	/* Allocate frag_list and put compat translation there */
2156 	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
2157 
2158 	/* Allocate nlmsg with 64-bit translation of received 32-bit message */
2159 	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
2160 			int maxtype, const struct nla_policy *policy,
2161 			struct netlink_ext_ack *extack);
2162 
2163 	/* Translate 32-bit user_policy from sockptr */
2164 	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
2165 
2166 	struct module *owner;
2167 };
2168 
2169 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2170 extern int xfrm_register_translator(struct xfrm_translator *xtr);
2171 extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
2172 extern struct xfrm_translator *xfrm_get_translator(void);
2173 extern void xfrm_put_translator(struct xfrm_translator *xtr);
2174 #else
2175 static inline struct xfrm_translator *xfrm_get_translator(void)
2176 {
2177 	return NULL;
2178 }
2179 static inline void xfrm_put_translator(struct xfrm_translator *xtr)
2180 {
2181 }
2182 #endif
2183 
2184 #if IS_ENABLED(CONFIG_IPV6)
2185 static inline bool xfrm6_local_dontfrag(const struct sock *sk)
2186 {
2187 	int proto;
2188 
2189 	if (!sk || sk->sk_family != AF_INET6)
2190 		return false;
2191 
2192 	proto = sk->sk_protocol;
2193 	if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2194 		return inet6_test_bit(DONTFRAG, sk);
2195 
2196 	return false;
2197 }
2198 #endif
2199 
2200 #if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
2201     (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
2202 
2203 extern struct metadata_dst __percpu *xfrm_bpf_md_dst;
2204 
2205 int register_xfrm_interface_bpf(void);
2206 
2207 #else
2208 
2209 static inline int register_xfrm_interface_bpf(void)
2210 {
2211 	return 0;
2212 }
2213 
2214 #endif
2215 
2216 #if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
2217 int register_xfrm_state_bpf(void);
2218 #else
2219 static inline int register_xfrm_state_bpf(void)
2220 {
2221 	return 0;
2222 }
2223 #endif
2224 
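/*
 * NAT-T keepalive support: per-family and per-netns setup/teardown plus
 * a hook invoked when a state changes, so the keepalive work can be
 * re-evaluated.
 */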
2225 int xfrm_nat_keepalive_init(unsigned short family);
2226 void xfrm_nat_keepalive_fini(unsigned short family);
2227 int xfrm_nat_keepalive_net_init(struct net *net);
2228 int xfrm_nat_keepalive_net_fini(struct net *net);
2229 void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);
2230 
2231 #endif	/* _NET_XFRM_H */
2232