/* include/net/netfilter/nf_flow_table.h (revision 87b3593b) */
#ifndef _NF_FLOW_TABLE_H
#define _NF_FLOW_TABLE_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/rhashtable-types.h>
#include <linux/rcupdate.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/flow_offload.h>
#include <net/dst.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>

struct nf_flowtable;
struct nf_flow_rule;
struct flow_offload;
enum flow_offload_tuple_dir;

struct nf_flow_key {
	struct flow_dissector_key_meta			meta;
	struct flow_dissector_key_control		control;
	struct flow_dissector_key_control		enc_control;
	struct flow_dissector_key_basic			basic;
	struct flow_dissector_key_vlan			vlan;
	struct flow_dissector_key_vlan			cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs	ipv4;
		struct flow_dissector_key_ipv6_addrs	ipv6;
	};
	struct flow_dissector_key_keyid			enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs	enc_ipv4;
		struct flow_dissector_key_ipv6_addrs	enc_ipv6;
	};
	struct flow_dissector_key_tcp			tcp;
	struct flow_dissector_key_ports			tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct nf_flow_match {
	struct flow_dissector	dissector;
	struct nf_flow_key	key;
	struct nf_flow_key	mask;
};

struct nf_flow_rule {
	struct nf_flow_match	match;
	struct flow_rule	*rule;
};

struct nf_flowtable_type {
	struct list_head		list;
	int				family;
	int				(*init)(struct nf_flowtable *ft);
	bool				(*gc)(const struct flow_offload *flow);
	int				(*setup)(struct nf_flowtable *ft,
						 struct net_device *dev,
						 enum flow_block_command cmd);
	int				(*action)(struct net *net,
						  struct flow_offload *flow,
						  enum flow_offload_tuple_dir dir,
						  struct nf_flow_rule *flow_rule);
	void				(*free)(struct nf_flowtable *ft);
	void				(*get)(struct nf_flowtable *ft);
	void				(*put)(struct nf_flowtable *ft);
	nf_hookfn			*hook;
	struct module			*owner;
};

enum nf_flowtable_flags {
	NF_FLOWTABLE_HW_OFFLOAD		= 0x1,	/* NFT_FLOWTABLE_HW_OFFLOAD */
	NF_FLOWTABLE_COUNTER		= 0x2,	/* NFT_FLOWTABLE_COUNTER */
};

struct nf_flowtable {
	unsigned int			flags;		/* readonly in datapath */
	int				priority;	/* control path (padding hole) */
	struct rhashtable		rhashtable;	/* datapath, read-mostly members come first */

	struct list_head		list;		/* slowpath parts */
	const struct nf_flowtable_type	*type;
	struct delayed_work		gc_work;
	struct flow_block		flow_block;
	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
	possible_net_t			net;
};

static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
{
	return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
}

enum flow_offload_tuple_dir {
	FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
	FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
};
#define FLOW_OFFLOAD_DIR_MAX	IP_CT_DIR_MAX

enum flow_offload_xmit_type {
	FLOW_OFFLOAD_XMIT_UNSPEC	= 0,
	FLOW_OFFLOAD_XMIT_NEIGH,
	FLOW_OFFLOAD_XMIT_XFRM,
	FLOW_OFFLOAD_XMIT_DIRECT,
	FLOW_OFFLOAD_XMIT_TC,
};

#define NF_FLOW_TABLE_ENCAP_MAX		2

struct flow_offload_tuple {
	union {
		struct in_addr		src_v4;
		struct in6_addr		src_v6;
	};
	union {
		struct in_addr		dst_v4;
		struct in6_addr		dst_v6;
	};
	struct {
		__be16			src_port;
		__be16			dst_port;
	};

	int				iifidx;

	u8				l3proto;
	u8				l4proto;
	struct {
		u16			id;
		__be16			proto;
	} encap[NF_FLOW_TABLE_ENCAP_MAX];

	/* All members above are keys for lookups, see flow_offload_hash(). */
	struct { }			__hash;

	u8				dir:2,
					xmit_type:3,
					encap_num:2,
					in_vlan_ingress:2;
	u16				mtu;
	union {
		struct {
			struct dst_entry *dst_cache;
			u32		dst_cookie;
		};
		struct {
			u32		ifidx;
			u32		hw_ifidx;
			u8		h_source[ETH_ALEN];
			u8		h_dest[ETH_ALEN];
		} out;
		struct {
			u32		iifidx;
		} tc;
	};
};

struct flow_offload_tuple_rhash {
	struct rhash_head		node;
	struct flow_offload_tuple	tuple;
};

enum nf_flow_flags {
	NF_FLOW_SNAT,
	NF_FLOW_DNAT,
	NF_FLOW_TEARDOWN,
	NF_FLOW_HW,
	NF_FLOW_HW_DYING,
	NF_FLOW_HW_DEAD,
	NF_FLOW_HW_PENDING,
	NF_FLOW_HW_BIDIRECTIONAL,
	NF_FLOW_HW_ESTABLISHED,
};
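
/*
 * Unlike enum nf_flowtable_flags above, which are bit masks, these values
 * are bit numbers: flow->flags in struct flow_offload below is an unsigned
 * long manipulated with the atomic bit helpers. A minimal, illustrative
 * sketch of how a flow is typically marked and tested:
 *
 *	set_bit(NF_FLOW_HW, &flow->flags);
 *	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
 *		return;
 */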

enum flow_offload_type {
	NF_FLOW_OFFLOAD_UNSPEC	= 0,
	NF_FLOW_OFFLOAD_ROUTE,
};

struct flow_offload {
	struct flow_offload_tuple_rhash		tuplehash[FLOW_OFFLOAD_DIR_MAX];
	struct nf_conn				*ct;
	unsigned long				flags;
	u16					type;
	u32					timeout;
	struct rcu_head				rcu_head;
};

#define NF_FLOW_TIMEOUT (30 * HZ)
#define nf_flowtable_time_stamp	(u32)jiffies

unsigned long flow_offload_get_timeout(struct flow_offload *flow);

static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
	return (__s32)(timeout - nf_flowtable_time_stamp);
}
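
/*
 * Illustrative sketch (not part of this header): flow->timeout stores an
 * absolute jiffies-based timestamp, so expiry is decided by the sign of
 * nf_flow_timeout_delta(). The helper name below is hypothetical; the
 * flowtable core uses an equivalent check in its garbage collector.
 *
 *	static bool example_flow_has_expired(const struct flow_offload *flow)
 *	{
 *		return nf_flow_timeout_delta(flow->timeout) <= 0;
 *	}
 */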

struct nf_flow_route {
	struct {
		struct dst_entry		*dst;
		struct {
			u32			ifindex;
			struct {
				u16		id;
				__be16		proto;
			} encap[NF_FLOW_TABLE_ENCAP_MAX];
			u8			num_encaps:2,
						ingress_vlans:2;
		} in;
		struct {
			u32			ifindex;
			u32			hw_ifindex;
			u8			h_source[ETH_ALEN];
			u8			h_dest[ETH_ALEN];
		} out;
		enum flow_offload_xmit_type	xmit_type;
	} tuple[FLOW_OFFLOAD_DIR_MAX];
};

struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);

static inline int
nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
			     flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *block = &flow_table->flow_block;
	struct flow_block_cb *block_cb;
	int err = 0;

	down_write(&flow_table->flow_block_lock);
	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
	if (block_cb) {
		err = -EEXIST;
		goto unlock;
	}

	block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
	if (IS_ERR(block_cb)) {
		err = PTR_ERR(block_cb);
		goto unlock;
	}

	list_add_tail(&block_cb->list, &block->cb_list);
	up_write(&flow_table->flow_block_lock);

	if (flow_table->type->get)
		flow_table->type->get(flow_table);
	return 0;

unlock:
	up_write(&flow_table->flow_block_lock);
	return err;
}

static inline void
nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
			     flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *block = &flow_table->flow_block;
	struct flow_block_cb *block_cb;

	down_write(&flow_table->flow_block_lock);
	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
	if (block_cb) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	} else {
		WARN_ON(true);
	}
	up_write(&flow_table->flow_block_lock);

	if (flow_table->type->put)
		flow_table->type->put(flow_table);
}
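
/*
 * Usage sketch for the callback helpers above (illustrative only): a driver
 * that offloads flows from an existing flowtable registers a flow_setup_cb_t
 * and later removes it with the same cb/cb_priv pair. The callback, private
 * struct and sample_*() helpers below are hypothetical; callbacks receive
 * struct flow_cls_offload commands built by the offload infrastructure.
 *
 *	static int sample_flow_setup_cb(enum tc_setup_type type,
 *					void *type_data, void *cb_priv)
 *	{
 *		struct flow_cls_offload *f = type_data;
 *		struct sample_priv *priv = cb_priv;
 *
 *		switch (f->command) {
 *		case FLOW_CLS_REPLACE:
 *			return sample_install(priv, f);	// program hardware
 *		case FLOW_CLS_DESTROY:
 *			return sample_remove(priv, f);
 *		case FLOW_CLS_STATS:
 *			return sample_stats(priv, f);	// fill f->stats
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	err = nf_flow_table_offload_add_cb(flowtable, sample_flow_setup_cb, priv);
 *	...
 *	nf_flow_table_offload_del_cb(flowtable, sample_flow_setup_cb, priv);
 */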

void flow_offload_route_init(struct flow_offload *flow,
			     struct nf_flow_route *route);

int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow, bool force);

struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
						     struct flow_offload_tuple *tuple);
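
/*
 * Illustrative lifecycle sketch (assumptions noted, not a definitive API
 * guide): a caller that decides to fast-path a conntrack entry typically
 * allocates a flow from the nf_conn, fills in a struct nf_flow_route for
 * both directions (dst or interface info plus xmit_type), copies it into
 * the flow's tuples and inserts the flow into the table. On insertion
 * failure the caller frees the flow. "ct", "route" and "flowtable" below
 * stand for objects the caller already owns.
 *
 *	flow = flow_offload_alloc(ct);
 *	if (!flow)
 *		goto err;
 *
 *	flow_offload_route_init(flow, &route);
 *	if (flow_offload_add(flowtable, flow) < 0) {
 *		flow_offload_free(flow);
 *		goto err;
 *	}
 *
 * In the packet path, a lookup returns the per-direction tuplehash; the
 * owning flow is recovered with container_of(), as the datapath hooks do:
 *
 *	tuplehash = flow_offload_lookup(flowtable, &tuple);
 *	if (!tuplehash)
 *		return NF_ACCEPT;
 *
 *	dir = tuplehash->tuple.dir;
 *	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
 */
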
void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev);
void nf_flow_table_cleanup(struct net_device *dev);

int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);

void flow_offload_teardown(struct flow_offload *flow);

void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir);
void nf_flow_dnat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir);

struct flow_ports {
	__be16 source, dest;
};

unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
				     const struct nf_hook_state *state);
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
				       const struct nf_hook_state *state);

#define MODULE_ALIAS_NF_FLOWTABLE(family)	\
	MODULE_ALIAS("nf-flowtable-" __stringify(family))

void nf_flow_offload_add(struct nf_flowtable *flowtable,
			 struct flow_offload *flow);
void nf_flow_offload_del(struct nf_flowtable *flowtable,
			 struct flow_offload *flow);
void nf_flow_offload_stats(struct nf_flowtable *flowtable,
			   struct flow_offload *flow);

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);

int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
				struct net_device *dev,
				enum flow_block_command cmd);
int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule);
int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule);

int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
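
/*
 * Putting the pieces together (illustrative sketch modelled on the IPv4
 * flowtable module; the type is registered through nf_tables, which is not
 * declared in this header): a flowtable type wires the generic table
 * handling and offload entry points declared above to a family-specific
 * datapath hook, and advertises itself for module autoloading. The variable
 * name below is hypothetical.
 *
 *	static struct nf_flowtable_type flowtable_sample = {
 *		.family	= NFPROTO_IPV4,
 *		.init	= nf_flow_table_init,
 *		.setup	= nf_flow_table_offload_setup,
 *		.action	= nf_flow_rule_route_ipv4,
 *		.free	= nf_flow_table_free,
 *		.hook	= nf_flow_offload_ip_hook,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
 */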

static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
{
	__be16 proto;

	proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			     sizeof(struct pppoe_hdr)));
	switch (proto) {
	case htons(PPP_IP):
		return htons(ETH_P_IP);
	case htons(PPP_IPV6):
		return htons(ETH_P_IPV6);
	}

	return 0;
}

static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
{
	if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
		return false;

	*inner_proto = __nf_flow_pppoe_proto(skb);

	return true;
}
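
/*
 * Usage sketch (illustrative): callers that may see PPPoE-encapsulated
 * traffic resolve the inner protocol before parsing the IP header, along
 * the lines of the inet datapath hook. "proto" is a local __be16 here.
 *
 *	switch (skb->protocol) {
 *	case htons(ETH_P_PPP_SES):
 *		if (!nf_flow_pppoe_proto(skb, &proto))
 *			return NF_ACCEPT;
 *		break;
 *	default:
 *		proto = skb->protocol;
 *		break;
 *	}
 */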

#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count)	\
	this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count)	\
	this_cpu_dec((net)->ft.stat->count)
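
/*
 * Example (illustrative; "count_wq_add" stands for whatever counter field
 * the per-netns flowtable statistics structure behind net->ft.stat provides
 * in a given kernel version). Call sites pass the field name directly; the
 * _ATOMIC variants are for contexts where preemption may not be disabled:
 *
 *	NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_add);
 */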

#ifdef CONFIG_NF_FLOW_TABLE_PROCFS
int nf_flow_table_init_proc(struct net *net);
void nf_flow_table_fini_proc(struct net *net);
#else
static inline int nf_flow_table_init_proc(struct net *net)
{
	return 0;
}

static inline void nf_flow_table_fini_proc(struct net *net)
{
}
#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */

#endif /* _NF_FLOW_TABLE_H */