xref: /dragonfly/sys/netinet/ip_flow.c (revision 927da715)
/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_flow.c,v 1.9.2.2 2001/11/04 17:35:31 luigi Exp $
 * $DragonFly: src/sys/netinet/ip_flow.c,v 1.15 2008/07/27 10:06:57 sephe Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <machine/smp.h>

#include <net/if.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

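/*
 * ip_flow: a small per-cpu cache of recently forwarded IP flows, keyed on
 * (dst, src, tos).  When net.inet.ip.fastforwarding is enabled, packets that
 * match a cached flow are forwarded without a full routing lookup.  In the
 * usual BSD arrangement (the callers live outside this file), ip_input()
 * tries ipflow_fastforward() early on the input path and ip_forward() calls
 * ipflow_create() after forwarding a packet the slow way; the cache is aged
 * from the IP slow timeout through ipflow_slowtimo().
 */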
#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */
#define	IPFLOW_HASHSIZE		(1 << IPFLOW_HASHBITS)
#define	IPFLOW_MAX		256

#define IPFLOW_RTENTRY_ISDOWN(rt) \
	(((rt)->rt_flags & RTF_UP) == 0 || ((rt)->rt_ifp->if_flags & IFF_UP) == 0)

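/*
 * All flow state is per-cpu: each CPU owns a private hash table and inuse
 * counter, selected through mycpuid below, so the table is only touched from
 * its owning CPU and a critical section is enough to keep the forwarding and
 * timeout paths from interleaving.  Expiry is driven by one netmsg per CPU,
 * kicked from ipflow_slowtimo().
 */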
#define ipflow_inuse		ipflow_inuse_pcpu[mycpuid]
#define ipflows			ipflows_pcpu[mycpuid]

static LIST_HEAD(ipflowhead, ipflow) ipflows_pcpu[MAXCPU][IPFLOW_HASHSIZE];
static int		ipflow_inuse_pcpu[MAXCPU];
static struct netmsg	ipflow_timo_netmsgs[MAXCPU];
static int		ipflow_active = 0;

SYSCTL_NODE(_net_inet_ip, OID_AUTO, ipflow, CTLFLAG_RW, 0, "ip flow");
SYSCTL_INT(_net_inet_ip, IPCTL_FASTFORWARDING, fastforwarding, CTLFLAG_RW,
	   &ipflow_active, 0, "Enable flow-based IP forwarding");

static MALLOC_DEFINE(M_IPFLOW, "ip_flow", "IP flow");

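/*
 * Hash a (dst, src, tos) triple into one of IPFLOW_HASHSIZE buckets by
 * summing right-shifted copies of the destination and source addresses,
 * stepped IPFLOW_HASHBITS bits at a time, plus the TOS, and masking the
 * result to the table size.  Stepping by a non-multiple of 8 keeps octet
 * boundaries from lining up across iterations (hence the note on
 * IPFLOW_HASHBITS above).
 */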
static unsigned
ipflow_hash(struct in_addr dst, struct in_addr src, unsigned tos)
{
	unsigned hash = tos;
	int idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS)
		hash += (dst.s_addr >> (32 - idx)) + (src.s_addr >> idx);
	return hash & (IPFLOW_HASHSIZE-1);
}

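/*
 * Find the cached flow, if any, matching the packet's destination, source
 * and TOS.  The search runs inside a critical section so the per-cpu bucket
 * list cannot change underneath us.
 */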
static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	unsigned hash;
	struct ipflow *ipf;

	hash = ipflow_hash(ip->ip_dst, ip->ip_src, ip->ip_tos);

	crit_enter();
	ipf = LIST_FIRST(&ipflows[hash]);
	while (ipf != NULL) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr &&
		    ip->ip_src.s_addr == ipf->ipf_src.s_addr &&
		    ip->ip_tos == ipf->ipf_tos)
			break;
		ipf = LIST_NEXT(ipf, ipf_next);
	}
	crit_exit();

	return ipf;
}

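/*
 * Fast-forward a packet: if it matches a cached flow whose route and
 * interface are still up, and it needs no option processing, fragmentation
 * or TTL-expiry handling, decrement the TTL, patch the checksum
 * incrementally and hand it straight to the output routine.  Returns 1 if
 * the packet was consumed here, 0 if the caller should take the normal
 * slow path.
 */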
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ipflow *ipf;
	struct rtentry *rt;
	struct sockaddr *dst;
	struct ifnet *ifp;
	int error;

	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || !ipflow_active || m->m_len < sizeof(struct ip))
		return 0;

	/*
	 * IP header with no options and a valid version and length?
	 */
	ip = mtod(m, struct ip *);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    ntohs(ip->ip_len) > m->m_pkthdr.len)
		return 0;

	/*
	 * Find a flow.
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL)
		return 0;

	/*
	 * Route and interface still up?
	 */
	rt = ipf->ipf_ro.ro_rt;
	if (IPFLOW_RTENTRY_ISDOWN(rt))
		return 0;
	ifp = rt->rt_ifp;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		return 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 */
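	/*
	 * Dropping the TTL by IPTTLDEC lowers the 16-bit header word holding
	 * TTL and protocol by (IPTTLDEC << 8), so the one's-complement
	 * checksum has to be raised by the same amount.  The test below spots
	 * the case where that addition would wrap past 0xffff, and the extra
	 * +1 folds the end-around carry back in (RFC 1141-style incremental
	 * update).
	 */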
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= htons(0xffff - (IPTTLDEC << 8)))
		ip->ip_sum += htons(IPTTLDEC << 8) + 1;
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;
	ipf->ipf_timer = IPFLOW_TIMER;

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = &ipf->ipf_ro.ro_dst;

	error = ifp->if_output(ifp, m, dst, rt);
	if (error) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	return 1;
}

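/*
 * Fold a retiring flow's counters into its route's use count and the
 * global IP forwarding statistics.
 */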
static void
ipflow_addstats(struct ipflow *ipf)
{
	ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses;
	ipstat.ips_cantforward += ipf->ipf_errors + ipf->ipf_dropped;
	ipstat.ips_forward += ipf->ipf_uses;
	ipstat.ips_fastforward += ipf->ipf_uses;
}

static void
ipflow_free(struct ipflow *ipf)
{
	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	crit_enter();
	LIST_REMOVE(ipf, ipf_next);

	KKASSERT(ipflow_inuse > 0);
	ipflow_inuse--;
	crit_exit();

	ipflow_addstats(ipf);
	RTFREE(ipf->ipf_ro.ro_rt);
	kfree(ipf, M_IPFLOW);
}

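/*
 * Make room for a new flow: detach and return an existing entry, preferring
 * one whose route has gone down and otherwise the one with the smallest
 * remaining timer and the fewest recent uses.  The caller reuses the
 * returned structure.
 */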
static struct ipflow *
ipflow_reap(void)
{
	struct ipflow *ipf, *maybe_ipf = NULL;
	int idx;

	crit_enter();
	for (idx = 0; idx < IPFLOW_HASHSIZE; idx++) {
		ipf = LIST_FIRST(&ipflows[idx]);
		while (ipf != NULL) {
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			if ((ipf->ipf_ro.ro_rt->rt_flags & RTF_UP) == 0)
				goto done;

			/*
			 * Choose the one that's been least recently used
			 * or has had the fewest uses in the last 1.5
			 * intervals.
			 */
			if (maybe_ipf == NULL ||
			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
			     ipf->ipf_last_uses + ipf->ipf_uses <
			     maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses))
				maybe_ipf = ipf;
			ipf = LIST_NEXT(ipf, ipf_next);
		}
	}
	ipf = maybe_ipf;
done:
	/*
	 * Remove the entry from the flow table.
	 */
	LIST_REMOVE(ipf, ipf_next);
	crit_exit();

	ipflow_addstats(ipf);
	RTFREE(ipf->ipf_ro.ro_rt);
	return ipf;
}

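/*
 * Flow aging.  ipflow_slowtimo() is meant to be called from the IP slow
 * timeout; it pokes every CPU with an IPI, and each CPU then queues its
 * private netmsg (unless one is already pending) so ipflow_timo_dispatch()
 * runs on that CPU.  The dispatch routine frees flows whose timer has
 * expired and rolls the per-flow use counts of the survivors into the IP
 * statistics.
 */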
static void
ipflow_timo_dispatch(struct netmsg *nmsg)
{
	struct ipflow *ipf;
	int idx;

	crit_enter();
	lwkt_replymsg(&nmsg->nm_lmsg, 0);	/* reply ASAP */

	for (idx = 0; idx < IPFLOW_HASHSIZE; idx++) {
		ipf = LIST_FIRST(&ipflows[idx]);
		while (ipf != NULL) {
			struct ipflow *next_ipf = LIST_NEXT(ipf, ipf_next);

			if (--ipf->ipf_timer == 0) {
				ipflow_free(ipf);
			} else {
				ipf->ipf_last_uses = ipf->ipf_uses;
				ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses;
				ipstat.ips_forward += ipf->ipf_uses;
				ipstat.ips_fastforward += ipf->ipf_uses;
				ipf->ipf_uses = 0;
			}
			ipf = next_ipf;
		}
	}
	crit_exit();
}

static void
ipflow_timo_ipi(void *arg __unused)
{
	struct lwkt_msg *msg = &ipflow_timo_netmsgs[mycpuid].nm_lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(cpu_portfn(mycpuid), msg);
	crit_exit();
}

void
ipflow_slowtimo(void)
{
#ifdef SMP
	lwkt_send_ipiq_mask(smp_active_mask, ipflow_timo_ipi, NULL);
#else
	ipflow_timo_ipi(NULL);
#endif
}

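/*
 * Record (or refresh) the flow for a packet, typically after it has been
 * forwarded the slow way, caching the caller's route for later use by
 * ipflow_fastforward().  An existing entry is recycled in place; otherwise a
 * new one is allocated, reaping the least valuable flow once the per-cpu
 * limit is reached.  The cached rtentry holds a reference that is dropped
 * when the flow is freed or replaced.
 */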
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, struct ip *);
	struct ipflow *ipf;
	unsigned hash;

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (!ipflow_active || ip->ip_p == IPPROTO_ICMP)
		return;

	/*
	 * See if an existing flow struct exists.  If so, remove it from its
	 * list and free the old route.  If not, try to allocate a new one
	 * (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse == IPFLOW_MAX) {
			ipf = ipflow_reap();
		} else {
			ipf = kmalloc(sizeof(*ipf), M_IPFLOW,
				      M_INTWAIT | M_NULLOK);
			if (ipf == NULL)
				return;
			ipflow_inuse++;
		}
		bzero(ipf, sizeof(*ipf));
	} else {
		crit_enter();
		LIST_REMOVE(ipf, ipf_next);
		crit_exit();

		ipflow_addstats(ipf);
		RTFREE(ipf->ipf_ro.ro_rt);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	ipf->ipf_ro = *ro;
	ro->ro_rt->rt_refcnt++;
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	ipf->ipf_timer = IPFLOW_TIMER;

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip->ip_dst, ip->ip_src, ip->ip_tos);
	crit_enter();
	LIST_INSERT_HEAD(&ipflows[hash], ipf, ipf_next);
	crit_exit();
}

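/*
 * Set up the per-cpu timeout messages and export each CPU's flow count as
 * net.inet.ip.ipflow.inuse<N>.
 */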
static void
ipflow_init(void)
{
	char oid_name[32];
	int i;

	for (i = 0; i < ncpus; ++i) {
		netmsg_init(&ipflow_timo_netmsgs[i], &netisr_adone_rport, 0,
			    ipflow_timo_dispatch);

		ksnprintf(oid_name, sizeof(oid_name), "inuse%d", i);

		SYSCTL_ADD_INT(NULL,
		SYSCTL_STATIC_CHILDREN(_net_inet_ip_ipflow),
		OID_AUTO, oid_name, CTLFLAG_RD, &ipflow_inuse_pcpu[i], 0,
		"# of ip flows being used");
	}
}
SYSINIT(arp, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipflow_init, 0);