/*-
 * Copyright (c) 2010-2011 Alexander V. Chernikov <melifaro@ipfw.ru>
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD$";

#include "opt_inet6.h"
#include "opt_route.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/endian.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#include <net/if.h>
#include <net/route.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/netflow_v9.h>
#include <netgraph/netflow/ng_netflow.h>

#define	NBUCKETS	(65536)		/* must be power of 2 */
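
/*
 * Since NBUCKETS is a power of 2, the hash macros below can reduce a
 * hash value to a bucket index with a cheap bitwise AND against
 * (NBUCKETS - 1) instead of a modulo operation.
 */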

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))
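
/*
 * Illustrative example (not part of the original code): for a TCP flow
 * 10.0.0.1:1025 -> 10.0.0.2:80, FULL_HASH() folds each address with its
 * own upper 16 bits, XORs in both ports, and masks the result with
 * (NBUCKETS - 1), yielding a bucket index in the range 0..65535. The
 * folding is not symmetric, so the reverse direction of a flow will
 * generally land in a different bucket.
 */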

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magical number: statistically, the number of 4-packet flows is
 * bigger than that of 5-, 6-, 7-...packet flows by an order of magnitude.
 * Most UDP/ICMP scans are 1 packet (~ 90% of flow cache). TCP scans are
 * 2 packets in the case of a reachable host and 4 packets otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, fib_export_p, item_p, int);

static int hash_insert(priv_p, struct flow_hash_entry *, struct flow_rec *,
	int, uint8_t);
#ifdef INET6
static int hash6_insert(priv_p, struct flow6_hash_entry *, struct flow6_rec *,
	int, uint8_t);
#endif

static __inline void expire_flow(priv_p, fib_export_p, struct flow_entry *,
	int);

/*
 * Generate hash for a given flow record.
 *
 * FIB is not used here, because:
 * most VRFs will carry public IPv4 addresses, which are unique even
 * without the FIB; private addresses can overlap, but this is handled
 * by the flow_rec bcmp(), which includes the fib id. In the IPv6 world
 * addresses are all globally unique (not fully true, there is FC00::/7
 * for example, but the chances of address overlap are MUCH smaller).
 */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}

#ifdef INET6
/*
 * Generate hash for a given flow6 record.
 * Use the lower 4 octets of the v6 addresses.
 */
static __inline uint32_t
ip6_hash(struct flow6_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
		    r->dst.r_dst6.__u6_addr.__u6_addr32[3], r->r_sport,
		    r->r_dport);
	default:
		return ADDR_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
		    r->dst.r_dst6.__u6_addr.__u6_addr32[3]);
	}
}
#endif

/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}

#ifdef INET6
/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow6(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used6) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used6, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow6(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used6, 1);
}
#endif

/*
 * Detach export datagram from priv, if there is any.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv, fib_export_p fe)
{
	item_p	item = NULL;

	mtx_lock(&fe->export_mtx);
	if (fe->exp.item != NULL) {
		item = fe->exp.item;
		fe->exp.item = NULL;
	}
	mtx_unlock(&fe->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

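		/*
		 * A single mbuf cluster is large enough for a complete
		 * NetFlow v5 datagram: the 24-byte header plus up to
		 * NETFLOW_V5_MAX_RECORDS 48-byte records fits well
		 * within MCLBYTES.
		 */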
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
		dgram->header.pad = 0;
	}

	return (item);
}

/*
 * Re-attach an incomplete datagram back to priv.
 * If there is already another one there, send the incomplete one
 * to the collector.
 */
static void
return_export_dgram(priv_p priv, fib_export_p fe, item_p item, int flags)
{
	/*
	 * It may happen on SMP that some other thread has already
	 * put its item there; in this case we bail out and
	 * send what we have to the collector.
	 */
	mtx_lock(&fe->export_mtx);
	if (fe->exp.item == NULL) {
		fe->exp.item = item;
		mtx_unlock(&fe->export_mtx);
	} else {
		mtx_unlock(&fe->export_mtx);
		export_send(priv, fe, item, flags);
	}
}

/*
 * The flow is over. Call export_add() and free it. If the datagram is
 * full, call export_send(). The flow entry is always consumed here,
 * whether or not it could be exported.
 */
static __inline void
expire_flow(priv_p priv, fib_export_p fe, struct flow_entry *fle, int flags)
{
	struct netflow_export_item exp;
	uint16_t version = fle->f.version;

	if ((priv->export != NULL) && (version == IPVERSION)) {
		exp.item = get_export_dgram(priv, fe);
		if (exp.item == NULL) {
			atomic_add_32(&priv->info.nfinfo_export_failed, 1);
			if (priv->export9 != NULL)
				atomic_add_32(&priv->info.nfinfo_export9_failed, 1);
			/* fle definitely contains an IPv4 flow. */
			uma_zfree_arg(priv->zone, fle, priv);
			return;
		}

		if (export_add(exp.item, fle) > 0)
			export_send(priv, fe, exp.item, flags);
		else
			return_export_dgram(priv, fe, exp.item, NG_QUEUE);
	}

	if (priv->export9 != NULL) {
		exp.item9 = get_export9_dgram(priv, fe, &exp.item9_opt);
		if (exp.item9 == NULL) {
			atomic_add_32(&priv->info.nfinfo_export9_failed, 1);
			if (version == IPVERSION)
				uma_zfree_arg(priv->zone, fle, priv);
#ifdef INET6
			else if (version == IP6VERSION)
				uma_zfree_arg(priv->zone6, fle, priv);
#endif
			else
				panic("ng_netflow: Unknown IP version: %d",
				    version);
			return;
		}

		if (export9_add(exp.item9, exp.item9_opt, fle) > 0)
			export9_send(priv, fe, exp.item9, exp.item9_opt, flags);
		else
			return_export9_dgram(priv, fe, exp.item9,
			    exp.item9_opt, NG_QUEUE);
	}

	if (version == IPVERSION)
		uma_zfree_arg(priv->zone, fle, priv);
#ifdef INET6
	else if (version == IP6VERSION)
		uma_zfree_arg(priv->zone6, fle, priv);
#endif
}

/* Get a snapshot of node statistics */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	/* XXX: atomic */
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/*
 * Insert a record into the defined slot.
 *
 * First we get ourselves a free flow entry, then fill in all
 * the fields we can.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in a previous version. Need to test & profile
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry *fle;
	struct sockaddr_in sin;
	struct rtentry *rt;

	mtx_assert(&hsh->mtx, MA_OWNED);

	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours. It is detached from all lists,
	 * so we can safely edit it.
	 */

	fle->f.version = IPVERSION;
	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so that we can fill in out_ifx, dst_mask, nexthop, and
	 * (in future releases) dst_as.
	 */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_dst;
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, r->fib);
	if (rt != NULL) {
		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.dst_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Do a route lookup on the source address, to fill in src_mask. */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_src;
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, r->fib);
	if (rt != NULL) {
		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.src_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}

#ifdef INET6
/* XXX: make this a normal function instead of a macro. */
#define ipv6_masklen(x)		(bitcount32((x).__u6_addr.__u6_addr32[0]) + \
				bitcount32((x).__u6_addr.__u6_addr32[1]) + \
				bitcount32((x).__u6_addr.__u6_addr32[2]) + \
				bitcount32((x).__u6_addr.__u6_addr32[3]))
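/*
 * Example: for a /64 route the mask is ffff:ffff:ffff:ffff::, so the
 * four 32-bit words contribute 32 + 32 + 0 + 0 = 64 set bits. This
 * popcount approach assumes a contiguous netmask.
 */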
/* XXX: Do we need inline here? */
static __inline int
hash6_insert(priv_p priv, struct flow6_hash_entry *hsh6, struct flow6_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow6_entry *fle6;
	struct sockaddr_in6 *src, *dst;
	struct rtentry *rt;
	struct route_in6 rin6;

	mtx_assert(&hsh6->mtx, MA_OWNED);

	fle6 = uma_zalloc_arg(priv->zone6, priv, M_NOWAIT);
	if (fle6 == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle6 is totally ours. It is detached from all lists,
	 * so we can safely edit it.
	 */

	fle6->f.version = IP6VERSION;
	bcopy(r, &fle6->f.r, sizeof(struct flow6_rec));
	fle6->f.bytes = plen;
	fle6->f.packets = 1;
	fle6->f.tcp_flags = tcp_flags;

	fle6->f.first = fle6->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so that we can fill in out_ifx, dst_mask, nexthop, and
	 * (in future releases) dst_as.
	 */
	bzero(&rin6, sizeof(struct route_in6));
	dst = (struct sockaddr_in6 *)&rin6.ro_dst;
	dst->sin6_len = sizeof(struct sockaddr_in6);
	dst->sin6_family = AF_INET6;
	dst->sin6_addr = r->dst.r_dst6;

	rin6.ro_rt = rtalloc1_fib((struct sockaddr *)dst, 0, 0, r->fib);

	if (rin6.ro_rt != NULL) {
		rt = rin6.ro_rt;
		fle6->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET6)
			fle6->f.n.next_hop6 =
			    ((struct sockaddr_in6 *)(rt->rt_gateway))->sin6_addr;

		if (rt_mask(rt))
			fle6->f.dst_mask =
			    ipv6_masklen(((struct sockaddr_in6 *)
			    rt_mask(rt))->sin6_addr);
		else
			fle6->f.dst_mask = 128;

		RTFREE_LOCKED(rt);
	}

	/* Do a route lookup on the source address, to fill in src_mask. */
	bzero(&rin6, sizeof(struct route_in6));
	src = (struct sockaddr_in6 *)&rin6.ro_dst;
	src->sin6_len = sizeof(struct sockaddr_in6);
	src->sin6_family = AF_INET6;
	src->sin6_addr = r->src.r_src6;

	rin6.ro_rt = rtalloc1_fib((struct sockaddr *)src, 0, 0, r->fib);

	if (rin6.ro_rt != NULL) {
		rt = rin6.ro_rt;

		if (rt_mask(rt))
			fle6->f.src_mask =
			    ipv6_masklen(((struct sockaddr_in6 *)
			    rt_mask(rt))->sin6_addr);
		else
			fle6->f.src_mask = 128;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh6->head, fle6, fle6_hash);

	return (0);
}
#endif

/*
 * Non-static functions called from ng_netflow.c
 */

/* Allocate memory and set up flow cache */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry *hsh;
#ifdef INET6
	struct flow6_hash_entry *hsh6;
#endif
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow IPv4 cache",
	    sizeof(struct flow_entry), uma_ctor_flow, uma_dtor_flow,
	    NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);
#ifdef INET6
	priv->zone6 = uma_zcreate("NetFlow IPv6 cache",
	    sizeof(struct flow6_entry), uma_ctor_flow6, uma_dtor_flow6,
	    NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone6, CACHESIZE);
#endif

	/* Allocate hash. */
	priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

#ifdef INET6
	/* Allocate hash. */
	priv->hash6 = malloc(NBUCKETS * sizeof(struct flow6_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize hash. */
	for (i = 0, hsh6 = priv->hash6; i < NBUCKETS; i++, hsh6++) {
		mtx_init(&hsh6->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh6->head);
	}
#endif

	ng_netflow_v9_cache_init(priv);
	CTR0(KTR_NET, "ng_netflow startup()");

	return (0);
}

/* Initialize a new FIB table for v5 and v9 */
int
ng_netflow_fib_init(priv_p priv, int fib)
{
	fib_export_p	fe = priv_to_fib(priv, fib);

	CTR1(KTR_NET, "ng_netflow(): fib init: %d", fib);

	if (fe != NULL)
		return (0);

	if ((fe = malloc(sizeof(struct fib_export), M_NETGRAPH,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (1);

	mtx_init(&fe->export_mtx, "export dgram lock", NULL, MTX_DEF);
	mtx_init(&fe->export9_mtx, "export9 dgram lock", NULL, MTX_DEF);
	fe->fib = fib;
	fe->domain_id = fib;

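	/*
	 * Publish the new fib_export atomically; if another thread
	 * raced us and installed its own first, tear ours down again.
	 */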
	if (atomic_cmpset_ptr((volatile uintptr_t *)&priv->fib_data[fib],
	    (uintptr_t)NULL, (uintptr_t)fe) == 0) {
		/* FIB already set up by another ISR. */
		CTR3(KTR_NET, "ng_netflow(): fib init: %d setup %p but got %p",
		    fib, fe, priv_to_fib(priv, fib));
		mtx_destroy(&fe->export_mtx);
		mtx_destroy(&fe->export9_mtx);
		free(fe, M_NETGRAPH);
	} else {
		/* Increase counter for statistics. */
		CTR3(KTR_NET, "ng_netflow(): fib %d setup to %p (%p)",
		    fib, fe, priv_to_fib(priv, fib));
		atomic_fetchadd_32(&priv->info.nfinfo_alloc_fibs, 1);
	}

	return (0);
}

/* Free all flow cache memory. Called from the node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
#ifdef INET6
	struct flow6_entry	*fle6, *fle61;
	struct flow6_hash_entry	*hsh6;
#endif
	struct netflow_export_item exp;
	fib_export_p fe;
	int i;

	bzero(&exp, sizeof(exp));

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			fe = priv_to_fib(priv, fle->f.r.fib);
			expire_flow(priv, fe, fle, NG_QUEUE);
		}
#ifdef INET6
	for (hsh6 = priv->hash6, i = 0; i < NBUCKETS; hsh6++, i++)
		TAILQ_FOREACH_SAFE(fle6, &hsh6->head, fle6_hash, fle61) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			fe = priv_to_fib(priv, fle6->f.r.fib);
			expire_flow(priv, fe, (struct flow_entry *)fle6,
			    NG_QUEUE);
		}
#endif

	uma_zdestroy(priv->zone);
	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash != NULL)
		free(priv->hash, M_NETFLOW_HASH);
#ifdef INET6
	uma_zdestroy(priv->zone6);
	/* Destroy hash mutexes. */
	for (i = 0, hsh6 = priv->hash6; i < NBUCKETS; i++, hsh6++)
		mtx_destroy(&hsh6->mtx);

	/* Free hash memory. */
	if (priv->hash6 != NULL)
		free(priv->hash6, M_NETFLOW_HASH);
#endif

	for (i = 0; i < RT_NUMFIBS; i++) {
		if ((fe = priv_to_fib(priv, i)) == NULL)
			continue;

		if (fe->exp.item != NULL)
			export_send(priv, fe, fe->exp.item, NG_QUEUE);

		if (fe->exp.item9 != NULL)
			export9_send(priv, fe, fe->exp.item9,
			    fe->exp.item9_opt, NG_QUEUE);

		mtx_destroy(&fe->export_mtx);
		mtx_destroy(&fe->export9_mtx);
		free(fe, M_NETGRAPH);
	}

	ng_netflow_v9_cache_flush(priv);
}

/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, fib_export_p fe, struct ip *ip,
	caddr_t upper_ptr, uint8_t upper_proto, uint8_t is_frag,
	unsigned int src_if_index)
{
	register struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	struct flow_rec		r;
	int			hlen, plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;
	uint16_t		eproto;

	/* Try to fill in flow_rec r. */
	bzero(&r, sizeof(r));
	/* Check version. */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* Verify minimum header length. */
	hlen = ip->ip_hl << 2;

	if (hlen < sizeof(struct ip))
		return (EINVAL);

	eproto = ETHERTYPE_IP;
	/* Assume L4 template by default. */
	r.flow_type = NETFLOW_V9_FLOW_V4_L4;

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;
	r.fib = fe->fib;

	/* Save packet length. */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	r.r_i_ifx = src_if_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Subsequent fragments will be recorded simply as IP packets with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know, it looks like a bug. But I don't want to re-implement
	 * IP packet reassembly here. Anyway, the (in)famous trafd works
	 * this way - and nobody has complained yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t)ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t)ip + hlen);
			break;
		}

	atomic_fetchadd_32(&priv->info.nfinfo_packets, 1);
	/* XXX: atomic */
	priv->info.nfinfo_bytes += plen;

	/* Find the hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry. If we encounter an
	 * entry that should be expired, purge it. We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
			    fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow in an
		 * active way:
		 * - it hit the active timeout
		 * - a TCP connection closed
		 * - it is going to overflow its byte counter
		 */
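		/*
		 * Note: the CNTR_MAX - IF_MAXMTU margin leaves headroom
		 * for at least one more maximum-sized packet before the
		 * byte counter could wrap.
		 */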
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (CNTR_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
			    fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest; move it to the tail,
			 * if it isn't there already. The next search will
			 * locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	return (error);
}

#ifdef INET6
/* Insert an IPv6 packet into the flow cache. */
int
ng_netflow_flow6_add(priv_p priv, fib_export_p fe, struct ip6_hdr *ip6,
	caddr_t upper_ptr, uint8_t upper_proto, uint8_t is_frag,
	unsigned int src_if_index)
{
	register struct flow6_entry	*fle6 = NULL, *fle61;
	struct flow6_hash_entry		*hsh6;
	struct flow6_rec		r;
	int			plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;

	/* Check version. */
	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
		return (EINVAL);

	bzero(&r, sizeof(r));

	r.src.r_src6 = ip6->ip6_src;
	r.dst.r_dst6 = ip6->ip6_dst;
	r.fib = fe->fib;

	/* Assume L4 template by default. */
	r.flow_type = NETFLOW_V9_FLOW_V6_L4;

	/* Save packet length. */
	plen = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);

	/* XXX: set DSCP/CoS value. */
#if 0
	r.r_tos = ip->ip_tos;
#endif
	if (is_frag == 0) {
		switch (upper_proto) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)upper_ptr;
			r.r_ports = *(uint32_t *)upper_ptr;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
			r.r_ports = *(uint32_t *)upper_ptr;
			break;
		}
	}

	r.r_ip_p = upper_proto;
	r.r_i_ifx = src_if_index;

	atomic_fetchadd_32(&priv->info.nfinfo_packets6, 1);
	/* XXX: atomic */
	priv->info.nfinfo_bytes6 += plen;

	/* Find the hash slot. */
	hsh6 = &priv->hash6[ip6_hash(&r)];

	mtx_lock(&hsh6->mtx);

	/*
	 * Go through the hash and find our entry. If we encounter an
	 * entry that should be expired, purge it. We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle6, &hsh6->head, f6head, fle6_hash, fle61) {
		if (fle6->f.version != IP6VERSION)
			continue;
		if (bcmp(&r, &fle6->f.r, sizeof(struct flow6_rec)) == 0)
			break;
		if ((INACTIVE(fle6) && SMALL(fle6)) || AGED(fle6)) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			expire_flow(priv, priv_to_fib(priv, fle6->f.r.fib),
			    (struct flow_entry *)fle6, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle6 != NULL) {			/* An existing entry. */

		fle6->f.bytes += plen;
		fle6->f.packets++;
		fle6->f.tcp_flags |= tcp_flags;
		fle6->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow in an
		 * active way:
		 * - it hit the active timeout
		 * - a TCP connection closed
		 * - it is going to overflow its byte counter
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle6) ||
		    (fle6->f.bytes >= (CNTR_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			expire_flow(priv, priv_to_fib(priv, fle6->f.r.fib),
			    (struct flow_entry *)fle6, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest; move it to the tail,
			 * if it isn't there already. The next search will
			 * locate it quicker.
			 */
			if (fle6 != TAILQ_LAST(&hsh6->head, f6head)) {
				TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
				TAILQ_INSERT_TAIL(&hsh6->head, fle6, fle6_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash6_insert(priv, hsh6, &r, plen, tcp_flags);

	mtx_unlock(&hsh6->mtx);

	return (error);
}
#endif

/*
 * Return records from the cache to userland.
 *
 * TODO: matching a particular IP should be done in the kernel, here.
 * XXX: IPv6 flows will return random data
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry	*hsh;
	struct flow_entry	*fle;
	struct ngnf_flows	*data;
	int	i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run. */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS - 1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer no more than NREC_AT_ONCE records; more data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us. Then, we will restart with the new entry.
	 *
	 * The resulting cache snapshot can be inaccurate for the
	 * following reasons:
	 *  - we skip locked hash entries
	 *  - we bail out if someone wants our entry
	 *  - we skip the rest of the chain when we hit NREC_AT_ONCE
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}

/* We have a full datagram in privdata. Send it to the export hook. */
static int
export_send(priv_p priv, fib_export_p fe, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill in the mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	    header->count + sizeof(struct netflow_v5_header);

	/* Fill in the export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs  = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = fe->domain_id;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&fe->flow_seq,
	    header->count));
	header->count = htons(header->count);
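
	/*
	 * Note the ordering above: flow_seq must be advanced by the
	 * record count while the count is still in host byte order;
	 * only then is the count byte-swapped for the wire.
	 */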

	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}

/* Add an export record to the dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in the export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;
	rec->pad1     = 0;
	rec->pad2     = 0;

	/* Unsupported fields. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
#ifdef INET6
	struct flow6_entry	*fle6, *fle61;
	struct flow6_hash_entry	*hsh6;
#endif
	priv_p			priv = (priv_p)arg;
	uint32_t		used;
	int			i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the predicted
			 * hash collision ratio is small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, priv_to_fib(priv,
				    fle->f.r.fib), fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

#ifdef INET6
	for (hsh6 = priv->hash6, i = 0; i < NBUCKETS; hsh6++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh6->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used6);
		TAILQ_FOREACH_SAFE(fle6, &hsh6->head, fle6_hash, fle61) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh6->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the predicted
			 * hash collision ratio is small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle6))
				break;

			if ((INACTIVE(fle6) && (SMALL(fle6) ||
			    (used > (NBUCKETS*2)))) || AGED(fle6)) {
				TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
				expire_flow(priv, priv_to_fib(priv,
				    fle6->f.r.fib), (struct flow_entry *)fle6,
				    NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh6->mtx);
	}
#endif

	/* Schedule the next expiry run. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}
1147