/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_flow.c,v 1.9.2.2 2001/11/04 17:35:31 luigi Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#include <sys/in_cksum.h>

#include <machine/smp.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/netisr2.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define IPFLOW_TIMEOUT_FREQ	2	/* 2/second */
#define IPFLOW_TIMEOUT		(hz / IPFLOW_TIMEOUT_FREQ)

#define	IPFLOW_TIMER		(5 * IPFLOW_TIMEOUT_FREQ)
#define IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */
#define	IPFLOW_HASHSIZE		(1 << IPFLOW_HASHBITS)
#define	IPFLOW_MAX		256

#define IPFLOW_RTENTRY_ISDOWN(rt) \
	(((rt)->rt_flags & RTF_UP) == 0 || \
	 ((rt)->rt_ifp->if_flags & IFF_UP) == 0)

struct netmsg_ipfaddr {
	struct netmsg_base base;
	struct in_addr	ipf_addr;
};

struct ipflow {
	LIST_ENTRY(ipflow) ipf_hash;	/* next ipflow in hash bucket */
	LIST_ENTRY(ipflow) ipf_list;	/* next ipflow in list */

	struct in_addr ipf_dst;		/* destination address */
	struct in_addr ipf_src;		/* source address */
	uint8_t ipf_tos;		/* type-of-service */

	uint8_t ipf_flags;		/* see IPFLOW_FLAG_ */
	uint8_t ipf_pad[2];		/* explicit pad */
	int ipf_refcnt;			/* reference count */

	struct route ipf_ro;		/* associated route entry */
	u_long ipf_uses;		/* number of uses in this period */

	int ipf_timer;			/* remaining lifetime of this entry */
	u_long ipf_dropped;		/* ENOBUFS returned by if_output */
	u_long ipf_errors;		/* other errors returned by if_output */
	u_long ipf_last_uses;		/* number of uses in last period */
} __cachealign;
LIST_HEAD(ipflowhead, ipflow);

#define IPFLOW_FLAG_ONLIST	0x1

struct ipflow_pcpu {
	struct ipflowhead	ipf_table[IPFLOW_HASHSIZE];
	struct ipflowhead	ipf_list;
	int			ipf_inuse;
	struct callout		ipf_timeo;
	struct netmsg_base	ipf_timeo_netmsg;
} __cachealign;

static struct ipflow_pcpu	*ipflow_pcpu_data;

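/*
 * Per-cpu shorthands: each netisr CPU owns a private flow table and
 * flow list, so the lookups and updates below run without locks in
 * the owning netisr thread.
 */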
#define ipflow_inuse		ipflow_pcpu_data[mycpuid].ipf_inuse
#define ipflowtable		ipflow_pcpu_data[mycpuid].ipf_table
#define ipflowlist		ipflow_pcpu_data[mycpuid].ipf_list

static int			ipflow_active = 0;

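/*
 * Each flow holds one base reference on behalf of its hash table;
 * transient references are added across if_output() calls.
 */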
#define IPFLOW_REFCNT_INIT	1

/* ipflow is alive and active */
#define IPFLOW_IS_ACTIVE(ipf)	((ipf)->ipf_refcnt > IPFLOW_REFCNT_INIT)
/* ipflow is alive but not active */
#define IPFLOW_NOT_ACTIVE(ipf)	((ipf)->ipf_refcnt == IPFLOW_REFCNT_INIT)

#define IPFLOW_REF(ipf) \
do { \
	KKASSERT((ipf)->ipf_refcnt > 0); \
	(ipf)->ipf_refcnt++; \
} while (0)

#define IPFLOW_FREE(ipf) \
do { \
	KKASSERT((ipf)->ipf_refcnt > 0); \
	(ipf)->ipf_refcnt--; \
	if ((ipf)->ipf_refcnt == 0) \
		ipflow_free((ipf)); \
} while (0)

#define IPFLOW_INSERT(bucket, ipf) \
do { \
	KKASSERT(((ipf)->ipf_flags & IPFLOW_FLAG_ONLIST) == 0); \
	(ipf)->ipf_flags |= IPFLOW_FLAG_ONLIST; \
	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (0)

#define IPFLOW_REMOVE(ipf) \
do { \
	KKASSERT((ipf)->ipf_flags & IPFLOW_FLAG_ONLIST); \
	(ipf)->ipf_flags &= ~IPFLOW_FLAG_ONLIST; \
	LIST_REMOVE((ipf), ipf_hash); \
	LIST_REMOVE((ipf), ipf_list); \
} while (0)

SYSCTL_NODE(_net_inet_ip, OID_AUTO, ipflow, CTLFLAG_RW, 0, "ip flow");
SYSCTL_INT(_net_inet_ip, IPCTL_FASTFORWARDING, fastforwarding, CTLFLAG_RW,
	   &ipflow_active, 0, "Enable flow-based IP forwarding");

static MALLOC_DEFINE(M_IPFLOW, "ip_flow", "IP flow");

static void	ipflow_free(struct ipflow *);
static void	ipflow_timeo(void *);

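/*
 * Hash the destination, source and TOS by folding the addresses into
 * the table index IPFLOW_HASHBITS bits at a time.  Because the chunk
 * width is not a multiple of 8, byte-aligned patterns in the addresses
 * still spread across the buckets.
 */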
static unsigned
ipflow_hash(struct in_addr dst, struct in_addr src, unsigned tos)
{
	unsigned hash = tos + src.s_addr;
	int idx;

	for (idx = IPFLOW_HASHBITS; idx < 32; idx += IPFLOW_HASHBITS)
		hash += (dst.s_addr >> (32 - idx)) + (src.s_addr >> idx);
	return hash & (IPFLOW_HASHSIZE-1);
}

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	unsigned hash;
	struct ipflow *ipf;

	hash = ipflow_hash(ip->ip_dst, ip->ip_src, ip->ip_tos);
	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr &&
		    ip->ip_src.s_addr == ipf->ipf_src.s_addr &&
		    ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

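/*
 * Attempt to forward the packet along a cached flow.  Return 1 if the
 * packet was consumed by the fast path (handed to if_output(), whether
 * or not that succeeded) or 0 if the caller must take the normal
 * forwarding path.
 */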
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ipflow *ipf;
	struct rtentry *rt;
	struct sockaddr *dst;
	struct ifnet *ifp;
	int error, iplen;

	ASSERT_NETISR_NCPUS(mycpuid);

	/*
	 * Are we forwarding packets?
	 */
	if (!ipforwarding || !ipflow_active)
		return 0;

	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if (m->m_flags & (M_BCAST | M_MCAST))
		return 0;

	/* length checks already done in ip_hashfn() */
	KASSERT(m->m_len >= sizeof(struct ip), ("IP header not in one mbuf"));
	ip = mtod(m, struct ip *);

	/*
	 * IP header with no option and valid version
	 */
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2))
		return 0;

	iplen = ntohs(ip->ip_len);
	/* length checks already done in ip_hashfn() */
	KASSERT(iplen >= sizeof(struct ip),
		("total length less than header length"));
	KASSERT(m->m_pkthdr.len >= iplen, ("mbuf too short"));

	/*
	 * Find a flow.
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL)
		return 0;

	/*
	 * Verify the IP header checksum.
	 */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		if (!(m->m_pkthdr.csum_flags & CSUM_IP_VALID))
			return 0;
	} else {
		/* Must compute it ourselves. */
		if (in_cksum_hdr(ip) != 0)
			return 0;
	}

	/*
	 * Route and interface still up?
	 */
	rt = ipf->ipf_ro.ro_rt;
	if (IPFLOW_RTENTRY_ISDOWN(rt))
		return 0;
	ifp = rt->rt_ifp;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		return 0;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the __const__ attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
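	/*
	 * The comparison below implements the end-around carry of one's
	 * complement addition: if adding the checksum delta would wrap,
	 * subtract the complemented delta instead (the classic RFC 1141
	 * incremental-update technique).
	 */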
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (uint16_t)~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else {
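			/* m_adj() treats a negative length as "trim from the tail". */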
			m_adj(m, iplen - m->m_pkthdr.len);
		}
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;
	ipf->ipf_timer = IPFLOW_TIMER;

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = &ipf->ipf_ro.ro_dst;

	/*
	 * Take a reference on this ipflow before the potentially blocking
	 * ifnet.if_output() call, so the ipflow will not be changed or
	 * reaped behind our back.
	 */
	IPFLOW_REF(ipf);

	error = ifp->if_output(ifp, m, dst, rt);
	if (error) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}

	IPFLOW_FREE(ipf);
	return 1;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
	ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses;
	ipstat.ips_cantforward += ipf->ipf_errors + ipf->ipf_dropped;
	ipstat.ips_total += ipf->ipf_uses;
	ipstat.ips_forward += ipf->ipf_uses;
	ipstat.ips_fastforward += ipf->ipf_uses;
}

static void
ipflow_free(struct ipflow *ipf)
{
	KKASSERT(ipf->ipf_refcnt == 0);
	KKASSERT((ipf->ipf_flags & IPFLOW_FLAG_ONLIST) == 0);

	KKASSERT(ipflow_inuse > 0);
	ipflow_inuse--;

	ipflow_addstats(ipf);
	RTFREE(ipf->ipf_ro.ro_rt);
	kfree(ipf, M_IPFLOW);
}

static void
ipflow_reset(struct ipflow *ipf)
{
	ipflow_addstats(ipf);
	RTFREE(ipf->ipf_ro.ro_rt);
	ipf->ipf_uses = ipf->ipf_last_uses = 0;
	ipf->ipf_errors = ipf->ipf_dropped = 0;
}

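/*
 * Steal the least valuable flow for reuse.  The victim is unhashed and
 * its statistics are folded back, but the allocation itself is not
 * freed; the caller recycles it.
 */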
static struct ipflow *
ipflow_reap(void)
{
	struct ipflow *ipf, *maybe_ipf = NULL;

	LIST_FOREACH(ipf, &ipflowlist, ipf_list) {
		/*
		 * Skip actively used ipflows.
		 */
		if (IPFLOW_IS_ACTIVE(ipf))
			continue;

		/*
		 * If this no longer points to a valid route,
		 * reclaim it.
		 */
		if ((ipf->ipf_ro.ro_rt->rt_flags & RTF_UP) == 0)
			goto done;

		/*
		 * Otherwise choose the one that has been least recently
		 * used or has had the fewest uses in the last 1.5
		 * intervals.
		 */
		if (maybe_ipf == NULL ||
		    ipf->ipf_timer < maybe_ipf->ipf_timer ||
		    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
		     ipf->ipf_last_uses + ipf->ipf_uses <
		     maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses))
			maybe_ipf = ipf;
	}
	if (maybe_ipf == NULL)
		return NULL;

	ipf = maybe_ipf;
done:
	/*
	 * Remove the entry from the flow table and reset its state.
	 */
	IPFLOW_REMOVE(ipf);
	ipflow_reset(ipf);
	return ipf;
}

static void
ipflow_timeo_dispatch(netmsg_t nmsg)
{
	struct ipflow *ipf, *next_ipf;
	struct ipflow_pcpu *pcpu;
	int cpuid = mycpuid;

	ASSERT_NETISR_NCPUS(cpuid);

	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */
	crit_exit();

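	/*
	 * Age the flows: ipf_timer is reloaded to IPFLOW_TIMER on every
	 * use, so an entry whose timer reaches zero here has been idle
	 * for roughly five seconds and is reclaimed.
	 */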
	LIST_FOREACH_MUTABLE(ipf, &ipflowlist, ipf_list, next_ipf) {
		if (--ipf->ipf_timer == 0) {
			IPFLOW_REMOVE(ipf);
			IPFLOW_FREE(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses;
			ipstat.ips_total += ipf->ipf_uses;
			ipstat.ips_forward += ipf->ipf_uses;
			ipstat.ips_fastforward += ipf->ipf_uses;
			ipf->ipf_uses = 0;
		}
	}

	pcpu = &ipflow_pcpu_data[cpuid];
	callout_reset(&pcpu->ipf_timeo, IPFLOW_TIMEOUT, ipflow_timeo, pcpu);
}

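/*
 * Callout handler.  Bounce into the netisr via a netmsg so the per-cpu
 * table is only touched from its owning thread; the MSGF_DONE check
 * avoids re-sending a message that is still in flight.
 */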
static void
ipflow_timeo(void *xpcpu)
{
	struct ipflow_pcpu *pcpu = xpcpu;
	struct lwkt_msg *msg = &pcpu->ipf_timeo_netmsg.lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), msg);
	crit_exit();
}

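/*
 * Cache a flow for the route that was just used to forward this packet,
 * creating a new entry or recycling an inactive one.
 */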
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, struct ip *);
	struct ipflow *ipf;
	unsigned hash;

	ASSERT_NETISR_NCPUS(mycpuid);

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (!ipflow_active || ip->ip_p == IPPROTO_ICMP)
		return;

	/*
	 * See if an existing flow struct exists.  If so, remove it from
	 * its list and free the old route.  If not, try to malloc a new
	 * one (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse == IPFLOW_MAX) {
			ipf = ipflow_reap();
			if (ipf == NULL)
				return;
		} else {
			ipf = kmalloc(sizeof(*ipf), M_IPFLOW,
				      M_NOWAIT | M_ZERO);
			if (ipf == NULL)
				return;
			ipf->ipf_refcnt = IPFLOW_REFCNT_INIT;

			ipflow_inuse++;
		}
	} else {
		if (IPFLOW_NOT_ACTIVE(ipf)) {
			IPFLOW_REMOVE(ipf);
			ipflow_reset(ipf);
		} else {
			/* This ipflow is being used; don't change it */
			KKASSERT(IPFLOW_IS_ACTIVE(ipf));
			return;
		}
	}
	/* This ipflow should not be actively used */
	KKASSERT(IPFLOW_NOT_ACTIVE(ipf));

	/*
	 * Fill in the updated information.
	 */
	ipf->ipf_ro = *ro;
	ro->ro_rt->rt_refcnt++;
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	ipf->ipf_timer = IPFLOW_TIMER;

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip->ip_dst, ip->ip_src, ip->ip_tos);
	IPFLOW_INSERT(&ipflowtable[hash], ipf);
}

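/*
 * Tear down every flow cached on this CPU.
 */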
void
ipflow_flush_oncpu(void)
{
	struct ipflow *ipf;

	/*
	 * FIXME: netisr_ncpus
	 * Change this into an assertion, once routes are duplicated
	 * to only netisr_ncpus.
	 */
	if (mycpuid >= netisr_ncpus)
		return;

	while ((ipf = LIST_FIRST(&ipflowlist)) != NULL) {
		IPFLOW_REMOVE(ipf);
		IPFLOW_FREE(ipf);
	}
}

static void
ipflow_ifaddr_handler(netmsg_t nmsg)
{
	struct netmsg_ipfaddr *amsg = (struct netmsg_ipfaddr *)nmsg;
	struct ipflow *ipf, *next_ipf;

	LIST_FOREACH_MUTABLE(ipf, &ipflowlist, ipf_list, next_ipf) {
		if (ipf->ipf_dst.s_addr == amsg->ipf_addr.s_addr ||
		    ipf->ipf_src.s_addr == amsg->ipf_addr.s_addr) {
			IPFLOW_REMOVE(ipf);
			IPFLOW_FREE(ipf);
		}
	}
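	/*
	 * Chain the message to the next netisr CPU so every per-cpu
	 * flow table is swept.
	 */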
	netisr_forwardmsg(&nmsg->base, mycpuid + 1);
}

static void
ipflow_ifaddr(void *arg __unused, struct ifnet *ifp __unused,
	      enum ifaddr_event event, struct ifaddr *ifa)
{
	struct netmsg_ipfaddr amsg;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return;

	/* Only add/change events need to be handled */
	switch (event) {
	case IFADDR_EVENT_ADD:
	case IFADDR_EVENT_CHANGE:
		break;

	case IFADDR_EVENT_DELETE:
		return;
	}

	netmsg_init(&amsg.base, NULL, &curthread->td_msgport,
		    MSGF_PRIORITY, ipflow_ifaddr_handler);
	amsg.ipf_addr = ifatoia(ifa)->ia_addr.sin_addr;

	netisr_domsg(&amsg.base, 0);
}

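/*
 * Boot-time initialization: allocate the per-cpu flow tables, export
 * the per-cpu "inuse" counters via sysctl and start the aging callouts.
 */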
static void
ipflow_init(void)
{
	char oid_name[32];
	int i;

	ipflow_pcpu_data = kmalloc(sizeof(struct ipflow_pcpu) * netisr_ncpus,
	    M_IPFLOW, M_WAITOK | M_ZERO);

	for (i = 0; i < netisr_ncpus; ++i) {
		struct ipflow_pcpu *pcpu = &ipflow_pcpu_data[i];

		netmsg_init(&pcpu->ipf_timeo_netmsg, NULL, &netisr_adone_rport,
		    MSGF_PRIORITY, ipflow_timeo_dispatch);
		callout_init_mp(&pcpu->ipf_timeo);

		ksnprintf(oid_name, sizeof(oid_name), "inuse%d", i);

		SYSCTL_ADD_INT(NULL,
		    SYSCTL_STATIC_CHILDREN(_net_inet_ip_ipflow), OID_AUTO,
		    oid_name, CTLFLAG_RD, &pcpu->ipf_inuse, 0,
		    "# of ip flows in use");

		callout_reset_bycpu(&pcpu->ipf_timeo, IPFLOW_TIMEOUT,
		    ipflow_timeo, pcpu, i);
	}
	EVENTHANDLER_REGISTER(ifaddr_event, ipflow_ifaddr, NULL,
			      EVENTHANDLER_PRI_ANY);
}
SYSINIT(ipflow, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipflow_init, 0);