/* $NetBSD: ip6_flow.c,v 1.29 2016/07/26 05:53:30 ozaki-r Exp $ */

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Liam J. Foy
 * <liamjfoy@netbsd.org> and Matt Thomas <matt@netbsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * IPv6 version was developed by Liam J. Foy. Original source existed in IPv4
 * format developed by Matt Thomas. Thanks to Joerg Sonnenberger, Matt
 * Thomas and Christos Zoulas.
 *
 * Thanks to Liverpool John Moores University, especially Dr. David Llewellyn-Jones
 * for providing resources (to test) and Professor Madjid Merabti.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v 1.29 2016/07/26 05:53:30 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>
/*
 * IPv6 Fast Forward caches and hashes flows from a source to a destination.
 *
 * Upon a successful forward, IPv6FF caches and hashes details such as the
 * route, source and destination.  Once another packet is received that
 * matches the source and destination, the packet is handed straight to
 * if_output using the cached details.
 *
 * Example:
 * ether/fddi_input -> ip6flow_fastforward -> if_output
 */
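
/*
 * Flows are created (or refreshed) by ip6flow_create() after the normal
 * IPv6 stack has successfully forwarded a packet; subsequent packets of
 * the same flow can then take the fast path above.
 */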

static struct pool ip6flow_pool;

LIST_HEAD(ip6flowhead, ip6flow);

/*
 * We could use IPv4 defines (IPFLOW_HASHBITS) but we'll
 * use our own (possibly for future expansion).
 */
#define IP6FLOW_TIMER			(5 * PR_SLOWHZ)
#define IP6FLOW_DEFAULT_HASHSIZE	(1 << IP6FLOW_HASHBITS)
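
/*
 * IP6FLOW_TIMER is expressed in PR_SLOWHZ ticks, so a flow that sees no
 * traffic for roughly five seconds is expired by the slow timeout below.
 */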

/*
 * ip6_flow.c internal lock.
 * If we used softnet_lock, it would cause a recursive lock.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ip6flow_lock;
static struct ip6flowhead *ip6flowtable = NULL;
static struct ip6flowhead ip6flowlist;
static int ip6flow_inuse;

static void ip6flow_slowtimo_work(struct work *, void *);
static struct workqueue *ip6flow_slowtimo_wq;
static struct work ip6flow_slowtimo_wk;
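
/*
 * The slow timeout itself only schedules work: ip6flow_slowtimo() enqueues
 * ip6flow_slowtimo_wk and the actual scan of the flow list runs from the
 * workqueue in ip6flow_slowtimo_work().  The ip6flow_work_enqueued flag
 * (defined further below) ensures at most one work item is pending at a time.
 */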

/*
 * Insert an ip6flow into both the hash bucket and the global flow list.
 */
#define IP6FLOW_INSERT(bucket, ip6f) \
do { \
	LIST_INSERT_HEAD((bucket), (ip6f), ip6f_hash); \
	LIST_INSERT_HEAD(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

/*
 * Remove an ip6flow from both the hash bucket and the global flow list.
 */
#define IP6FLOW_REMOVE(ip6f) \
do { \
	LIST_REMOVE((ip6f), ip6f_hash); \
	LIST_REMOVE((ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IP6FLOW_DEFAULT
#define IP6FLOW_DEFAULT 256
#endif

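/*
 * ip6_maxflows caps the number of cached flows and is exposed as the
 * net.inet6.ip6.maxflows sysctl; setting it to 0 disables fast forwarding
 * (e.g. "sysctl -w net.inet6.ip6.maxflows=0").  ip6_hashsize is the number
 * of hash buckets and is assumed to be a power of two (see ip6flow_hash()).
 */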
int ip6_maxflows = IP6FLOW_DEFAULT;
int ip6_hashsize = IP6FLOW_DEFAULT_HASHSIZE;

/*
 * Calculate hash table position.
 */
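/*
 * The hash sums the 32-bit words of the source and destination addresses,
 * folds those sums into the flow word of the header in IP6FLOW_HASHBITS-sized
 * steps, and masks the result with (ip6_hashsize - 1) to pick a bucket.
 */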
static size_t
ip6flow_hash(const struct ip6_hdr *ip6)
{
	size_t hash;
	uint32_t dst_sum, src_sum;
	size_t idx;

	src_sum = ip6->ip6_src.s6_addr32[0] + ip6->ip6_src.s6_addr32[1]
	    + ip6->ip6_src.s6_addr32[2] + ip6->ip6_src.s6_addr32[3];
	dst_sum = ip6->ip6_dst.s6_addr32[0] + ip6->ip6_dst.s6_addr32[1]
	    + ip6->ip6_dst.s6_addr32[2] + ip6->ip6_dst.s6_addr32[3];

	hash = ip6->ip6_flow;

	for (idx = 0; idx < 32; idx += IP6FLOW_HASHBITS)
		hash += (dst_sum >> (32 - idx)) + (src_sum >> idx);

	return hash & (ip6_hashsize - 1);
}

/*
 * Check to see if a flow already exists - if so return it.
 */
static struct ip6flow *
ip6flow_lookup(const struct ip6_hdr *ip6)
{
	size_t hash;
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	hash = ip6flow_hash(ip6);

	LIST_FOREACH(ip6f, &ip6flowtable[hash], ip6f_hash) {
		if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6f->ip6f_dst)
		    && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6f->ip6f_src)
		    && ip6f->ip6f_flow == ip6->ip6_flow) {
			/* A cached flow has been found. */
			return ip6f;
		}
	}

	return NULL;
}

void
ip6flow_poolinit(void)
{

	pool_init(&ip6flow_pool, sizeof(struct ip6flow), 0, 0, 0, "ip6flowpl",
	    NULL, IPL_NET);
}

/*
 * Allocate memory and initialise lists.  This function is called
 * from ip6_init and thereafter to resize the hash table.
 * If a newly sized table cannot be malloc'ed we just continue
 * to use the old one.
 */
static int
ip6flow_init_locked(int table_size)
{
	struct ip6flowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ip6flow_lock));

	new_table = (struct ip6flowhead *)malloc(sizeof(struct ip6flowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ip6flowtable != NULL)
		free(ip6flowtable, M_RTABLE);

	ip6flowtable = new_table;
	ip6_hashsize = table_size;

	LIST_INIT(&ip6flowlist);
	for (i = 0; i < ip6_hashsize; i++)
		LIST_INIT(&ip6flowtable[i]);

	return 0;
}

int
ip6flow_init(int table_size)
{
	int ret, error;

	error = workqueue_create(&ip6flow_slowtimo_wq, "ip6flow_slowtimo",
	    ip6flow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ip6flow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ip6flow_lock);
	ret = ip6flow_init_locked(table_size);
	mutex_exit(&ip6flow_lock);

	return ret;
}

/*
 * IPv6 Fast Forward routine.  Attempt to forward the packet -
 * if any problems are found, return to the main IPv6 input
 * routine to deal with them.
 */
int
ip6flow_fastforward(struct mbuf **mp)
{
	struct ip6flow *ip6f;
	struct ip6_hdr *ip6;
	struct rtentry *rt;
	struct mbuf *m;
	const struct sockaddr *dst;
	int error;
	int ret = 0;

	mutex_enter(&ip6flow_lock);

	/*
	 * Are we forwarding packets and have flows?
	 */
	if (!ip6_forwarding || ip6flow_inuse == 0)
		goto out;

	m = *mp;
	/*
	 * At least size of IPv6 Header?
	 */
	if (m->m_len < sizeof(struct ip6_hdr))
		goto out;
	/*
	 * Was packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	if (IP6_HDR_ALIGNED_P(mtod(m, const void *)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			goto out;
		}
		*mp = m;
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			goto out;
		}
		*mp = m;
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		/* Bad version. */
		goto out;
	}

	/*
	 * If we have a hop-by-hop extension we must process it.
	 * We just leave this up to ip6_input to deal with.
	 */
	if (ip6->ip6_nxt == IPPROTO_HOPOPTS)
		goto out;

	/*
	 * Attempt to find a flow.
	 */
	if ((ip6f = ip6flow_lookup(ip6)) == NULL) {
		/* No flow found. */
		goto out;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & RTF_BLACKHOLE) != 0)
		goto out;

	/*
	 * Packet size greater than MTU?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu) {
		/* Return to main IPv6 input function. */
		goto out;
	}

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

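	/*
	 * If decrementing the hop limit would take it to zero (or below),
	 * hand the packet back to the slow path, which can generate the
	 * appropriate ICMPv6 time exceeded error.
	 */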
	if (ip6->ip6_hlim <= IPV6_HLIMDEC)
		goto out;

	/* Decrement hop limit (same as TTL) */
	ip6->ip6_hlim -= IPV6_HLIMDEC;

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ip6f->ip6f_ro);

	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	ip6f->ip6f_uses++;

	/* Send on its way - straight to the interface output routine. */
	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		ip6f->ip6f_dropped++;
	} else {
		ip6f->ip6f_forwarded++;
	}
	ret = 1;
 out:
	mutex_exit(&ip6flow_lock);
	return ret;
}

/*
 * Add the IPv6 flow statistics to the main IPv6 statistics.
 */
static void
ip6flow_addstats(const struct ip6flow *ip6f)
{
	struct rtentry *rt;
	uint64_t *ip6s;

	if ((rt = rtcache_validate(&ip6f->ip6f_ro)) != NULL)
		rt->rt_use += ip6f->ip6f_uses;
	ip6s = IP6_STAT_GETREF();
	ip6s[IP6_STAT_FASTFORWARDFLOWS] = ip6flow_inuse;
	ip6s[IP6_STAT_CANTFORWARD] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_ODROPPED] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_TOTAL] += ip6f->ip6f_uses;
	ip6s[IP6_STAT_FORWARD] += ip6f->ip6f_forwarded;
	ip6s[IP6_STAT_FASTFORWARD] += ip6f->ip6f_forwarded;
	IP6_STAT_PUTREF();
}

/*
 * Add statistics and free the flow.
 */
static void
ip6flow_free(struct ip6flow *ip6f)
{

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IP6FLOW_REMOVE(ip6f);

	ip6flow_inuse--;
	ip6flow_addstats(ip6f);
	rtcache_free(&ip6f->ip6f_ro);
	pool_put(&ip6flow_pool, ip6f);
}

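/*
 * Reap flows until the cache is back within ip6_maxflows.  With just_one
 * set, evict a single flow and hand it back to the caller (statistics
 * added and route freed, but not returned to the pool) so that the memory
 * can be reused for a new flow; see ip6flow_create() below.
 */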
static struct ip6flow *
ip6flow_reap_locked(int just_one)
{

	KASSERT(mutex_owned(&ip6flow_lock));

	while (just_one || ip6flow_inuse > ip6_maxflows) {
		struct ip6flow *ip6f, *maybe_ip6f = NULL;

		ip6f = LIST_FIRST(&ip6flowlist);
		while (ip6f != NULL) {
			/*
			 * If this no longer points to a valid route -
			 * reclaim it.
			 */
			if (rtcache_validate(&ip6f->ip6f_ro) == NULL)
				goto done;
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (maybe_ip6f == NULL ||
			    ip6f->ip6f_timer < maybe_ip6f->ip6f_timer ||
			    (ip6f->ip6f_timer == maybe_ip6f->ip6f_timer &&
			     ip6f->ip6f_last_uses + ip6f->ip6f_uses <
			     maybe_ip6f->ip6f_last_uses +
			     maybe_ip6f->ip6f_uses))
				maybe_ip6f = ip6f;
			ip6f = LIST_NEXT(ip6f, ip6f_list);
		}
		ip6f = maybe_ip6f;
	done:
		/*
		 * Remove the entry from the flow table.
		 */
		IP6FLOW_REMOVE(ip6f);

		rtcache_free(&ip6f->ip6f_ro);
		if (just_one) {
			ip6flow_addstats(ip6f);
			return ip6f;
		}
		ip6flow_inuse--;
		ip6flow_addstats(ip6f);
		pool_put(&ip6flow_pool, ip6f);
	}
	return NULL;
}

/*
 * Reap one or more flows - ip6flow_reap may remove
 * multiple flows if net.inet6.ip6.maxflows is reduced.
 */
struct ip6flow *
ip6flow_reap(int just_one)
{
	struct ip6flow *ip6f;

	mutex_enter(&ip6flow_lock);
	ip6f = ip6flow_reap_locked(just_one);
	mutex_exit(&ip6flow_lock);
	return ip6f;
}

static unsigned int ip6flow_work_enqueued = 0;

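/*
 * Workqueue body for the slow timeout: free flows whose timer has expired
 * or whose cached route is no longer valid, and fold the per-interval
 * counters of the surviving flows into the global IPv6 statistics before
 * resetting them for the next interval.
 */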
void
ip6flow_slowtimo_work(struct work *wk, void *arg)
{
	struct ip6flow *ip6f, *next_ip6f;

	/* We can allow another work item to be enqueued from this point on */
	atomic_swap_uint(&ip6flow_work_enqueued, 0);

	mutex_enter(softnet_lock);
	mutex_enter(&ip6flow_lock);
	KERNEL_LOCK(1, NULL);

	for (ip6f = LIST_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		next_ip6f = LIST_NEXT(ip6f, ip6f_list);
		if (PRT_SLOW_ISEXPIRED(ip6f->ip6f_timer) ||
		    rtcache_validate(&ip6f->ip6f_ro) == NULL) {
			ip6flow_free(ip6f);
		} else {
			ip6f->ip6f_last_uses = ip6f->ip6f_uses;
			ip6flow_addstats(ip6f);
			ip6f->ip6f_uses = 0;
			ip6f->ip6f_dropped = 0;
			ip6f->ip6f_forwarded = 0;
		}
	}

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(&ip6flow_lock);
	mutex_exit(softnet_lock);
}

void
ip6flow_slowtimo(void)
{

	/* Avoid enqueuing another work item while one is already enqueued */
	if (atomic_swap_uint(&ip6flow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ip6flow_slowtimo_wq, &ip6flow_slowtimo_wk, NULL);
}

/*
 * We have successfully forwarded a packet using the normal
 * IPv6 stack. Now create/update a flow.
 */
void
ip6flow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip6_hdr *ip6;
	struct ip6flow *ip6f;
	size_t hash;

	mutex_enter(&ip6flow_lock);

	ip6 = mtod(m, const struct ip6_hdr *);

	/*
	 * If IPv6 Fast Forward is disabled, don't create a flow.
	 * It can be disabled by setting net.inet6.ip6.maxflows to 0.
	 *
	 * Don't create a flow for ICMPv6 messages.
	 */
	if (ip6_maxflows == 0 || ip6->ip6_nxt == IPPROTO_IPV6_ICMP) {
		mutex_exit(&ip6flow_lock);
		return;
	}

	KERNEL_LOCK(1, NULL);

	/*
	 * See if a flow already exists.  If so:
	 * - Remove the flow
	 * - Add flow statistics
	 * - Free the route
	 * - Reset statistics
	 *
	 * If a flow doesn't exist, allocate a new one if
	 * ip6_maxflows hasn't reached its limit.  If it has
	 * been reached, reap some flows.
	 */
	ip6f = ip6flow_lookup(ip6);
	if (ip6f == NULL) {
		if (ip6flow_inuse >= ip6_maxflows) {
			ip6f = ip6flow_reap_locked(1);
		} else {
			ip6f = pool_get(&ip6flow_pool, PR_NOWAIT);
			if (ip6f == NULL)
				goto out;
			ip6flow_inuse++;
		}
		memset(ip6f, 0, sizeof(*ip6f));
	} else {
		IP6FLOW_REMOVE(ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		ip6f->ip6f_uses = 0;
		ip6f->ip6f_last_uses = 0;
		ip6f->ip6f_dropped = 0;
		ip6f->ip6f_forwarded = 0;
	}

	/*
	 * Fill in the updated/new details.
	 */
	rtcache_copy(&ip6f->ip6f_ro, ro);
	ip6f->ip6f_dst = ip6->ip6_dst;
	ip6f->ip6f_src = ip6->ip6_src;
	ip6f->ip6f_flow = ip6->ip6_flow;
	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ip6flow_hash(ip6);
	IP6FLOW_INSERT(&ip6flowtable[hash], ip6f);

 out:
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(&ip6flow_lock);
}

/*
 * Invalidate/remove all flows - if new_size is positive we
 * resize the hash table.
 */
int
ip6flow_invalidate_all(int new_size)
{
	struct ip6flow *ip6f, *next_ip6f;
	int error;

	error = 0;

	mutex_enter(&ip6flow_lock);

	for (ip6f = LIST_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		next_ip6f = LIST_NEXT(ip6f, ip6f_list);
		ip6flow_free(ip6f);
	}

	if (new_size)
		error = ip6flow_init_locked(new_size);

	mutex_exit(&ip6flow_lock);

	return error;
}