/*
    pmacct (Promiscuous mode IP Accounting package)
    pmacct is Copyright (C) 2003-2019 by Paolo Lucente
*/

/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/

/* includes */
#include "pmacct.h"
#include "addr.h"
#include "pmacct-data.h"
#include "plugin_hooks.h"
#include "ip_flow.h"
#include "classifier.h"
#include "jhash.h"

/* Global variables */
struct ip_flow **ip_flow_table;
struct flow_lru_l flow_lru_list;

struct ip_flow6 **ip_flow_table6;
struct flow_lru_l6 flow_lru_list6;

u_int32_t flt_total_nodes;
time_t flt_prune_deadline;
time_t flt_emergency_prune;
time_t flow_generic_lifetime;
time_t flow_tcpest_lifetime;
u_int32_t flt_trivial_hash_rnd = 140281; /* arbitrary fixed seed for the flow hash */

u_int32_t flt6_total_nodes;
time_t flt6_prune_deadline;
time_t flt6_emergency_prune;

void init_ip_flow_handler()
{
  init_ip4_flow_handler();
  init_ip6_flow_handler();
}

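/* init_ip4_flow_handler() sizes the IPv4 flow machinery: the node budget
   (flt_total_nodes) is derived from flow_bufsz, the bucket count from
   flow_hashsz; it then allocates the hash table, seeds the LRU list with a
   dummy root node, schedules the first prune and resolves the generic and
   TCP-established flow lifetimes from the configuration. */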
void init_ip4_flow_handler()
{
  int size;

  if (config.flow_bufsz) flt_total_nodes = config.flow_bufsz / sizeof(struct ip_flow);
  else flt_total_nodes = DEFAULT_FLOW_BUFFER_SIZE / sizeof(struct ip_flow);

  if (!config.flow_hashsz) config.flow_hashsz = FLOW_TABLE_HASHSZ;
  size = sizeof(struct ip_flow) * config.flow_hashsz;
  ip_flow_table = (struct ip_flow **) malloc(size);
  assert(ip_flow_table);

  memset(ip_flow_table, 0, size);
  flow_lru_list.root = (struct ip_flow *) malloc(sizeof(struct ip_flow));
  flow_lru_list.last = flow_lru_list.root;
  memset(flow_lru_list.root, 0, sizeof(struct ip_flow));
  flt_prune_deadline = time(NULL)+FLOW_TABLE_PRUNE_INTERVAL;
  flt_emergency_prune = 0;

  if (config.flow_lifetime) flow_generic_lifetime = config.flow_lifetime;
  else flow_generic_lifetime = FLOW_GENERIC_LIFETIME;

  if (config.flow_tcp_lifetime) flow_tcpest_lifetime = config.flow_tcp_lifetime;
  else {
    if (config.classifiers_path) flow_tcpest_lifetime = FLOW_TCPEST_LIFETIME;
    else flow_tcpest_lifetime = flow_generic_lifetime;
  }
}

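/* Per-packet entry point for IPv4: runs a periodic prune of stale flows,
   then looks the packet up in (or inserts it into) the flow table. */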
void ip_flow_handler(struct packet_ptrs *pptrs)
{
  struct timeval now;

  gettimeofday(&now, NULL);

  if (now.tv_sec > flt_prune_deadline) {
    prune_old_flows(&now);
    flt_prune_deadline = now.tv_sec+FLOW_TABLE_PRUNE_INTERVAL;
  }

  find_flow(&now, pptrs);
}

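/* Tracks a minimal TCP state machine per flow direction: 'idx' is the
   direction of the current packet (0 = forward, 1 = reverse, as returned by
   normalize_flow()) and 'rev' the opposite one. Clearing tcp_flags[] marks
   the flow ESTABLISHED, which grants it the longer flow_tcpest_lifetime. */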
void evaluate_tcp_flags(struct timeval *now, struct packet_ptrs *pptrs, struct ip_flow_common *fp, unsigned int idx)
{
  unsigned int rev = idx ? 0 : 1;

  if (fp->proto == IPPROTO_TCP) {
    /* evaluating the transition to the ESTABLISHED state: we need to be as
       precise as possible since the lifetime of an established flow is quite
       long. We check that we have a) a SYN flag in the forward direction,
       b) SYN+ACK in the reverse one and c) that cur_seqno == syn_seqno+1 holds */
    if (fp->tcp_flags[idx] & TH_SYN && fp->tcp_flags[rev] & TH_SYN &&
        fp->tcp_flags[rev] & TH_ACK) {
      if (ntohl(((struct pm_tcphdr *)pptrs->tlh_ptr)->th_seq) == fp->last_tcp_seq+1) {
        /* The flow successfully entered the ESTABLISHED state: clearing flags */
        fp->tcp_flags[idx] = FALSE;
        fp->tcp_flags[rev] = FALSE;
      }
    }

    if (pptrs->tcp_flags) {
      if (pptrs->tcp_flags & TH_SYN) {
        fp->tcp_flags[idx] = TH_SYN;
        if (pptrs->tcp_flags & TH_ACK) fp->tcp_flags[idx] |= TH_ACK;
        else fp->last_tcp_seq = ntohl(((struct pm_tcphdr *)pptrs->tlh_ptr)->th_seq);
      }

      if (pptrs->tcp_flags & TH_FIN || pptrs->tcp_flags & TH_RST) {
        fp->tcp_flags[idx] = pptrs->tcp_flags;
        fp->tcp_flags[rev] = pptrs->tcp_flags;
      }
    }
  }
}

void clear_tcp_flow_cmn(struct ip_flow_common *fp, unsigned int idx)
{
  fp->last[idx].tv_sec = 0;
  fp->last[idx].tv_usec = 0;
  fp->tcp_flags[idx] = 0;
  fp->class[idx] = 0;
  memset(&fp->cst[idx], 0, CSSz);
}

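/* find_flow() canonicalizes the 5-tuple, hashes it into a bucket and walks
   the bucket's chain: a live match refreshes the flow, an expired match is
   recycled as a new flow. While scanning it remembers the first expired
   node ('candidate') and the chain tail ('last_seen') for create_flow(). */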
void find_flow(struct timeval *now, struct packet_ptrs *pptrs)
{
  struct pm_iphdr my_iph;
  struct pm_tcphdr my_tlh;
  struct pm_iphdr *iphp = &my_iph;
  struct pm_tlhdr *tlhp = (struct pm_tlhdr *) &my_tlh;
  struct ip_flow *fp, *candidate = NULL, *last_seen = NULL;
  unsigned int idx, bucket;

  memcpy(&my_iph, pptrs->iph_ptr, IP4HdrSz);
  memcpy(&my_tlh, pptrs->tlh_ptr, MyTCPHdrSz);
  idx = normalize_flow(&iphp->ip_src.s_addr, &iphp->ip_dst.s_addr, &tlhp->src_port, &tlhp->dst_port);
  bucket = hash_flow(iphp->ip_src.s_addr, iphp->ip_dst.s_addr, tlhp->src_port, tlhp->dst_port, iphp->ip_p);

  for (fp = ip_flow_table[bucket]; fp; fp = fp->next) {
    if (fp->ip_src == iphp->ip_src.s_addr && fp->ip_dst == iphp->ip_dst.s_addr &&
        fp->port_src == tlhp->src_port && fp->port_dst == tlhp->dst_port &&
        fp->cmn.proto == iphp->ip_p) {
      /* flow found; will check for its lifetime */
      if (!is_expired_uni(now, &fp->cmn, idx)) {
        /* still valid flow */
        evaluate_tcp_flags(now, pptrs, &fp->cmn, idx);
        fp->cmn.last[idx].tv_sec = now->tv_sec;
        fp->cmn.last[idx].tv_usec = now->tv_usec;
        pptrs->new_flow = FALSE;
        if (config.classifiers_path) evaluate_classifiers(pptrs, &fp->cmn, idx);
        return;
      }
      else {
        /* stale flow: will start a new one */
        clear_tcp_flow_cmn(&fp->cmn, idx);
        evaluate_tcp_flags(now, pptrs, &fp->cmn, idx);
        fp->cmn.last[idx].tv_sec = now->tv_sec;
        fp->cmn.last[idx].tv_usec = now->tv_usec;
        pptrs->new_flow = TRUE;
        if (config.classifiers_path) evaluate_classifiers(pptrs, &fp->cmn, idx);
        return;
      }
    }
    if (!candidate && is_expired(now, &fp->cmn)) candidate = fp;
    last_seen = fp;
  }

  if (candidate) create_flow(now, candidate, TRUE, bucket, pptrs, iphp, tlhp, idx);
  else create_flow(now, last_seen, FALSE, bucket, pptrs, iphp, tlhp, idx);
}

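/* create_flow() has three paths: reuse an expired node in place
   (is_candidate), append a freshly allocated node after the chain tail, or
   allocate the first node of an empty bucket. New and reused nodes are moved
   to the tail of the LRU list; when the node budget or memory runs out, an
   emergency prune is rate-limited by FLOW_TABLE_EMER_PRUNE_INTERVAL and the
   packet is not accounted as a new flow. */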
void create_flow(struct timeval *now, struct ip_flow *fp, u_int8_t is_candidate, unsigned int bucket, struct packet_ptrs *pptrs,
		 struct pm_iphdr *iphp, struct pm_tlhdr *tlhp, unsigned int idx)
{
  struct ip_flow *newf;

  if (!flt_total_nodes) {
    if (now->tv_sec > flt_emergency_prune+FLOW_TABLE_EMER_PRUNE_INTERVAL) {
      Log(LOG_INFO, "INFO ( %s/core ): Flow/4 buffer full. Skipping flows.\n", config.name);
      flt_emergency_prune = now->tv_sec;
      prune_old_flows(now);
    }
    pptrs->new_flow = FALSE;
    return;
  }

  if (fp) {
    /* a 'non-candidate' is simply the tail (last node) of the
       list; we need to allocate a new node */
    if (!is_candidate) {
      newf = (struct ip_flow *) malloc(sizeof(struct ip_flow));
      if (!newf) {
        if (now->tv_sec > flt_emergency_prune+FLOW_TABLE_EMER_PRUNE_INTERVAL) {
          Log(LOG_INFO, "INFO ( %s/core ): Flow/4 buffer out of memory. Skipping flows.\n", config.name);
          flt_emergency_prune = now->tv_sec;
          prune_old_flows(now);
        }
        pptrs->new_flow = FALSE;
        return;
      }
      else flt_total_nodes--;
      memset(newf, 0, sizeof(struct ip_flow));
      fp->next = newf;
      newf->prev = fp;
      flow_lru_list.last->lru_next = newf; /* placing new node as LRU tail */
      newf->lru_prev = flow_lru_list.last;
      flow_lru_list.last = newf;
      fp = newf;
    }
    else {
      if (fp->lru_next) { /* if fp->lru_next == NULL the node is already the tail */
        fp->lru_prev->lru_next = fp->lru_next;
        fp->lru_next->lru_prev = fp->lru_prev;
        flow_lru_list.last->lru_next = fp;
        fp->lru_prev = flow_lru_list.last;
        fp->lru_next = NULL;
        flow_lru_list.last = fp;
      }
      clear_context_chain(&fp->cmn, 0);
      clear_context_chain(&fp->cmn, 1);
      memset(&fp->cmn, 0, sizeof(struct ip_flow_common));
    }
  }
  else {
    /* we don't have any pointer to existing flows because the current
       bucket doesn't contain any node; we'll allocate the first one */
    fp = (struct ip_flow *) malloc(sizeof(struct ip_flow));
    if (!fp) {
      if (now->tv_sec > flt_emergency_prune+FLOW_TABLE_EMER_PRUNE_INTERVAL) {
        Log(LOG_INFO, "INFO ( %s/core ): Flow/4 buffer out of memory. Skipping flows.\n", config.name);
        flt_emergency_prune = now->tv_sec;
        prune_old_flows(now);
      }
      pptrs->new_flow = FALSE;
      return;
    }
    else flt_total_nodes--;
    memset(fp, 0, sizeof(struct ip_flow));
    ip_flow_table[bucket] = fp;
    flow_lru_list.last->lru_next = fp; /* placing new node as LRU tail */
    fp->lru_prev = flow_lru_list.last;
    flow_lru_list.last = fp;
  }

  fp->ip_src = iphp->ip_src.s_addr;
  fp->ip_dst = iphp->ip_dst.s_addr;
  fp->port_src = tlhp->src_port;
  fp->port_dst = tlhp->dst_port;
  fp->cmn.proto = iphp->ip_p;
  fp->cmn.bucket = bucket;
  evaluate_tcp_flags(now, pptrs, &fp->cmn, idx);
  fp->cmn.last[idx].tv_sec = now->tv_sec;
  fp->cmn.last[idx].tv_usec = now->tv_usec;

  pptrs->new_flow = TRUE;
  if (config.classifiers_path) evaluate_classifiers(pptrs, &fp->cmn, idx);
}

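/* prune_old_flows() walks the LRU list and frees every expired node,
   unlinking it from both its hash bucket chain and the LRU list, returning
   the freed slot to the node budget; the LRU tail is re-anchored at the
   last surviving node. */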
void prune_old_flows(struct timeval *now)
{
  struct ip_flow *fp, *temp, *last_seen = flow_lru_list.root;

  fp = flow_lru_list.root->lru_next;
  while (fp) {
    if (is_expired(now, &fp->cmn)) {
      /* we found a stale element; we'll prune it */
      if (fp->lru_next) temp = fp->lru_next;
      else temp = NULL;

      /* rearranging bucket's pointers */
      if (fp->prev && fp->next) {
        fp->prev->next = fp->next;
        fp->next->prev = fp->prev;
      }
      else if (fp->prev) fp->prev->next = NULL;
      else if (fp->next) {
        ip_flow_table[fp->cmn.bucket] = fp->next;
        fp->next->prev = NULL;
      }
      else ip_flow_table[fp->cmn.bucket] = NULL;

      /* rearranging LRU pointers */
      if (fp->lru_next) {
        fp->lru_next->lru_prev = fp->lru_prev;
        fp->lru_prev->lru_next = fp->lru_next;
      }
      else fp->lru_prev->lru_next = NULL;

      clear_context_chain(&fp->cmn, 0);
      clear_context_chain(&fp->cmn, 1);
      free(fp);
      flt_total_nodes++;

      if (temp) fp = temp;
      else fp = NULL;
    }
    else {
      last_seen = fp;
      fp = fp->lru_next;
    }
  }

  flow_lru_list.last = last_seen;
}

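/* normalize_flow() canonicalizes a 5-tuple so that both directions of a
   session map to the same table entry: the endpoint with the higher port
   (or, on a port tie, the higher address) is stored as source. The return
   value is the direction index of the current packet: FALSE (0) if the
   packet already matched the canonical order, TRUE (1) if it was swapped. */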
unsigned int normalize_flow(u_int32_t *ip_src, u_int32_t *ip_dst,
                u_int16_t *port_src, u_int16_t *port_dst)
{
  u_int16_t port_tmp;
  u_int32_t ip_tmp;

  if (*port_src < *port_dst) {
    port_tmp = *port_src;
    *port_src = *port_dst;
    *port_dst = port_tmp;

    ip_tmp = *ip_src;
    *ip_src = *ip_dst;
    *ip_dst = ip_tmp;

    return TRUE; /* reverse flow */
  }

  if (*port_src == *port_dst) {
    if (*ip_src < *ip_dst) {
      ip_tmp = *ip_src;
      *ip_src = *ip_dst;
      *ip_dst = ip_tmp;

      return TRUE; /* reverse flow */
    }
  }

  return FALSE; /* forward flow */
}

/* hash_flow() is taken (it has another name there) from Linux kernel 2.4;
   see full credits contained in jhash.h. The final mask assumes
   config.flow_hashsz is a power of two. */
unsigned int hash_flow(u_int32_t ip_src, u_int32_t ip_dst,
		u_int16_t port_src, u_int16_t port_dst, u_int8_t proto)
{
  return jhash_3words((u_int32_t)(port_src ^ port_dst) << 16 | proto, ip_src, ip_dst, flt_trivial_hash_rnd) & (config.flow_hashsz-1);
}

/* is_expired() checks for the expiration of the bi-directional flow: it
   returns TRUE only when both the forward and the reverse half of the flow
   have expired, FALSE in any other case. This function may also host any
   further semi-stateful evaluation of specific protocols. */
unsigned int is_expired(struct timeval *now, struct ip_flow_common *fp)
{
  int forward = 0, reverse = 0;

  forward = is_expired_uni(now, fp, 0);
  reverse = is_expired_uni(now, fp, 1);

  if (forward && reverse) return TRUE;
  else return FALSE;
}

/* is_expired_uni() checks for the expiration of the uni-directional flow; returns: TRUE
   if the flow has expired; FALSE in any other case. */
unsigned int is_expired_uni(struct timeval *now, struct ip_flow_common *fp, unsigned int idx)
{
  if (fp->proto == IPPROTO_TCP) {
    /* tcp_flags == 0 ==> the TCP flow is in ESTABLISHED mode */
    if (!fp->tcp_flags[idx]) {
      if (now->tv_sec > fp->last[idx].tv_sec+flow_tcpest_lifetime) return TRUE;
    }
    else {
      if (fp->tcp_flags[idx] & TH_SYN && now->tv_sec > fp->last[idx].tv_sec+FLOW_TCPSYN_LIFETIME) return TRUE;
      if (fp->tcp_flags[idx] & TH_FIN && now->tv_sec > fp->last[idx].tv_sec+FLOW_TCPFIN_LIFETIME) return TRUE;
      if (fp->tcp_flags[idx] & TH_RST && now->tv_sec > fp->last[idx].tv_sec+FLOW_TCPRST_LIFETIME) return TRUE;
    }
  }
  else {
    if (now->tv_sec > fp->last[idx].tv_sec+flow_generic_lifetime) return TRUE;
  }

  return FALSE;
}

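/* IPv6 counterpart of init_ip4_flow_handler(). Note that the lifetime
   globals (flow_generic_lifetime, flow_tcpest_lifetime) are shared between
   the v4 and v6 handlers and are simply recomputed here. */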
void init_ip6_flow_handler()
{
  int size;

  if (config.flow_bufsz) flt6_total_nodes = config.flow_bufsz / sizeof(struct ip_flow6);
  else flt6_total_nodes = DEFAULT_FLOW_BUFFER_SIZE / sizeof(struct ip_flow6);

  if (!config.flow_hashsz) config.flow_hashsz = FLOW_TABLE_HASHSZ;
  size = sizeof(struct ip_flow6) * config.flow_hashsz;
  ip_flow_table6 = (struct ip_flow6 **) malloc(size);
  assert(ip_flow_table6);

  memset(ip_flow_table6, 0, size);
  flow_lru_list6.root = (struct ip_flow6 *) malloc(sizeof(struct ip_flow6));
  flow_lru_list6.last = flow_lru_list6.root;
  memset(flow_lru_list6.root, 0, sizeof(struct ip_flow6));
  flt6_prune_deadline = time(NULL)+FLOW_TABLE_PRUNE_INTERVAL;
  flt6_emergency_prune = 0;

  if (config.flow_lifetime) flow_generic_lifetime = config.flow_lifetime;
  else flow_generic_lifetime = FLOW_GENERIC_LIFETIME;

  if (config.flow_tcp_lifetime) flow_tcpest_lifetime = config.flow_tcp_lifetime;
  else {
    if (config.classifiers_path) flow_tcpest_lifetime = FLOW_TCPEST_LIFETIME;
    else flow_tcpest_lifetime = flow_generic_lifetime;
  }
}

void ip_flow6_handler(struct packet_ptrs *pptrs)
{
  struct timeval now;

  gettimeofday(&now, NULL);

  if (now.tv_sec > flt6_prune_deadline) {
    prune_old_flows6(&now);
    flt6_prune_deadline = now.tv_sec+FLOW_TABLE_PRUNE_INTERVAL;
  }

  find_flow6(&now, pptrs);
}

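/* hash_flow6() folds the two IPv6 addresses and the packed ports ('id')
   into the Jenkins mix; it appears patterned after the same kernel hashing
   code credited in jhash.h. As in hash_flow(), the final mask assumes
   config.flow_hashsz is a power of two. */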
unsigned int hash_flow6(u_int32_t id, struct in6_addr *saddr, struct in6_addr *daddr)
{
  u_int32_t a, b, c;
  u_int32_t *src = (u_int32_t *)saddr, *dst = (u_int32_t *)daddr;

  a = src[0];
  b = src[1];
  c = src[2];

  a += JHASH_GOLDEN_RATIO;
  b += JHASH_GOLDEN_RATIO;
  c += flt_trivial_hash_rnd;
  __jhash_mix(a, b, c);

  a += src[3];
  b += dst[0];
  c += dst[1];
  __jhash_mix(a, b, c);

  a += dst[2];
  b += dst[3];
  c += id;
  __jhash_mix(a, b, c);

  return c & (config.flow_hashsz - 1);
}

unsigned int normalize_flow6(struct in6_addr *saddr, struct in6_addr *daddr,
				u_int16_t *port_src, u_int16_t *port_dst)
{
  struct in6_addr taddr;
  u_int16_t port_tmp;

  if (*port_src < *port_dst) {
    port_tmp = *port_src;
    *port_src = *port_dst;
    *port_dst = port_tmp;

    ip6_addr_cpy(&taddr, saddr);
    ip6_addr_cpy(saddr, daddr);
    ip6_addr_cpy(daddr, &taddr);

    return TRUE; /* reverse flow */
  }

  if (*port_src == *port_dst) {
    if (ip6_addr_cmp(saddr, daddr) < 0) {
      ip6_addr_cpy(&taddr, saddr);
      ip6_addr_cpy(saddr, daddr);
      ip6_addr_cpy(daddr, &taddr);

      return TRUE; /* reverse flow */
    }
  }

  return FALSE; /* forward flow */
}

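/* IPv6 counterpart of find_flow(): same normalize/hash/scan logic, keyed on
   the IPv6 addresses and on pptrs->l4_proto rather than the IPv4 header. */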
void find_flow6(struct timeval *now, struct packet_ptrs *pptrs)
{
  struct ip6_hdr my_iph;
  struct pm_tcphdr my_tlh;
  struct ip6_hdr *iphp = &my_iph;
  struct pm_tlhdr *tlhp = (struct pm_tlhdr *) &my_tlh;
  struct ip_flow6 *fp, *candidate = NULL, *last_seen = NULL;
  unsigned int idx, bucket;

  memcpy(&my_iph, pptrs->iph_ptr, IP6HdrSz);
  memcpy(&my_tlh, pptrs->tlh_ptr, MyTCPHdrSz);
  idx = normalize_flow6(&iphp->ip6_src, &iphp->ip6_dst, &tlhp->src_port, &tlhp->dst_port);
  bucket = hash_flow6((tlhp->src_port << 16) | tlhp->dst_port, &iphp->ip6_src, &iphp->ip6_dst);

  for (fp = ip_flow_table6[bucket]; fp; fp = fp->next) {
    if (!ip6_addr_cmp(&fp->ip_src, &iphp->ip6_src) && !ip6_addr_cmp(&fp->ip_dst, &iphp->ip6_dst) &&
        fp->port_src == tlhp->src_port && fp->port_dst == tlhp->dst_port &&
        fp->cmn.proto == pptrs->l4_proto) {
      /* flow found; will check for its lifetime */
      if (!is_expired_uni(now, &fp->cmn, idx)) {
        /* still valid flow */
        evaluate_tcp_flags(now, pptrs, &fp->cmn, idx);
        fp->cmn.last[idx].tv_sec = now->tv_sec;
        fp->cmn.last[idx].tv_usec = now->tv_usec;
        pptrs->new_flow = FALSE;
        if (config.classifiers_path) evaluate_classifiers(pptrs, &fp->cmn, idx);
        return;
      }
      else {
        /* stale flow: will start a new one */
        clear_tcp_flow_cmn(&fp->cmn, idx);
        evaluate_tcp_flags(now, pptrs, &fp->cmn, idx);
        fp->cmn.last[idx].tv_sec = now->tv_sec;
        fp->cmn.last[idx].tv_usec = now->tv_usec;
        pptrs->new_flow = TRUE;
        if (config.classifiers_path) evaluate_classifiers(pptrs, &fp->cmn, idx);
        return;
      }
    }
    if (!candidate && is_expired(now, &fp->cmn)) candidate = fp;
    last_seen = fp;
  }

  if (candidate) create_flow6(now, candidate, TRUE, bucket, pptrs, iphp, tlhp, idx);
  else create_flow6(now, last_seen, FALSE, bucket, pptrs, iphp, tlhp, idx);
}

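/* IPv6 counterpart of create_flow(): same reuse/append/first-node paths,
   LRU handling and emergency pruning, against the v6 table and counters. */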
void create_flow6(struct timeval *now, struct ip_flow6 *fp, u_int8_t is_candidate, unsigned int bucket,
	          struct packet_ptrs *pptrs, struct ip6_hdr *iphp, struct pm_tlhdr *tlhp, unsigned int idx)
{
  struct ip_flow6 *newf;

  if (!flt6_total_nodes) {
    if (now->tv_sec > flt6_emergency_prune+FLOW_TABLE_EMER_PRUNE_INTERVAL) {
      Log(LOG_INFO, "INFO ( %s/core ): Flow/6 buffer full. Skipping flows.\n", config.name);
      flt6_emergency_prune = now->tv_sec;
      prune_old_flows6(now);
    }
    pptrs->new_flow = FALSE;
    return;
  }

  if (fp) {
    /* a 'non-candidate' is simply the tail (last node) of the
       list; we need to allocate a new node */
    if (!is_candidate) {
      newf = (struct ip_flow6 *) malloc(sizeof(struct ip_flow6));
      if (!newf) {
        if (now->tv_sec > flt6_emergency_prune+FLOW_TABLE_EMER_PRUNE_INTERVAL) {
          Log(LOG_INFO, "INFO ( %s/core ): Flow/6 buffer out of memory. Skipping flows.\n", config.name);
          flt6_emergency_prune = now->tv_sec;
          prune_old_flows6(now);
        }
        pptrs->new_flow = FALSE;
        return;
      }
      else flt6_total_nodes--;
      memset(newf, 0, sizeof(struct ip_flow6));
      fp->next = newf;
      newf->prev = fp;
      flow_lru_list6.last->lru_next = newf; /* placing new node as LRU tail */
      newf->lru_prev = flow_lru_list6.last;
      flow_lru_list6.last = newf;
      fp = newf;
    }
    else {
      if (fp->lru_next) { /* if fp->lru_next == NULL the node is already the tail */
        fp->lru_prev->lru_next = fp->lru_next;
        fp->lru_next->lru_prev = fp->lru_prev;
        flow_lru_list6.last->lru_next = fp;
        fp->lru_prev = flow_lru_list6.last;
        fp->lru_next = NULL;
        flow_lru_list6.last = fp;
      }
      clear_context_chain(&fp->cmn, 0);
      clear_context_chain(&fp->cmn, 1);
      memset(&fp->cmn, 0, sizeof(struct ip_flow_common));
    }
  }
  else {
    /* we don't have any pointer to existing flows because the current
       bucket doesn't contain any node; we'll allocate the first one */
    fp = (struct ip_flow6 *) malloc(sizeof(struct ip_flow6));
    if (!fp) {
      if (now->tv_sec > flt6_emergency_prune+FLOW_TABLE_EMER_PRUNE_INTERVAL) {
        Log(LOG_INFO, "INFO ( %s/core ): Flow/6 buffer out of memory. Skipping flows.\n", config.name);
        flt6_emergency_prune = now->tv_sec;
        prune_old_flows6(now);
      }
      pptrs->new_flow = FALSE;
      return;
    }
    else flt6_total_nodes--;
    memset(fp, 0, sizeof(struct ip_flow6));
    ip_flow_table6[bucket] = fp;
    flow_lru_list6.last->lru_next = fp; /* placing new node as LRU tail */
    fp->lru_prev = flow_lru_list6.last;
    flow_lru_list6.last = fp;
  }

  ip6_addr_cpy(&fp->ip_src, &iphp->ip6_src);
  ip6_addr_cpy(&fp->ip_dst, &iphp->ip6_dst);
  fp->port_src = tlhp->src_port;
  fp->port_dst = tlhp->dst_port;
  fp->cmn.proto = pptrs->l4_proto;
  fp->cmn.bucket = bucket;
  evaluate_tcp_flags(now, pptrs, &fp->cmn, idx);
  fp->cmn.last[idx].tv_sec = now->tv_sec;
  fp->cmn.last[idx].tv_usec = now->tv_usec;

  pptrs->new_flow = TRUE;
  if (config.classifiers_path) evaluate_classifiers(pptrs, &fp->cmn, idx);
}

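/* IPv6 counterpart of prune_old_flows(): walks the v6 LRU list, frees
   expired nodes and returns their slots to flt6_total_nodes. */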
void prune_old_flows6(struct timeval *now)
{
  struct ip_flow6 *fp, *temp, *last_seen = flow_lru_list6.root;

  fp = flow_lru_list6.root->lru_next;
  while (fp) {
    if (is_expired(now, &fp->cmn)) {
      /* we found a stale element; we'll prune it */
      if (fp->lru_next) temp = fp->lru_next;
      else temp = NULL;

      /* rearranging bucket's pointers */
      if (fp->prev && fp->next) {
        fp->prev->next = fp->next;
        fp->next->prev = fp->prev;
      }
      else if (fp->prev) fp->prev->next = NULL;
      else if (fp->next) {
        ip_flow_table6[fp->cmn.bucket] = fp->next;
        fp->next->prev = NULL;
      }
      else ip_flow_table6[fp->cmn.bucket] = NULL;

      /* rearranging LRU pointers */
      if (fp->lru_next) {
        fp->lru_next->lru_prev = fp->lru_prev;
        fp->lru_prev->lru_next = fp->lru_next;
      }
      else fp->lru_prev->lru_next = NULL;

      clear_context_chain(&fp->cmn, 0);
      clear_context_chain(&fp->cmn, 1);
      free(fp);
      flt6_total_nodes++;

      if (temp) fp = temp;
      else fp = NULL;
    }
    else {
      last_seen = fp;
      fp = fp->lru_next;
    }
  }

  flow_lru_list6.last = last_seen;
}