1 /*
2     pmacct (Promiscuous mode IP Accounting package)
3     pmacct is Copyright (C) 2003-2019 by Paolo Lucente
4 */
5 
6 /*
7     This program is free software; you can redistribute it and/or modify
8     it under the terms of the GNU General Public License as published by
9     the Free Software Foundation; either version 2 of the License, or
10     (at your option) any later version.
11 
12     This program is distributed in the hope that it will be useful,
13     but WITHOUT ANY WARRANTY; without even the implied warranty of
14     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15     GNU General Public License for more details.
16 
17     You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
19     Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21 
22 /* includes */
23 #include "pmacct.h"
24 #include "addr.h"
25 #include "pmacct-data.h"
26 #include "plugin_hooks.h"
27 #include "ip_frag.h"
28 #include "jhash.h"
29 
/* global variables */
struct ip_fragment *ipft[IPFT_HASHSZ];   /* IPv4 fragment hash table; each slot is a doubly-linked bucket chain */
struct lru_l lru_list;                   /* IPv4 LRU list: root is a zeroed sentinel, last points to the newest node */

struct ip6_fragment *ipft6[IPFT_HASHSZ]; /* IPv6 fragment hash table, same layout as the v4 one */
struct lru_l6 lru_list6;                 /* IPv6 LRU list, same sentinel scheme as lru_list */

u_int32_t ipft_total_nodes;              /* remaining IPv4 node budget, derived from frag_bufsz */
time_t prune_deadline;                   /* next scheduled IPv4 prune (now + PRUNE_INTERVAL) */
time_t emergency_prune;                  /* timestamp of last emergency IPv4 prune (rate limiter) */
u_int32_t trivial_hash_rnd = 140281;     /* fixed hash seed mixed into both hash functions */

u_int32_t ipft6_total_nodes;             /* remaining IPv6 node budget */
time_t prune_deadline6;                  /* next scheduled IPv6 prune */
time_t emergency_prune6;                 /* timestamp of last emergency IPv6 prune */
45 
enable_ip_fragment_handler()46 void enable_ip_fragment_handler()
47 {
48   if (!config.handle_fragments) {
49     config.handle_fragments = TRUE;
50     init_ip_fragment_handler();
51   }
52 }
53 
/* Initializes both fragment engines: IPv4 first, then IPv6. */
void init_ip_fragment_handler()
{
  init_ip4_fragment_handler();
  init_ip6_fragment_handler();
}
59 
init_ip4_fragment_handler()60 void init_ip4_fragment_handler()
61 {
62   if (config.frag_bufsz) ipft_total_nodes = config.frag_bufsz / sizeof(struct ip_fragment);
63   else ipft_total_nodes = DEFAULT_FRAG_BUFFER_SIZE / sizeof(struct ip_fragment);
64 
65   memset(ipft, 0, sizeof(ipft));
66   lru_list.root = (struct ip_fragment *) malloc(sizeof(struct ip_fragment));
67   lru_list.last = lru_list.root;
68   memset(lru_list.root, 0, sizeof(struct ip_fragment));
69   prune_deadline = time(NULL)+PRUNE_INTERVAL;
70   emergency_prune = 0;
71 }
72 
ip_fragment_handler(struct packet_ptrs * pptrs)73 int ip_fragment_handler(struct packet_ptrs *pptrs)
74 {
75   u_int32_t now = time(NULL);
76 
77   if (now > prune_deadline) {
78     prune_old_fragments(now, PRUNE_OFFSET);
79     prune_deadline = now+PRUNE_INTERVAL;
80   }
81   return find_fragment(now, pptrs);
82 }
83 
/* Looks up the packet's (id, src, dst, proto) key in the IPv4 fragment
   table. Returns TRUE when the flow's transport header is available
   (first fragment seen now or earlier; it is copied into pptrs->tlh_ptr),
   FALSE when the first fragment is still missing (the packet's size is
   then just accumulated on the node). Expired matches and misses fall
   through to create_fragment(). */
int find_fragment(u_int32_t now, struct packet_ptrs *pptrs)
{
  struct pm_iphdr *iphp = (struct pm_iphdr *)pptrs->iph_ptr;
  struct ip_fragment *fp, *candidate = NULL, *last_seen = NULL;
  unsigned int bucket = hash_fragment(iphp->ip_id, iphp->ip_src.s_addr,
				      iphp->ip_dst.s_addr, iphp->ip_p);
  int ret;

  /* walk the bucket chain looking for an exact key match */
  for (fp = ipft[bucket]; fp; fp = fp->next) {
    if (fp->ip_id == iphp->ip_id && fp->ip_src == iphp->ip_src.s_addr &&
	fp->ip_dst == iphp->ip_dst.s_addr && fp->ip_p == iphp->ip_p) {
      /* fragment found; will check for its deadline */
      if (fp->deadline > now) {
	if (fp->got_first) {
	  /* transport header already captured: hand it to the packet */
	  // pptrs->tlh_ptr = fp->tlhdr;
	  memcpy(pptrs->tlh_ptr, fp->tlhdr, MyTLHdrSz);

	  pptrs->frag_first_found = TRUE;
	  return TRUE;
	}
	else {
	  if (!(iphp->ip_off & htons(IP_OFFMASK))) {
	    /* we got our first fragment */
	    fp->got_first = TRUE;
	    memcpy(fp->tlhdr, pptrs->tlh_ptr, MyTLHdrSz);

	    /* flush the counters accumulated while the first fragment
	       was missing, so the caller can account them now */
	    pptrs->frag_sum_bytes = fp->a;
	    pptrs->frag_sum_pkts = fp->pa;
	    fp->pa = 0;
	    fp->a = 0;

	    pptrs->frag_first_found = TRUE;
            return TRUE;
	  }
	  else { /* we still don't have the first fragment; increase accumulators */
	    if (!config.ext_sampling_rate) {
	      fp->pa++;
	      fp->a += ntohs(iphp->ip_len);
	    }

	    pptrs->frag_first_found = FALSE;
	    return FALSE;
	  }
	}
      }
      else {
	/* exact match but expired: recycle this node for the new flow */
	candidate = fp;
	if (!candidate->got_first) notify_orphan_fragment(candidate);
	goto create;
      }
    }
    /* remember the first expired node in the chain as a recycle candidate */
    if ((fp->deadline < now) && !candidate) {
      candidate = fp;
      if (!candidate->got_first) notify_orphan_fragment(candidate);
    }
    last_seen = fp;
  }

  create:
  /* no live match: either recycle a candidate in place or append after
     the chain tail (last_seen may be NULL for an empty bucket) */
  if (candidate) ret = create_fragment(now, candidate, TRUE, bucket, pptrs);
  else ret = create_fragment(now, last_seen, FALSE, bucket, pptrs);

  pptrs->frag_first_found = ret;
  return ret;
}
149 
create_fragment(u_int32_t now,struct ip_fragment * fp,u_int8_t is_candidate,unsigned int bucket,struct packet_ptrs * pptrs)150 int create_fragment(u_int32_t now, struct ip_fragment *fp, u_int8_t is_candidate, unsigned int bucket, struct packet_ptrs *pptrs)
151 {
152   struct pm_iphdr *iphp = (struct pm_iphdr *)pptrs->iph_ptr;
153   struct ip_fragment *newf;
154 
155   if (!ipft_total_nodes) {
156     if (now > emergency_prune+EMER_PRUNE_INTERVAL) {
157       Log(LOG_INFO, "INFO ( %s/core ): Fragment/4 buffer full. Skipping fragments.\n", config.name);
158       emergency_prune = now;
159       prune_old_fragments(now, 0);
160     }
161     return FALSE;
162   }
163 
164   if (fp) {
165     /* a 'not candidate' is simply the tail (last node) of the
166        list. We need to allocate a new node */
167     if (!is_candidate) {
168       newf = (struct ip_fragment *) malloc(sizeof(struct ip_fragment));
169       if (!newf) {
170 	if (now > emergency_prune+EMER_PRUNE_INTERVAL) {
171 	  Log(LOG_INFO, "INFO ( %s/core ): Fragment/4 buffer full. Skipping fragments.\n", config.name);
172 	  emergency_prune = now;
173 	  prune_old_fragments(now, 0);
174 	}
175 	return FALSE;
176       }
177       else ipft_total_nodes--;
178       memset(newf, 0, sizeof(struct ip_fragment));
179       fp->next = newf;
180       newf->prev = fp;
181       lru_list.last->lru_next = newf; /* placing new node as LRU tail */
182       newf->lru_prev = lru_list.last;
183       lru_list.last = newf;
184       fp = newf;
185     }
186     else {
187       if (fp->lru_next) { /* if fp->lru_next==NULL the node is already the tail */
188         fp->lru_prev->lru_next = fp->lru_next;
189 	fp->lru_next->lru_prev = fp->lru_prev;
190 	lru_list.last->lru_next = fp;
191 	fp->lru_prev = lru_list.last;
192 	fp->lru_next = NULL;
193 	lru_list.last = fp;
194       }
195     }
196   }
197   else {
198     /* we don't have any fragment pointer; this is because current
199        bucket doesn't contain any node; we'll allocate first one */
200     fp = (struct ip_fragment *) malloc(sizeof(struct ip_fragment));
201     if (!fp) {
202       if (now > emergency_prune+EMER_PRUNE_INTERVAL) {
203         Log(LOG_INFO, "INFO ( %s/core ): Fragment/4 buffer full. Skipping fragments.\n", config.name);
204         emergency_prune = now;
205         prune_old_fragments(now, 0);
206       }
207       return FALSE;
208     }
209     else ipft_total_nodes--;
210     memset(fp, 0, sizeof(struct ip_fragment));
211     ipft[bucket] = fp;
212     lru_list.last->lru_next = fp; /* placing new node as LRU tail */
213     fp->lru_prev = lru_list.last;
214     lru_list.last = fp;
215   }
216 
217   fp->deadline = now+IPF_TIMEOUT;
218   fp->ip_id = iphp->ip_id;
219   fp->ip_p = iphp->ip_p;
220   fp->ip_src = iphp->ip_src.s_addr;
221   fp->ip_dst = iphp->ip_dst.s_addr;
222   fp->bucket = bucket;
223 
224   if (!(iphp->ip_off & htons(IP_OFFMASK))) {
225     /* it's a first fragment */
226     fp->got_first = TRUE;
227     memcpy(fp->tlhdr, pptrs->tlh_ptr, MyTLHdrSz);
228     return TRUE;
229   }
230   else {
231     /* not a first fragment; increase accumulators */
232     if (!config.ext_sampling_rate) {
233       fp->pa++;
234       fp->a = ntohs(iphp->ip_len);
235     }
236     return FALSE;
237   }
238 }
239 
/* Walks the LRU list from the oldest entry and frees every fragment
   whose deadline is older than (now - off), unlinking each from its
   hash bucket. Stops at the first entry still within deadline and
   reattaches the survivors to the LRU sentinel; when nothing survives,
   the list collapses back to the sentinel alone. */
void prune_old_fragments(u_int32_t now, u_int32_t off)
{
  u_int32_t horizon = now - off;
  struct ip_fragment *node = lru_list.root->lru_next;

  while (node && horizon > node->deadline) {
    struct ip_fragment *successor = node->lru_next;

    /* detach the stale node from its bucket chain */
    if (node->prev && node->next) {
      node->prev->next = node->next;
      node->next->prev = node->prev;
    }
    else if (node->prev) node->prev->next = NULL;
    else if (node->next) {
      ipft[node->bucket] = node->next;
      node->next->prev = NULL;
    }
    else ipft[node->bucket] = NULL;

    free(node);
    ipft_total_nodes++; /* return the node to the budget */

    node = successor;
  }

  if (node) {
    /* first survivor becomes the new LRU head */
    node->lru_prev = lru_list.root;
    lru_list.root->lru_next = node;
  }
  else lru_list.last = lru_list.root;
}
279 
280 /* hash_fragment() is taken (it has another name there) from Linux kernel 2.4;
281    see full credits contained in jhash.h */
hash_fragment(u_int16_t id,u_int32_t src,u_int32_t dst,u_int8_t proto)282 unsigned int hash_fragment(u_int16_t id, u_int32_t src, u_int32_t dst, u_int8_t proto)
283 {
284   return jhash_3words((u_int32_t)id << 16 | proto, src, dst, trivial_hash_rnd) & (IPFT_HASHSZ-1);
285 }
286 
notify_orphan_fragment(struct ip_fragment * frag)287 void notify_orphan_fragment(struct ip_fragment *frag)
288 {
289   struct host_addr a;
290   char src_host[INET_ADDRSTRLEN], dst_host[INET_ADDRSTRLEN];
291   u_int16_t id;
292 
293   a.family = AF_INET;
294   memcpy(&a.address.ipv4, &frag->ip_src, 4);
295   addr_to_str(src_host, &a);
296   memcpy(&a.address.ipv4, &frag->ip_dst, 4);
297   addr_to_str(dst_host, &a);
298   id = ntohs(frag->ip_id);
299   Log(LOG_DEBUG, "DEBUG ( %s/core ): Expiring orphan fragment: ip_src=%s ip_dst=%s proto=%u id=%u\n",
300 		  config.name, src_host, dst_host, frag->ip_p, id);
301 }
302 
init_ip6_fragment_handler()303 void init_ip6_fragment_handler()
304 {
305   if (config.frag_bufsz) ipft6_total_nodes = config.frag_bufsz / sizeof(struct ip6_fragment);
306   else ipft6_total_nodes = DEFAULT_FRAG_BUFFER_SIZE / sizeof(struct ip6_fragment);
307 
308   memset(ipft6, 0, sizeof(ipft6));
309   lru_list6.root = (struct ip6_fragment *) malloc(sizeof(struct ip6_fragment));
310   lru_list6.last = lru_list6.root;
311   memset(lru_list6.root, 0, sizeof(struct ip6_fragment));
312   prune_deadline6 = time(NULL)+PRUNE_INTERVAL;
313   emergency_prune6 = 0;
314 }
315 
ip6_fragment_handler(struct packet_ptrs * pptrs,struct ip6_frag * fhdr)316 int ip6_fragment_handler(struct packet_ptrs *pptrs, struct ip6_frag *fhdr)
317 {
318   u_int32_t now = time(NULL);
319 
320   if (now > prune_deadline6) {
321     prune_old_fragments6(now, PRUNE_OFFSET);
322     prune_deadline6 = now+PRUNE_INTERVAL;
323   }
324   return find_fragment6(now, pptrs, fhdr);
325 }
326 
hash_fragment6(u_int32_t id,struct in6_addr * saddr,struct in6_addr * daddr)327 unsigned int hash_fragment6(u_int32_t id, struct in6_addr *saddr, struct in6_addr *daddr)
328 {
329         u_int32_t a, b, c;
330 	u_int32_t *src = (u_int32_t *)saddr, *dst = (u_int32_t *)daddr;
331 
332         a = src[0];
333         b = src[1];
334         c = src[2];
335 
336         a += JHASH_GOLDEN_RATIO;
337         b += JHASH_GOLDEN_RATIO;
338         c += trivial_hash_rnd;
339         __jhash_mix(a, b, c);
340 
341         a += src[3];
342         b += dst[0];
343         c += dst[1];
344         __jhash_mix(a, b, c);
345 
346         a += dst[2];
347         b += dst[3];
348         c += id;
349         __jhash_mix(a, b, c);
350 
351         return c & (IPFT_HASHSZ - 1);
352 }
353 
/* IPv6 counterpart of find_fragment(): looks up (id, src, dst) in the
   v6 fragment table. Returns TRUE when the flow's transport header is
   available (copied into pptrs->tlh_ptr), FALSE when the first fragment
   is still missing; expired matches and misses fall through to
   create_fragment6(). */
int find_fragment6(u_int32_t now, struct packet_ptrs *pptrs, struct ip6_frag *fhdr)
{
  struct ip6_hdr *iphp = (struct ip6_hdr *)pptrs->iph_ptr;
  struct ip6_fragment *fp, *candidate = NULL, *last_seen = NULL;
  unsigned int bucket = hash_fragment6(fhdr->ip6f_ident, &iphp->ip6_src, &iphp->ip6_dst);

  /* walk the bucket chain looking for an exact key match */
  for (fp = ipft6[bucket]; fp; fp = fp->next) {
    if (fp->id == fhdr->ip6f_ident && !ip6_addr_cmp(&fp->src, &iphp->ip6_src) &&
        !ip6_addr_cmp(&fp->dst, &iphp->ip6_dst)) {
      /* fragment found; will check for its deadline */
      if (fp->deadline > now) {
        if (fp->got_first) {
          /* transport header already captured: hand it to the packet */
          // pptrs->tlh_ptr = fp->tlhdr;
          memcpy(pptrs->tlh_ptr, fp->tlhdr, MyTLHdrSz);
          return TRUE;
        }
        else {
          if (!(fhdr->ip6f_offlg & htons(IP6F_OFF_MASK))) {
            /* we got our first fragment */
            fp->got_first = TRUE;
            memcpy(fp->tlhdr, pptrs->tlh_ptr, MyTLHdrSz);

	    /* flush counters accumulated while the first fragment was missing */
	    pptrs->frag_sum_bytes = fp->a;
	    pptrs->frag_sum_pkts = fp->pa;
            fp->pa = 0;
            fp->a = 0;
            return TRUE;
          }
          else { /* we still don't have the first fragment; increase accumulators */
	    if (!config.ext_sampling_rate) {
	      fp->pa++;
              fp->a += IP6HdrSz+ntohs(iphp->ip6_plen);
	    }
            return FALSE;
          }
        }
      }
      else {
        /* exact match but expired: recycle this node for the new flow */
        candidate = fp;
	if (!candidate->got_first) notify_orphan_fragment6(candidate);
        goto create;
      }
    }
    /* remember the first expired node in the chain as a recycle candidate */
    if ((fp->deadline < now) && !candidate) {
      candidate = fp;
      if (!candidate->got_first) notify_orphan_fragment6(candidate);
    }
    last_seen = fp;
  }

  create:
  /* no live match: recycle a candidate in place or append after the
     chain tail (last_seen may be NULL for an empty bucket) */
  if (candidate) return create_fragment6(now, candidate, TRUE, bucket, pptrs, fhdr);
  else return create_fragment6(now, last_seen, FALSE, bucket, pptrs, fhdr);
}
408 
create_fragment6(u_int32_t now,struct ip6_fragment * fp,u_int8_t is_candidate,unsigned int bucket,struct packet_ptrs * pptrs,struct ip6_frag * fhdr)409 int create_fragment6(u_int32_t now, struct ip6_fragment *fp, u_int8_t is_candidate, unsigned int bucket,
410 			struct packet_ptrs *pptrs, struct ip6_frag *fhdr)
411 {
412   struct ip6_hdr *iphp = (struct ip6_hdr *)pptrs->iph_ptr;
413   struct ip6_fragment *newf;
414 
415   if (!ipft6_total_nodes) {
416     if (now > emergency_prune6+EMER_PRUNE_INTERVAL) {
417       Log(LOG_INFO, "INFO ( %s/core ): Fragment/6 buffer full. Skipping fragments.\n", config.name);
418       emergency_prune6 = now;
419       prune_old_fragments6(now, 0);
420     }
421     return FALSE;
422   }
423 
424   if (fp) {
425     /* a 'not candidate' is simply the tail (last node) of the
426        list. We need to allocate a new node */
427     if (!is_candidate) {
428       newf = (struct ip6_fragment *) malloc(sizeof(struct ip6_fragment));
429       if (!newf) {
430 	if (now > emergency_prune6+EMER_PRUNE_INTERVAL) {
431 	  Log(LOG_INFO, "INFO ( %s/core ): Fragment/6 buffer full. Skipping fragments.\n", config.name);
432 	  emergency_prune6 = now;
433 	  prune_old_fragments6(now, 0);
434 	}
435 	return FALSE;
436       }
437       else ipft6_total_nodes--;
438       memset(newf, 0, sizeof(struct ip6_fragment));
439       fp->next = newf;
440       newf->prev = fp;
441       lru_list6.last->lru_next = newf; /* placing new node as LRU tail */
442       newf->lru_prev = lru_list6.last;
443       lru_list6.last = newf;
444       fp = newf;
445     }
446     else {
447       if (fp->lru_next) { /* if fp->lru_next==NULL the node is already the tail */
448         fp->lru_prev->lru_next = fp->lru_next;
449         fp->lru_next->lru_prev = fp->lru_prev;
450         lru_list6.last->lru_next = fp;
451         fp->lru_prev = lru_list6.last;
452         fp->lru_next = NULL;
453         lru_list6.last = fp;
454       }
455     }
456   }
457   else {
458     /* we don't have any fragment pointer; this is because current
459        bucket doesn't contain any node; we'll allocate first one */
460     fp = (struct ip6_fragment *) malloc(sizeof(struct ip6_fragment));
461     if (!fp) {
462       if (now > emergency_prune6+EMER_PRUNE_INTERVAL) {
463         Log(LOG_INFO, "INFO ( %s/core ): Fragment/6 buffer full. Skipping fragments.\n", config.name);
464         emergency_prune6 = now;
465         prune_old_fragments6(now, 0);
466       }
467       return FALSE;
468     }
469     else ipft6_total_nodes--;
470     memset(fp, 0, sizeof(struct ip6_fragment));
471     ipft6[bucket] = fp;
472     lru_list6.last->lru_next = fp; /* placing new node as LRU tail */
473     fp->lru_prev = lru_list6.last;
474     lru_list6.last = fp;
475   }
476 
477   fp->deadline = now+IPF_TIMEOUT;
478   fp->id = fhdr->ip6f_ident;
479   ip6_addr_cpy(&fp->src, &iphp->ip6_src);
480   ip6_addr_cpy(&fp->dst, &iphp->ip6_dst);
481   fp->bucket = bucket;
482 
483   if (!(fhdr->ip6f_offlg & htons(IP6F_OFF_MASK))) {
484     /* it's a first fragment */
485     fp->got_first = TRUE;
486     memcpy(fp->tlhdr, pptrs->tlh_ptr, MyTLHdrSz);
487     return TRUE;
488   }
489   else {
490     /* not a first fragment; increase accumulators */
491     if (!config.ext_sampling_rate) {
492       fp->pa++;
493       fp->a = IP6HdrSz+ntohs(iphp->ip6_plen);
494     }
495     return FALSE;
496   }
497 }
498 
/* IPv6 counterpart of prune_old_fragments(): frees LRU entries whose
   deadline is older than (now - off), unlinking each from its bucket,
   then reattaches the first survivor to the LRU sentinel (or collapses
   the list back to the sentinel when nothing survives). */
void prune_old_fragments6(u_int32_t now, u_int32_t off)
{
  u_int32_t horizon = now - off;
  struct ip6_fragment *node = lru_list6.root->lru_next;

  while (node && horizon > node->deadline) {
    struct ip6_fragment *successor = node->lru_next;

    /* detach the stale node from its bucket chain */
    if (node->prev && node->next) {
      node->prev->next = node->next;
      node->next->prev = node->prev;
    }
    else if (node->prev) node->prev->next = NULL;
    else if (node->next) {
      ipft6[node->bucket] = node->next;
      node->next->prev = NULL;
    }
    else ipft6[node->bucket] = NULL;

    free(node);
    ipft6_total_nodes++; /* return the node to the budget */

    node = successor;
  }

  if (node) {
    /* first survivor becomes the new LRU head */
    node->lru_prev = lru_list6.root;
    lru_list6.root->lru_next = node;
  }
  else lru_list6.last = lru_list6.root;
}
538 
notify_orphan_fragment6(struct ip6_fragment * frag)539 void notify_orphan_fragment6(struct ip6_fragment *frag)
540 {
541   struct host_addr a;
542   char src_host[INET6_ADDRSTRLEN], dst_host[INET6_ADDRSTRLEN];
543   u_int32_t id;
544 
545   a.family = AF_INET6;
546   ip6_addr_cpy(&a.address.ipv6, &frag->src);
547   addr_to_str(src_host, &a);
548   ip6_addr_cpy(&a.address.ipv6, &frag->dst);
549   addr_to_str(dst_host, &a);
550   id = ntohl(frag->id);
551   Log(LOG_DEBUG, "DEBUG ( %s/core ): Expiring orphan fragment: ip_src=%s ip_dst=%s id=%u\n",
552 			config.name, src_host, dst_host, id);
553 }
554