/*
  Copyright (c) 1999 Rafal Wojtczuk <nergal@7bulls.com>. All rights reserved.
  See the file COPYING for license details.
 */

#include <config.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/ip_icmp.h>

#include "checksum.h"
#include "scan.h"
#include "tcp.h"
#include "util.h"
#include "nids.h"
#include "hash.h"

#if ! HAVE_TCP_STATES
enum {
  TCP_ESTABLISHED = 1,
  TCP_SYN_SENT,
  TCP_SYN_RECV,
  TCP_FIN_WAIT1,
  TCP_FIN_WAIT2,
  TCP_TIME_WAIT,
  TCP_CLOSE,
  TCP_CLOSE_WAIT,
  TCP_LAST_ACK,
  TCP_LISTEN,
  TCP_CLOSING			/* now a valid state */
};

#endif

#define FIN_SENT 120
#define FIN_CONFIRMED 121
#define COLLECT_cc 1
#define COLLECT_sc 2
#define COLLECT_ccu 4
#define COLLECT_scu 8

#define EXP_SEQ (snd->first_data_seq + rcv->count + rcv->urg_count)

extern struct proc_node *tcp_procs;

static struct tcp_stream **tcp_stream_table;
static struct tcp_stream *streams_pool;
static int tcp_num = 0;
static int tcp_stream_table_size;
static int max_stream;
static struct tcp_stream *tcp_latest = 0, *tcp_oldest = 0;
static struct tcp_stream *free_streams;
static struct ip *ugly_iphdr;
struct tcp_timeout *nids_tcp_timeouts = 0;

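/* Free all queued (out-of-order) skbuffs of one half-stream and reset its
   list pointers and memory accounting. */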
static void purge_queue(struct half_stream * h)
{
  struct skbuff *tmp, *p = h->list;

  while (p) {
    free(p->data);
    tmp = p->next;
    free(p);
    p = tmp;
  }
  h->list = h->listtail = 0;
  h->rmem_alloc = 0;
}

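/* Schedule a 10-second timeout (based on the last pcap timestamp) after
   which a half-closed stream is torn down; only active when
   nids_params.tcp_workarounds is enabled.  The timeout list is kept sorted
   by expiry time and at most one entry exists per stream. */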
static void
add_tcp_closing_timeout(struct tcp_stream * a_tcp)
{
  struct tcp_timeout *to;
  struct tcp_timeout *newto;

  if (!nids_params.tcp_workarounds)
    return;
  newto = malloc(sizeof (struct tcp_timeout));
  if (!newto)
      nids_params.no_mem("add_tcp_closing_timeout");
  newto->a_tcp = a_tcp;
  newto->timeout.tv_sec = nids_last_pcap_header->ts.tv_sec + 10;
  newto->prev = 0;
  for (newto->next = to = nids_tcp_timeouts; to; newto->next = to = to->next) {
    if (to->a_tcp == a_tcp) {
      free(newto);
      return;
    }
    if (to->timeout.tv_sec > newto->timeout.tv_sec)
      break;
    newto->prev = to;
  }
  if (!newto->prev)
    nids_tcp_timeouts = newto;
  else
    newto->prev->next = newto;
  if (newto->next)
    newto->next->prev = newto;
}

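/* Remove any pending closing timeout registered for this stream. */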
static void
del_tcp_closing_timeout(struct tcp_stream * a_tcp)
{
  struct tcp_timeout *to;

  if (!nids_params.tcp_workarounds)
    return;
  for (to = nids_tcp_timeouts; to; to = to->next)
    if (to->a_tcp == a_tcp)
      break;
  if (!to)
    return;
  if (!to->prev)
    nids_tcp_timeouts = to->next;
  else
    to->prev->next = to->next;
  if (to->next)
    to->next->prev = to->prev;
  free(to);
}

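/* Release everything owned by a stream: queued skbuffs, reassembly buffers
   and listener nodes; unlink it from its hash bucket and from the
   oldest/latest time-ordered list, then return it to the free pool. */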
void
nids_free_tcp_stream(struct tcp_stream * a_tcp)
{
  int hash_index = a_tcp->hash_index;
  struct lurker_node *i, *j;

  del_tcp_closing_timeout(a_tcp);
  purge_queue(&a_tcp->server);
  purge_queue(&a_tcp->client);

  if (a_tcp->next_node)
    a_tcp->next_node->prev_node = a_tcp->prev_node;
  if (a_tcp->prev_node)
    a_tcp->prev_node->next_node = a_tcp->next_node;
  else
    tcp_stream_table[hash_index] = a_tcp->next_node;
  if (a_tcp->client.data)
    free(a_tcp->client.data);
  if (a_tcp->server.data)
    free(a_tcp->server.data);
  if (a_tcp->next_time)
    a_tcp->next_time->prev_time = a_tcp->prev_time;
  if (a_tcp->prev_time)
    a_tcp->prev_time->next_time = a_tcp->next_time;
  if (a_tcp == tcp_oldest)
    tcp_oldest = a_tcp->prev_time;
  if (a_tcp == tcp_latest)
    tcp_latest = a_tcp->next_time;

  i = a_tcp->listeners;

  while (i) {
    j = i->next;
    free(i);
    i = j;
  }
  a_tcp->next_free = free_streams;
  free_streams = a_tcp;
  tcp_num--;
}

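/* Called from the main loop: expire every stream whose closing timeout has
   passed, notifying its listeners with NIDS_TIMED_OUT first. */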
void
tcp_check_timeouts(struct timeval *now)
{
  struct tcp_timeout *to;
  struct tcp_timeout *next;
  struct lurker_node *i;

  for (to = nids_tcp_timeouts; to; to = next) {
    if (now->tv_sec < to->timeout.tv_sec)
      return;
    to->a_tcp->nids_state = NIDS_TIMED_OUT;
    for (i = to->a_tcp->listeners; i; i = i->next)
      (i->item) (to->a_tcp, &i->data);
    next = to->next;
    nids_free_tcp_stream(to->a_tcp);
  }
}

static int
mk_hash_index(struct tuple4 addr)
{
  int hash = mkhash(addr.saddr, addr.source, addr.daddr, addr.dest);
  return hash % tcp_stream_table_size;
}

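/* Walk the TCP options and extract the TSval of a TCPOPT_TIMESTAMP option
   into *ts (host byte order).  Returns 1 if found, 0 otherwise. */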
static int get_ts(struct tcphdr * this_tcphdr, unsigned int * ts)
{
  int len = 4 * this_tcphdr->th_off;
  unsigned int tmp_ts;
  unsigned char * options = (unsigned char*)(this_tcphdr + 1);
  int ind = 0, ret = 0;

  while (ind <= len - (int)sizeof (struct tcphdr) - 10)
    switch (options[ind]) {
    case 0: /* TCPOPT_EOL */
      return ret;
    case 1: /* TCPOPT_NOP */
      ind++;
      continue;
    case 8: /* TCPOPT_TIMESTAMP */
      memcpy((char*)&tmp_ts, options + ind + 2, 4);
      *ts = ntohl(tmp_ts);
      ret = 1;
      /* no break, intentionally */
    default:
      if (options[ind+1] < 2) /* "silly option" */
        return ret;
      ind += options[ind+1];
    }

  return ret;
}

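/* Like get_ts(), but for TCPOPT_WSCALE: store the window multiplier
   (1 << shift, with the shift capped at 14) in *ws; *ws defaults to 1. */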
static int get_wscale(struct tcphdr * this_tcphdr, unsigned int * ws)
{
  int len = 4 * this_tcphdr->th_off;
  unsigned int tmp_ws;
  unsigned char * options = (unsigned char*)(this_tcphdr + 1);
  int ind = 0, ret = 0;

  *ws = 1;
  while (ind <= len - (int)sizeof (struct tcphdr) - 3)
    switch (options[ind]) {
    case 0: /* TCPOPT_EOL */
      return ret;
    case 1: /* TCPOPT_NOP */
      ind++;
      continue;
    case 3: /* TCPOPT_WSCALE */
      tmp_ws = options[ind+2];
      if (tmp_ws > 14)
        tmp_ws = 14;
      *ws = 1 << tmp_ws;
      ret = 1;
      /* no break, intentionally */
    default:
      if (options[ind+1] < 2) /* "silly option" */
        return ret;
      ind += options[ind+1];
    }

  return ret;
}

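/* A SYN was seen for an untracked connection: take a stream from the free
   pool (evicting the oldest tracked stream if too many are being tracked),
   initialise the client half from the SYN, and link the new stream into
   the hash table and the time-ordered list. */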
static void
add_new_tcp(struct tcphdr * this_tcphdr, struct ip * this_iphdr)
{
  struct tcp_stream *tolink;
  struct tcp_stream *a_tcp;
  int hash_index;
  struct tuple4 addr;

  addr.source = ntohs(this_tcphdr->th_sport);
  addr.dest = ntohs(this_tcphdr->th_dport);
  addr.saddr = this_iphdr->ip_src.s_addr;
  addr.daddr = this_iphdr->ip_dst.s_addr;
  hash_index = mk_hash_index(addr);

  if (tcp_num > max_stream) {
    struct lurker_node *i;
    int orig_client_state = tcp_oldest->client.state;
    tcp_oldest->nids_state = NIDS_TIMED_OUT;
    for (i = tcp_oldest->listeners; i; i = i->next)
      (i->item) (tcp_oldest, &i->data);
    nids_free_tcp_stream(tcp_oldest);
    if (orig_client_state != TCP_SYN_SENT)
      nids_params.syslog(NIDS_WARN_TCP, NIDS_WARN_TCP_TOOMUCH, ugly_iphdr, this_tcphdr);
  }
  a_tcp = free_streams;
  if (!a_tcp) {
    fprintf(stderr, "gdb me ...\n");
    pause();
  }
  free_streams = a_tcp->next_free;

  tcp_num++;
  tolink = tcp_stream_table[hash_index];
  memset(a_tcp, 0, sizeof(struct tcp_stream));
  a_tcp->hash_index = hash_index;
  a_tcp->addr = addr;
  a_tcp->client.state = TCP_SYN_SENT;
  a_tcp->client.seq = ntohl(this_tcphdr->th_seq) + 1;
  a_tcp->client.first_data_seq = a_tcp->client.seq;
  a_tcp->client.window = ntohs(this_tcphdr->th_win);
  a_tcp->client.ts_on = get_ts(this_tcphdr, &a_tcp->client.curr_ts);
  a_tcp->client.wscale_on = get_wscale(this_tcphdr, &a_tcp->client.wscale);
  a_tcp->server.state = TCP_CLOSE;
  a_tcp->next_node = tolink;
  a_tcp->prev_node = 0;
  if (tolink)
    tolink->prev_node = a_tcp;
  tcp_stream_table[hash_index] = a_tcp;
  a_tcp->next_time = tcp_latest;
  a_tcp->prev_time = 0;
  if (!tcp_oldest)
    tcp_oldest = a_tcp;
  if (tcp_latest)
    tcp_latest->prev_time = a_tcp;
  tcp_latest = a_tcp;
}

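/* Append in-order payload to the half-stream's reassembly buffer, growing
   it (4 KB initially, then roughly doubling) as needed; updates count and
   count_new for the listeners. */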
static void
add2buf(struct half_stream * rcv, char *data, int datalen)
{
  int toalloc;

  if (datalen + rcv->count - rcv->offset > rcv->bufsize) {
    if (!rcv->data) {
      if (datalen < 2048)
	toalloc = 4096;
      else
	toalloc = datalen * 2;
      rcv->data = malloc(toalloc);
      rcv->bufsize = toalloc;
    }
    else {
      if (datalen < rcv->bufsize)
	toalloc = 2 * rcv->bufsize;
      else
	toalloc = rcv->bufsize + 2 * datalen;
      rcv->data = realloc(rcv->data, toalloc);
      rcv->bufsize = toalloc;
    }
    if (!rcv->data)
      nids_params.no_mem("add2buf");
  }
  memcpy(rcv->data + rcv->count - rcv->offset, data, datalen);
  rcv->count_new = datalen;
  rcv->count += datalen;
}

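/* Invoke every listener whose "whatto" mask matches the data that just
   became available, then fold any changes the callback made to the
   collect/collect_urg flags back into that listener's mask. */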
static void
ride_lurkers(struct tcp_stream * a_tcp, char mask)
{
  struct lurker_node *i;
  char cc, sc, ccu, scu;

  for (i = a_tcp->listeners; i; i = i->next)
    if (i->whatto & mask) {
      cc = a_tcp->client.collect;
      sc = a_tcp->server.collect;
      ccu = a_tcp->client.collect_urg;
      scu = a_tcp->server.collect_urg;

      (i->item) (a_tcp, &i->data);
      if (cc < a_tcp->client.collect)
	i->whatto |= COLLECT_cc;
      if (ccu < a_tcp->client.collect_urg)
	i->whatto |= COLLECT_ccu;
      if (sc < a_tcp->server.collect)
	i->whatto |= COLLECT_sc;
      if (scu < a_tcp->server.collect_urg)
	i->whatto |= COLLECT_scu;
      if (cc > a_tcp->client.collect)
	i->whatto &= ~COLLECT_cc;
      if (ccu > a_tcp->client.collect_urg)
	i->whatto &= ~COLLECT_ccu;
      if (sc > a_tcp->server.collect)
	i->whatto &= ~COLLECT_sc;
      if (scu > a_tcp->server.collect_urg)
	i->whatto &= ~COLLECT_scu;
    }
}

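/* Deliver newly reassembled (or urgent) data for one half-stream to the
   interested listeners, consume whatever they read, and drop listeners
   that are no longer interested in anything. */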
static void
notify(struct tcp_stream * a_tcp, struct half_stream * rcv)
{
  struct lurker_node *i, **prev_addr;
  char mask;

  if (rcv->count_new_urg) {
    if (!rcv->collect_urg)
      return;
    if (rcv == &a_tcp->client)
      mask = COLLECT_ccu;
    else
      mask = COLLECT_scu;
    ride_lurkers(a_tcp, mask);
    goto prune_listeners;
  }
  if (rcv->collect) {
    if (rcv == &a_tcp->client)
      mask = COLLECT_cc;
    else
      mask = COLLECT_sc;
    do {
      int total;
      a_tcp->read = rcv->count - rcv->offset;
      total = a_tcp->read;

      ride_lurkers(a_tcp, mask);
      if (a_tcp->read > total - rcv->count_new)
	rcv->count_new = total - a_tcp->read;

      if (a_tcp->read > 0) {
	memmove(rcv->data, rcv->data + a_tcp->read, rcv->count - rcv->offset - a_tcp->read);
	rcv->offset += a_tcp->read;
      }
    } while (nids_params.one_loop_less && a_tcp->read > 0 && rcv->count_new);
    // we know that if one_loop_less!=0, we have only one callback to notify
    rcv->count_new = 0;
  }
 prune_listeners:
  prev_addr = &a_tcp->listeners;
  i = a_tcp->listeners;
  while (i)
    if (!i->whatto) {
      *prev_addr = i->next;
      free(i);
      i = *prev_addr;
    }
    else {
      prev_addr = &i->next;
      i = i->next;
    }
}

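/* Merge one segment that starts at or before EXP_SEQ into the receiver's
   stream: handle urgent data pointed at by the URG pointer, hand ordinary
   payload to add2buf()/notify(), and mark the sender half FIN_SENT when
   the segment carries a FIN. */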
static void
add_from_skb(struct tcp_stream * a_tcp, struct half_stream * rcv,
	     struct half_stream * snd,
	     u_char *data, int datalen,
	     u_int this_seq, char fin, char urg, u_int urg_ptr)
{
  u_int lost = EXP_SEQ - this_seq;
  int to_copy, to_copy2;

  if (urg && after(urg_ptr, EXP_SEQ - 1) &&
      (!rcv->urg_seen || after(urg_ptr, rcv->urg_ptr))) {
    rcv->urg_ptr = urg_ptr;
    rcv->urg_seen = 1;
  }
  if (rcv->urg_seen && after(rcv->urg_ptr + 1, this_seq + lost) &&
      before(rcv->urg_ptr, this_seq + datalen)) {
    to_copy = rcv->urg_ptr - (this_seq + lost);
    if (to_copy > 0) {
      if (rcv->collect) {
	add2buf(rcv, (char *)(data + lost), to_copy);
	notify(a_tcp, rcv);
      }
      else {
	rcv->count += to_copy;
	rcv->offset = rcv->count; /* clear the buffer */
      }
    }
    rcv->urgdata = data[rcv->urg_ptr - this_seq];
    rcv->count_new_urg = 1;
    notify(a_tcp, rcv);
    rcv->count_new_urg = 0;
    rcv->urg_seen = 0;
    rcv->urg_count++;
    to_copy2 = this_seq + datalen - rcv->urg_ptr - 1;
    if (to_copy2 > 0) {
      if (rcv->collect) {
	add2buf(rcv, (char *)(data + lost + to_copy + 1), to_copy2);
	notify(a_tcp, rcv);
      }
      else {
	rcv->count += to_copy2;
	rcv->offset = rcv->count; /* clear the buffer */
      }
    }
  }
  else {
    if (datalen - lost > 0) {
      if (rcv->collect) {
	add2buf(rcv, (char *)(data + lost), datalen - lost);
	notify(a_tcp, rcv);
      }
      else {
	rcv->count += datalen - lost;
	rcv->offset = rcv->count; /* clear the buffer */
      }
    }
  }
  if (fin) {
    snd->state = FIN_SENT;
    if (rcv->state == TCP_CLOSING)
      add_tcp_closing_timeout(a_tcp);
  }
}

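/* Core reassembly: if the segment extends the expected sequence, feed it
   (and any already queued segments it makes contiguous) to add_from_skb();
   otherwise copy it into the receiver's out-of-order list, which is kept
   sorted by sequence number. */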
static void
tcp_queue(struct tcp_stream * a_tcp, struct tcphdr * this_tcphdr,
	  struct half_stream * snd, struct half_stream * rcv,
	  char *data, int datalen, int skblen
	  )
{
  u_int this_seq = ntohl(this_tcphdr->th_seq);
  struct skbuff *pakiet, *tmp;

  /*
   * Did we get anything new to ack?
   */

  if (!after(this_seq, EXP_SEQ)) {
    if (after(this_seq + datalen + (this_tcphdr->th_flags & TH_FIN), EXP_SEQ)) {
      /* the packet straddles our window end */
      get_ts(this_tcphdr, &snd->curr_ts);
      add_from_skb(a_tcp, rcv, snd, (u_char *)data, datalen, this_seq,
		   (this_tcphdr->th_flags & TH_FIN),
		   (this_tcphdr->th_flags & TH_URG),
		   ntohs(this_tcphdr->th_urp) + this_seq - 1);
      /*
       * Do we have any old packets to ack that the above
       * made visible? (Go forward from skb)
       */
      pakiet = rcv->list;
      while (pakiet) {
	if (after(pakiet->seq, EXP_SEQ))
	  break;
	if (after(pakiet->seq + pakiet->len + pakiet->fin, EXP_SEQ)) {
	  add_from_skb(a_tcp, rcv, snd, pakiet->data,
		       pakiet->len, pakiet->seq, pakiet->fin, pakiet->urg,
		       pakiet->urg_ptr + pakiet->seq - 1);
        }
	rcv->rmem_alloc -= pakiet->truesize;
	if (pakiet->prev)
	  pakiet->prev->next = pakiet->next;
	else
	  rcv->list = pakiet->next;
	if (pakiet->next)
	  pakiet->next->prev = pakiet->prev;
	else
	  rcv->listtail = pakiet->prev;
	tmp = pakiet->next;
	free(pakiet->data);
	free(pakiet);
	pakiet = tmp;
      }
    }
    else
      return;
  }
  else {
    struct skbuff *p = rcv->listtail;

    pakiet = mknew(struct skbuff);
    pakiet->truesize = skblen;
    rcv->rmem_alloc += pakiet->truesize;
    pakiet->len = datalen;
    pakiet->data = malloc(datalen);
    if (!pakiet->data)
      nids_params.no_mem("tcp_queue");
    memcpy(pakiet->data, data, datalen);
    pakiet->fin = (this_tcphdr->th_flags & TH_FIN);
    /* Some hardware - at least some Cisco devices - will close a TCP
     * connection even though packets were lost before the first TCP FIN
     * packet and never retransmitted; this violates RFC 793, but since it
     * really happens, it has to be dealt with... The idea is to introduce
     * a 10s timeout after TCP FIN packets were sent by both sides so that
     * corresponding libnids resources can be released instead of waiting
     * for retransmissions which will never happen.  -- Sebastien Raveau
     */
    if (pakiet->fin) {
      snd->state = TCP_CLOSING;
      if (rcv->state == FIN_SENT || rcv->state == FIN_CONFIRMED)
	add_tcp_closing_timeout(a_tcp);
    }
    pakiet->seq = this_seq;
    pakiet->urg = (this_tcphdr->th_flags & TH_URG);
    pakiet->urg_ptr = ntohs(this_tcphdr->th_urp);
    for (;;) {
      if (!p || !after(p->seq, this_seq))
	break;
      p = p->prev;
    }
    if (!p) {
      pakiet->prev = 0;
      pakiet->next = rcv->list;
      if (rcv->list)
	rcv->list->prev = pakiet;
      rcv->list = pakiet;
      if (!rcv->listtail)
	rcv->listtail = pakiet;
    }
    else {
      pakiet->next = p->next;
      p->next = pakiet;
      pakiet->prev = p;
      if (pakiet->next)
	pakiet->next->prev = pakiet;
      else
	rcv->listtail = pakiet;
    }
  }
}

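/* The out-of-order queue grew past the limit: warn via syslog and drop all
   queued segments for this half-stream. */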
static void
prune_queue(struct half_stream * rcv, struct tcphdr * this_tcphdr)
{
  struct skbuff *tmp, *p = rcv->list;

  nids_params.syslog(NIDS_WARN_TCP, NIDS_WARN_TCP_BIGQUEUE, ugly_iphdr, this_tcphdr);
  while (p) {
    free(p->data);
    tmp = p->next;
    free(p);
    p = tmp;
  }
  rcv->list = rcv->listtail = 0;
  rcv->rmem_alloc = 0;
}

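/* Record the newest ACK number seen from this side; old or duplicate ACKs
   are ignored (the subtraction is wrap-aware). */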
static void
handle_ack(struct half_stream * snd, u_int acknum)
{
  int ackdiff;

  ackdiff = acknum - snd->ack_seq;
  if (ackdiff > 0) {
    snd->ack_seq = acknum;
  }
}
#if 0
static void
check_flags(struct ip * iph, struct tcphdr * th)
{
  u_char flag = *(((u_char *) th) + 13);
  if (flag & 0x40 || flag & 0x80)
    nids_params.syslog(NIDS_WARN_TCP, NIDS_WARN_TCP_BADFLAGS, iph, th);
//ECN is really the only cause of these warnings...
}
#endif

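/* Look up the stream a packet belongs to, trying the (src,dst) tuple first
   and then the reversed tuple; *from_client is set accordingly. */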
struct tcp_stream *
find_stream(struct tcphdr * this_tcphdr, struct ip * this_iphdr,
	    int *from_client)
{
  struct tuple4 this_addr, reversed;
  struct tcp_stream *a_tcp;

  this_addr.source = ntohs(this_tcphdr->th_sport);
  this_addr.dest = ntohs(this_tcphdr->th_dport);
  this_addr.saddr = this_iphdr->ip_src.s_addr;
  this_addr.daddr = this_iphdr->ip_dst.s_addr;
  a_tcp = nids_find_tcp_stream(&this_addr);
  if (a_tcp) {
    *from_client = 1;
    return a_tcp;
  }
  reversed.source = ntohs(this_tcphdr->th_dport);
  reversed.dest = ntohs(this_tcphdr->th_sport);
  reversed.saddr = this_iphdr->ip_dst.s_addr;
  reversed.daddr = this_iphdr->ip_src.s_addr;
  a_tcp = nids_find_tcp_stream(&reversed);
  if (a_tcp) {
    *from_client = 0;
    return a_tcp;
  }
  return 0;
}

struct tcp_stream *
nids_find_tcp_stream(struct tuple4 *addr)
{
  int hash_index;
  struct tcp_stream *a_tcp;

  hash_index = mk_hash_index(*addr);
  for (a_tcp = tcp_stream_table[hash_index];
       a_tcp && memcmp(&a_tcp->addr, addr, sizeof (struct tuple4));
       a_tcp = a_tcp->next_node);
  return a_tcp ? a_tcp : 0;
}

void tcp_exit(void)
{
  int i;
  struct lurker_node *j;
  struct tcp_stream *a_tcp, *t_tcp;

  if (!tcp_stream_table || !streams_pool)
    return;
  for (i = 0; i < tcp_stream_table_size; i++) {
    a_tcp = tcp_stream_table[i];
    while (a_tcp) {
      t_tcp = a_tcp;
      a_tcp = a_tcp->next_node;
      for (j = t_tcp->listeners; j; j = j->next) {
	t_tcp->nids_state = NIDS_EXITING;
	(j->item)(t_tcp, &j->data);
      }
      nids_free_tcp_stream(t_tcp);
    }
  }
  free(tcp_stream_table);
  tcp_stream_table = NULL;
  free(streams_pool);
  streams_pool = NULL;
  /* FIXME: anything else we should free? */
  /* yes plz.. */
  tcp_latest = tcp_oldest = NULL;
  tcp_num = 0;
}

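/* Entry point for every IP packet carrying TCP: validate lengths and the
   checksum, create or look up the stream, follow the three-way handshake,
   apply window/PAWS sanity checks, handle RST/FIN/ACK state transitions,
   and pass payload to tcp_queue(). */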
void
process_tcp(u_char * data, int skblen)
{
  struct ip *this_iphdr = (struct ip *)data;
  struct tcphdr *this_tcphdr = (struct tcphdr *)(data + 4 * this_iphdr->ip_hl);
  int datalen, iplen;
  int from_client = 1;
  unsigned int tmp_ts;
  struct tcp_stream *a_tcp;
  struct half_stream *snd, *rcv;

  ugly_iphdr = this_iphdr;
  iplen = ntohs(this_iphdr->ip_len);
  if ((unsigned)iplen < 4 * this_iphdr->ip_hl + sizeof(struct tcphdr)) {
    nids_params.syslog(NIDS_WARN_TCP, NIDS_WARN_TCP_HDR, this_iphdr,
		       this_tcphdr);
    return;
  } // someone is playing games with us

  datalen = iplen - 4 * this_iphdr->ip_hl - 4 * this_tcphdr->th_off;

  if (datalen < 0) {
    nids_params.syslog(NIDS_WARN_TCP, NIDS_WARN_TCP_HDR, this_iphdr,
		       this_tcphdr);
    return;
  } // someone is playing games with us

  if ((this_iphdr->ip_src.s_addr | this_iphdr->ip_dst.s_addr) == 0) {
    nids_params.syslog(NIDS_WARN_TCP, NIDS_WARN_TCP_HDR, this_iphdr,
		       this_tcphdr);
    return;
  }
  if (!(this_tcphdr->th_flags & TH_ACK))
    detect_scan(this_iphdr);
  if (!nids_params.n_tcp_streams) return;
  if (my_tcp_check(this_tcphdr, iplen - 4 * this_iphdr->ip_hl,
		   this_iphdr->ip_src.s_addr, this_iphdr->ip_dst.s_addr)) {
    nids_params.syslog(NIDS_WARN_TCP, NIDS_WARN_TCP_HDR, this_iphdr,
		       this_tcphdr);
    return;
  }
#if 0
  check_flags(this_iphdr, this_tcphdr);
//ECN
#endif
  if (!(a_tcp = find_stream(this_tcphdr, this_iphdr, &from_client))) {
    if ((this_tcphdr->th_flags & TH_SYN) &&
	!(this_tcphdr->th_flags & TH_ACK) &&
	!(this_tcphdr->th_flags & TH_RST))
      add_new_tcp(this_tcphdr, this_iphdr);
    return;
  }
  if (from_client) {
    snd = &a_tcp->client;
    rcv = &a_tcp->server;
  }
  else {
    rcv = &a_tcp->client;
    snd = &a_tcp->server;
  }
  if ((this_tcphdr->th_flags & TH_SYN)) {
    if (from_client || a_tcp->client.state != TCP_SYN_SENT ||
	a_tcp->server.state != TCP_CLOSE || !(this_tcphdr->th_flags & TH_ACK))
      return;
    if (a_tcp->client.seq != ntohl(this_tcphdr->th_ack))
      return;
    a_tcp->server.state = TCP_SYN_RECV;
    a_tcp->server.seq = ntohl(this_tcphdr->th_seq) + 1;
    a_tcp->server.first_data_seq = a_tcp->server.seq;
    a_tcp->server.ack_seq = ntohl(this_tcphdr->th_ack);
    a_tcp->server.window = ntohs(this_tcphdr->th_win);
    if (a_tcp->client.ts_on) {
      a_tcp->server.ts_on = get_ts(this_tcphdr, &a_tcp->server.curr_ts);
      if (!a_tcp->server.ts_on)
	a_tcp->client.ts_on = 0;
    } else a_tcp->server.ts_on = 0;
    if (a_tcp->client.wscale_on) {
      a_tcp->server.wscale_on = get_wscale(this_tcphdr, &a_tcp->server.wscale);
      if (!a_tcp->server.wscale_on) {
	a_tcp->client.wscale_on = 0;
	a_tcp->client.wscale = 1;
	a_tcp->server.wscale = 1;
      }
    } else {
      a_tcp->server.wscale_on = 0;
      a_tcp->server.wscale = 1;
    }
    return;
  }
  if (
      ! (!datalen && ntohl(this_tcphdr->th_seq) == rcv->ack_seq)
      &&
      (!before(ntohl(this_tcphdr->th_seq), rcv->ack_seq + rcv->window * rcv->wscale) ||
       before(ntohl(this_tcphdr->th_seq) + datalen, rcv->ack_seq)
      )
     )
    return;

  if ((this_tcphdr->th_flags & TH_RST)) {
    if (a_tcp->nids_state == NIDS_DATA) {
      struct lurker_node *i;

      a_tcp->nids_state = NIDS_RESET;
      for (i = a_tcp->listeners; i; i = i->next)
	(i->item) (a_tcp, &i->data);
    }
    nids_free_tcp_stream(a_tcp);
    return;
  }

  /* PAWS check */
  if (rcv->ts_on && get_ts(this_tcphdr, &tmp_ts) &&
      before(tmp_ts, snd->curr_ts))
    return;

  if ((this_tcphdr->th_flags & TH_ACK)) {
    if (from_client && a_tcp->client.state == TCP_SYN_SENT &&
	a_tcp->server.state == TCP_SYN_RECV) {
      if (ntohl(this_tcphdr->th_ack) == a_tcp->server.seq) {
	a_tcp->client.state = TCP_ESTABLISHED;
	a_tcp->client.ack_seq = ntohl(this_tcphdr->th_ack);
	{
	  struct proc_node *i;
	  struct lurker_node *j;
	  void *data;

	  a_tcp->server.state = TCP_ESTABLISHED;
	  a_tcp->nids_state = NIDS_JUST_EST;
	  for (i = tcp_procs; i; i = i->next) {
	    char whatto = 0;
	    char cc = a_tcp->client.collect;
	    char sc = a_tcp->server.collect;
	    char ccu = a_tcp->client.collect_urg;
	    char scu = a_tcp->server.collect_urg;

	    (i->item) (a_tcp, &data);
	    if (cc < a_tcp->client.collect)
	      whatto |= COLLECT_cc;
	    if (ccu < a_tcp->client.collect_urg)
	      whatto |= COLLECT_ccu;
	    if (sc < a_tcp->server.collect)
	      whatto |= COLLECT_sc;
	    if (scu < a_tcp->server.collect_urg)
	      whatto |= COLLECT_scu;
	    if (nids_params.one_loop_less) {
	      if (a_tcp->client.collect >= 2) {
		a_tcp->client.collect = cc;
		whatto &= ~COLLECT_cc;
	      }
	      if (a_tcp->server.collect >= 2) {
		a_tcp->server.collect = sc;
		whatto &= ~COLLECT_sc;
	      }
	    }
	    if (whatto) {
	      j = mknew(struct lurker_node);
	      j->item = i->item;
	      j->data = data;
	      j->whatto = whatto;
	      j->next = a_tcp->listeners;
	      a_tcp->listeners = j;
	    }
	  }
	  if (!a_tcp->listeners) {
	    nids_free_tcp_stream(a_tcp);
	    return;
	  }
	  a_tcp->nids_state = NIDS_DATA;
	}
      }
      // return;
    }
  }
  if ((this_tcphdr->th_flags & TH_ACK)) {
    handle_ack(snd, ntohl(this_tcphdr->th_ack));
    if (rcv->state == FIN_SENT)
      rcv->state = FIN_CONFIRMED;
    if (rcv->state == FIN_CONFIRMED && snd->state == FIN_CONFIRMED) {
      struct lurker_node *i;

      a_tcp->nids_state = NIDS_CLOSE;
      for (i = a_tcp->listeners; i; i = i->next)
	(i->item) (a_tcp, &i->data);
      nids_free_tcp_stream(a_tcp);
      return;
    }
  }
  if (datalen + (this_tcphdr->th_flags & TH_FIN) > 0)
    tcp_queue(a_tcp, this_tcphdr, snd, rcv,
	      (char *) (this_tcphdr) + 4 * this_tcphdr->th_off,
	      datalen, skblen);
  snd->window = ntohs(this_tcphdr->th_win);
  if (rcv->rmem_alloc > 65535)
    prune_queue(rcv, this_tcphdr);
  if (!a_tcp->listeners)
    nids_free_tcp_stream(a_tcp);
}

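/* May be called from a listener's callback: ask libnids to discard only
   "num" bytes of the data just delivered; the remainder stays in the
   buffer and is presented again together with the next batch of data. */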
void
nids_discard(struct tcp_stream * a_tcp, int num)
{
  if (num < a_tcp->read)
    a_tcp->read = num;
}

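/* Register/unregister a TCP callback.  As invoked throughout this file,
   (i->item)(a_tcp, &i->data), the callback is expected to have the form
   void cb(struct tcp_stream *a_tcp, void **param). */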
void
nids_register_tcp(void (*x))
{
  register_callback(&tcp_procs, x);
}

void
nids_unregister_tcp(void (*x))
{
  unregister_callback(&tcp_procs, x);
}

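/* Allocate the stream hash table ("size" buckets) and a pool of
   max_stream = 3*size/4 stream structures, and clear the timeout list.
   Returns 0 on success (size == 0 simply disables TCP processing) and -1
   on allocation failure. */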
int
tcp_init(int size)
{
  int i;
  struct tcp_timeout *tmp;

  if (!size) return 0;
  tcp_stream_table_size = size;
  tcp_stream_table = calloc(tcp_stream_table_size, sizeof(char *));
  if (!tcp_stream_table) {
    nids_params.no_mem("tcp_init");
    return -1;
  }
  max_stream = 3 * tcp_stream_table_size / 4;
  streams_pool = (struct tcp_stream *) malloc((max_stream + 1) * sizeof(struct tcp_stream));
  if (!streams_pool) {
    nids_params.no_mem("tcp_init");
    return -1;
  }
  for (i = 0; i < max_stream; i++)
    streams_pool[i].next_free = &(streams_pool[i + 1]);
  streams_pool[max_stream].next_free = 0;
  free_streams = streams_pool;
  init_hash();
  while (nids_tcp_timeouts) {
    tmp = nids_tcp_timeouts->next;
    free(nids_tcp_timeouts);
    nids_tcp_timeouts = tmp;
  }
  return 0;
}

#if HAVE_ICMPHDR
#define STRUCT_ICMP struct icmphdr
#define ICMP_CODE   code
#define ICMP_TYPE   type
#else
#define STRUCT_ICMP struct icmp
#define ICMP_CODE   icmp_code
#define ICMP_TYPE   icmp_type
#endif

#ifndef ICMP_DEST_UNREACH
#define ICMP_DEST_UNREACH ICMP_UNREACH
#define ICMP_PROT_UNREACH ICMP_UNREACH_PROTOCOL
#define ICMP_PORT_UNREACH ICMP_UNREACH_PORT
#define NR_ICMP_UNREACH   ICMP_MAXTYPE
#endif

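/* Handle ICMP "destination unreachable" messages: if the embedded IP/TCP
   header matches a tracked connection that is still in the handshake
   (SYN_SENT/SYN_RECV), report NIDS_RESET to the listeners and free the
   stream. */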
void
process_icmp(u_char * data)
{
  struct ip *iph = (struct ip *) data;
  struct ip *orig_ip;
  STRUCT_ICMP *pkt;
  struct tcphdr *th;
  struct half_stream *hlf;
  int match_addr;
  struct tcp_stream *a_tcp;
  struct lurker_node *i;

  int from_client;
  /* we will use unsigned, to suppress warning; we must be careful with
     possible wrap when subtracting;
     the following is ok, as the ip header has already been sanitized */
  unsigned int len = ntohs(iph->ip_len) - (iph->ip_hl << 2);

  if (len < sizeof(STRUCT_ICMP))
    return;
  pkt = (STRUCT_ICMP *) (data + (iph->ip_hl << 2));
  if (ip_compute_csum((char *) pkt, len))
    return;
  if (pkt->ICMP_TYPE != ICMP_DEST_UNREACH)
    return;
  /* ok due to check 7 lines above */
  len -= sizeof(STRUCT_ICMP);
  // sizeof(struct icmp) is not what we want here

  if (len < sizeof(struct ip))
    return;

  orig_ip = (struct ip *) (((char *) pkt) + 8);
  if (len < (unsigned)(orig_ip->ip_hl << 2) + 8)
    return;
  /* subtraction ok due to the check above */
  len -= orig_ip->ip_hl << 2;
  if ((pkt->ICMP_CODE & 15) == ICMP_PROT_UNREACH ||
      (pkt->ICMP_CODE & 15) == ICMP_PORT_UNREACH)
    match_addr = 1;
  else
    match_addr = 0;
  if (pkt->ICMP_CODE > NR_ICMP_UNREACH)
    return;
  if (match_addr && (iph->ip_src.s_addr != orig_ip->ip_dst.s_addr))
    return;
  if (orig_ip->ip_p != IPPROTO_TCP)
    return;
  th = (struct tcphdr *) (((char *) orig_ip) + (orig_ip->ip_hl << 2));
  if (!(a_tcp = find_stream(th, orig_ip, &from_client)))
    return;
  if (a_tcp->addr.dest == iph->ip_dst.s_addr)
    hlf = &a_tcp->server;
  else
    hlf = &a_tcp->client;
  if (hlf->state != TCP_SYN_SENT && hlf->state != TCP_SYN_RECV)
    return;
  a_tcp->nids_state = NIDS_RESET;
  for (i = a_tcp->listeners; i; i = i->next)
    (i->item) (a_tcp, &i->data);
  nids_free_tcp_stream(a_tcp);
}