1 //--------------------------------------------------------------------------
2 // Copyright (C) 2015-2021 Cisco and/or its affiliates. All rights reserved.
3 //
4 // This program is free software; you can redistribute it and/or modify it
5 // under the terms of the GNU General Public License Version 2 as published
6 // by the Free Software Foundation. You may not use, modify or distribute
7 // this program under any other version of the GNU General Public License.
8 //
9 // This program is distributed in the hope that it will be useful, but
10 // WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // General Public License for more details.
13 //
14 // You should have received a copy of the GNU General Public License along
15 // with this program; if not, write to the Free Software Foundation, Inc.,
16 // 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 //--------------------------------------------------------------------------
18
19 // tcp_stream_tracker.cpp author davis mcpherson <davmcphe@cisco.com>
20 // Created on: Jun 24, 2015
21
22 #ifdef HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25
26 #include "tcp_stream_tracker.h"
27
28 #include <daq.h>
29
30 #include "log/messages.h"
31 #include "main/analyzer.h"
32 #include "main/snort.h"
33 #include "memory/memory_cap.h"
34 #include "packet_io/active.h"
35 #include "profiler/profiler_defs.h"
36 #include "protocols/eth.h"
37
38 #include "held_packet_queue.h"
39 #include "segment_overlap_editor.h"
40 #include "tcp_normalizers.h"
41 #include "tcp_reassemblers.h"
42 #include "tcp_session.h"
43
using namespace snort;

// Per-thread queue of DAQ messages being held by trackers in this thread;
// created/destroyed via TcpStreamTracker::thread_init()/thread_term().
THREAD_LOCAL HeldPacketQueue* hpq = nullptr;

// Sentinel iterator value meaning "this tracker currently holds no packet".
const std::list<HeldPacket>::iterator TcpStreamTracker::null_iterator { };
49
// Printable names for tracker states; order assumed to mirror the TcpState
// enum declaration (TCP_LISTEN .. TCP_STATE_NONE) — confirm against the header.
const char* tcp_state_names[] =
{
    "TCP_LISTEN", "TCP_SYN_SENT", "TCP_SYN_RECV",
    "TCP_ESTABLISHED",
    "TCP_FIN_WAIT1", "TCP_FIN_WAIT2", "TCP_CLOSE_WAIT", "TCP_CLOSING",
    "TCP_LAST_ACK", "TCP_TIME_WAIT", "TCP_CLOSED",
    "TCP_STATE_NONE"
};
58
// Printable names for tracker events; order assumed to mirror the TcpEvent
// enum declaration — confirm against the header.
// Fix: the table was missing "TCP_NO_FLAGS_EVENT" even though set_tcp_event()
// assigns TCP_NO_FLAGS_EVENT, so name lookups for that event read past the
// end of the array.
const char* tcp_event_names[] = {
    "TCP_SYN_SENT_EVENT", "TCP_SYN_RECV_EVENT",
    "TCP_SYN_ACK_SENT_EVENT", "TCP_SYN_ACK_RECV_EVENT",
    "TCP_ACK_SENT_EVENT", "TCP_ACK_RECV_EVENT",
    "TCP_DATA_SEG_SENT_EVENT", "TCP_DATA_SEG_RECV_EVENT",
    "TCP_FIN_SENT_EVENT", "TCP_FIN_RECV_EVENT",
    "TCP_RST_SENT_EVENT", "TCP_RST_RECV_EVENT",
    "TCP_NO_FLAGS_EVENT"
};
67
// A client-side tracker starts with no state; a server-side tracker starts
// in LISTEN, awaiting the client's SYN.
TcpStreamTracker::TcpStreamTracker(bool client) :
    tcp_state(client ? TCP_STATE_NONE : TCP_LISTEN), client_tracker(client),
    held_packet(null_iterator)
{ }
72
// The tracker owns its splitter (see set_splitter), so release it here.
TcpStreamTracker::~TcpStreamTracker()
{ delete splitter; }
75
set_tcp_event(const TcpSegmentDescriptor & tsd)76 TcpStreamTracker::TcpEvent TcpStreamTracker::set_tcp_event(const TcpSegmentDescriptor& tsd)
77 {
78 bool talker;
79 const tcp::TCPHdr* tcph = tsd.get_tcph();
80
81 if ( tsd.is_packet_from_client() )
82 talker = ( client_tracker ) ? true : false;
83 else
84 talker = ( client_tracker ) ? false : true;
85
86 // FIXIT-P would a lookup table help perf? the code would be a little cleaner too.
87 if ( talker )
88 {
89 // talker events
90 if ( tcph->is_syn_only() )
91 tcp_event = TCP_SYN_SENT_EVENT;
92 else if ( tcph->is_syn_ack() )
93 tcp_event = TCP_SYN_ACK_SENT_EVENT;
94 else if ( tcph->is_rst() )
95 tcp_event = TCP_RST_SENT_EVENT;
96 else if ( tcph->is_fin( ) )
97 tcp_event = TCP_FIN_SENT_EVENT;
98 else if ( tcph->is_ack() || tcph->is_psh() )
99 {
100 if ( tsd.is_data_segment() )
101 tcp_event = TCP_DATA_SEG_SENT_EVENT;
102 else
103 tcp_event = TCP_ACK_SENT_EVENT;
104 }
105 else
106 {
107 // count no flags set on the talker side...
108 tcpStats.no_flags_set++;
109 tcp_event = TCP_NO_FLAGS_EVENT;
110 }
111 }
112 else
113 {
114 // listener events
115 if ( tcph->is_syn_only() )
116 {
117 tcp_event = TCP_SYN_RECV_EVENT;
118 tcpStats.syns++;
119 if ( tcp_state == TcpStreamTracker::TCP_LISTEN )
120 DataBus::publish(STREAM_TCP_SYN_EVENT, tsd.get_pkt());
121 }
122 else if ( tcph->is_syn_ack() )
123 {
124 tcp_event = TCP_SYN_ACK_RECV_EVENT;
125 tcpStats.syn_acks++;
126 if ( tcp_state == TcpStreamTracker::TCP_SYN_SENT or
127 (!Stream::is_midstream(tsd.get_flow()) and
128 (tcp_state == TcpStreamTracker::TCP_LISTEN or
129 tcp_state == TcpStreamTracker::TCP_STATE_NONE)) )
130 DataBus::publish(STREAM_TCP_SYN_ACK_EVENT, tsd.get_pkt());
131 }
132 else if ( tcph->is_rst() )
133 {
134 tcp_event = TCP_RST_RECV_EVENT;
135 tcpStats.resets++;
136 }
137 else if ( tcph->is_fin( ) )
138 {
139 tcp_event = TCP_FIN_RECV_EVENT;
140 tcpStats.fins++;
141 }
142 else if ( tcph->is_ack() || tcph->is_psh() )
143 {
144 if ( tsd.is_data_segment() )
145 tcp_event = TCP_DATA_SEG_RECV_EVENT;
146 else
147 tcp_event = TCP_ACK_RECV_EVENT;
148 }
149 else
150 {
151 tcp_event = TCP_NO_FLAGS_EVENT;
152 }
153 }
154
155 return tcp_event;
156 }
157
158 // Use a for loop and byte comparison, which has proven to be faster on pipelined architectures
159 // compared to a memcmp (setup for memcmp is slow). Not using a 4 byte and 2 byte long because
160 // there is no guarantee of memory alignment (and thus performance issues similar to memcmp).
compare_mac_addresses(const uint8_t eth_addr[])161 bool TcpStreamTracker::compare_mac_addresses(const uint8_t eth_addr[])
162 {
163 if ( !mac_addr_valid )
164 return true;
165
166 for ( int i = 0; i < 6; ++i )
167 if ( mac_addr[i] != eth_addr[i] )
168 return false;
169
170 return true;
171 }
172
cache_mac_address(const TcpSegmentDescriptor & tsd,uint8_t direction)173 void TcpStreamTracker::cache_mac_address(const TcpSegmentDescriptor& tsd, uint8_t direction)
174 {
175 /* Not Ethernet based, nothing to do */
176 if ( tsd.get_pkt()->is_eth() )
177 {
178 // if flag is set, guaranteed to have an eth layer
179 const eth::EtherHdr* eh = layer::get_eth_layer(tsd.get_pkt() );
180
181 if ( direction == FROM_CLIENT )
182 {
183 if ( client_tracker )
184 for ( int i = 0; i < 6; i++ )
185 mac_addr[i] = eh->ether_src[i];
186 else
187 for ( int i = 0; i < 6; i++ )
188 mac_addr[i] = eh->ether_dst[i];
189 }
190 else
191 {
192 if ( client_tracker )
193 for ( int i = 0; i < 6; i++ )
194 mac_addr[i] = eh->ether_dst[i];
195 else
196 for ( int i = 0; i < 6; i++ )
197 mac_addr[i] = eh->ether_src[i];
198 }
199
200 mac_addr_valid = true;
201 }
202 }
203
// Reset the tracker to its pristine state: client side has no state, server
// side returns to LISTEN; all sequence bookkeeping, option flags, FIN/RST
// tracking, held-packet iterator, flush policy, and the reassembler are cleared.
void TcpStreamTracker::init_tcp_state()
{
    tcp_state = ( client_tracker ) ?
        TcpStreamTracker::TCP_STATE_NONE : TcpStreamTracker::TCP_LISTEN;

    snd_una = snd_nxt = snd_wnd = 0;
    rcv_nxt = r_win_base = iss = irs = 0;
    ts_last = ts_last_packet = 0;
    small_seg_count = 0;
    wscale = 0;
    mss = 0;
    tf_flags = 0;
    mac_addr_valid = false;
    fin_final_seq = 0;
    fin_seq_status = TcpStreamTracker::FIN_NOT_SEEN;
    fin_seq_set = false;
    rst_pkt_sent = false;
    order = 0;
    held_packet = null_iterator;
    flush_policy = STREAM_FLPOLICY_IGNORE;
    reassembler.reset();
}
226
227 //-------------------------------------------------------------------------
228 // flush policy stuff
229 //-------------------------------------------------------------------------
230
init_flush_policy()231 void TcpStreamTracker::init_flush_policy()
232 {
233 if ( !splitter )
234 flush_policy = STREAM_FLPOLICY_IGNORE;
235 else if ( normalizer.is_tcp_ips_enabled() )
236 flush_policy = STREAM_FLPOLICY_ON_DATA;
237 else
238 flush_policy = STREAM_FLPOLICY_ON_ACK;
239 }
240
set_splitter(StreamSplitter * ss)241 void TcpStreamTracker::set_splitter(StreamSplitter* ss)
242 {
243 if ( splitter )
244 delete splitter;
245
246 splitter = ss;
247
248 if ( !splitter )
249 flush_policy = STREAM_FLPOLICY_IGNORE;
250 else
251 reassembler.setup_paf();
252 }
253
set_splitter(const Flow * flow)254 void TcpStreamTracker::set_splitter(const Flow* flow)
255 {
256 Inspector* ins = flow->gadget;
257
258 if ( !ins )
259 ins = flow->clouseau;
260
261 if ( ins )
262 set_splitter(ins->get_splitter(!client_tracker) );
263 else
264 set_splitter(new AtomSplitter(!client_tracker) );
265 }
266
// Initialize this (client-side) tracker from the client's initial SYN:
// record ISS/send-window state, timestamp/MSS/wscale options, and move to
// SYN_SENT.
void TcpStreamTracker::init_on_syn_sent(TcpSegmentDescriptor& tsd)
{
    tsd.get_flow()->set_session_flags(SSNFLAG_SEEN_CLIENT);
    // CWR+ECE on a SYN is the ECN-capable query
    if ( tsd.get_tcph()->are_flags_set(TH_CWR | TH_ECE) )
        tsd.get_flow()->set_session_flags(SSNFLAG_ECN_CLIENT_QUERY);

    iss = tsd.get_seq();
    snd_una = iss;
    snd_nxt = tsd.get_end_seq();
    snd_wnd = tsd.get_wnd();

    ts_last_packet = tsd.get_packet_timestamp();
    tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
    ts_last = tsd.get_timestamp();
    // a zero TS option value is remembered so later checks can special-case it
    if (ts_last == 0)
        tf_flags |= TF_TSTAMP_ZERO;
    tf_flags |= tsd.init_mss(&mss);
    tf_flags |= tsd.init_wscale(&wscale);

    cache_mac_address(tsd, FROM_CLIENT);
    tcp_state = TcpStreamTracker::TCP_SYN_SENT;
    tcpStats.sessions_on_syn++;
}
290
// Initialize this (server-side) tracker on receipt of the client's SYN:
// record IRS and expect the next receive sequence one past the SYN.
void TcpStreamTracker::init_on_syn_recv(TcpSegmentDescriptor& tsd)
{
    irs = tsd.get_seq();

    // the SYN consumes one sequence number
    rcv_nxt = tsd.get_seq() + 1;
    r_win_base = tsd.get_seq() + 1;
    reassembler.set_seglist_base_seq(tsd.get_seq() + 1);

    cache_mac_address(tsd, FROM_CLIENT);
    tcp_state = TcpStreamTracker::TCP_SYN_RECV;
}
302
// Initialize from a SYN-ACK sent by this (server) side — e.g. when the
// session is picked up at the SYN-ACK: derive the peer's IRS from the ack,
// record send-window state and TCP options, and move to SYN_RECV.
void TcpStreamTracker::init_on_synack_sent(TcpSegmentDescriptor& tsd)
{
    tsd.get_flow()->set_session_flags(SSNFLAG_SEEN_SERVER);
    // CWR+ECE on the SYN-ACK is the ECN reply
    if (tsd.get_tcph()->are_flags_set(TH_CWR | TH_ECE))
        tsd.get_flow()->set_session_flags(SSNFLAG_ECN_SERVER_REPLY);

    iss = tsd.get_seq();
    // the ack acknowledges the peer's SYN, so its ISS was ack - 1
    irs = tsd.get_ack() - 1;
    snd_una = tsd.get_seq();
    snd_nxt = tsd.get_end_seq();
    snd_wnd = tsd.get_wnd();

    r_win_base = tsd.get_ack();
    rcv_nxt = tsd.get_ack();
    reassembler.set_seglist_base_seq(tsd.get_ack() );

    ts_last_packet = tsd.get_packet_timestamp();
    tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
    ts_last = tsd.get_timestamp();
    if ( ts_last == 0 )
        tf_flags |= TF_TSTAMP_ZERO;
    tf_flags |= tsd.init_mss(&mss);
    tf_flags |= tsd.init_wscale(&wscale);

    cache_mac_address(tsd, FROM_SERVER);
    tcp_state = TcpStreamTracker::TCP_SYN_RECV;
    tcpStats.sessions_on_syn_ack++;
}
331
// Initialize this (client-side) tracker on receipt of the server's SYN-ACK:
// our ISS is recovered from the ack, the server's IRS from the seq, and the
// tracker moves straight to ESTABLISHED.
void TcpStreamTracker::init_on_synack_recv(TcpSegmentDescriptor& tsd)
{
    // the ack acknowledges our SYN, so our ISS was ack - 1
    iss = tsd.get_ack() - 1;
    irs = tsd.get_seq();
    snd_una = tsd.get_ack();
    snd_nxt = snd_una;

    // the server's SYN consumes one sequence number
    rcv_nxt = tsd.get_seq() + 1;
    r_win_base = tsd.get_seq() + 1;
    reassembler.set_seglist_base_seq(tsd.get_seq() + 1);

    cache_mac_address(tsd, FROM_SERVER);
    tcp_state = TcpStreamTracker::TCP_ESTABLISHED;
}
346
// Initialize from the final handshake ACK sent by this (client) side — e.g.
// when the session is picked up on the 3whs ACK: record send/receive state
// and TCP options, then mark the tracker ESTABLISHED.
void TcpStreamTracker::init_on_3whs_ack_sent(TcpSegmentDescriptor& tsd)
{
    tsd.get_flow()->set_session_flags(SSNFLAG_SEEN_CLIENT);

    if ( tsd.get_tcph()->are_flags_set(TH_CWR | TH_ECE) )
        tsd.get_flow()->set_session_flags(SSNFLAG_ECN_CLIENT_QUERY);

    iss = tsd.get_seq();
    snd_una = tsd.get_seq();
    snd_nxt = snd_una;
    snd_wnd = tsd.get_wnd();

    r_win_base = tsd.get_ack();
    rcv_nxt = tsd.get_ack();

    ts_last_packet = tsd.get_packet_timestamp();
    tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
    ts_last = tsd.get_timestamp();
    if (ts_last == 0)
        tf_flags |= TF_TSTAMP_ZERO;
    // NOTE(review): unlike the SYN/SYN-ACK init paths, MSS is not captured
    // here — the bare ACK carries no MSS option
    tf_flags |= tsd.init_wscale(&wscale);

    cache_mac_address(tsd, FROM_CLIENT);
    tcp_state = TcpStreamTracker::TCP_ESTABLISHED;
}
372
// Initialize this (server-side) tracker on receipt of the final handshake
// ACK: derive our ISS from the ack, record the client's IRS, and mark the
// session ESTABLISHED.
void TcpStreamTracker::init_on_3whs_ack_recv(TcpSegmentDescriptor& tsd)
{
    iss = tsd.get_ack() - 1;
    irs = tsd.get_seq();
    snd_una = tsd.get_ack();
    snd_nxt = snd_una;

    rcv_nxt = tsd.get_seq();
    r_win_base = tsd.get_seq();
    // NOTE(review): base seq is seq + 1 while rcv_nxt is seq — looks
    // intentional for this pickup path, but confirm against the reassembler
    reassembler.set_seglist_base_seq(tsd.get_seq() + 1);

    cache_mac_address(tsd, FROM_CLIENT);
    tcp_state = TcpStreamTracker::TCP_ESTABLISHED;
    tcpStats.sessions_on_3way++;
}
388
// Initialize from a data segment sent by this side (midstream pickup with no
// handshake seen): seed send/receive state from the segment, flag the seglist
// base for reinit, capture options, and mark ESTABLISHED.
void TcpStreamTracker::init_on_data_seg_sent(TcpSegmentDescriptor& tsd)
{
    Flow* flow = tsd.get_flow();

    if ( flow->ssn_state.direction == FROM_CLIENT )
        flow->set_session_flags(SSNFLAG_SEEN_CLIENT);
    else
        flow->set_session_flags(SSNFLAG_SEEN_SERVER);

    iss = tsd.get_seq();
    irs = tsd.get_ack();
    snd_una = tsd.get_seq();
    // account for the payload already in flight on this segment
    snd_nxt = snd_una + tsd.get_len();
    snd_wnd = tsd.get_wnd();

    r_win_base = tsd.get_ack();
    rcv_nxt = tsd.get_ack();
    reassembler.set_seglist_base_seq(tsd.get_ack());
    // base seq was guessed from the ack; allow it to be corrected later
    reinit_seg_base = true;

    ts_last_packet = tsd.get_packet_timestamp();
    tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
    ts_last = tsd.get_timestamp();
    if (ts_last == 0)
        tf_flags |= TF_TSTAMP_ZERO;
    tf_flags |= tsd.init_wscale(&wscale);

    cache_mac_address(tsd, tsd.get_direction() );
    tcp_state = TcpStreamTracker::TCP_ESTABLISHED;
}
419
// Initialize on receipt of a data segment (midstream pickup from the
// receiving side): seed state from the segment and mark ESTABLISHED.
void TcpStreamTracker::init_on_data_seg_recv(TcpSegmentDescriptor& tsd)
{
    iss = tsd.get_ack();
    irs = tsd.get_seq();
    snd_una = tsd.get_ack();
    snd_nxt = snd_una;
    snd_wnd = 0; /* reset later */

    rcv_nxt = tsd.get_seq();
    r_win_base = tsd.get_seq();
    reassembler.set_seglist_base_seq(tsd.get_seq());

    cache_mac_address(tsd, tsd.get_direction() );
    tcp_state = TcpStreamTracker::TCP_ESTABLISHED;
    tcpStats.sessions_on_data++;
}
436
// Complete server-side initialization once the server's first segment
// (typically the SYN-ACK) is seen: record send-window state and TCP options.
void TcpStreamTracker::finish_server_init(TcpSegmentDescriptor& tsd)
{
    iss = tsd.get_seq();
    snd_una = tsd.get_seq();
    snd_nxt = tsd.get_end_seq();
    snd_wnd = tsd.get_wnd();

    // FIXIT-M move this to fin handler for syn_recv state ..
    //if ( tcph->is_fin() )
    //    server->set_snd_nxt(server->get_snd_nxt() - 1);

    tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
    ts_last = tsd.get_timestamp();
    // only track the packet time when a non-zero TS option was present
    if ( ts_last != 0 )
        ts_last_packet = tsd.get_packet_timestamp();
    else
        tf_flags |= TF_TSTAMP_ZERO;

    tf_flags |= ( tsd.init_mss(&mss) | tsd.init_wscale(&wscale) );
}
457
// Complete client-side receive initialization. On a normal handshake the
// seglist base skips the SYN (seq + 1); on a midstream pickup the segment's
// own seq is used since no SYN was seen.
void TcpStreamTracker::finish_client_init(TcpSegmentDescriptor& tsd)
{
    Flow* flow = tsd.get_flow();

    rcv_nxt = tsd.get_end_seq();

    if ( !( flow->session_state & STREAM_STATE_MIDSTREAM ) )
    {
        reassembler.set_seglist_base_seq(tsd.get_seq() + 1);
        r_win_base = tsd.get_end_seq();
    }
    else
    {
        reassembler.set_seglist_base_seq(tsd.get_seq() );
        r_win_base = tsd.get_seq();
    }
}
475
update_tracker_ack_recv(TcpSegmentDescriptor & tsd)476 void TcpStreamTracker::update_tracker_ack_recv(TcpSegmentDescriptor& tsd)
477 {
478 if ( SEQ_GT(tsd.get_ack(), snd_una) )
479 {
480 snd_una = tsd.get_ack();
481 if ( snd_nxt < snd_una )
482 snd_nxt = snd_una;
483 }
484 }
485
// In no-ack policy, data is implicitly acked immediately.
// The whole segment is treated as acknowledged the moment it is seen.
void TcpStreamTracker::update_tracker_no_ack_recv(TcpSegmentDescriptor& tsd)
{
    snd_una = snd_nxt = tsd.get_end_seq();
}
491
// No-ack policy, sender side: advance the receive window base past the
// segment and run the on-ack flush policy immediately.
void TcpStreamTracker::update_tracker_no_ack_sent(TcpSegmentDescriptor& tsd)
{
    r_win_base = tsd.get_end_seq();
    reassembler.flush_on_ack_policy(tsd.get_pkt());
}
497
// Update state when this side sends an ACK: advance snd_nxt past the segment,
// move the receive window base up to the acked point, refresh the send
// window, promote FIN tracking once the FIN's sequence is fully acked, and
// run the on-ack flush policy.
void TcpStreamTracker::update_tracker_ack_sent(TcpSegmentDescriptor& tsd)
{
    if ( SEQ_GT(tsd.get_end_seq(), snd_nxt) )
        snd_nxt = tsd.get_end_seq();

    if ( SEQ_GEQ(tsd.get_ack(), r_win_base) )
    {
        if ( SEQ_GT(tsd.get_ack(), r_win_base) )
            r_win_base = tsd.get_ack();

        // window update is taken even for a duplicate ack (equality case)
        snd_wnd = tsd.get_wnd();
    }

    if ( ( fin_seq_status == TcpStreamTracker::FIN_WITH_SEQ_SEEN )
        && SEQ_EQ(r_win_base, fin_final_seq) )
    {
        fin_seq_status = TcpStreamTracker::FIN_WITH_SEQ_ACKED;
    }

    reassembler.flush_on_ack_policy(tsd.get_pkt());
}
519
// Handle the final ACK of the 3-way handshake. Returns true (and transitions
// the flow/tracker to ESTABLISHED) only when the ack number is valid;
// otherwise the state is left untouched.
bool TcpStreamTracker::update_on_3whs_ack(TcpSegmentDescriptor& tsd)
{
    bool good_ack = is_ack_valid(tsd.get_ack());

    if ( good_ack )
    {
        Flow* flow = tsd.get_flow();

        irs = tsd.get_seq();
        finish_client_init(tsd);
        update_tracker_ack_recv(tsd);
        flow->set_session_flags(SSNFLAG_ESTABLISHED);
        flow->session_state |= ( STREAM_STATE_ACK | STREAM_STATE_ESTABLISHED );
        tcp_state = TcpStreamTracker::TCP_ESTABLISHED;
    }

    return good_ack;
}
538
// Handle a received RST. A valid RST marks the flow reset (and, in inline
// mode, closes the tracker); an invalid one raises the bad-RST event, lets
// the normalizer drop the packet, and flags the action as bad.
bool TcpStreamTracker::update_on_rst_recv(TcpSegmentDescriptor& tsd)
{
    normalizer.trim_rst_payload(tsd);
    bool good_rst = normalizer.validate_rst(tsd);
    if ( good_rst )
    {
        Flow* flow = tsd.get_flow();

        flow->set_session_flags(SSNFLAG_RESET);
        // only move to CLOSED when inline; passive mode must tolerate
        // resets the endpoint may ignore
        if ( normalizer.is_tcp_ips_enabled() )
            tcp_state = TcpStreamTracker::TCP_CLOSED;
    }
    else
    {
        session->tel.set_tcp_event(EVENT_BAD_RST);
        normalizer.packet_dropper(tsd, NORM_TCP_BLOCK);
        session->set_pkt_action_flag(ACTION_BAD_PKT);
    }

    return good_rst;
}
560
// This side sent a RST: close the tracker and remember the reset was sent.
void TcpStreamTracker::update_on_rst_sent()
{
    tcp_state = TcpStreamTracker::TCP_CLOSED;
    rst_pkt_sent = true;
}
566
// Handle a received FIN. Returns false for a FIN entirely left of the
// receive window base. An in-sequence FIN consumes one sequence number;
// an out-of-sequence one is noted via fin_seq_adjust. The first FIN seen
// also pins the final sequence number for later bad-segment checks.
bool TcpStreamTracker::update_on_fin_recv(TcpSegmentDescriptor& tsd)
{
    if ( SEQ_LT(tsd.get_end_seq(), r_win_base) )
        return false;

    //--------------------------------------------------
    // FIXIT-L don't bump rcv_nxt unless FIN is in seq
    // because it causes bogus 129:5 cases
    // but doing so causes extra gaps
    if ( SEQ_EQ(tsd.get_end_seq(), rcv_nxt) )
        rcv_nxt++;
    else
        fin_seq_adjust = 1;

    // set final seq # any packet rx'ed with seq > is bad
    if ( !fin_seq_set )
    {
        fin_final_seq = tsd.get_end_seq();
        fin_seq_set = true;
        // a bare FIN (no payload) can be tracked through the acked state
        if( tsd.get_len() == 0 )
            fin_seq_status = TcpStreamTracker::FIN_WITH_SEQ_SEEN;
    }

    return true;
}
592
// This side sent a FIN: apply the normal ack-sent bookkeeping, then account
// for the sequence number the FIN itself consumes.
bool TcpStreamTracker::update_on_fin_sent(TcpSegmentDescriptor& tsd)
{
    update_tracker_ack_sent(tsd);
    snd_nxt++;
    return true;
}
599
is_segment_seq_valid(TcpSegmentDescriptor & tsd)600 bool TcpStreamTracker::is_segment_seq_valid(TcpSegmentDescriptor& tsd)
601 {
602 bool valid_seq = true;
603
604 int right_ok;
605 uint32_t left_seq;
606
607 if ( SEQ_LT(rcv_nxt, r_win_base) )
608 left_seq = rcv_nxt;
609 else
610 left_seq = r_win_base;
611
612 if ( tsd.is_data_segment() )
613 right_ok = SEQ_GT(tsd.get_end_seq(), left_seq);
614 else
615 right_ok = SEQ_GEQ(tsd.get_end_seq(), left_seq);
616
617 if ( right_ok )
618 {
619 uint32_t win = normalizer.get_stream_window(tsd);
620
621 if ( SEQ_LEQ(tsd.get_seq(), r_win_base + win) )
622 return true;
623 else
624 valid_seq = false;
625 }
626 else
627 valid_seq = false;
628
629 return valid_seq;
630 }
631
// Hold the packet's DAQ message in the per-thread queue until a verdict can
// be finalized. Returns false when a packet is already held on this tracker.
bool TcpStreamTracker::set_held_packet(Packet* p)
{
    if ( held_packet != null_iterator )
        return false;

    held_packet = hpq->append(p->daq_msg, p->ptrs.tcph->seq(), *this);
    held_pkt_seq = p->ptrs.tcph->seq();

    tcpStats.total_packets_held++;
    // track the high-water mark of concurrently held packets
    if ( ++tcpStats.current_packets_held > tcpStats.max_packets_held )
        tcpStats.max_packets_held = tcpStats.current_packets_held;

    return true;
}
646
// Flush triggered by a received FIN: data segments go through normal data
// handling; an in-sequence bare FIN under the on-data policy flushes any
// queued segments instead.
void TcpStreamTracker::perform_fin_recv_flush(TcpSegmentDescriptor& tsd)
{
    if ( tsd.is_data_segment() )
        session->handle_data_segment(tsd);
    else if ( flush_policy == STREAM_FLPOLICY_ON_DATA and SEQ_EQ(tsd.get_seq(), rcv_nxt) )
        reassembler.flush_queued_segments(tsd.get_flow(), true, tsd.get_pkt());
}
654
perform_partial_flush()655 uint32_t TcpStreamTracker::perform_partial_flush()
656 {
657 uint32_t flushed = 0;
658 if ( held_packet != null_iterator )
659 flushed = reassembler.perform_partial_flush(session->flow);
660 return flushed;
661 }
662
// Return true when cp is a retransmission covering the held packet's sequence
// number. The held message itself (same DAQ message) is never counted as a
// retransmit.
bool TcpStreamTracker::is_retransmit_of_held_packet(Packet* cp)
{
    if ( (held_packet == null_iterator) or ( cp->daq_msg == held_packet->get_daq_msg() ) )
        return false;

    // does [seq, seq + dsize) span the held packet's sequence number?
    uint32_t next_send_seq = cp->ptrs.tcph->seq() + (uint32_t)cp->dsize;
    if ( SEQ_LEQ(cp->ptrs.tcph->seq(), held_packet->get_seq_num()) and SEQ_GT(next_send_seq, held_packet->get_seq_num()) )
    {
        tcpStats.held_packet_rexmits++;
        return true;
    }

    return false;
}
677
// Finalize the held packet based on the verdict carried by cp's active state:
// dropped -> block (blacklist when the hold expired); retry requested ->
// requeue; otherwise pass. Always removes the hold from the queue and cancels
// any pending hold on cp.
void TcpStreamTracker::finalize_held_packet(Packet* cp)
{
    if ( held_packet != null_iterator )
    {
        DAQ_Msg_h msg = held_packet->get_daq_msg();

        if ( cp->active->packet_was_dropped() )
        {
            // expired holds are blacklisted so the DAQ can drop the flow
            DAQ_Verdict verdict = held_packet->has_expired() ? DAQ_VERDICT_BLACKLIST : DAQ_VERDICT_BLOCK;
            Analyzer::get_local_analyzer()->finalize_daq_message(msg, verdict);
            tcpStats.held_packets_dropped++;
        }
        else
        {
            if ( cp->active->packet_retry_requested() )
            {
                tcpStats.held_packet_retries++;
                Analyzer::get_local_analyzer()->add_to_retry_queue(msg);
            }
            else
            {
                Analyzer::get_local_analyzer()->finalize_daq_message(msg, DAQ_VERDICT_PASS);
                tcpStats.held_packets_passed++;
            }

            // the session no longer has a held direction once the packet is
            // passed or queued for retry
            TcpStreamSession* tcp_session = (TcpStreamSession*)cp->flow->session;
            tcp_session->held_packet_dir = SSN_DIR_NONE;
        }

        hpq->erase(held_packet);
        held_packet = null_iterator;
        tcpStats.current_packets_held--;
    }

    if (cp->active->is_packet_held())
        cp->active->cancel_packet_hold();
}
715
// Finalize the held packet using only flow state (no current packet in hand,
// e.g. at flow teardown): block when the flow is blocked or pending block,
// otherwise pass; then release the hold.
void TcpStreamTracker::finalize_held_packet(Flow* flow)
{
    if ( held_packet != null_iterator )
    {
        DAQ_Msg_h msg = held_packet->get_daq_msg();

        if ( (flow->session_state & STREAM_STATE_BLOCK_PENDING) ||
             (flow->ssn_state.session_flags & SSNFLAG_BLOCK) )
        {
            // expired holds are blacklisted so the DAQ can drop the flow
            DAQ_Verdict verdict = held_packet->has_expired() ? DAQ_VERDICT_BLACKLIST : DAQ_VERDICT_BLOCK;
            Analyzer::get_local_analyzer()->finalize_daq_message(msg, verdict);
            tcpStats.held_packets_dropped++;
        }
        else
        {
            TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
            tcp_session->held_packet_dir = SSN_DIR_NONE;
            Analyzer::get_local_analyzer()->finalize_daq_message(msg, DAQ_VERDICT_PASS);
            tcpStats.held_packets_passed++;
        }

        hpq->erase(held_packet);
        held_packet = null_iterator;
        tcpStats.current_packets_held--;
    }
}
742
release_held_packets(const timeval & cur_time,int max_remove)743 bool TcpStreamTracker::release_held_packets(const timeval& cur_time, int max_remove)
744 {
745 bool is_front_expired = false;
746 if ( hpq )
747 is_front_expired = hpq->execute(cur_time, max_remove);
748 return is_front_expired;
749 }
750
// Set the held-packet timeout (milliseconds) on this thread's queue;
// requires thread_init() to have run.
void TcpStreamTracker::set_held_packet_timeout(const uint32_t ms)
{
    assert(hpq);
    hpq->set_timeout(ms);
}
756
// Adjust expiration of already-queued held packets to the new timeout,
// relative to now; forwards the queue's result. Requires thread_init().
bool TcpStreamTracker::adjust_expiration(uint32_t new_timeout_ms, const timeval& now)
{
    assert(hpq);
    return hpq->adjust_expiration(new_timeout_ms, now);
}
762
// Per-thread startup: create this thread's held-packet queue (must not
// already exist).
void TcpStreamTracker::thread_init()
{
    assert(!hpq);
    hpq = new HeldPacketQueue();
}
768
thread_term()769 void TcpStreamTracker::thread_term()
770 {
771 assert(hpq->empty());
772 delete hpq;
773 hpq = nullptr;
774 }
775