/* Copyright (C) 2017 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/**
 *  \file
 *  \author Victor Julien <victor@inliniac.net>
 *
 * Tracks chunk-based file transfers. Chunks may be transferred out
 * of order, but cannot be transferred in parallel, so only one
 * chunk is in progress at a time.
 *
 * GAP handling: if a data gap is encountered, the file is truncated
 * and new data is no longer pushed down to the lower level APIs.
 * The tracker does continue to follow the file.
 */
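
// Typical call sequence, sketched from the API below (not tied to any
// particular caller): new_chunk() starts a chunk at a given file offset
// and opens the file if needed; update() feeds further data for the
// current chunk, or a GAP when gap_size > 0; trunc() stops passing data
// to the file API while offsets are still tracked; close() ends the file.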

use crate::core::*;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use crate::filecontainer::*;

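/// A chunk buffered because it arrived out of order: the bytes received
/// so far and whether any of them were part of a GAP.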
#[derive(Debug)]
pub struct FileChunk {
    contains_gap: bool,
    chunk: Vec<u8>,
}

impl FileChunk {
    pub fn new(size: u32) -> FileChunk {
        FileChunk {
            contains_gap: false,
            chunk: Vec::with_capacity(size as usize),
        }
    }
}

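/// State for one chunk-based file transfer: the in-order offset tracked so
/// far, progress within the chunk currently being received, and any
/// out-of-order chunks buffered until the tracked offset catches up.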
#[derive(Debug)]
pub struct FileTransferTracker {
    pub tracked: u64,
    cur_ooo: u64,   // how many bytes do we have queued from ooo chunks
    track_id: u32,
    chunk_left: u32,

    pub tx_id: u64,

    fill_bytes: u8,
    pub file_open: bool,
    chunk_is_last: bool,
    chunk_is_ooo: bool,
    file_is_truncated: bool,

    chunks: HashMap<u64, FileChunk>,
    cur_ooo_chunk_offset: u64,
}

impl FileTransferTracker {
    pub fn new() -> FileTransferTracker {
        FileTransferTracker {
            tracked: 0,
            cur_ooo: 0,
            track_id: 0,
            chunk_left: 0,
            tx_id: 0,
            fill_bytes: 0,
            file_open: false,
            chunk_is_last: false,
            chunk_is_ooo: false,
            file_is_truncated: false,
            cur_ooo_chunk_offset: 0,
            chunks: HashMap::new(),
        }
    }

    pub fn is_done(&self) -> bool {
        !self.file_open
    }

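    /// Open the file in the `FileContainer` under the current `track_id`
    /// and tie it to the current transaction id.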
    fn open(&mut self, config: &'static SuricataFileContext,
            files: &mut FileContainer, flags: u16, name: &[u8]) -> i32
    {
        let r = files.file_open(config, &self.track_id, name, flags);
        if r == 0 {
            files.file_set_txid_on_last_file(self.tx_id);
            self.file_open = true;
        }
        r
    }

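    /// Close the file at the file API level (unless it was already closed
    /// by truncation) and reset the tracker's open state.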
    pub fn close(&mut self, files: &mut FileContainer, flags: u16) {
        if !self.file_is_truncated {
            SCLogDebug!("closing file with id {}", self.track_id);
            files.file_close(&self.track_id, flags);
        }
        self.file_open = false;
        self.tracked = 0;
    }

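    /// Truncate the file: close it at the file API level with the
    /// FILE_TRUNCATED flag so no further data is passed down, while the
    /// tracker keeps following offsets.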
    pub fn trunc(&mut self, files: &mut FileContainer, flags: u16) {
        if self.file_is_truncated || !self.file_open {
            return;
        }
        let myflags = flags | 1; // TODO util-file.c::FILE_TRUNCATED
        files.file_close(&self.track_id, myflags);
        SCLogDebug!("truncated file");
        self.file_is_truncated = true;
    }

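    /// Announce a new file. The actual open is deferred to `new_chunk()`;
    /// this only asserts that no file is currently open and logs the name
    /// and size.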
    pub fn create(&mut self, _name: &[u8], _file_size: u64) {
        if self.file_open { panic!("close existing file first"); }

        SCLogDebug!("CREATE: name {:?} file_size {}", _name, _file_size);
    }

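    /// Set up tracking for a new chunk of `chunk_size` bytes at
    /// `chunk_offset`, followed by `fill_bytes` that are consumed but not
    /// stored. Opens the file if needed, then runs `update()` on `data`
    /// and returns how much of it was consumed.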
    pub fn new_chunk(&mut self, config: &'static SuricataFileContext,
            files: &mut FileContainer, flags: u16,
            name: &[u8], data: &[u8], chunk_offset: u64, chunk_size: u32,
            fill_bytes: u8, is_last: bool, xid: &u32) -> u32
    {
        if self.chunk_left != 0 || self.fill_bytes != 0 {
            SCLogDebug!("current chunk incomplete: truncating");
            self.trunc(files, flags);
        }

        SCLogDebug!("NEW CHUNK: chunk_size {} fill_bytes {}", chunk_size, fill_bytes);

        // for now assume that is_last means it is really the last chunk,
        // with no out-of-order chunks coming after it. This means that if
        // the last chunk is out of order, we've missed chunks before it.
        if chunk_offset != self.tracked {
            SCLogDebug!("NEW CHUNK IS OOO: expected {}, got {}", self.tracked, chunk_offset);
            if is_last {
                SCLogDebug!("last chunk is out of order, this means we missed data before");
                self.trunc(files, flags);
            }
            self.chunk_is_ooo = true;
            self.cur_ooo_chunk_offset = chunk_offset;
        }

        self.chunk_left = chunk_size;
        self.fill_bytes = fill_bytes;
        self.chunk_is_last = is_last;

        if !self.file_open {
            SCLogDebug!("NEW CHUNK: FILE OPEN");
            self.track_id = *xid;
            self.open(config, files, flags, name);
        }

        if self.file_open {
            let res = self.update(files, flags, data, 0);
            SCLogDebug!("NEW CHUNK: update res {:?}", res);
            return res;
        }

        0
    }

    /// Update the file tracker: feed `data` (or a GAP of `gap_size` bytes,
    /// in which case `data` should not be used) into the current chunk.
    /// Returns the number of bytes of `data` that were consumed.
    pub fn update(&mut self, files: &mut FileContainer, flags: u16, data: &[u8], gap_size: u32) -> u32 {
        let mut consumed = 0_usize;
        let is_gap = gap_size > 0;
        if is_gap {
            SCLogDebug!("is_gap {} size {} ooo? {}", is_gap, gap_size, self.chunk_is_ooo);
        }

        if self.chunk_left == 0 && self.fill_bytes == 0 {
            //SCLogDebug!("UPDATE: nothing to do");
            if self.chunk_is_last {
                SCLogDebug!("last empty chunk, closing");
                self.close(files, flags);
                self.chunk_is_last = false;
            }
            return 0;
        } else if self.chunk_left == 0 {
            SCLogDebug!("FILL BYTES {} from prev run", self.fill_bytes);
            if data.len() >= self.fill_bytes as usize {
                consumed += self.fill_bytes as usize;
                self.fill_bytes = 0;
                SCLogDebug!("CHUNK(pre) fill bytes now 0");
            } else {
                consumed += data.len();
                self.fill_bytes -= data.len() as u8;
                SCLogDebug!("CHUNK(pre) fill bytes now still {}", self.fill_bytes);
            }
            SCLogDebug!("FILL BYTES: returning {}", consumed);
            return consumed as u32;
        }
        SCLogDebug!("UPDATE: data {} chunk_left {}", data.len(), self.chunk_left);

        if self.chunk_left > 0 {
            if self.chunk_left <= data.len() as u32 {
                let d = &data[0..self.chunk_left as usize];

                if !self.chunk_is_ooo {
                    let res = files.file_append(&self.track_id, d, is_gap);
                    match res {
                        0   => { },
                        -2  => {
                            self.file_is_truncated = true;
                        },
                        _ => {
                            SCLogDebug!("got error so truncating file");
                            self.file_is_truncated = true;
                        },
                    }

                    self.tracked += self.chunk_left as u64;
                } else {
                    SCLogDebug!("UPDATE: appending data {} to ooo chunk at offset {}/{}",
                            d.len(), self.cur_ooo_chunk_offset, self.tracked);
                    let c = match self.chunks.entry(self.cur_ooo_chunk_offset) {
                        Vacant(entry) => {
                            entry.insert(FileChunk::new(self.chunk_left))
                        },
                        Occupied(entry) => entry.into_mut(),
                    };
                    self.cur_ooo += d.len() as u64;
                    c.contains_gap |= is_gap;
                    c.chunk.extend(d);
                }

                consumed += self.chunk_left as usize;
                if self.fill_bytes > 0 {
                    let extra = data.len() - self.chunk_left as usize;
                    if extra >= self.fill_bytes as usize {
                        consumed += self.fill_bytes as usize;
                        self.fill_bytes = 0;
                        SCLogDebug!("CHUNK(post) fill bytes now 0");
                    } else {
                        consumed += extra;
                        self.fill_bytes -= extra as u8;
                        SCLogDebug!("CHUNK(post) fill bytes now still {}", self.fill_bytes);
                    }
                    self.chunk_left = 0;
                } else {
                    self.chunk_left = 0;

                    if !self.chunk_is_ooo {
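                        // the in-order chunk just completed: flush any
                        // buffered out-of-order chunks that have become
                        // contiguous with the tracked offset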
                        loop {
                            let _offset = self.tracked;
                            match self.chunks.remove(&self.tracked) {
                                Some(c) => {
                                    let res = files.file_append(&self.track_id, &c.chunk, c.contains_gap);
                                    match res {
                                        0   => { },
                                        -2  => {
                                            self.file_is_truncated = true;
                                        },
                                        _ => {
                                            SCLogDebug!("got error so truncating file");
                                            self.file_is_truncated = true;
                                        },
                                    }

                                    self.tracked += c.chunk.len() as u64;
                                    self.cur_ooo -= c.chunk.len() as u64;

                                    SCLogDebug!("STORED OOO CHUNK at offset {}, tracked now {}, stored len {}", _offset, self.tracked, c.chunk.len());
                                },
                                _ => {
                                    SCLogDebug!("NO STORED CHUNK found at _offset {}", self.tracked);
                                    break;
                                },
                            };
                        }
                    } else {
                        SCLogDebug!("UPDATE: complete ooo chunk. Offset {}", self.cur_ooo_chunk_offset);

                        self.chunk_is_ooo = false;
                        self.cur_ooo_chunk_offset = 0;
                    }
                }
                if self.chunk_is_last {
                    SCLogDebug!("last chunk, closing");
                    self.close(files, flags);
                    self.chunk_is_last = false;
                } else {
                    SCLogDebug!("NOT last chunk, keep going");
                }

            } else {
                if !self.chunk_is_ooo {
                    let res = files.file_append(&self.track_id, data, is_gap);
                    match res {
                        0   => { },
                        -2  => {
                            self.file_is_truncated = true;
                        },
                        _ => {
                            SCLogDebug!("got error so truncating file");
                            self.file_is_truncated = true;
                        },
                    }
                    self.tracked += data.len() as u64;
                } else {
                    let c = match self.chunks.entry(self.cur_ooo_chunk_offset) {
                        Vacant(entry) => entry.insert(FileChunk::new(32768)),
                        Occupied(entry) => entry.into_mut(),
                    };
                    c.chunk.extend(data);
                    c.contains_gap |= is_gap;
                    self.cur_ooo += data.len() as u64;
                }

                self.chunk_left -= data.len() as u32;
                consumed += data.len();
            }
        }
        consumed as u32
    }
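
    /// Number of bytes currently queued in out-of-order chunks.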
    pub fn get_queued_size(&self) -> u64 {
        self.cur_ooo
    }
}