1 /*********************************************************************/
2 // dar - disk archive - a backup/restoration program
3 // Copyright (C) 2002-2052 Denis Corbin
4 //
5 // This program is free software; you can redistribute it and/or
6 // modify it under the terms of the GNU General Public License
7 // as published by the Free Software Foundation; either version 2
8 // of the License, or (at your option) any later version.
9 //
10 // This program is distributed in the hope that it will be useful,
11 // but WITHOUT ANY WARRANTY; without even the implied warranty of
12 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 // GNU General Public License for more details.
14 //
15 // You should have received a copy of the GNU General Public License
16 // along with this program; if not, write to the Free Software
17 // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
18 //
19 // to contact the author : http://dar.linux.free.fr/email.html
20 /*********************************************************************/
21 
22 #include "../my_config.h"
23 
24 extern "C"
25 {
26 #if HAVE_LIMITS_H
27 #include <limits.h>
28 #endif
29 
30 #if HAVE_STRING_H
31 #include <string.h>
32 #endif
33 
34 #if HAVE_STRINGS_H
35 #include <strings.h>
36 #endif
37 
38 #if STDC_HEADERS
39 # include <string.h>
40 #else
41 # if !HAVE_STRCHR
42 #  define strchr index
43 #  define strrchr rindex
44 # endif
45     char *strchr (), *strrchr ();
46 # if !HAVE_MEMCPY
47 #  define memcpy(d, s, n) bcopy ((s), (d), (n))
48 #  define memmove(d, s, n) bcopy ((s), (d), (n))
49 # endif
50 #endif
51 }
52 
53 #include "cache.hpp"
54 
55 using namespace std;
56 
57 namespace libdar
58 {
59 
cache(generic_file & hidden,bool shift_mode,U_I x_size)60     cache::cache(generic_file & hidden,
61 		 bool shift_mode,
62 		 U_I x_size) : generic_file(hidden.get_mode())
63 				   // except if hidden is read-only we provide read-write facility in
64 				   // the cache, for what is out of the cache we check
65 				   // the underlying object mode
66     {
67 	    // sanity checks
68 	if(x_size < 10)
69 	    throw Erange("cache::cache", gettext("wrong value given as initial_size argument while initializing cache"));
70 
71 	ref = & hidden;
72 	buffer = nullptr;
73 	alloc_buffer(x_size);
74 	next = 0;
75 	last = 0;
76 	first_to_write = size;
77 	buffer_offset = ref->get_position();
78 	shifted_mode = shift_mode;
79     }
80 
    cache::~cache()
    {
	try
	{
		// best effort: push any pending cached write data to the lower layer
	    flush_write();
	}
	catch(...)
	{
		// ignore all exceptions
		// (a destructor must not propagate; pending data may be lost)
	}
	release_buffer();
    }
93 
skippable(skippability direction,const infinint & amount)94     bool cache::skippable(skippability direction, const infinint & amount)
95     {
96 	infinint in_cache = available_in_cache(direction);
97 
98 	    // either available data is enough to assure skippability or we
99 	    // calculate the direction and amount to ask to the lower layer (ref)
100 
101 	if(in_cache >= amount)
102 	    return true;
103 	else
104 	{
105 	    switch(direction)
106 	    {
107 	    case skip_forward:
108 		if(ref->get_position() <= buffer_offset)
109 		    return ref->skippable(direction, buffer_offset - ref->get_position() + next + amount);
110 		else
111 		{
112 		    infinint backw = ref->get_position() - buffer_offset;
113 		    infinint forw = amount + next;
114 		    if(backw >= forw)
115 			return ref->skippable(skip_backward, backw - forw);
116 		    else
117 			return ref->skippable(skip_forward, forw - backw);
118 		}
119 	    case skip_backward:
120 		if(ref->get_position() >= buffer_offset)
121 		{
122 		    infinint backw = ref->get_position() - buffer_offset  + amount;
123 		    infinint forw = next;
124 		    if(backw >= forw)
125 			return ref->skippable(skip_backward, backw - forw);
126 		    else
127 			return ref->skippable(skip_forward, forw - backw);
128 		}
129 		else
130 		{
131 		    infinint backw = amount;
132 		    infinint forw = buffer_offset - ref->get_position()  + next;
133 		    if(backw >= forw)
134 			return ref->skippable(skip_backward, backw - forw);
135 		    else
136 			return ref->skippable(skip_forward, forw - backw);
137 		}
138 	    default:
139 		throw SRC_BUG;
140 	    }
141 	}
142     }
143 
skip(const infinint & pos)144     bool cache::skip(const infinint & pos)
145     {
146 	if(is_terminated())
147 	    throw SRC_BUG;
148 
149 	if(pos >= buffer_offset && pos <= buffer_offset + last)
150 	{
151 		// skipping inside the buffer is possible
152 
153 	    infinint tmp_next = pos - buffer_offset;
154 
155 		// assigning to next its new value to reflect the skip() operation
156 
157 	    next = 0;
158 	    tmp_next.unstack(next);
159 	    if(!tmp_next.is_zero())
160 		throw SRC_BUG;
161 	    return true;
162 	}
163 	else // skipping would lead the current position to be outside the buffer
164 	{
165 	    bool ret;
166 
167 	    if(need_flush_write())
168 		flush_write();
169 	    next = last = 0;
170 	    ret = ref->skip(pos);
171 	    buffer_offset = ref->get_position();
172 
173 	    return ret;
174 	}
175     }
176 
skip_to_eof()177     bool cache::skip_to_eof()
178     {
179 	bool ret;
180 
181 	if(is_terminated())
182 	    throw SRC_BUG;
183 
184 	if(need_flush_write())
185 	    flush_write();
186 	next = last = 0;
187 	ret = ref->skip_to_eof();
188 	buffer_offset = ref->get_position();
189 
190 	return ret;
191     }
192 
    bool cache::skip_relative(S_I x)
    {
	    // skip x bytes forward (x >= 0) or backward (x < 0) relative to
	    // the current logical position (buffer_offset + next)

	skippability dir = x >= 0 ? skip_forward : skip_backward;
	U_I in_cache = available_in_cache(dir);
	U_I abs_x = x >= 0 ? x : -x;

	if(is_terminated())
	    throw SRC_BUG;

	if(abs_x <= in_cache) // skipping within cache
	{
	    next += x; // note that x is a *signed* integer

		// sanity checks

	    if(next > last)
		throw SRC_BUG;
	    return true;
	}
	else // must replace data in cache to skip
	{
	    if(need_flush_write())
		flush_write();

		// NOTE(review): the targets below are computed from buffer_offset
		// alone, not from the current logical position buffer_offset + next.
		// When flush_write() ran just above, clear_buffer()/shift_by_half()
		// have folded (part of) "next" into buffer_offset, but when nothing
		// was pending this looks off by "next" bytes -- confirm against
		// callers/upstream before relying on out-of-cache relative skips.
	    switch(dir)
	    {
	    case skip_forward:
		return skip(buffer_offset + abs_x);
	    case skip_backward:
		if(buffer_offset < abs_x)
		    return false; // cannot skip before offset zero
		else
		    return skip(buffer_offset - abs_x);
	    default:
		throw SRC_BUG;
	    }
	}
    }
231 
    U_I cache::inherited_read(char *a, U_I x_size)
    {
	    // read up to x_size bytes into "a", serving data from the cache
	    // and refilling it from the lower layer (ref) when exhausted;
	    // returns the number of bytes effectively read (less than x_size
	    // only when end of file has been met)

	U_I ret = 0;     // bytes delivered to "a" so far
	bool eof = false;

	do
	{
	    if(next >= last) // no more data to read from cache
	    {
		if(need_flush_write())
		    flush_write();
		if(x_size - ret < size)
		{
			// the remaining amount fits in the cache: refill it
			// and serve the caller from the cached data
		    fulfill_read(); // may fail if underlying is write_only (exception thrown)
		    if(next >= last) // could not read anymore data
			eof = true;
		}
		else // reading the remaining directly from lower layer
		{
		    ret += ref->read(a + ret, x_size - ret); // may fail if underlying is write_only
		    if(ret < x_size)
			eof = true;
		    clear_buffer();   // force clearing whatever is shifted_mode
		    buffer_offset = ref->get_position();
		}
	    }

	    if(!eof && ret < x_size)
	    {
		U_I needed = x_size - ret;
		U_I avail = last - next;
		U_I min = avail > needed ? needed : avail;

		if(min > 0)
		{
			// copy from the cache to the caller's buffer and
			// advance both cursors accordingly
		    (void)memcpy(a+ret, buffer + next, min);
		    ret += min;
		    next += min;
		}
		else
		    throw SRC_BUG; // cache cannot be empty at this point
	    }
	}
	while(ret < x_size && !eof);

	return ret;
    }
279 
280 
    void cache::inherited_write(const char *a, U_I x_size)
    {
	    // write x_size bytes from "a", accumulating them in the cache and
	    // flushing to the lower layer (ref) when the cache gets full; a
	    // request larger than the remaining room (with nothing pending)
	    // bypasses the cache and goes directly to the lower layer

	U_I wrote = 0;          // bytes of "a" consumed so far
	U_I avail, remaining;

	while(wrote < x_size)
	{
	    avail = size - next; // free room left in the cache
	    if(avail == 0) // we need to flush the cache
	    {
		try
		{
		    if(need_flush_write())
			flush_write();    // may fail if underlying is read_only
		    avail = size - next;
		}
		catch(...)
		{
			// ignoring the bytes written so far from
			// the given argument to inherited_write in
			// order to stay coherent with the view of the caller
		    if(next < wrote)
			throw SRC_BUG;
		    next -= wrote;
		    throw;
		}
	    }

	    remaining = x_size - wrote;
	    if(avail < remaining && !need_flush_write())
	    {
		    // less data in cache than to be wrote and no write pending data  in cache
		    // we write directly to the lower layer

		buffer_offset += next; // buffer_offset becomes the current logical position
		next = last = 0;
		try
		{
		    ref->skip(buffer_offset);
		    ref->write(a + wrote, remaining); // may fail if underlying is read_only or user interruption
		}
		catch(...)
		{
		    infinint wrote_i = wrote;
			// ignoring the bytes written so far from
			// the given argument to inherited_write in
			// order to stay coherent with the view of the caller
		    if(buffer_offset < wrote_i)
			throw SRC_BUG;
		    buffer_offset -= wrote_i;
		    ref->skip(buffer_offset); // reposition the lower layer accordingly
		    throw;
		}
		wrote = x_size;
		buffer_offset += remaining;
	    }
	    else // filling cache with data
	    {
		U_I min = remaining < avail ? remaining : avail;
		if(first_to_write >= last)
		    first_to_write = next; // record where pending (unflushed) data begin
		(void)memcpy(buffer + next, a + wrote, min);
		wrote += min;
		next += min;
		if(last < next)
		    last = next; // extend the span of valid data in the cache
	    }
	}
    }
350 
alloc_buffer(size_t x_size)351     void cache::alloc_buffer(size_t x_size)
352     {
353 	if(buffer != nullptr)
354 	    throw SRC_BUG;
355 
356 	if(get_pool() != nullptr)
357 	    buffer = (char *)get_pool()->alloc(x_size);
358 	else
359 	    buffer = new (nothrow) char[x_size];
360 
361 	if(buffer == nullptr)
362 	    throw Ememory("cache::alloc_buffer");
363 	size = x_size;
364     }
365 
release_buffer()366     void cache::release_buffer()
367     {
368 	if(buffer == nullptr)
369 	    throw SRC_BUG;
370 
371 	if(get_pool() != nullptr)
372 	    get_pool()->release(buffer);
373 	else
374 	    delete [] buffer;
375 	buffer = nullptr;
376 	size = 0;
377     }
378 
    void cache::shift_by_half()
    {
	    // drop the first half of the cached data and move the second half
	    // to the beginning of the buffer: this makes room for new data
	    // while keeping recent data available for backward skips

	U_I half = last / 2;  // amount by which the data is shifted down
	U_I reste = last % 2; // remainder, so the whole upper part is kept

	if(next < half)
	    return; // current position would be out of the buffer so we don't shift
	if(first_to_write < half)
	    throw SRC_BUG; // pending unwritten data would be lost by the shift
	if(last > 1)
	{
		// move the upper part (half + reste bytes) down to the buffer start
	    (void)memmove(buffer, buffer + half, half + reste);
	    if(need_flush_write())
		first_to_write -= half; // keep pointing at the first pending byte
	    else
		first_to_write = size;  // "size" is the "no pending write" sentinel
	    next -= half;
	    last -= half;
	}
	buffer_offset += half; // the buffer now maps a window starting "half" bytes further
    }
400 
clear_buffer()401     void cache::clear_buffer()
402     {
403 	if(need_flush_write())
404 	    throw SRC_BUG;
405 	buffer_offset += next;
406 	next = last = 0;
407     }
408 
    void cache::flush_write()
    {
	    // write pending cached bytes (range [first_to_write, last)) to the
	    // lower layer, then recycle the cache (shift or clear depending on
	    // shifted_mode)

	if(get_mode() == gf_read_only)
	    return; // nothing to flush

	    // flushing the cache

	if(need_flush_write()) // we have something to flush
	{
	    ref->skip(buffer_offset + first_to_write); // position lower layer at the first pending byte
	    ref->write(buffer + first_to_write, last - first_to_write);
	}
	first_to_write = size; // out-of-range index used as "no pending write" sentinel

	if(shifted_mode)
	    shift_by_half(); // keep recent data around for possible backward skips
	else
	    clear_buffer();
    }
428 
    void cache::fulfill_read()
    {
	    // fill the available room of the cache with data read from the
	    // lower layer, appending after whatever cached data is kept

	U_I lu; // bytes obtained from the lower layer

	if(get_mode() == gf_write_only)
	    return; // nothing to fill

	    // flushing / shifting the cache contents to make room to receive new data

	if(shifted_mode)
	    shift_by_half();
	else
	    clear_buffer();

	    ///////
	    // some data may remain in the cache, we need to preserve them !!!
	    // this occurres when a shift by half of the buffer has been done just before
	    ///////

	ref->skip(buffer_offset + last); // position the lower layer just after the preserved data
	lu = ref->read(buffer + last, size - last); // may fail if underlying is write_only
	last += lu;
    }
452 
available_in_cache(skippability direction) const453     U_I cache::available_in_cache(skippability direction) const
454     {
455 	U_I ret;
456 
457 	switch(direction)
458 	{
459 	case skip_forward:
460 	    ret = last - next;
461 	    break;
462 	case skip_backward:
463 	    ret = next;
464 	    break;
465 	default:
466 	    throw SRC_BUG;
467 	}
468 
469 	return ret;
470     }
471 
472 } // end of namespace
473