// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the unit tests for the open-source version of Snappy.

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_WINDOWS_H
#include <windows.h>
#endif

#include "snappy-test.h"

#include <algorithm>

DEFINE_bool(run_microbenchmarks, true,
            "Run microbenchmarks before doing anything else.");

namespace snappy {

string ReadTestDataFile(const string& base, size_t size_limit) {
  string contents;
  const char* srcdir = getenv("srcdir");  // This is set by Automake.
  string prefix;
  if (srcdir) {
    prefix = string(srcdir) + "/";
  }
  file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
      ).CheckSuccess();
  if (size_limit > 0) {
    contents = contents.substr(0, size_limit);
  }
  return contents;
}

string ReadTestDataFile(const string& base) {
  return ReadTestDataFile(base, 0);
}

string StringPrintf(const char* format, ...) {
  char buf[4096];
  va_list ap;
  va_start(ap, format);
  vsnprintf(buf, sizeof(buf), format, ap);
  va_end(ap);
  return buf;
}

bool benchmark_running = false;
int64 benchmark_real_time_us = 0;
int64 benchmark_cpu_time_us = 0;
string *benchmark_label = NULL;
int64 benchmark_bytes_processed = 0;

void ResetBenchmarkTiming() {
  benchmark_real_time_us = 0;
  benchmark_cpu_time_us = 0;
}

#ifdef WIN32
LARGE_INTEGER benchmark_start_real;
FILETIME benchmark_start_cpu;
#else  // WIN32
struct timeval benchmark_start_real;
struct rusage benchmark_start_cpu;
#endif  // WIN32

void StartBenchmarkTiming() {
#ifdef WIN32
  QueryPerformanceCounter(&benchmark_start_real);
  FILETIME dummy;
  CHECK(GetProcessTimes(
      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
#else
  gettimeofday(&benchmark_start_real, NULL);
  if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
#endif
  benchmark_running = true;
}

void StopBenchmarkTiming() {
  if (!benchmark_running) {
    return;
  }

#ifdef WIN32
  LARGE_INTEGER benchmark_stop_real;
  LARGE_INTEGER benchmark_frequency;
  QueryPerformanceCounter(&benchmark_stop_real);
  QueryPerformanceFrequency(&benchmark_frequency);

  double elapsed_real = static_cast<double>(
      benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
      benchmark_frequency.QuadPart;
  benchmark_real_time_us += elapsed_real * 1e6 + 0.5;

  FILETIME benchmark_stop_cpu, dummy;
  CHECK(GetProcessTimes(
      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));

  ULARGE_INTEGER start_ulargeint;
  start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
  start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;

  ULARGE_INTEGER stop_ulargeint;
  stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
  stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;

  // GetProcessTimes() reports in 100-ns units; round to microseconds.
  benchmark_cpu_time_us +=
      (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
#else  // WIN32
  struct timeval benchmark_stop_real;
  gettimeofday(&benchmark_stop_real, NULL);
  benchmark_real_time_us +=
      1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
  benchmark_real_time_us +=
      (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);

  struct rusage benchmark_stop_cpu;
  if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
  benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
                                      benchmark_start_cpu.ru_utime.tv_sec);
  benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
                            benchmark_start_cpu.ru_utime.tv_usec);
#endif  // WIN32

  benchmark_running = false;
}

void SetBenchmarkLabel(const string& str) {
  if (benchmark_label) {
    delete benchmark_label;
  }
  benchmark_label = new string(str);
}

void SetBenchmarkBytesProcessed(int64 bytes) {
  benchmark_bytes_processed = bytes;
}
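
// Example of the protocol a benchmark body follows with the functions
// above (an illustrative sketch, not code from this file; BM_Example
// and the "html" test file name are assumptions):
//
//   static void BM_Example(int iters, int arg) {
//     string contents = ReadTestDataFile("html");
//     SetBenchmarkBytesProcessed(static_cast<int64>(iters) * contents.size());
//     StartBenchmarkTiming();
//     while (iters-- > 0) {
//       // ... the operation being measured ...
//     }
//     StopBenchmarkTiming();
//     SetBenchmarkLabel("example");
//   }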

struct BenchmarkRun {
  int64 real_time_us;
  int64 cpu_time_us;
};

struct BenchmarkCompareCPUTime {
  bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
    return a.cpu_time_us < b.cpu_time_us;
  }
};

void Benchmark::Run() {
  for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
    // Run a few iterations first to find out approximately how fast
    // the benchmark is.
    const int kCalibrateIterations = 100;
    ResetBenchmarkTiming();
    StartBenchmarkTiming();
    (*function_)(kCalibrateIterations, test_case_num);
    StopBenchmarkTiming();

    // Let each test case run for about 200ms, but at least as many
    // iterations as we used to calibrate.
    // Run five times and pick the median.
    const int kNumRuns = 5;
    const int kMedianPos = kNumRuns / 2;
    int num_iterations = 0;
    if (benchmark_real_time_us > 0) {
      num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
    }
    num_iterations = max(num_iterations, kCalibrateIterations);
    BenchmarkRun benchmark_runs[kNumRuns];

    for (int run = 0; run < kNumRuns; ++run) {
      ResetBenchmarkTiming();
      StartBenchmarkTiming();
      (*function_)(num_iterations, test_case_num);
      StopBenchmarkTiming();

      benchmark_runs[run].real_time_us = benchmark_real_time_us;
      benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
    }

    string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
    string human_readable_speed;

    nth_element(benchmark_runs,
                benchmark_runs + kMedianPos,
                benchmark_runs + kNumRuns,
                BenchmarkCompareCPUTime());
    int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
    int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
    if (cpu_time_us <= 0) {
      human_readable_speed = "?";
    } else {
      int64 bytes_per_second =
          benchmark_bytes_processed * 1000000 / cpu_time_us;
      if (bytes_per_second < 1024) {
        human_readable_speed =
            StringPrintf("%dB/s", static_cast<int>(bytes_per_second));
      } else if (bytes_per_second < 1024 * 1024) {
        human_readable_speed = StringPrintf(
            "%.1fkB/s", bytes_per_second / 1024.0f);
      } else if (bytes_per_second < 1024 * 1024 * 1024) {
        human_readable_speed = StringPrintf(
            "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
      } else {
        human_readable_speed = StringPrintf(
            "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
      }
    }

    fprintf(stderr,
#ifdef WIN32
            "%-18s %10I64d %10I64d %10d %s  %s\n",
#else
            "%-18s %10lld %10lld %10d %s  %s\n",
#endif
            heading.c_str(),
            static_cast<long long>(real_time_us * 1000 / num_iterations),
            static_cast<long long>(cpu_time_us * 1000 / num_iterations),
            num_iterations,
            human_readable_speed.c_str(),
            benchmark_label ? benchmark_label->c_str() : "");
  }
}
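
// Benchmarks built on this harness are typically registered and
// parameterized via the BENCHMARK macro and DenseRange() helper declared
// in snappy-test.h (an illustrative sketch under that assumption;
// BM_Example is hypothetical):
//
//   BENCHMARK(BM_Example)->DenseRange(0, 3);
//
// Run() then executes BM_Example once per test case number in [0, 3],
// provided the test driver honors --run_microbenchmarks.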

#ifdef HAVE_LIBZ

ZLib::ZLib()
    : comp_init_(false),
      uncomp_init_(false) {
  Reinit();
}

ZLib::~ZLib() {
  if (comp_init_)   { deflateEnd(&comp_stream_); }
  if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
}

void ZLib::Reinit() {
  compression_level_ = Z_DEFAULT_COMPRESSION;
  window_bits_ = MAX_WBITS;
  mem_level_ = 8;  // DEF_MEM_LEVEL
  if (comp_init_) {
    deflateEnd(&comp_stream_);
    comp_init_ = false;
  }
  if (uncomp_init_) {
    inflateEnd(&uncomp_stream_);
    uncomp_init_ = false;
  }
  first_chunk_ = true;
}

void ZLib::Reset() {
  first_chunk_ = true;
}

// --------- COMPRESS MODE

// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before returning
// the error.
void ZLib::CompressErrorInit() {
  deflateEnd(&comp_stream_);
  comp_init_ = false;
  Reset();
}

int ZLib::DeflateInit() {
  return deflateInit2(&comp_stream_,
                      compression_level_,
                      Z_DEFLATED,
                      window_bits_,
                      mem_level_,
                      Z_DEFAULT_STRATEGY);
}

int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
                       const Bytef *source, uLong *sourceLen) {
  int err;

  comp_stream_.next_in = (Bytef*)source;
  comp_stream_.avail_in = (uInt)*sourceLen;
  // Check for source > 64K on 16-bit machine:
  if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
  comp_stream_.next_out = dest;
  comp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )   // only need to set up stream the first time through
    return Z_OK;

  if (comp_init_) {      // we've already initted it
    err = deflateReset(&comp_stream_);
    if (err != Z_OK) {
      LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
      deflateEnd(&comp_stream_);
      comp_init_ = false;
    }
  }
  if (!comp_init_) {     // first use
    comp_stream_.zalloc = (alloc_func)0;
    comp_stream_.zfree = (free_func)0;
    comp_stream_.opaque = (voidpf)0;
    err = DeflateInit();
    if (err != Z_OK) return err;
    comp_init_ = true;
  }
  return Z_OK;
}

// In a perfect world we'd always have the full buffer to compress
// when the time came, and we could just call Compress().  Alas, we
// want to do chunked compression on our webserver.  In this
// application, we compress the header, send it off, then compress the
// results, send them off, then compress the footer.  Thus we need to
// use the chunked compression features of zlib.
int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                              const Bytef *source, uLong *sourceLen,
                              int flush_mode) {   // Z_FULL_FLUSH or Z_FINISH
  int err;

  if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
    return err;

  // This is used to figure out how many bytes we wrote *this chunk*
  int compressed_size = comp_stream_.total_out;

  // Some setup happens only for the first chunk we compress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;
  }

  // flush_mode is Z_FINISH for "all" mode, Z_SYNC_FLUSH for incremental
  // compression.
  err = deflate(&comp_stream_, flush_mode);

  *sourceLen = comp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)
      && comp_stream_.avail_in == 0
      && comp_stream_.avail_out != 0 ) {
    // we processed everything ok and the output buffer was large enough.
    ;
  } else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
    return Z_BUF_ERROR;                            // should never happen
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    CompressErrorInit();
    return err;
  } else if (comp_stream_.avail_out == 0) {     // not enough space
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
  if (err == Z_STREAM_END)
    err = Z_OK;

  // update the crc and other metadata
  compressed_size = comp_stream_.total_out - compressed_size;  // delta
  *destLen = compressed_size;

  return err;
}

int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
                             const Bytef *source, uLong sourceLen,
                             int flush_mode) {   // Z_FULL_FLUSH or Z_FINISH
  const int ret =
    CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    CompressErrorInit();
  return ret;
}

// This routine only initializes the compression stream once.  Thereafter, it
// just does a deflateReset on the stream, which should be faster.
int ZLib::Compress(Bytef *dest, uLongf *destLen,
                   const Bytef *source, uLong sourceLen) {
  int err;
  if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
                               Z_FINISH)) != Z_OK )
    return err;
  Reset();         // reset for next call to Compress

  return Z_OK;
}
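
// Illustrative one-shot usage (a sketch, not code from this file; the
// output-buffer bound below is a guess in the spirit of zlib's
// compressBound(), not a guarantee of this wrapper):
//
//   ZLib zlib;
//   uLongf compressed_len = source_len + source_len / 1000 + 64;
//   std::vector<Bytef> compressed(compressed_len);
//   int err = zlib.Compress(&compressed[0], &compressed_len,
//                           source, source_len);
//   // On Z_OK, compressed_len holds the number of bytes actually written.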

// --------- UNCOMPRESS MODE

int ZLib::InflateInit() {
  return inflateInit2(&uncomp_stream_, MAX_WBITS);
}

// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void ZLib::UncompressErrorInit() {
  inflateEnd(&uncomp_stream_);
  uncomp_init_ = false;
  Reset();
}

int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
                         const Bytef *source, uLong *sourceLen) {
  int err;

  uncomp_stream_.next_in = (Bytef*)source;
  uncomp_stream_.avail_in = (uInt)*sourceLen;
  // Check for source > 64K on 16-bit machine:
  if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;

  uncomp_stream_.next_out = dest;
  uncomp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )   // only need to set up stream the first time through
    return Z_OK;

  if (uncomp_init_) {    // we've already initted it
    err = inflateReset(&uncomp_stream_);
    if (err != Z_OK) {
      LOG(WARNING)
        << "ERROR: Can't reset uncompress object; creating a new one";
      UncompressErrorInit();
    }
  }
  if (!uncomp_init_) {
    uncomp_stream_.zalloc = (alloc_func)0;
    uncomp_stream_.zfree = (free_func)0;
    uncomp_stream_.opaque = (voidpf)0;
    err = InflateInit();
    if (err != Z_OK) return err;
    uncomp_init_ = true;
  }
  return Z_OK;
}

// If you compressed your data a chunk at a time, with CompressChunk,
// you can uncompress it a chunk at a time with UncompressChunk.
// The only difference between chunked and unchunked uncompression
// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                                const Bytef *source, uLong *sourceLen,
                                int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  int err = Z_OK;

  if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
    LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
                 << *sourceLen;
    return err;
  }

  // This is used to figure out how many output bytes we wrote *this chunk*:
  const uLong old_total_out = uncomp_stream_.total_out;

  // This is used to figure out how many input bytes we read *this chunk*:
  const uLong old_total_in = uncomp_stream_.total_in;

  // Some setup happens only for the first chunk we uncompress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;                          // so we don't do this again

    // For the first chunk *only* (to avoid infinite troubles), we let
    // there be no actual data to uncompress.  This sometimes triggers
    // when the input is only the gzip header, say.
    if ( *sourceLen == 0 ) {
      *destLen = 0;
      return Z_OK;
    }
  }

  // We'll uncompress as much as we can.  If we end OK great, otherwise
  // if we get an error that seems to be the gzip footer, we store the
  // gzip footer and return OK, otherwise we return the error.

  // flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
  err = inflate(&uncomp_stream_, flush_mode);

  // Figure out how many bytes of the input zlib slurped up:
  const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
  CHECK_LE(source + bytes_read, source + *sourceLen);
  *sourceLen = uncomp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)     // everything went ok
      && uncomp_stream_.avail_in == 0) {       // and we read it all
    ;
  } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
    LOG(WARNING)
      << "UncompressChunkOrAll: Received some extra data, bytes total: "
      << uncomp_stream_.avail_in << " bytes: "
      << string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
                min(int(uncomp_stream_.avail_in), 20));
    UncompressErrorInit();
    return Z_DATA_ERROR;       // what's the extra data for?
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
                 << " avail_out: " << uncomp_stream_.avail_out;
    UncompressErrorInit();
    return err;
  } else if (uncomp_stream_.avail_out == 0) {
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
  if (err == Z_STREAM_END)
    err = Z_OK;

  *destLen = uncomp_stream_.total_out - old_total_out;  // size for this call

  return err;
}

int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
                               const Bytef *source, uLong sourceLen,
                               int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  const int ret =
    UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    UncompressErrorInit();
  return ret;
}

int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
                           const Bytef *source, uLong *sourceLen) {
  return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
}

// We make sure we've uncompressed everything, that is, the current
// uncompress stream is at a compressed-buffer-EOF boundary.  In gzip
// mode, we also check the gzip footer to make sure we pass the gzip
// consistency checks.  We RETURN true iff both types of checks pass.
bool ZLib::UncompressChunkDone() {
  assert(!first_chunk_ && uncomp_init_);
  // Make sure we're at the end-of-compressed-data point.  This means
  // if we call inflate with Z_FINISH we won't consume any input or
  // write any output.
  Bytef dummyin, dummyout;
  uLongf dummylen = 0;
  if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
       != Z_OK ) {
    return false;
  }

  // Make sure that when we exit, we can start a new round of chunks later
  Reset();

  return true;
}
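
// Illustrative chunked uncompression flow (a sketch, not code from this
// file; the chunk loop and buffer sizing are assumptions):
//
//   ZLib zlib;
//   for (/* each compressed chunk */) {
//     uLong in_len = chunk_len;
//     uLongf out_len = sizeof(out_buf);
//     int err = zlib.UncompressAtMost(out_buf, &out_len, chunk_data, &in_len);
//     if (err != Z_OK) { /* handle error */ }
//     // out_len holds bytes written; in_len holds unconsumed input bytes.
//   }
//   CHECK(zlib.UncompressChunkDone());  // stream must end cleanly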

// Uncompresses the source buffer into the destination buffer.
// The destination buffer must be long enough to hold the entire
// decompressed contents.
//
// We only initialize the uncomp_stream once.  Thereafter, we use
// inflateReset, which should be faster.
//
// Returns Z_OK on success, otherwise, it returns a zlib error code.
int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
                     const Bytef *source, uLong sourceLen) {
  int err;
  if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
                                 Z_FINISH)) != Z_OK ) {
    Reset();                           // let us try to uncompress again
    return err;
  }
  if ( !UncompressChunkDone() )        // calls Reset()
    return Z_DATA_ERROR;
  return Z_OK;  // stream_end is ok
}
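
// Illustrative round trip through the wrapper (a sketch, not code from
// this file; buffer sizes are assumptions, and the caller must know an
// upper bound on the uncompressed size):
//
//   ZLib zlib;
//   uLongf comp_len = input_len + input_len / 1000 + 64;
//   std::vector<Bytef> comp(comp_len);
//   CHECK_EQ(Z_OK, zlib.Compress(&comp[0], &comp_len, input, input_len));
//
//   uLongf uncomp_len = input_len;
//   std::vector<Bytef> uncomp(uncomp_len);
//   CHECK_EQ(Z_OK, zlib.Uncompress(&uncomp[0], &uncomp_len,
//                                  &comp[0], comp_len));
//   CHECK_EQ(uncomp_len, input_len);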

#endif  // HAVE_LIBZ

}  // namespace snappy