1 /* This file has been backported by dev-tools/lz4-rebaser.sh
2  * from upstream lz4 commit fdf2ef5809ca875c4545 (v1.9.2)
3  */
4 #ifdef HAVE_CONFIG_H
5 #include "config.h"
6 #elif defined(_MSC_VER)
7 #include "config-msvc.h"
8 #endif
9 
10 #ifdef NEED_COMPAT_LZ4
11 /*
12    LZ4 - Fast LZ compression algorithm
13    Copyright (C) 2011-present, Yann Collet.
14 
15    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
16 
17    Redistribution and use in source and binary forms, with or without
18    modification, are permitted provided that the following conditions are
19    met:
20 
21        * Redistributions of source code must retain the above copyright
22    notice, this list of conditions and the following disclaimer.
23        * Redistributions in binary form must reproduce the above
24    copyright notice, this list of conditions and the following disclaimer
25    in the documentation and/or other materials provided with the
26    distribution.
27 
28    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 
40    You can contact the author at :
41     - LZ4 homepage : http://www.lz4.org
42     - LZ4 source repository : https://github.com/lz4/lz4
43 */
44 
45 /*-************************************
46 *  Tuning parameters
47 **************************************/
48 /*
49  * LZ4_HEAPMODE :
50  * Select how default compression functions will allocate memory for their hash table,
51  * on the memory stack (0: default, fastest), or on the memory heap (1: requires malloc()).
52  */
53 #ifndef LZ4_HEAPMODE
54 #  define LZ4_HEAPMODE 0
55 #endif
56 
57 /*
58  * ACCELERATION_DEFAULT :
59  * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
60  */
61 #define ACCELERATION_DEFAULT 1
62 
63 
64 /*-************************************
65 *  CPU Feature Detection
66 **************************************/
67 /* LZ4_FORCE_MEMORY_ACCESS
68  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
69  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
70  * The switch below selects a different access method for improved performance.
71  * Method 0 (default) : use `memcpy()`. Safe and portable.
72  * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
73  *            This method is safe if your compiler supports it, and *generally* as fast as or faster than `memcpy`.
74  * Method 2 : direct access. This method uses plain C syntax, but it violates the C standard.
75  *            It can generate buggy code on targets whose assembly generation depends on alignment.
76  *            In some circumstances, however, it's the only known way to get the best performance (e.g. GCC + ARMv6).
77  * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
78  * Prefer these methods in priority order (0 > 1 > 2).
79  */
80 #ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
81 #  if defined(__GNUC__) && \
82   ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
83   || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
84 #    define LZ4_FORCE_MEMORY_ACCESS 2
85 #  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
86 #    define LZ4_FORCE_MEMORY_ACCESS 1
87 #  endif
88 #endif
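/* Override sketch (not part of the detection above, shown for illustration only):
 * the macro can also be forced from the build system, e.g.
 *
 *     cc -DLZ4_FORCE_MEMORY_ACCESS=0 -c <this file>
 *
 * which selects the portable memcpy() path regardless of target/compiler.
 */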
89 
90 /*
91  * LZ4_FORCE_SW_BITCOUNT
92  * Define this parameter if your target system or compiler does not support hardware bit count
93  */
94 #if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for WinCE doesn't support Hardware bit count */
95 #  define LZ4_FORCE_SW_BITCOUNT
96 #endif
97 
98 
99 
100 /*-************************************
101 *  Dependency
102 **************************************/
103 /*
104  * LZ4_SRC_INCLUDED:
105  * Amalgamation flag, whether lz4.c is included
106  */
107 #ifndef LZ4_SRC_INCLUDED
108 #  define LZ4_SRC_INCLUDED 1
109 #endif
110 
111 #ifndef LZ4_STATIC_LINKING_ONLY
112 #define LZ4_STATIC_LINKING_ONLY
113 #endif
114 
115 #ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
116 #define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
117 #endif
118 
119 #define LZ4_STATIC_LINKING_ONLY  /* LZ4_DISTANCE_MAX */
120 #include "compat-lz4.h"
121 /* see also "memory routines" below */
122 
123 
124 /*-************************************
125 *  Compiler Options
126 **************************************/
127 #ifdef _MSC_VER    /* Visual Studio */
128 #  include <intrin.h>
129 #  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
130 #  pragma warning(disable : 4293)        /* disable: C4293: too large shift (32-bits) */
131 #endif  /* _MSC_VER */
132 
133 #ifndef LZ4_FORCE_INLINE
134 #  ifdef _MSC_VER    /* Visual Studio */
135 #    define LZ4_FORCE_INLINE static __forceinline
136 #  else
137 #    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
138 #      ifdef __GNUC__
139 #        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
140 #      else
141 #        define LZ4_FORCE_INLINE static inline
142 #      endif
143 #    else
144 #      define LZ4_FORCE_INLINE static
145 #    endif /* __STDC_VERSION__ */
146 #  endif  /* _MSC_VER */
147 #endif /* LZ4_FORCE_INLINE */
148 
149 /* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
150  * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
151  * together with a simple 8-byte copy loop as a fall-back path.
152  * However, this optimization hurts the decompression speed by >30%,
153  * because the execution does not go to the optimized loop
154  * for typical compressible data, and all of the preamble checks
155  * before going to the fall-back path become useless overhead.
156  * This optimization happens only with the -O3 flag, and -O2 generates
157  * a simple 8-byte copy loop.
158  * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
159  * functions are annotated with __attribute__((optimize("O2"))),
160  * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
161  * of LZ4_wildCopy8 does not affect the compression speed.
162  */
163 #if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
164 #  define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
165 #  define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
166 #else
167 #  define LZ4_FORCE_O2_GCC_PPC64LE
168 #  define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
169 #endif
170 
171 #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
172 #  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
173 #else
174 #  define expect(expr,value)    (expr)
175 #endif
176 
177 #ifndef likely
178 #define likely(expr)     expect((expr) != 0, 1)
179 #endif
180 #ifndef unlikely
181 #define unlikely(expr)   expect((expr) != 0, 0)
182 #endif
183 
184 
185 /*-************************************
186 *  Memory routines
187 **************************************/
188 #include <stdlib.h>   /* malloc, calloc, free */
189 #define ALLOC(s)          malloc(s)
190 #define ALLOC_AND_ZERO(s) calloc(1,s)
191 #define FREEMEM(p)        free(p)
192 #include <string.h>   /* memset, memcpy */
193 #define MEM_INIT(p,v,s)   memset((p),(v),(s))
194 
195 
196 /*-************************************
197 *  Common Constants
198 **************************************/
199 #define MINMATCH 4
200 
201 #define WILDCOPYLENGTH 8
202 #define LASTLITERALS   5   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
203 #define MFLIMIT       12   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
204 #define MATCH_SAFEGUARD_DISTANCE  ((2*WILDCOPYLENGTH) - MINMATCH)   /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
205 #define FASTLOOP_SAFE_DISTANCE 64
206 static const int LZ4_minLength = (MFLIMIT+1);
207 
208 #define KB *(1 <<10)
209 #define MB *(1 <<20)
210 #define GB *(1U<<30)
211 
212 #define LZ4_DISTANCE_ABSOLUTE_MAX 65535
213 #if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX)   /* max supported by LZ4 format */
214 #  error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
215 #endif
216 
217 #define ML_BITS  4
218 #define ML_MASK  ((1U<<ML_BITS)-1)
219 #define RUN_BITS (8-ML_BITS)
220 #define RUN_MASK ((1U<<RUN_BITS)-1)
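/* Illustration of how a sequence token byte decomposes with the constants above
 * (this mirrors the encoding performed in LZ4_compress_generic() below):
 *
 *     unsigned lit = token >> ML_BITS;   -- upper 4 bits : literal length (RUN_MASK means extra length bytes follow)
 *     unsigned ml  = token &  ML_MASK;   -- lower 4 bits : matchLength - MINMATCH (ML_MASK means extra length bytes follow)
 */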
221 
222 
223 /*-************************************
224 *  Error detection
225 **************************************/
226 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
227 #  include <assert.h>
228 #else
229 #  ifndef assert
230 #    define assert(condition) ((void)0)
231 #  endif
232 #endif
233 
234 #define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use after variable declarations */
235 
236 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
237 #  include <stdio.h>
238 static int g_debuglog_enable = 1;
239 #  define DEBUGLOG(l, ...) {                                  \
240                 if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
241                     fprintf(stderr, __FILE__ ": ");           \
242                     fprintf(stderr, __VA_ARGS__);             \
243                     fprintf(stderr, " \n");                   \
244             }   }
245 #else
246 #  define DEBUGLOG(l, ...)      {}    /* disabled */
247 #endif
248 
249 
250 /*-************************************
251 *  Types
252 **************************************/
253 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
254 # include <stdint.h>
255   typedef  uint8_t BYTE;
256   typedef uint16_t U16;
257   typedef uint32_t U32;
258   typedef  int32_t S32;
259   typedef uint64_t U64;
260   typedef uintptr_t uptrval;
261 #else
262   typedef unsigned char       BYTE;
263   typedef unsigned short      U16;
264   typedef unsigned int        U32;
265   typedef   signed int        S32;
266   typedef unsigned long long  U64;
267   typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
268 #endif
269 
270 #if defined(__x86_64__)
271   typedef U64    reg_t;   /* 64-bits in x32 mode */
272 #else
273   typedef size_t reg_t;   /* 32-bits in x32 mode */
274 #endif
275 
276 typedef enum {
277     notLimited = 0,
278     limitedOutput = 1,
279     fillOutput = 2
280 } limitedOutput_directive;
281 
282 
283 /*-************************************
284 *  Reading and writing into memory
285 **************************************/
286 static unsigned LZ4_isLittleEndian(void)
287 {
288     const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
289     return one.c[0];
290 }
291 
292 
293 #if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
294 /* lie to the compiler about data alignment; use with caution */
295 
296 static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
297 static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
298 static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
299 
300 static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
301 static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
302 
303 #elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
304 
305 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
306 /* currently only defined for gcc and icc */
307 typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
308 
309 static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
310 static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
311 static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
312 
313 static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
314 static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
315 
316 #else  /* safe and portable access using memcpy() */
317 
318 static U16 LZ4_read16(const void* memPtr)
319 {
320     U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
321 }
322 
323 static U32 LZ4_read32(const void* memPtr)
324 {
325     U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
326 }
327 
328 static reg_t LZ4_read_ARCH(const void* memPtr)
329 {
330     reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
331 }
332 
333 static void LZ4_write16(void* memPtr, U16 value)
334 {
335     memcpy(memPtr, &value, sizeof(value));
336 }
337 
338 static void LZ4_write32(void* memPtr, U32 value)
339 {
340     memcpy(memPtr, &value, sizeof(value));
341 }
342 
343 #endif /* LZ4_FORCE_MEMORY_ACCESS */
344 
345 
346 static U16 LZ4_readLE16(const void* memPtr)
347 {
348     if (LZ4_isLittleEndian()) {
349         return LZ4_read16(memPtr);
350     } else {
351         const BYTE* p = (const BYTE*)memPtr;
352         return (U16)((U16)p[0] + (p[1]<<8));
353     }
354 }
355 
356 static void LZ4_writeLE16(void* memPtr, U16 value)
357 {
358     if (LZ4_isLittleEndian()) {
359         LZ4_write16(memPtr, value);
360     } else {
361         BYTE* p = (BYTE*)memPtr;
362         p[0] = (BYTE) value;
363         p[1] = (BYTE)(value>>8);
364     }
365 }
366 
367 /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
368 LZ4_FORCE_O2_INLINE_GCC_PPC64LE
369 void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
370 {
371     BYTE* d = (BYTE*)dstPtr;
372     const BYTE* s = (const BYTE*)srcPtr;
373     BYTE* const e = (BYTE*)dstEnd;
374 
375     do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
376 }
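/* Note (illustration only): because the copy above proceeds in 8-byte chunks,
 * callers must have at least WILDCOPYLENGTH writable bytes beyond dstEnd; the
 * MFLIMIT / LASTLITERALS parsing restrictions above help guarantee that margin
 * near the end of a block. */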
377 
378 static const unsigned inc32table[8] = {0, 1, 2,  1,  0,  4, 4, 4};
379 static const int      dec64table[8] = {0, 0, 0, -1, -4,  1, 2, 3};
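/* These two tables are used by LZ4_memcpy_using_offset_base() below: for overlapping
 * copies with offset < 8 they nudge the source pointer after the first 4+4 bytes so
 * that the following 8-byte wildcopy keeps reproducing the correct repeating pattern. */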
380 
381 
382 #ifndef LZ4_FAST_DEC_LOOP
383 #  if defined(__i386__) || defined(__x86_64__)
384 #    define LZ4_FAST_DEC_LOOP 1
385 #  elif defined(__aarch64__) && !defined(__clang__)
386      /* On aarch64, we disable this optimization for clang because on certain
387       * mobile chipsets and clang, it reduces performance. For more information
388       * refer to https://github.com/lz4/lz4/pull/707. */
389 #    define LZ4_FAST_DEC_LOOP 1
390 #  else
391 #    define LZ4_FAST_DEC_LOOP 0
392 #  endif
393 #endif
394 
395 #if LZ4_FAST_DEC_LOOP
396 
397 LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
398 LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
399 {
400     if (offset < 8) {
401         dstPtr[0] = srcPtr[0];
402         dstPtr[1] = srcPtr[1];
403         dstPtr[2] = srcPtr[2];
404         dstPtr[3] = srcPtr[3];
405         srcPtr += inc32table[offset];
406         memcpy(dstPtr+4, srcPtr, 4);
407         srcPtr -= dec64table[offset];
408         dstPtr += 8;
409     } else {
410         memcpy(dstPtr, srcPtr, 8);
411         dstPtr += 8;
412         srcPtr += 8;
413     }
414 
415     LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
416 }
417 
418 /* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
419  * this version copies two times 16 bytes (instead of one time 32 bytes)
420  * because it must be compatible with offsets >= 16. */
421 LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
422 LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
423 {
424     BYTE* d = (BYTE*)dstPtr;
425     const BYTE* s = (const BYTE*)srcPtr;
426     BYTE* const e = (BYTE*)dstEnd;
427 
428     do { memcpy(d,s,16); memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
429 }
430 
431 /* LZ4_memcpy_using_offset()  presumes :
432  * - dstEnd >= dstPtr + MINMATCH
433  * - there are at least 8 bytes available to write after dstEnd */
434 LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
435 LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
436 {
437     BYTE v[8];
438 
439     assert(dstEnd >= dstPtr + MINMATCH);
440     LZ4_write32(dstPtr, 0);   /* silence an msan warning when offset==0 */
441 
442     switch(offset) {
443     case 1:
444         memset(v, *srcPtr, 8);
445         break;
446     case 2:
447         memcpy(v, srcPtr, 2);
448         memcpy(&v[2], srcPtr, 2);
449         memcpy(&v[4], &v[0], 4);
450         break;
451     case 4:
452         memcpy(v, srcPtr, 4);
453         memcpy(&v[4], srcPtr, 4);
454         break;
455     default:
456         LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
457         return;
458     }
459 
460     memcpy(dstPtr, v, 8);
461     dstPtr += 8;
462     while (dstPtr < dstEnd) {
463         memcpy(dstPtr, v, 8);
464         dstPtr += 8;
465     }
466 }
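/* Worked example (illustration only) : with offset==2 and the two bytes "ab" at srcPtr,
 * the switch above expands v to "abababab", and that 8-byte pattern is then stamped out
 * repeatedly until dstPtr reaches dstEnd, which is equivalent to, but much faster than,
 * a byte-by-byte overlapping copy. */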
467 #endif
468 
469 
470 /*-************************************
471 *  Common functions
472 **************************************/
473 static unsigned LZ4_NbCommonBytes (reg_t val)
474 {
475     if (LZ4_isLittleEndian()) {
476         if (sizeof(val)==8) {
477 #       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
478             unsigned long r = 0;
479             _BitScanForward64( &r, (U64)val );
480             return (int)(r>>3);
481 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
482             return (unsigned)__builtin_ctzll((U64)val) >> 3;
483 #       else
484             static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
485                                                      0, 3, 1, 3, 1, 4, 2, 7,
486                                                      0, 2, 3, 6, 1, 5, 3, 5,
487                                                      1, 3, 4, 4, 2, 5, 6, 7,
488                                                      7, 0, 1, 2, 3, 3, 4, 6,
489                                                      2, 6, 5, 5, 3, 4, 5, 6,
490                                                      7, 1, 2, 4, 6, 4, 4, 5,
491                                                      7, 2, 6, 5, 7, 6, 7, 7 };
492             return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
493 #       endif
494         } else /* 32 bits */ {
495 #       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
496             unsigned long r;
497             _BitScanForward( &r, (U32)val );
498             return (int)(r>>3);
499 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
500             return (unsigned)__builtin_ctz((U32)val) >> 3;
501 #       else
502             static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
503                                                      3, 2, 2, 1, 3, 2, 0, 1,
504                                                      3, 3, 1, 2, 2, 2, 2, 0,
505                                                      3, 1, 2, 0, 1, 0, 1, 1 };
506             return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
507 #       endif
508         }
509     } else   /* Big Endian CPU */ {
510         if (sizeof(val)==8) {   /* 64-bits */
511 #       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
512             unsigned long r = 0;
513             _BitScanReverse64( &r, val );
514             return (unsigned)(r>>3);
515 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
516             return (unsigned)__builtin_clzll((U64)val) >> 3;
517 #       else
518             static const U32 by32 = sizeof(val)*4;  /* 32 on 64 bits (goal), 16 on 32 bits.
519                 Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
520                 Note that this code path is never triggered in 32-bits mode. */
521             unsigned r;
522             if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
523             if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
524             r += (!val);
525             return r;
526 #       endif
527         } else /* 32 bits */ {
528 #       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
529             unsigned long r = 0;
530             _BitScanReverse( &r, (unsigned long)val );
531             return (unsigned)(r>>3);
532 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
533             return (unsigned)__builtin_clz((U32)val) >> 3;
534 #       else
535             unsigned r;
536             if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
537             r += (!val);
538             return r;
539 #       endif
540         }
541     }
542 }
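/* Worked example (illustration only) : on a little-endian 64-bit target, if two 8-byte
 * words first differ at byte index 3, then `val` (their XOR) has its lowest set bit
 * inside byte 3, so the trailing-zero count falls in [24,31] and ctz >> 3 == 3 common bytes. */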
543 
544 #define STEPSIZE sizeof(reg_t)
545 LZ4_FORCE_INLINE
546 unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
547 {
548     const BYTE* const pStart = pIn;
549 
550     if (likely(pIn < pInLimit-(STEPSIZE-1))) {
551         reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
552         if (!diff) {
553             pIn+=STEPSIZE; pMatch+=STEPSIZE;
554         } else {
555             return LZ4_NbCommonBytes(diff);
556     }   }
557 
558     while (likely(pIn < pInLimit-(STEPSIZE-1))) {
559         reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
560         if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
561         pIn += LZ4_NbCommonBytes(diff);
562         return (unsigned)(pIn - pStart);
563     }
564 
565     if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
566     if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
567     if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
568     return (unsigned)(pIn - pStart);
569 }
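/* Typical use in the encoder below (sketch) : the first MINMATCH bytes are already known
 * to be equal, so the full match length is
 *
 *     matchLength = MINMATCH + LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit);
 */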
570 
571 
572 #ifndef LZ4_COMMONDEFS_ONLY
573 /*-************************************
574 *  Local Constants
575 **************************************/
576 static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
577 static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */
578 
579 
580 /*-************************************
581 *  Local Structures and types
582 **************************************/
583 typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
584 
585 /**
586  * This enum distinguishes several different modes of accessing previous
587  * content in the stream.
588  *
589  * - noDict        : There is no preceding content.
590  * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
591  *                   being compressed are valid and refer to the preceding
592  *                   content (of length ctx->dictSize), which is available
593  *                   contiguously preceding in memory the content currently
594  *                   being compressed.
595  * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
596  *                   else in memory, starting at ctx->dictionary with length
597  *                   ctx->dictSize.
598  * - usingDictCtx  : Like usingExtDict, but everything concerning the preceding
599  *                   content is in a separate context, pointed to by
600  *                   ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
601  *                   entries in the current context that refer to positions
602  *                   preceding the beginning of the current compression are
603  *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
604  *                   ->dictSize describe the location and size of the preceding
605  *                   content, and matches are found by looking in the ctx
606  *                   ->dictCtx->hashTable.
607  */
608 typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
609 typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
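/* Minimal streaming sketch of how these modes are reached through the public API
 * (assumes the standard lz4.h streaming entry points re-exported by compat-lz4.h;
 * illustration only, not part of upstream lz4.c) :
 *
 *     LZ4_stream_t strm;
 *     LZ4_initStream(&strm, sizeof(strm));
 *     LZ4_loadDict(&strm, dictBuf, dictLen);   -- later blocks may match into dictBuf (extDict-style)
 *     int n = LZ4_compress_fast_continue(&strm, src, dst, srcLen, dstCap, 1);
 *
 * Subsequent LZ4_compress_fast_continue() calls can also match into the previously
 * compressed input when it still precedes src contiguously in memory (the
 * withPrefix64k case described above).
 */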
610 
611 
612 /*-************************************
613 *  Local Utils
614 **************************************/
615 int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
616 const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
617 int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
618 int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
619 
620 
621 /*-************************************
622 *  Internal Definitions used in Tests
623 **************************************/
624 #if defined (__cplusplus)
625 extern "C" {
626 #endif
627 
628 int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
629 
630 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
631                                      int compressedSize, int maxOutputSize,
632                                      const void* dictStart, size_t dictSize);
633 
634 #if defined (__cplusplus)
635 }
636 #endif
637 
638 /*-******************************
639 *  Compression functions
640 ********************************/
641 static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
642 {
643     if (tableType == byU16)
644         return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
645     else
646         return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
647 }
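/* 2654435761U is the classic Knuth multiplicative-hash constant (a prime close to
 * 2^32 divided by the golden ratio); the product's top LZ4_HASHLOG (or LZ4_HASHLOG+1)
 * bits become the hash table index. */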
648 
649 static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
650 {
651     const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
652     if (LZ4_isLittleEndian()) {
653         const U64 prime5bytes = 889523592379ULL;
654         return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
655     } else {
656         const U64 prime8bytes = 11400714785074694791ULL;
657         return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
658     }
659 }
660 
661 LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
662 {
663     if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
664     return LZ4_hash4(LZ4_read32(p), tableType);
665 }
666 
667 static void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
668 {
669     switch (tableType)
670     {
671     default: /* fallthrough */
672     case clearedTable: { /* illegal! */ assert(0); return; }
673     case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
674     case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
675     case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
676     }
677 }
678 
679 static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
680 {
681     switch (tableType)
682     {
683     default: /* fallthrough */
684     case clearedTable: /* fallthrough */
685     case byPtr: { /* illegal! */ assert(0); return; }
686     case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
687     case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
688     }
689 }
690 
691 static void LZ4_putPositionOnHash(const BYTE* p, U32 h,
692                                   void* tableBase, tableType_t const tableType,
693                             const BYTE* srcBase)
694 {
695     switch (tableType)
696     {
697     case clearedTable: { /* illegal! */ assert(0); return; }
698     case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
699     case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
700     case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
701     }
702 }
703 
704 LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
705 {
706     U32 const h = LZ4_hashPosition(p, tableType);
707     LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
708 }
709 
710 /* LZ4_getIndexOnHash() :
711  * Index of match position registered in hash table.
712  * hash position must be calculated by using base+index, or dictBase+index.
713  * Assumption 1 : only valid if tableType == byU32 or byU16.
714  * Assumption 2 : h is presumed valid (within limits of hash table)
715  */
716 static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
717 {
718     LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
719     if (tableType == byU32) {
720         const U32* const hashTable = (const U32*) tableBase;
721         assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
722         return hashTable[h];
723     }
724     if (tableType == byU16) {
725         const U16* const hashTable = (const U16*) tableBase;
726         assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
727         return hashTable[h];
728     }
729     assert(0); return 0;  /* forbidden case */
730 }
731 
732 static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
733 {
734     if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
735     if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
736     { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
737 }
738 
739 LZ4_FORCE_INLINE const BYTE*
740 LZ4_getPosition(const BYTE* p,
741                 const void* tableBase, tableType_t tableType,
742                 const BYTE* srcBase)
743 {
744     U32 const h = LZ4_hashPosition(p, tableType);
745     return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
746 }
747 
748 LZ4_FORCE_INLINE void
749 LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
750            const int inputSize,
751            const tableType_t tableType) {
752     /* If compression failed during the previous step, then the context
753      * is marked as dirty, therefore, it has to be fully reset.
754      */
755     if (cctx->dirty) {
756         DEBUGLOG(5, "LZ4_prepareTable: Full reset for %p", cctx);
757         MEM_INIT(cctx, 0, sizeof(LZ4_stream_t_internal));
758         return;
759     }
760 
761     /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
762      * therefore safe to use no matter what mode we're in. Otherwise, we figure
763      * out if it's safe to leave as is or whether it needs to be reset.
764      */
765     if (cctx->tableType != clearedTable) {
766         assert(inputSize >= 0);
767         if (cctx->tableType != tableType
768           || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
769           || ((tableType == byU32) && cctx->currentOffset > 1 GB)
770           || tableType == byPtr
771           || inputSize >= 4 KB)
772         {
773             DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
774             MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
775             cctx->currentOffset = 0;
776             cctx->tableType = clearedTable;
777         } else {
778             DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
779         }
780     }
781 
782     /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
783      * than compressing without a gap. However, compressing with
784      * currentOffset == 0 is faster still, so we preserve that case.
785      */
786     if (cctx->currentOffset != 0 && tableType == byU32) {
787         DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
788         cctx->currentOffset += 64 KB;
789     }
790 
791     /* Finally, clear history */
792     cctx->dictCtx = NULL;
793     cctx->dictionary = NULL;
794     cctx->dictSize = 0;
795 }
796 
797 /** LZ4_compress_generic() :
798     inlined, to ensure branches are decided at compilation time */
799 LZ4_FORCE_INLINE int LZ4_compress_generic(
800                  LZ4_stream_t_internal* const cctx,
801                  const char* const source,
802                  char* const dest,
803                  const int inputSize,
804                  int *inputConsumed, /* only written when outputDirective == fillOutput */
805                  const int maxOutputSize,
806                  const limitedOutput_directive outputDirective,
807                  const tableType_t tableType,
808                  const dict_directive dictDirective,
809                  const dictIssue_directive dictIssue,
810                  const int acceleration)
811 {
812     int result;
813     const BYTE* ip = (const BYTE*) source;
814 
815     U32 const startIndex = cctx->currentOffset;
816     const BYTE* base = (const BYTE*) source - startIndex;
817     const BYTE* lowLimit;
818 
819     const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
820     const BYTE* const dictionary =
821         dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
822     const U32 dictSize =
823         dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
824     const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */
825 
826     int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
827     U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
828     const BYTE* const dictEnd = dictionary + dictSize;
829     const BYTE* anchor = (const BYTE*) source;
830     const BYTE* const iend = ip + inputSize;
831     const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
832     const BYTE* const matchlimit = iend - LASTLITERALS;
833 
834     /* the dictCtx currentOffset is indexed on the start of the dictionary,
835      * while a dictionary in the current context precedes the currentOffset */
836     const BYTE* dictBase = (dictDirective == usingDictCtx) ?
837                             dictionary + dictSize - dictCtx->currentOffset :
838                             dictionary + dictSize - startIndex;
839 
840     BYTE* op = (BYTE*) dest;
841     BYTE* const olimit = op + maxOutputSize;
842 
843     U32 offset = 0;
844     U32 forwardH;
845 
846     DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
847     /* If init conditions are not met, we don't have to mark the stream
848      * as having a dirty context, since no action has been taken yet */
849     if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
850     if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; }           /* Unsupported inputSize, too large (or negative) */
851     if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; }  /* Size too large (not within 64K limit) */
852     if (tableType==byPtr) assert(dictDirective==noDict);      /* only supported use case with byPtr */
853     assert(acceleration >= 1);
854 
855     lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
856 
857     /* Update context state */
858     if (dictDirective == usingDictCtx) {
859         /* Subsequent linked blocks can't use the dictionary. */
860         /* Instead, they use the block we just compressed. */
861         cctx->dictCtx = NULL;
862         cctx->dictSize = (U32)inputSize;
863     } else {
864         cctx->dictSize += (U32)inputSize;
865     }
866     cctx->currentOffset += (U32)inputSize;
867     cctx->tableType = (U16)tableType;
868 
869     if (inputSize<LZ4_minLength) goto _last_literals;        /* Input too small, no compression (all literals) */
870 
871     /* First Byte */
872     LZ4_putPosition(ip, cctx->hashTable, tableType, base);
873     ip++; forwardH = LZ4_hashPosition(ip, tableType);
874 
875     /* Main Loop */
876     for ( ; ; ) {
877         const BYTE* match;
878         BYTE* token;
879         const BYTE* filledIp;
880 
881         /* Find a match */
882         if (tableType == byPtr) {
883             const BYTE* forwardIp = ip;
884             int step = 1;
885             int searchMatchNb = acceleration << LZ4_skipTrigger;
886             do {
887                 U32 const h = forwardH;
888                 ip = forwardIp;
889                 forwardIp += step;
890                 step = (searchMatchNb++ >> LZ4_skipTrigger);
891 
892                 if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
893                 assert(ip < mflimitPlusOne);
894 
895                 match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
896                 forwardH = LZ4_hashPosition(forwardIp, tableType);
897                 LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
898 
899             } while ( (match+LZ4_DISTANCE_MAX < ip)
900                    || (LZ4_read32(match) != LZ4_read32(ip)) );
901 
902         } else {   /* byU32, byU16 */
903 
904             const BYTE* forwardIp = ip;
905             int step = 1;
906             int searchMatchNb = acceleration << LZ4_skipTrigger;
907             do {
908                 U32 const h = forwardH;
909                 U32 const current = (U32)(forwardIp - base);
910                 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
911                 assert(matchIndex <= current);
912                 assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
913                 ip = forwardIp;
914                 forwardIp += step;
915                 step = (searchMatchNb++ >> LZ4_skipTrigger);
916 
917                 if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
918                 assert(ip < mflimitPlusOne);
919 
920                 if (dictDirective == usingDictCtx) {
921                     if (matchIndex < startIndex) {
922                         /* there was no match, try the dictionary */
923                         assert(tableType == byU32);
924                         matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
925                         match = dictBase + matchIndex;
926                         matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
927                         lowLimit = dictionary;
928                     } else {
929                         match = base + matchIndex;
930                         lowLimit = (const BYTE*)source;
931                     }
932                 } else if (dictDirective==usingExtDict) {
933                     if (matchIndex < startIndex) {
934                         DEBUGLOG(7, "extDict candidate: matchIndex=%5u  <  startIndex=%5u", matchIndex, startIndex);
935                         assert(startIndex - matchIndex >= MINMATCH);
936                         match = dictBase + matchIndex;
937                         lowLimit = dictionary;
938                     } else {
939                         match = base + matchIndex;
940                         lowLimit = (const BYTE*)source;
941                     }
942                 } else {   /* single continuous memory segment */
943                     match = base + matchIndex;
944                 }
945                 forwardH = LZ4_hashPosition(forwardIp, tableType);
946                 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
947 
948                 DEBUGLOG(7, "candidate at pos=%u  (offset=%u \n", matchIndex, current - matchIndex);
949                 if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; }    /* match outside of valid area */
950                 assert(matchIndex < current);
951                 if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
952                   && (matchIndex+LZ4_DISTANCE_MAX < current)) {
953                     continue;
954                 } /* too far */
955                 assert((current - matchIndex) <= LZ4_DISTANCE_MAX);  /* match now expected within distance */
956 
957                 if (LZ4_read32(match) == LZ4_read32(ip)) {
958                     if (maybe_extMem) offset = current - matchIndex;
959                     break;   /* match found */
960                 }
961 
962             } while(1);
963         }
964 
965         /* Catch up */
966         filledIp = ip;
967         while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
968 
969         /* Encode Literals */
970         {   unsigned const litLength = (unsigned)(ip - anchor);
971             token = op++;
972             if ((outputDirective == limitedOutput) &&  /* Check output buffer overflow */
973                 (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
974                 return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
975             }
976             if ((outputDirective == fillOutput) &&
977                 (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
978                 op--;
979                 goto _last_literals;
980             }
981             if (litLength >= RUN_MASK) {
982                 int len = (int)(litLength - RUN_MASK);
983                 *token = (RUN_MASK<<ML_BITS);
984                 for(; len >= 255 ; len-=255) *op++ = 255;
985                 *op++ = (BYTE)len;
986             }
987             else *token = (BYTE)(litLength<<ML_BITS);
988 
989             /* Copy Literals */
990             LZ4_wildCopy8(op, anchor, op+litLength);
991             op+=litLength;
992             DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
993                         (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
994         }
995 
996 _next_match:
997         /* at this stage, the following variables must be correctly set :
998          * - ip : at start of LZ operation
999          * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
1000          * - offset : if maybe_extMem==1 (constant)
1001          * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
1002          * - token and *token : position to write the 4-bit match length; the upper 4 bits (literal length) are assumed already written
1003          */
1004 
1005         if ((outputDirective == fillOutput) &&
1006             (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
1007             /* the match was too close to the end, rewind and go to last literals */
1008             op = token;
1009             goto _last_literals;
1010         }
1011 
1012         /* Encode Offset */
1013         if (maybe_extMem) {   /* static test */
1014             DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
1015             assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
1016             LZ4_writeLE16(op, (U16)offset); op+=2;
1017         } else  {
1018             DEBUGLOG(6, "             with offset=%u  (same segment)", (U32)(ip - match));
1019             assert(ip-match <= LZ4_DISTANCE_MAX);
1020             LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
1021         }
1022 
1023         /* Encode MatchLength */
1024         {   unsigned matchCode;
1025 
1026             if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
1027               && (lowLimit==dictionary) /* match within extDict */ ) {
1028                 const BYTE* limit = ip + (dictEnd-match);
1029                 assert(dictEnd > match);
1030                 if (limit > matchlimit) limit = matchlimit;
1031                 matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
1032                 ip += (size_t)matchCode + MINMATCH;
1033                 if (ip==limit) {
1034                     unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
1035                     matchCode += more;
1036                     ip += more;
1037                 }
1038                 DEBUGLOG(6, "             with matchLength=%u starting in extDict", matchCode+MINMATCH);
1039             } else {
1040                 matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
1041                 ip += (size_t)matchCode + MINMATCH;
1042                 DEBUGLOG(6, "             with matchLength=%u", matchCode+MINMATCH);
1043             }
1044 
1045             if ((outputDirective) &&    /* Check output buffer overflow */
1046                 (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
1047                 if (outputDirective == fillOutput) {
1048                     /* Match description too long : reduce it */
1049                     U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
1050                     ip -= matchCode - newMatchCode;
1051                     assert(newMatchCode < matchCode);
1052                     matchCode = newMatchCode;
1053                     if (unlikely(ip <= filledIp)) {
1054                         /* We have already filled up to filledIp so if ip ends up less than filledIp
1055                          * we have positions in the hash table beyond the current position. This is
1056                          * a problem if we reuse the hash table. So we have to remove these positions
1057                          * from the hash table.
1058                          */
1059                         const BYTE* ptr;
1060                         DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
1061                         for (ptr = ip; ptr <= filledIp; ++ptr) {
1062                             U32 const h = LZ4_hashPosition(ptr, tableType);
1063                             LZ4_clearHash(h, cctx->hashTable, tableType);
1064                         }
1065                     }
1066                 } else {
1067                     assert(outputDirective == limitedOutput);
1068                     return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1069                 }
1070             }
1071             if (matchCode >= ML_MASK) {
1072                 *token += ML_MASK;
1073                 matchCode -= ML_MASK;
1074                 LZ4_write32(op, 0xFFFFFFFF);
1075                 while (matchCode >= 4*255) {
1076                     op+=4;
1077                     LZ4_write32(op, 0xFFFFFFFF);
1078                     matchCode -= 4*255;
1079                 }
1080                 op += matchCode / 255;
1081                 *op++ = (BYTE)(matchCode % 255);
1082             } else
1083                 *token += (BYTE)(matchCode);
1084         }
1085         /* Ensure we have enough space for the last literals. */
1086         assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
1087 
1088         anchor = ip;
1089 
1090         /* Test end of chunk */
1091         if (ip >= mflimitPlusOne) break;
1092 
1093         /* Fill table */
1094         LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
1095 
1096         /* Test next position */
1097         if (tableType == byPtr) {
1098 
1099             match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
1100             LZ4_putPosition(ip, cctx->hashTable, tableType, base);
1101             if ( (match+LZ4_DISTANCE_MAX >= ip)
1102               && (LZ4_read32(match) == LZ4_read32(ip)) )
1103             { token=op++; *token=0; goto _next_match; }
1104 
1105         } else {   /* byU32, byU16 */
1106 
1107             U32 const h = LZ4_hashPosition(ip, tableType);
1108             U32 const current = (U32)(ip-base);
1109             U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
1110             assert(matchIndex < current);
1111             if (dictDirective == usingDictCtx) {
1112                 if (matchIndex < startIndex) {
1113                     /* there was no match, try the dictionary */
1114                     matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
1115                     match = dictBase + matchIndex;
1116                     lowLimit = dictionary;   /* required for match length counter */
1117                     matchIndex += dictDelta;
1118                 } else {
1119                     match = base + matchIndex;
1120                     lowLimit = (const BYTE*)source;  /* required for match length counter */
1121                 }
1122             } else if (dictDirective==usingExtDict) {
1123                 if (matchIndex < startIndex) {
1124                     match = dictBase + matchIndex;
1125                     lowLimit = dictionary;   /* required for match length counter */
1126                 } else {
1127                     match = base + matchIndex;
1128                     lowLimit = (const BYTE*)source;   /* required for match length counter */
1129                 }
1130             } else {   /* single memory segment */
1131                 match = base + matchIndex;
1132             }
1133             LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1134             assert(matchIndex < current);
1135             if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
1136               && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
1137               && (LZ4_read32(match) == LZ4_read32(ip)) ) {
1138                 token=op++;
1139                 *token=0;
1140                 if (maybe_extMem) offset = current - matchIndex;
1141                 DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1142                             (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
1143                 goto _next_match;
1144             }
1145         }
1146 
1147         /* Prepare next loop */
1148         forwardH = LZ4_hashPosition(++ip, tableType);
1149 
1150     }
1151 
1152 _last_literals:
1153     /* Encode Last Literals */
1154     {   size_t lastRun = (size_t)(iend - anchor);
1155         if ( (outputDirective) &&  /* Check output buffer overflow */
1156             (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
1157             if (outputDirective == fillOutput) {
1158                 /* adapt lastRun to fill 'dst' */
1159                 assert(olimit >= op);
1160                 lastRun  = (size_t)(olimit-op) - 1;
1161                 lastRun -= (lastRun+240)/255;
1162             } else {
1163                 assert(outputDirective == limitedOutput);
1164                 return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1165             }
1166         }
1167         if (lastRun >= RUN_MASK) {
1168             size_t accumulator = lastRun - RUN_MASK;
1169             *op++ = RUN_MASK << ML_BITS;
1170             for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
1171             *op++ = (BYTE) accumulator;
1172         } else {
1173             *op++ = (BYTE)(lastRun<<ML_BITS);
1174         }
1175         memcpy(op, anchor, lastRun);
1176         ip = anchor + lastRun;
1177         op += lastRun;
1178     }
1179 
1180     if (outputDirective == fillOutput) {
1181         *inputConsumed = (int) (((const char*)ip)-source);
1182     }
1183     DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, (int)(((char*)op) - dest));
1184     result = (int)(((char*)op) - dest);
1185     assert(result > 0);
1186     return result;
1187 }
1188 
1189 
1190 int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1191 {
1192     LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
1193     assert(ctx != NULL);
1194     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
1195     if (maxOutputSize >= LZ4_compressBound(inputSize)) {
1196         if (inputSize < LZ4_64Klimit) {
1197             return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
1198         } else {
1199             const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1200             return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1201         }
1202     } else {
1203         if (inputSize < LZ4_64Klimit) {
1204             return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
1205         } else {
1206             const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1207             return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1208         }
1209     }
1210 }
1211 
1212 /**
1213  * LZ4_compress_fast_extState_fastReset() :
1214  * A variant of LZ4_compress_fast_extState().
1215  *
1216  * Using this variant avoids an expensive initialization step. It is only safe
1217  * to call if the state buffer is known to be correctly initialized already
1218  * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
1219  * "correctly initialized").
1220  */
1221 int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
1222 {
1223     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
1224     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
1225 
1226     if (dstCapacity >= LZ4_compressBound(srcSize)) {
1227         if (srcSize < LZ4_64Klimit) {
1228             const tableType_t tableType = byU16;
1229             LZ4_prepareTable(ctx, srcSize, tableType);
1230             if (ctx->currentOffset) {
1231                 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
1232             } else {
1233                 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1234             }
1235         } else {
1236             const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1237             LZ4_prepareTable(ctx, srcSize, tableType);
1238             return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1239         }
1240     } else {
1241         if (srcSize < LZ4_64Klimit) {
1242             const tableType_t tableType = byU16;
1243             LZ4_prepareTable(ctx, srcSize, tableType);
1244             if (ctx->currentOffset) {
1245                 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
1246             } else {
1247                 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1248             }
1249         } else {
1250             const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1251             LZ4_prepareTable(ctx, srcSize, tableType);
1252             return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1253         }
1254     }
1255 }
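/* Illustrative usage sketch (not part of the backported source; identifiers
 * such as srcBlock, dstBlock, nbBlocks and handle_error are assumptions for
 * the example only): reuse one externally allocated state across many blocks,
 * paying the full initialization cost once and relying on the cheap
 * _fastReset path afterwards.
 *
 *     LZ4_stream_t state;
 *     LZ4_initStream(&state, sizeof(state));            // full init, done once
 *     for (size_t i = 0; i < nbBlocks; i++) {
 *         int const cSize = LZ4_compress_fast_extState_fastReset(
 *                 &state, srcBlock[i], dstBlock[i],
 *                 srcSize[i], dstCapacity, 1);
 *         if (cSize <= 0) handle_error();
 *     }
 */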
1256 
1257 
1258 int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1259 {
1260     int result;
1261 #if (LZ4_HEAPMODE)
1262     LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
1263     if (ctxPtr == NULL) return 0;
1264 #else
1265     LZ4_stream_t ctx;
1266     LZ4_stream_t* const ctxPtr = &ctx;
1267 #endif
1268     result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
1269 
1270 #if (LZ4_HEAPMODE)
1271     FREEMEM(ctxPtr);
1272 #endif
1273     return result;
1274 }
1275 
1276 
1277 int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
1278 {
1279     return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
1280 }
1281 
1282 
1283 /* hidden debug function */
1284 /* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
1285 int LZ4_compress_fast_force(const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
1286 {
1287     LZ4_stream_t ctx;
1288     LZ4_initStream(&ctx, sizeof(ctx));
1289 
1290     if (srcSize < LZ4_64Klimit) {
1291         return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, byU16,    noDict, noDictIssue, acceleration);
1292     } else {
1293         tableType_t const addrMode = (sizeof(void*) > 4) ? byU32 : byPtr;
1294         return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, addrMode, noDict, noDictIssue, acceleration);
1295     }
1296 }
1297 
1298 
1299 /* Note!: This function leaves the stream in an unclean/broken state!
1300  * It is not safe to subsequently use the same state with a _fastReset() or
1301  * _continue() call without resetting it. */
1302 static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1303 {
1304     void* const s = LZ4_initStream(state, sizeof (*state));
1305     assert(s != NULL); (void)s;
1306 
1307     if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
1308         return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
1309     } else {
1310         if (*srcSizePtr < LZ4_64Klimit) {
1311             return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
1312         } else {
1313             tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1314             return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
1315     }   }
1316 }
1317 
1318 
1319 int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1320 {
1321 #if (LZ4_HEAPMODE)
1322     LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
1323     if (ctx == NULL) return 0;
1324 #else
1325     LZ4_stream_t ctxBody;
1326     LZ4_stream_t* ctx = &ctxBody;
1327 #endif
1328 
1329     int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
1330 
1331 #if (LZ4_HEAPMODE)
1332     FREEMEM(ctx);
1333 #endif
1334     return result;
1335 }
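/* Illustrative usage sketch (example-only identifiers, not upstream code):
 * LZ4_compress_destSize() fills 'dst' up to a fixed budget and reports,
 * through *srcSizePtr, how many source bytes were actually consumed.
 *
 *     int srcSize = (int)inputLen;                       // in: bytes available
 *     int const cSize = LZ4_compress_destSize(input, packet, &srcSize, 1024);
 *     // on return: cSize bytes were written into packet,
 *     //            srcSize now holds the number of input bytes consumed
 */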
1336 
1337 
1338 
1339 /*-******************************
1340 *  Streaming functions
1341 ********************************/
1342 
1343 LZ4_stream_t* LZ4_createStream(void)
1344 {
1345     LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
1346     LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));    /* A compilation error here means LZ4_STREAMSIZE is not large enough */
1347     DEBUGLOG(4, "LZ4_createStream %p", lz4s);
1348     if (lz4s == NULL) return NULL;
1349     LZ4_initStream(lz4s, sizeof(*lz4s));
1350     return lz4s;
1351 }
1352 
1353 #ifndef _MSC_VER  /* for some reason, Visual fails the alignment test on 32-bit x86 :
1354                      it reports an alignment of 8 bytes,
1355                      while actually aligning LZ4_stream_t on 4 bytes. */
1356 static size_t LZ4_stream_t_alignment(void)
1357 {
1358     struct { char c; LZ4_stream_t t; } t_a;
1359     return sizeof(t_a) - sizeof(t_a.t);
1360 }
1361 #endif
1362 
1363 LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
1364 {
1365     DEBUGLOG(5, "LZ4_initStream");
1366     if (buffer == NULL) { return NULL; }
1367     if (size < sizeof(LZ4_stream_t)) { return NULL; }
1368 #ifndef _MSC_VER  /* for some reason, Visual fails the alignment test on 32-bit x86 :
1369                      it reports an alignment of 8 bytes,
1370                      while actually aligning LZ4_stream_t on 4 bytes. */
1371     if (((size_t)buffer) & (LZ4_stream_t_alignment() - 1)) { return NULL; } /* alignment check */
1372 #endif
1373     MEM_INIT(buffer, 0, sizeof(LZ4_stream_t));
1374     return (LZ4_stream_t*)buffer;
1375 }
1376 
1377 /* resetStream is now deprecated,
1378  * prefer initStream() which is more general */
1379 void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
1380 {
1381     DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
1382     MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
1383 }
1384 
1385 void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
1386     LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
1387 }
1388 
1389 int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
1390 {
1391     if (!LZ4_stream) return 0;   /* support free on NULL */
1392     DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
1393     FREEMEM(LZ4_stream);
1394     return (0);
1395 }
1396 
1397 
1398 #define HASH_UNIT sizeof(reg_t)
1399 int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
1400 {
1401     LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
1402     const tableType_t tableType = byU32;
1403     const BYTE* p = (const BYTE*)dictionary;
1404     const BYTE* const dictEnd = p + dictSize;
1405     const BYTE* base;
1406 
1407     DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
1408 
1409     /* It's necessary to reset the context,
1410      * and not just continue it with prepareTable()
1411      * to avoid any risk of generating overflowing matchIndex
1412      * when compressing using this dictionary */
1413     LZ4_resetStream(LZ4_dict);
1414 
1415     /* We always increment the offset by 64 KB, since, if the dict is longer,
1416      * we truncate it to the last 64k, and if it's shorter, we still want to
1417      * advance by a whole window length so we can provide the guarantee that
1418      * there are only valid offsets in the window, which allows an optimization
1419      * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
1420      * dictionary isn't a full 64k. */
1421     dict->currentOffset += 64 KB;
1422 
1423     if (dictSize < (int)HASH_UNIT) {
1424         return 0;
1425     }
1426 
1427     if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
1428     base = dictEnd - dict->currentOffset;
1429     dict->dictionary = p;
1430     dict->dictSize = (U32)(dictEnd - p);
1431     dict->tableType = tableType;
1432 
1433     while (p <= dictEnd-HASH_UNIT) {
1434         LZ4_putPosition(p, dict->hashTable, tableType, base);
1435         p+=3;
1436     }
1437 
1438     return (int)dict->dictSize;
1439 }
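/* Illustrative usage sketch (assumed typical dictionary workflow, with
 * example-only buffer names): load a dictionary once into a stream, then
 * compress a block against it with LZ4_compress_fast_continue().
 *
 *     LZ4_stream_t* const stream = LZ4_createStream();
 *     LZ4_loadDict(stream, dictBuffer, dictSize);        // keeps at most the last 64 KB
 *     int const cSize = LZ4_compress_fast_continue(stream, src, dst,
 *                                                  srcSize, dstCapacity, 1);
 *     LZ4_freeStream(stream);
 */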
1440 
1441 void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
1442     const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
1443         &(dictionaryStream->internal_donotuse);
1444 
1445     DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
1446              workingStream, dictionaryStream,
1447              dictCtx != NULL ? dictCtx->dictSize : 0);
1448 
1449     /* Calling LZ4_resetStream_fast() here makes sure that changes will not be
1450      * erased by subsequent calls to LZ4_resetStream_fast() in case stream was
1451      * marked as having dirty context, e.g. requiring full reset.
1452      */
1453     LZ4_resetStream_fast(workingStream);
1454 
1455     if (dictCtx != NULL) {
1456         /* If the current offset is zero, we will never look in the
1457          * external dictionary context, since there is no value a table
1458          * entry can take that indicates a miss. In that case, we need
1459          * to bump the offset to something non-zero.
1460          */
1461         if (workingStream->internal_donotuse.currentOffset == 0) {
1462             workingStream->internal_donotuse.currentOffset = 64 KB;
1463         }
1464 
1465         /* Don't actually attach an empty dictionary.
1466          */
1467         if (dictCtx->dictSize == 0) {
1468             dictCtx = NULL;
1469         }
1470     }
1471     workingStream->internal_donotuse.dictCtx = dictCtx;
1472 }
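/* Illustrative usage sketch (example-only identifiers): hash a dictionary
 * once into a dedicated stream, then attach it cheaply to any number of
 * working streams instead of re-running LZ4_loadDict() for each of them.
 *
 *     LZ4_stream_t* const dictStream = LZ4_createStream();
 *     LZ4_loadDict(dictStream, dictBuffer, dictSize);    // expensive, done once
 *
 *     LZ4_stream_t* const workStream = LZ4_createStream();
 *     LZ4_attach_dictionary(workStream, dictStream);     // cheap, per stream
 *     int const cSize = LZ4_compress_fast_continue(workStream, src, dst,
 *                                                  srcSize, dstCapacity, 1);
 */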
1473 
1474 
1475 static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
1476 {
1477     assert(nextSize >= 0);
1478     if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
1479         /* rescale hash table */
1480         U32 const delta = LZ4_dict->currentOffset - 64 KB;
1481         const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
1482         int i;
1483         DEBUGLOG(4, "LZ4_renormDictT");
1484         for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
1485             if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
1486             else LZ4_dict->hashTable[i] -= delta;
1487         }
1488         LZ4_dict->currentOffset = 64 KB;
1489         if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
1490         LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
1491     }
1492 }
1493 
1494 
1495 int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
1496                                 const char* source, char* dest,
1497                                 int inputSize, int maxOutputSize,
1498                                 int acceleration)
1499 {
1500     const tableType_t tableType = byU32;
1501     LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
1502     const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
1503 
1504     DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
1505 
1506     if (streamPtr->dirty) { return 0; } /* Uninitialized structure detected */
1507     LZ4_renormDictT(streamPtr, inputSize);   /* avoid index overflow */
1508     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
1509 
1510     /* invalidate tiny dictionaries */
1511     if ( (streamPtr->dictSize-1 < 4-1)   /* intentional underflow */
1512       && (dictEnd != (const BYTE*)source) ) {
1513         DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
1514         streamPtr->dictSize = 0;
1515         streamPtr->dictionary = (const BYTE*)source;
1516         dictEnd = (const BYTE*)source;
1517     }
1518 
1519     /* Check overlapping input/dictionary space */
1520     {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
1521         if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
1522             streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1523             if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
1524             if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
1525             streamPtr->dictionary = dictEnd - streamPtr->dictSize;
1526         }
1527     }
1528 
1529     /* prefix mode : source data follows dictionary */
1530     if (dictEnd == (const BYTE*)source) {
1531         if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1532             return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
1533         else
1534             return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
1535     }
1536 
1537     /* external dictionary mode */
1538     {   int result;
1539         if (streamPtr->dictCtx) {
1540             /* We depend here on the fact that dictCtx'es (produced by
1541              * LZ4_loadDict) guarantee that their tables contain no references
1542              * to offsets between dictCtx->currentOffset - 64 KB and
1543              * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
1544              * to use noDictIssue even when the dict isn't a full 64 KB.
1545              */
1546             if (inputSize > 4 KB) {
1547                 /* For compressing large blobs, it is faster to pay the setup
1548                  * cost to copy the dictionary's tables into the active context,
1549                  * so that the compression loop is only looking into one table.
1550                  */
1551                 memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
1552                 result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
1553             } else {
1554                 result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
1555             }
1556         } else {
1557             if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1558                 result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
1559             } else {
1560                 result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
1561             }
1562         }
1563         streamPtr->dictionary = (const BYTE*)source;
1564         streamPtr->dictSize = (U32)inputSize;
1565         return result;
1566     }
1567 }
1568 
1569 
1570 /* Hidden debug function, to force-test external dictionary mode */
1571 int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
1572 {
1573     LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
1574     int result;
1575 
1576     LZ4_renormDictT(streamPtr, srcSize);
1577 
1578     if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1579         result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
1580     } else {
1581         result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
1582     }
1583 
1584     streamPtr->dictionary = (const BYTE*)source;
1585     streamPtr->dictSize = (U32)srcSize;
1586 
1587     return result;
1588 }
1589 
1590 
1591 /*! LZ4_saveDict() :
1592  *  If the previously compressed data block is not guaranteed to remain available at its current memory location,
1593  *  save it into a safer place (char* safeBuffer).
1594  *  Note : there is no need to call LZ4_loadDict() afterwards;
1595  *         the dictionary is immediately usable, so LZ4_compress_fast_continue() can be called right away.
1596  *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 on error.
1597  */
1598 int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
1599 {
1600     LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
1601     const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
1602 
1603     if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
1604     if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
1605 
1606     memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
1607 
1608     dict->dictionary = (const BYTE*)safeBuffer;
1609     dict->dictSize = (U32)dictSize;
1610 
1611     return dictSize;
1612 }
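/* Illustrative usage sketch (double-buffer scenario, example-only names):
 * when the block just compressed may be overwritten, preserve the recent
 * history with LZ4_saveDict() before reusing the input buffer.
 *
 *     char dictBackup[64 * 1024];
 *     int const saved = LZ4_saveDict(stream, dictBackup, (int)sizeof(dictBackup));
 *     // 'saved' bytes of history now live in dictBackup; the next call to
 *     // LZ4_compress_fast_continue() can keep referencing that history safely.
 */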
1613 
1614 
1615 
1616 /*-*******************************
1617  *  Decompression functions
1618  ********************************/
1619 
1620 typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
1621 typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
1622 
1623 #undef MIN
1624 #define MIN(a,b)    ( (a) < (b) ? (a) : (b) )
1625 
1626 /* Read the variable-length literal or match length.
1627  *
1628  * ip - pointer to use as input.
1629  * lencheck - end ip.  Return an error if ip advances >= lencheck.
1630  * loop_check - check ip >= lencheck in body of loop.  Returns loop_error if so.
1631  * initial_check - check ip >= lencheck before start of loop.  Returns initial_error if so.
1632  * error (output) - error code.  Should be set to 0 before call.
1633  */
1634 typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
1635 LZ4_FORCE_INLINE unsigned
1636 read_variable_length(const BYTE**ip, const BYTE* lencheck, int loop_check, int initial_check, variable_length_error* error)
1637 {
1638   unsigned length = 0;
1639   unsigned s;
1640   if (initial_check && unlikely((*ip) >= lencheck)) {    /* overflow detection */
1641     *error = initial_error;
1642     return length;
1643   }
1644   do {
1645     s = **ip;
1646     (*ip)++;
1647     length += s;
1648     if (loop_check && unlikely((*ip) >= lencheck)) {    /* overflow detection */
1649       *error = loop_error;
1650       return length;
1651     }
1652   } while (s==255);
1653 
1654   return length;
1655 }
1656 
1657 /*! LZ4_decompress_generic() :
1658  *  This generic decompression function covers all use cases.
1659  *  It shall be instantiated several times, using different sets of directives.
1660  *  Note that it is important for performance that this function really get inlined,
1661  *  in order to remove useless branches during compilation optimization.
1662  */
1663 LZ4_FORCE_INLINE int
1664 LZ4_decompress_generic(
1665                  const char* const src,
1666                  char* const dst,
1667                  int srcSize,
1668                  int outputSize,         /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
1669 
1670                  endCondition_directive endOnInput,   /* endOnOutputSize, endOnInputSize */
1671                  earlyEnd_directive partialDecoding,  /* full, partial */
1672                  dict_directive dict,                 /* noDict, withPrefix64k, usingExtDict */
1673                  const BYTE* const lowPrefix,  /* always <= dst, == dst when no prefix */
1674                  const BYTE* const dictStart,  /* only if dict==usingExtDict */
1675                  const size_t dictSize         /* note : = 0 if noDict */
1676                  )
1677 {
1678     if (src == NULL) { return -1; }
1679 
1680     {   const BYTE* ip = (const BYTE*) src;
1681         const BYTE* const iend = ip + srcSize;
1682 
1683         BYTE* op = (BYTE*) dst;
1684         BYTE* const oend = op + outputSize;
1685         BYTE* cpy;
1686 
1687         const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
1688 
1689         const int safeDecode = (endOnInput==endOnInputSize);
1690         const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
1691 
1692 
1693         /* Set up the "end" pointers for the shortcut. */
1694         const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
1695         const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
1696 
1697         const BYTE* match;
1698         size_t offset;
1699         unsigned token;
1700         size_t length;
1701 
1702 
1703         DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
1704 
1705         /* Special cases */
1706         assert(lowPrefix <= op);
1707         if ((endOnInput) && (unlikely(outputSize==0))) {
1708             /* Empty output buffer */
1709             if (partialDecoding) return 0;
1710             return ((srcSize==1) && (*ip==0)) ? 0 : -1;
1711         }
1712         if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
1713         if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
1714 
1715         /* Currently the fast loop shows a regression on Qualcomm ARM chips. */
1716 #if LZ4_FAST_DEC_LOOP
1717         if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
1718             DEBUGLOG(6, "skip fast decode loop");
1719             goto safe_decode;
1720         }
1721 
1722         /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
1723         while (1) {
1724             /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
1725             assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
1726             if (endOnInput) { assert(ip < iend); }
1727             token = *ip++;
1728             length = token >> ML_BITS;  /* literal length */
1729 
1730             assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
1731 
1732             /* decode literal length */
1733             if (length == RUN_MASK) {
1734                 variable_length_error error = ok;
1735                 length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error);
1736                 if (error == initial_error) { goto _output_error; }
1737                 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
1738                 if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
1739 
1740                 /* copy literals */
1741                 cpy = op+length;
1742                 LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
1743                 if (endOnInput) {  /* LZ4_decompress_safe() */
1744                     if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
1745                     LZ4_wildCopy32(op, ip, cpy);
1746                 } else {   /* LZ4_decompress_fast() */
1747                     if (cpy>oend-8) { goto safe_literal_copy; }
1748                     LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
1749                                                  * it doesn't know input length, and only relies on end-of-block properties */
1750                 }
1751                 ip += length; op = cpy;
1752             } else {
1753                 cpy = op+length;
1754                 if (endOnInput) {  /* LZ4_decompress_safe() */
1755                     DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
1756                     /* We don't need to check oend, since we check it once for each loop below */
1757                     if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
1758                     /* Literals can only be 14, but hope compilers optimize if we copy by a register size */
1759                     memcpy(op, ip, 16);
1760                 } else {  /* LZ4_decompress_fast() */
1761                     /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
1762                      * it doesn't know input length, and relies on end-of-block properties */
1763                     memcpy(op, ip, 8);
1764                     if (length > 8) { memcpy(op+8, ip+8, 8); }
1765                 }
1766                 ip += length; op = cpy;
1767             }
1768 
1769             /* get offset */
1770             offset = LZ4_readLE16(ip); ip+=2;
1771             match = op - offset;
1772             assert(match <= op);
1773 
1774             /* get matchlength */
1775             length = token & ML_MASK;
1776 
1777             if (length == ML_MASK) {
1778               variable_length_error error = ok;
1779               if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1780               length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
1781               if (error != ok) { goto _output_error; }
1782                 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
1783                 length += MINMATCH;
1784                 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1785                     goto safe_match_copy;
1786                 }
1787             } else {
1788                 length += MINMATCH;
1789                 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1790                     goto safe_match_copy;
1791                 }
1792 
1793                 /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
1794                 if ((dict == withPrefix64k) || (match >= lowPrefix)) {
1795                     if (offset >= 8) {
1796                         assert(match >= lowPrefix);
1797                         assert(match <= op);
1798                         assert(op + 18 <= oend);
1799 
1800                         memcpy(op, match, 8);
1801                         memcpy(op+8, match+8, 8);
1802                         memcpy(op+16, match+16, 2);
1803                         op += length;
1804                         continue;
1805             }   }   }
1806 
1807             if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1808             /* match starting within external dictionary */
1809             if ((dict==usingExtDict) && (match < lowPrefix)) {
1810                 if (unlikely(op+length > oend-LASTLITERALS)) {
1811                     if (partialDecoding) {
1812                         length = MIN(length, (size_t)(oend-op));  /* reach end of buffer */
1813                     } else {
1814                         goto _output_error;  /* end-of-block condition violated */
1815                 }   }
1816 
1817                 if (length <= (size_t)(lowPrefix-match)) {
1818                     /* match fits entirely within external dictionary : just copy */
1819                     memmove(op, dictEnd - (lowPrefix-match), length);
1820                     op += length;
1821                 } else {
1822                     /* match stretches into both external dictionary and current block */
1823                     size_t const copySize = (size_t)(lowPrefix - match);
1824                     size_t const restSize = length - copySize;
1825                     memcpy(op, dictEnd - copySize, copySize);
1826                     op += copySize;
1827                     if (restSize > (size_t)(op - lowPrefix)) {  /* overlap copy */
1828                         BYTE* const endOfMatch = op + restSize;
1829                         const BYTE* copyFrom = lowPrefix;
1830                         while (op < endOfMatch) { *op++ = *copyFrom++; }
1831                     } else {
1832                         memcpy(op, lowPrefix, restSize);
1833                         op += restSize;
1834                 }   }
1835                 continue;
1836             }
1837 
1838             /* copy match within block */
1839             cpy = op + length;
1840 
1841             assert((op <= oend) && (oend-op >= 32));
1842             if (unlikely(offset<16)) {
1843                 LZ4_memcpy_using_offset(op, match, cpy, offset);
1844             } else {
1845                 LZ4_wildCopy32(op, match, cpy);
1846             }
1847 
1848             op = cpy;   /* wildcopy correction */
1849         }
1850     safe_decode:
1851 #endif
1852 
1853         /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
1854         while (1) {
1855             token = *ip++;
1856             length = token >> ML_BITS;  /* literal length */
1857 
1858             assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
1859 
1860             /* A two-stage shortcut for the most common case:
1861              * 1) If the literal length is 0..14, and there is enough space,
1862              * enter the shortcut and copy 16 bytes on behalf of the literals
1863              * (in the fast mode, only 8 bytes can be safely copied this way).
1864              * 2) Further if the match length is 4..18, copy 18 bytes in a similar
1865              * manner; but we ensure that there's enough space in the output for
1866              * those 18 bytes earlier, upon entering the shortcut (in other words,
1867              * there is a combined check for both stages).
1868              */
1869             if ( (endOnInput ? length != RUN_MASK : length <= 8)
1870                 /* strictly "less than" on input, to re-enter the loop with at least one byte */
1871               && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
1872                 /* Copy the literals */
1873                 memcpy(op, ip, endOnInput ? 16 : 8);
1874                 op += length; ip += length;
1875 
1876                 /* The second stage: prepare for match copying, decode full info.
1877                  * If it doesn't work out, the info won't be wasted. */
1878                 length = token & ML_MASK; /* match length */
1879                 offset = LZ4_readLE16(ip); ip += 2;
1880                 match = op - offset;
1881                 assert(match <= op); /* check overflow */
1882 
1883                 /* Do not deal with overlapping matches. */
1884                 if ( (length != ML_MASK)
1885                   && (offset >= 8)
1886                   && (dict==withPrefix64k || match >= lowPrefix) ) {
1887                     /* Copy the match. */
1888                     memcpy(op + 0, match + 0, 8);
1889                     memcpy(op + 8, match + 8, 8);
1890                     memcpy(op +16, match +16, 2);
1891                     op += length + MINMATCH;
1892                     /* Both stages worked, load the next token. */
1893                     continue;
1894                 }
1895 
1896                 /* The second stage didn't work out, but the info is ready.
1897                  * Propel it right to the point of match copying. */
1898                 goto _copy_match;
1899             }
1900 
1901             /* decode literal length */
1902             if (length == RUN_MASK) {
1903                 variable_length_error error = ok;
1904                 length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error);
1905                 if (error == initial_error) { goto _output_error; }
1906                 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
1907                 if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
1908             }
1909 
1910             /* copy literals */
1911             cpy = op+length;
1912 #if LZ4_FAST_DEC_LOOP
1913         safe_literal_copy:
1914 #endif
1915             LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
1916             if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
1917               || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
1918             {
1919                 /* We've either hit the input parsing restriction or the output parsing restriction.
1920                  * If we've hit the input parsing condition then this must be the last sequence.
1921                  * If we've hit the output parsing condition then either we are using partialDecoding,
1922                  * or this must be the last sequence (anything else is a malformed input).
1923                  */
1924                 if (partialDecoding) {
1925                     /* Since we are partial decoding we may be in this block because of the output parsing
1926                      * restriction, which is not an error, since the output buffer is allowed to be undersized.
1927                      */
1928                     assert(endOnInput);
1929                     /* If we're in this block because of the input parsing condition, then we must be on the
1930                      * last sequence (or invalid), so we must check that we exactly consume the input.
1931                      */
1932                     if ((ip+length>iend-(2+1+LASTLITERALS)) && (ip+length != iend)) { goto _output_error; }
1933                     assert(ip+length <= iend);
1934                     /* We are finishing in the middle of a literals segment.
1935                      * Break after the copy.
1936                      */
1937                     if (cpy > oend) {
1938                         cpy = oend;
1939                         assert(op<=oend);
1940                         length = (size_t)(oend-op);
1941                     }
1942                     assert(ip+length <= iend);
1943                 } else {
1944                     /* We must be on the last sequence because of the parsing limitations so check
1945                      * that we exactly regenerate the original size (must be exact when !endOnInput).
1946                      */
1947                     if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
1948                      /* We must be on the last sequence (or invalid) because of the parsing limitations
1949                       * so check that we exactly consume the input and don't overrun the output buffer.
1950                       */
1951                     if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) { goto _output_error; }
1952                 }
1953                 memmove(op, ip, length);  /* supports overlapping memory regions, which only matters for in-place decompression scenarios */
1954                 ip += length;
1955                 op += length;
1956                 /* Necessarily EOF when !partialDecoding. When partialDecoding
1957                  * it is EOF if we've either filled the output buffer or hit
1958                  * the input parsing restriction.
1959                  */
1960                 if (!partialDecoding || (cpy == oend) || (ip == iend)) {
1961                     break;
1962                 }
1963             } else {
1964                 LZ4_wildCopy8(op, ip, cpy);   /* may overwrite up to WILDCOPYLENGTH beyond cpy */
1965                 ip += length; op = cpy;
1966             }
1967 
1968             /* get offset */
1969             offset = LZ4_readLE16(ip); ip+=2;
1970             match = op - offset;
1971 
1972             /* get matchlength */
1973             length = token & ML_MASK;
1974 
1975     _copy_match:
1976             if (length == ML_MASK) {
1977               variable_length_error error = ok;
1978               length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
1979               if (error != ok) goto _output_error;
1980                 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
1981             }
1982             length += MINMATCH;
1983 
1984 #if LZ4_FAST_DEC_LOOP
1985         safe_match_copy:
1986 #endif
1987             if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error;   /* Error : offset outside buffers */
1988             /* match starting within external dictionary */
1989             if ((dict==usingExtDict) && (match < lowPrefix)) {
1990                 if (unlikely(op+length > oend-LASTLITERALS)) {
1991                     if (partialDecoding) length = MIN(length, (size_t)(oend-op));
1992                     else goto _output_error;   /* doesn't respect parsing restriction */
1993                 }
1994 
1995                 if (length <= (size_t)(lowPrefix-match)) {
1996                     /* match fits entirely within external dictionary : just copy */
1997                     memmove(op, dictEnd - (lowPrefix-match), length);
1998                     op += length;
1999                 } else {
2000                     /* match stretches into both external dictionary and current block */
2001                     size_t const copySize = (size_t)(lowPrefix - match);
2002                     size_t const restSize = length - copySize;
2003                     memcpy(op, dictEnd - copySize, copySize);
2004                     op += copySize;
2005                     if (restSize > (size_t)(op - lowPrefix)) {  /* overlap copy */
2006                         BYTE* const endOfMatch = op + restSize;
2007                         const BYTE* copyFrom = lowPrefix;
2008                         while (op < endOfMatch) *op++ = *copyFrom++;
2009                     } else {
2010                         memcpy(op, lowPrefix, restSize);
2011                         op += restSize;
2012                 }   }
2013                 continue;
2014             }
2015             assert(match >= lowPrefix);
2016 
2017             /* copy match within block */
2018             cpy = op + length;
2019 
2020             /* partialDecoding : may end anywhere within the block */
2021             assert(op<=oend);
2022             if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
2023                 size_t const mlen = MIN(length, (size_t)(oend-op));
2024                 const BYTE* const matchEnd = match + mlen;
2025                 BYTE* const copyEnd = op + mlen;
2026                 if (matchEnd > op) {   /* overlap copy */
2027                     while (op < copyEnd) { *op++ = *match++; }
2028                 } else {
2029                     memcpy(op, match, mlen);
2030                 }
2031                 op = copyEnd;
2032                 if (op == oend) { break; }
2033                 continue;
2034             }
2035 
2036             if (unlikely(offset<8)) {
2037                 LZ4_write32(op, 0);   /* silence msan warning when offset==0 */
2038                 op[0] = match[0];
2039                 op[1] = match[1];
2040                 op[2] = match[2];
2041                 op[3] = match[3];
2042                 match += inc32table[offset];
2043                 memcpy(op+4, match, 4);
2044                 match -= dec64table[offset];
2045             } else {
2046                 memcpy(op, match, 8);
2047                 match += 8;
2048             }
2049             op += 8;
2050 
2051             if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
2052                 BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
2053                 if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
2054                 if (op < oCopyLimit) {
2055                     LZ4_wildCopy8(op, match, oCopyLimit);
2056                     match += oCopyLimit - op;
2057                     op = oCopyLimit;
2058                 }
2059                 while (op < cpy) { *op++ = *match++; }
2060             } else {
2061                 memcpy(op, match, 8);
2062                 if (length > 16)  { LZ4_wildCopy8(op+8, match+8, cpy); }
2063             }
2064             op = cpy;   /* wildcopy correction */
2065         }
2066 
2067         /* end of decoding */
2068         if (endOnInput) {
2069            return (int) (((char*)op)-dst);     /* Nb of output bytes decoded */
2070        } else {
2071            return (int) (((const char*)ip)-src);   /* Nb of input bytes read */
2072        }
2073 
2074         /* Overflow error detected */
2075     _output_error:
2076         return (int) (-(((const char*)ip)-src))-1;
2077     }
2078 }
2079 
2080 
2081 /*===== Instantiate the API decoding functions. =====*/
2082 
2083 LZ4_FORCE_O2_GCC_PPC64LE
2084 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
2085 {
2086     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
2087                                   endOnInputSize, decode_full_block, noDict,
2088                                   (BYTE*)dest, NULL, 0);
2089 }
2090 
2091 LZ4_FORCE_O2_GCC_PPC64LE
2092 int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
2093 {
2094     dstCapacity = MIN(targetOutputSize, dstCapacity);
2095     return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
2096                                   endOnInputSize, partial_decode,
2097                                   noDict, (BYTE*)dst, NULL, 0);
2098 }
2099 
2100 LZ4_FORCE_O2_GCC_PPC64LE
2101 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
2102 {
2103     return LZ4_decompress_generic(source, dest, 0, originalSize,
2104                                   endOnOutputSize, decode_full_block, withPrefix64k,
2105                                   (BYTE*)dest - 64 KB, NULL, 0);
2106 }
2107 
2108 /*===== Instantiate a few more decoding cases, used more than once. =====*/
2109 
2110 LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */
2111 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
2112 {
2113     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2114                                   endOnInputSize, decode_full_block, withPrefix64k,
2115                                   (BYTE*)dest - 64 KB, NULL, 0);
2116 }
2117 
2118 /* Another obsolete API function, paired with the previous one. */
2119 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
2120 {
2121     /* LZ4_decompress_fast doesn't validate match offsets,
2122      * and thus serves well with any prefixed dictionary. */
2123     return LZ4_decompress_fast(source, dest, originalSize);
2124 }
2125 
2126 LZ4_FORCE_O2_GCC_PPC64LE
2127 static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
2128                                                size_t prefixSize)
2129 {
2130     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2131                                   endOnInputSize, decode_full_block, noDict,
2132                                   (BYTE*)dest-prefixSize, NULL, 0);
2133 }
2134 
2135 LZ4_FORCE_O2_GCC_PPC64LE
2136 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
2137                                      int compressedSize, int maxOutputSize,
2138                                      const void* dictStart, size_t dictSize)
2139 {
2140     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2141                                   endOnInputSize, decode_full_block, usingExtDict,
2142                                   (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2143 }
2144 
2145 LZ4_FORCE_O2_GCC_PPC64LE
2146 static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
2147                                        const void* dictStart, size_t dictSize)
2148 {
2149     return LZ4_decompress_generic(source, dest, 0, originalSize,
2150                                   endOnOutputSize, decode_full_block, usingExtDict,
2151                                   (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2152 }
2153 
2154 /* The "double dictionary" mode, for use with e.g. ring buffers: the first part
2155  * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
2156  * These routines are used only once, in LZ4_decompress_*_continue().
2157  */
2158 LZ4_FORCE_INLINE
2159 int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
2160                                    size_t prefixSize, const void* dictStart, size_t dictSize)
2161 {
2162     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2163                                   endOnInputSize, decode_full_block, usingExtDict,
2164                                   (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2165 }
2166 
2167 LZ4_FORCE_INLINE
2168 int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize,
2169                                    size_t prefixSize, const void* dictStart, size_t dictSize)
2170 {
2171     return LZ4_decompress_generic(source, dest, 0, originalSize,
2172                                   endOnOutputSize, decode_full_block, usingExtDict,
2173                                   (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2174 }
2175 
2176 /*===== streaming decompression functions =====*/
2177 
2178 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
2179 {
2180     LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
2181     LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal));    /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
2182     return lz4s;
2183 }
2184 
2185 int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
2186 {
2187     if (LZ4_stream == NULL) { return 0; }  /* support free on NULL */
2188     FREEMEM(LZ4_stream);
2189     return 0;
2190 }
2191 
2192 /*! LZ4_setStreamDecode() :
2193  *  Use this function to indicate where the dictionary is located.
2194  *  This function is not necessary if previously decoded data is still available where it was decoded.
2195  *  Loading a size of 0 is allowed (same effect as no dictionary).
2196  * @return : 1 if OK, 0 if error
2197  */
2198 int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
2199 {
2200     LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2201     lz4sd->prefixSize = (size_t) dictSize;
2202     lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
2203     lz4sd->externalDict = NULL;
2204     lz4sd->extDictSize  = 0;
2205     return 1;
2206 }
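/* Illustrative usage sketch (example-only identifiers): point the decoder at
 * history that has been moved to a separate buffer, then decode the next block.
 *
 *     LZ4_streamDecode_t* const dStream = LZ4_createStreamDecode();
 *     LZ4_setStreamDecode(dStream, historyBuf, historySize);
 *     int const dSize = LZ4_decompress_safe_continue(dStream, cSrc, dst,
 *                                                    cSrcSize, dstCapacity);
 */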
2207 
2208 /*! LZ4_decoderRingBufferSize() :
2209  *  when setting a ring buffer for streaming decompression (optional scenario),
2210  *  provides the minimum size of this ring buffer
2211  *  to be compatible with any source respecting maxBlockSize condition.
2212  *  Note : in a ring buffer scenario,
2213  *  blocks are presumed decompressed next to each other.
2214  *  When not enough space remains for next block (remainingSize < maxBlockSize),
2215  *  decoding resumes from beginning of ring buffer.
2216  * @return : minimum ring buffer size,
2217  *           or 0 if there is an error (invalid maxBlockSize).
2218  */
2219 int LZ4_decoderRingBufferSize(int maxBlockSize)
2220 {
2221     if (maxBlockSize < 0) return 0;
2222     if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
2223     if (maxBlockSize < 16) maxBlockSize = 16;
2224     return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
2225 }
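/* Illustrative usage sketch (assuming a 4 KB maximum block size; names are
 * example-only): size a decode ring buffer so that wrap-around never clobbers
 * data a subsequent block may still reference.
 *
 *     int const ringSize = LZ4_decoderRingBufferSize(4096);
 *     char* const ring = (char*)malloc((size_t)ringSize);
 *     // decode blocks back-to-back into 'ring', wrapping to the start
 *     // whenever fewer than 4096 bytes remain before the end
 */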
2226 
2227 /*
2228 *_continue() :
2229     These decoding functions allow decompression of multiple blocks in "streaming" mode.
2230     Previously decoded blocks must still be available at the memory position where they were decoded.
2231     If that is not possible, save the relevant part of the decoded data into a safe buffer,
2232     and indicate where it is located using LZ4_setStreamDecode().
2233 */
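/* Illustrative usage sketch (example-only identifiers): streaming decompression
 * of consecutive blocks into one contiguous region, so previously decoded data
 * remains addressable and serves as the implicit dictionary for later blocks.
 *
 *     LZ4_streamDecode_t* const dStream = LZ4_createStreamDecode();
 *     char* op = decodedBuffer;
 *     for (size_t i = 0; i < nbBlocks; i++) {
 *         int const dSize = LZ4_decompress_safe_continue(
 *                 dStream, cBlock[i], op, cBlockSize[i],
 *                 (int)(decodedBufferEnd - op));
 *         if (dSize < 0) handle_error();
 *         op += dSize;
 *     }
 *     LZ4_freeStreamDecode(dStream);
 */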
2234 LZ4_FORCE_O2_GCC_PPC64LE
2235 int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
2236 {
2237     LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2238     int result;
2239 
2240     if (lz4sd->prefixSize == 0) {
2241         /* The first call, no dictionary yet. */
2242         assert(lz4sd->extDictSize == 0);
2243         result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2244         if (result <= 0) return result;
2245         lz4sd->prefixSize = (size_t)result;
2246         lz4sd->prefixEnd = (BYTE*)dest + result;
2247     } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2248         /* They're rolling the current segment. */
2249         if (lz4sd->prefixSize >= 64 KB - 1)
2250             result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
2251         else if (lz4sd->extDictSize == 0)
2252             result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
2253                                                          lz4sd->prefixSize);
2254         else
2255             result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
2256                                                     lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2257         if (result <= 0) return result;
2258         lz4sd->prefixSize += (size_t)result;
2259         lz4sd->prefixEnd  += result;
2260     } else {
2261         /* The buffer wraps around, or they're switching to another buffer. */
2262         lz4sd->extDictSize = lz4sd->prefixSize;
2263         lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2264         result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
2265                                                   lz4sd->externalDict, lz4sd->extDictSize);
2266         if (result <= 0) return result;
2267         lz4sd->prefixSize = (size_t)result;
2268         lz4sd->prefixEnd  = (BYTE*)dest + result;
2269     }
2270 
2271     return result;
2272 }
2273 
2274 LZ4_FORCE_O2_GCC_PPC64LE
2275 int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
2276 {
2277     LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2278     int result;
2279     assert(originalSize >= 0);
2280 
2281     if (lz4sd->prefixSize == 0) {
2282         assert(lz4sd->extDictSize == 0);
2283         result = LZ4_decompress_fast(source, dest, originalSize);
2284         if (result <= 0) return result;
2285         lz4sd->prefixSize = (size_t)originalSize;
2286         lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2287     } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2288         if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
2289             result = LZ4_decompress_fast(source, dest, originalSize);
2290         else
2291             result = LZ4_decompress_fast_doubleDict(source, dest, originalSize,
2292                                                     lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2293         if (result <= 0) return result;
2294         lz4sd->prefixSize += (size_t)originalSize;
2295         lz4sd->prefixEnd  += originalSize;
2296     } else {
2297         lz4sd->extDictSize = lz4sd->prefixSize;
2298         lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2299         result = LZ4_decompress_fast_extDict(source, dest, originalSize,
2300                                              lz4sd->externalDict, lz4sd->extDictSize);
2301         if (result <= 0) return result;
2302         lz4sd->prefixSize = (size_t)originalSize;
2303         lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
2304     }
2305 
2306     return result;
2307 }


/*
Advanced decoding functions :
*_usingDict() :
    These decoding functions work the same as the "_continue" ones,
    except that the dictionary must be provided explicitly as a parameter.
*/
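
/*
 * Illustrative usage sketch (editorial addition, not upstream code): decoding a
 * single block that was compressed against an external dictionary.  dictBuf,
 * dictLen and the src/dst buffers are placeholders supplied by the caller.
 *
 *     int const decSize = LZ4_decompress_safe_usingDict(src, dst,
 *                                                       srcSize, dstCapacity,
 *                                                       dictBuf, dictLen);
 *     if (decSize < 0) {
 *         // a negative result signals malformed compressed input
 *     }
 */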

int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    if (dictSize==0)
        return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
    if (dictStart+dictSize == dest) {
        if (dictSize >= 64 KB - 1) {
            return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
        }
        assert(dictSize >= 0);
        return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
    }
    assert(dictSize >= 0);
    return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
}

int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    if (dictSize==0 || dictStart+dictSize == dest)
        return LZ4_decompress_fast(source, dest, originalSize);
    assert(dictSize >= 0);
    return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
}


/*=*************************************************
*  Obsolete Functions
***************************************************/
/* obsolete compression functions */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
}
int LZ4_compress(const char* src, char* dest, int srcSize)
{
    return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
}
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
{
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
}
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
{
    return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
}
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
{
    return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
}
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
{
    return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
}

/*
These decompression functions are deprecated and should no longer be used.
They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
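
/*
 * Illustrative migration sketch (editorial addition, not upstream code): calls
 * to the obsolete entry points map one-to-one onto the current API, e.g.
 *
 *     n = LZ4_uncompress_unknownOutputSize(src, dst, srcSize, dstCapacity);
 *     // becomes
 *     n = LZ4_decompress_safe(src, dst, srcSize, dstCapacity);
 */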
int LZ4_uncompress (const char* source, char* dest, int outputSize)
{
    return LZ4_decompress_fast(source, dest, outputSize);
}
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
{
    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}

/* Obsolete Streaming functions */

int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }

int LZ4_resetStreamState(void* state, char* inputBuffer)
{
    (void)inputBuffer;
    LZ4_resetStream((LZ4_stream_t*)state);
    return 0;
}

void* LZ4_create (char* inputBuffer)
{
    (void)inputBuffer;
    return LZ4_createStream();
}

char* LZ4_slideInputBuffer (void* state)
{
    /* avoid const char * -> char * conversion warning */
    return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
}

#endif   /* LZ4_COMMONDEFS_ONLY */
#endif /* NEED_COMPAT_LZ4 */