1 /*
2    LZ4 - Fast LZ compression algorithm
3    Copyright (C) 2011-2017, Yann Collet.
4 
5    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 
7    Redistribution and use in source and binary forms, with or without
8    modification, are permitted provided that the following conditions are
9    met:
10 
11        * Redistributions of source code must retain the above copyright
12    notice, this list of conditions and the following disclaimer.
13        * Redistributions in binary form must reproduce the above
14    copyright notice, this list of conditions and the following disclaimer
15    in the documentation and/or other materials provided with the
16    distribution.
17 
18    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 
30    You can contact the author at :
31     - LZ4 homepage : http://www.lz4.org
32     - LZ4 source repository : https://github.com/lz4/lz4
33 
   Tweaked for proper compilation in Bacula by KES 27 Jan 2019
35 */
36 
37 
38 /*-************************************
39 *  Tuning parameters
40 **************************************/
41 /*
42  * LZ4_HEAPMODE :
43  * Select how default compression functions will allocate memory for their hash table,
44  * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
45  */
46 #ifndef LZ4_HEAPMODE
47 #  define LZ4_HEAPMODE 0
48 #endif
49 
50 /*
51  * ACCELERATION_DEFAULT :
52  * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
53  */
54 #define ACCELERATION_DEFAULT 1
55 
56 
57 /*-************************************
58 *  CPU Feature Detection
59 **************************************/
60 /* LZ4_FORCE_MEMORY_ACCESS
61  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
62  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
63  * The below switch allow to select different access method for improved performance.
64  * Method 0 (default) : use `memcpy()`. Safe and portable.
65  * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
66  *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
67  * Method 2 : direct access. This method is portable but violate C standard.
68  *            It can generate buggy code on targets which assembly generation depends on alignment.
69  *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
70  * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
71  * Prefer these methods in priority order (0 > 1 > 2)
72  */
73 #ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
74 #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
75 #    define LZ4_FORCE_MEMORY_ACCESS 2
76 #  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
77 #    define LZ4_FORCE_MEMORY_ACCESS 1
78 #  endif
79 #endif
80 
81 /*
82  * LZ4_FORCE_SW_BITCOUNT
83  * Define this parameter if your target system or compiler does not support hardware bit count
84  */
85 #if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
86 #  define LZ4_FORCE_SW_BITCOUNT
87 #endif
88 
89 
90 /*-************************************
91 *  Dependency
92 **************************************/
93 #include "lz4.h"
94 /* see also "memory routines" below */
95 
96 
97 /*-************************************
98 *  Compiler Options
99 **************************************/
100 #ifdef _MSC_VER    /* Visual Studio */
101 #  include <intrin.h>
102 #  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
103 #  pragma warning(disable : 4293)        /* disable: C4293: too large shift (32-bits) */
104 #endif  /* _MSC_VER */
105 
106 #ifndef LZ4_FORCE_INLINE
107 #  ifdef _MSC_VER    /* Visual Studio */
108 #    define LZ4_FORCE_INLINE static __forceinline
109 #  else
110 #    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
111 #      ifdef __GNUC__
112 #        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
113 #      else
114 #        define LZ4_FORCE_INLINE static inline
115 #      endif
116 #    else
117 #      define LZ4_FORCE_INLINE static
118 #    endif /* __STDC_VERSION__ */
119 #  endif  /* _MSC_VER */
120 #endif /* LZ4_FORCE_INLINE */
121 
122 #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
123 #  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
124 #else
125 #  define expect(expr,value)    (expr)
126 #endif
127 
128 #define likely(expr)     expect((expr) != 0, 1)
129 #define unlikely(expr)   expect((expr) != 0, 0)
130 
131 
132 /*-************************************
133 *  Memory routines
134 **************************************/
135 #include <stdlib.h>   /* malloc, calloc, free */
136 #define ALLOCATOR(n,s) calloc(n,s)
137 #define FREEMEM        free
138 #include <string.h>   /* memset, memcpy */
139 #define MEM_INIT       memset
140 
141 
142 /*-************************************
143 *  Basic Types
144 **************************************/
145 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
146 # include <stdint.h>
147   typedef  uint8_t BYTE;
148   typedef uint16_t U16;
149   typedef uint32_t U32;
150   typedef  int32_t S32;
151   typedef uint64_t U64;
152   typedef uintptr_t uptrval;
153 #else
154   typedef unsigned char       BYTE;
155   typedef unsigned short      U16;
156   typedef unsigned int        U32;
157   typedef   signed int        S32;
158   typedef unsigned long long  U64;
159   typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
160 #endif
161 
162 #if defined(__x86_64__)
163   typedef U64    reg_t;   /* 64-bits in x32 mode */
164 #else
165   typedef size_t reg_t;   /* 32-bits in x32 mode */
166 #endif
167 
168 /*-************************************
169 *  Reading and writing into memory
170 **************************************/
LZ4_isLittleEndian(void)171 static unsigned LZ4_isLittleEndian(void)
172 {
173     const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
174     return one.c[0];
175 }
176 
177 
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* Method 2 : direct cast-and-dereference.
 * lie to the compiler about data alignment; use with caution
 * NOTE(review): this violates strict aliasing and may fault on targets that
 * require aligned accesses — only enabled for architectures known to cope. */

static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }

static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)

/* Method 1 : access through a packed union, so the compiler emits
 * unaligned-safe loads/stores itself. */
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;

static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }

static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }

#else  /* safe and portable access through memcpy() */

/* Method 0 (default) : memcpy() into/out of a properly-typed local.
 * Well-defined for any alignment; modern compilers lower it to a single
 * load or store when the target allows it. */

static U16 LZ4_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr)
{
    reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */
229 
230 
LZ4_readLE16(const void * memPtr)231 static U16 LZ4_readLE16(const void* memPtr)
232 {
233     if (LZ4_isLittleEndian()) {
234         return LZ4_read16(memPtr);
235     } else {
236         const BYTE* p = (const BYTE*)memPtr;
237         return (U16)((U16)p[0] + (p[1]<<8));
238     }
239 }
240 
/* Store a 16-bit value in little-endian byte order,
 * independently of the host's endianness. */
static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (!LZ4_isLittleEndian()) {
        BYTE* const b = (BYTE*)memPtr;
        b[0] = (BYTE)(value & 0xFF);
        b[1] = (BYTE)(value >> 8);
    } else {
        LZ4_write16(memPtr, value);
    }
}
251 
/* Copy exactly 8 bytes; regions must not overlap (memcpy contract). */
static void LZ4_copy8(void* dstPtr, const void* srcPtr)
{
    memcpy(dstPtr, srcPtr, 8);
}
256 
257 /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
LZ4_wildCopy(void * dstPtr,const void * srcPtr,void * dstEnd)258 static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
259 {
260     BYTE* d = (BYTE*)dstPtr;
261     const BYTE* s = (const BYTE*)srcPtr;
262     BYTE* const e = (BYTE*)dstEnd;
263 
264     do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
265 }
266 
267 
268 /*-************************************
269 *  Common Constants
270 **************************************/
271 #define MINMATCH 4
272 
273 #define WILDCOPYLENGTH 8
274 #define LASTLITERALS 5
275 #define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
276 static const int LZ4_minLength = (MFLIMIT+1);
277 
278 #define KB *(1 <<10)
279 #define MB *(1 <<20)
280 #define GB *(1U<<30)
281 
282 #define MAXD_LOG 16
283 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
284 
285 #define ML_BITS  4
286 #define ML_MASK  ((1U<<ML_BITS)-1)
287 #define RUN_BITS (8-ML_BITS)
288 #define RUN_MASK ((1U<<RUN_BITS)-1)
289 
290 
291 /*-************************************
292 *  Error detection
293 **************************************/
294 #define LZ4_STATIC_ASSERT(c)    { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
295 
296 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
297 #  include <stdio.h>
298 #  define DEBUGLOG(l, ...) {                          \
299                 if (l<=LZ4_DEBUG) {                   \
300                     fprintf(stderr, __FILE__ ": ");   \
301                     fprintf(stderr, __VA_ARGS__);     \
302                     fprintf(stderr, " \n");           \
303             }   }
304 #else
305 #  define DEBUGLOG(l, ...)      {}    /* disabled */
306 #endif
307 
308 
309 /*-************************************
310 *  Common functions
311 **************************************/
/* LZ4_NbCommonBytes() :
 * Given the non-zero XOR of two machine words, return the number of bytes
 * (in memory order) that are identical, i.e. the index of the first byte
 * that differs.  Uses a hardware bit-scan intrinsic where available;
 * falls back to a De Bruijn multiply (little-endian) or shift cascade
 * (big-endian) when LZ4_FORCE_SW_BITCOUNT is defined.
 * Precondition: val != 0 (ctz/bsf on 0 is undefined). */
static unsigned LZ4_NbCommonBytes (reg_t val)
{
    if (LZ4_isLittleEndian()) {
        if (sizeof(val)==8) {
#       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (int)(r>>3);
#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else /* 32 bits */ {
#       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r;
            _BitScanForward( &r, (U32)val );
            return (int)(r>>3);
#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else   /* Big Endian CPU */ {
        if (sizeof(val)==8) {
#       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_clzll((U64)val) >> 3);
#       else
            /* software fallback : locate the most-significant non-zero byte */
            unsigned r;
            if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else /* 32 bits */ {
#       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
        }
    }
}
369 
#define STEPSIZE sizeof(reg_t)
/* LZ4_count() :
 * Return the length of the run of identical bytes at pIn vs pMatch,
 * never reading at or beyond pInLimit.  Compares a machine word at a
 * time, then resolves the first difference via LZ4_NbCommonBytes();
 * finishes with 4/2/1-byte tail comparisons near the limit. */
static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    while (likely(pIn<pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);   /* index of first differing byte within the word */
        return (unsigned)(pIn - pStart);
    }

    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}
387 
388 
389 #ifndef LZ4_COMMONDEFS_ONLY
390 /*-************************************
391 *  Local Constants
392 **************************************/
393 static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
394 static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression run slower on incompressible data */
395 
396 
397 /*-************************************
398 *  Local Structures and types
399 **************************************/
typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;   /* bound-check every output write ? */
typedef enum { byPtr, byU32, byU16 } tableType_t;    /* hash-table cell stores a raw pointer, 32-bit or 16-bit offset */

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;   /* where back-references may point */
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;           /* dictSmall : extra lowRefLimit check needed */

/* NOTE(review): the two directives below appear to serve the decoder, which is outside this chunk — confirm there */
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;
408 
409 
410 /*-************************************
411 *  Local Utils
412 **************************************/
/* Library version as a single integer (value of LZ4_VERSION_NUMBER in lz4.h). */
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
/* Library version as a display string (value of LZ4_VERSION_STRING in lz4.h). */
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
/* Worst-case compressed size for an input of isize bytes (see LZ4_COMPRESSBOUND). */
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
LZ4_sizeofState()416 int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
417 
418 
419 /*-******************************
420 *  Compression functions
421 ********************************/
/* Fibonacci-style multiplicative hash of a 4-byte sequence.
 * byU16 tables are twice as large, so they keep one extra hash bit. */
static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    const U32 shift = (tableType == byU16) ? ((MINMATCH*8)-(LZ4_HASHLOG+1))
                                           : ((MINMATCH*8)-LZ4_HASHLOG);
    return (sequence * 2654435761U) >> shift;
}
429 
LZ4_hash5(U64 sequence,tableType_t const tableType)430 static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
431 {
432     static const U64 prime5bytes = 889523592379ULL;
433     static const U64 prime8bytes = 11400714785074694791ULL;
434     const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
435     if (LZ4_isLittleEndian())
436         return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
437     else
438         return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
439 }
440 
/* Hash the sequence beginning at p : 5-byte hash on 64-bit targets
 * (except for byU16 tables), 4-byte hash otherwise. */
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
    return LZ4_hash4(LZ4_read32(p), tableType);
}
446 
/* Store position p into hash-table slot h, encoded according to
 * tableType : raw pointer, 32-bit offset, or 16-bit offset from srcBase. */
static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) {
        const BYTE** const table = (const BYTE**)tableBase;
        table[h] = p;
    } else if (tableType == byU32) {
        U32* const table = (U32*)tableBase;
        table[h] = (U32)(p - srcBase);
    } else {   /* byU16 */
        U16* const table = (U16*)tableBase;
        table[h] = (U16)(p - srcBase);
    }
}
456 
LZ4_putPosition(const BYTE * p,void * tableBase,tableType_t tableType,const BYTE * srcBase)457 LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
458 {
459     U32 const h = LZ4_hashPosition(p, tableType);
460     LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
461 }
462 
/* Retrieve the position stored in hash-table slot h, decoding the
 * tableType-specific representation back into an absolute pointer. */
static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    switch (tableType) {
    case byPtr: { const BYTE** const table = (const BYTE**)tableBase; return table[h]; }
    case byU32: { const U32* const table = (const U32*)tableBase; return table[h] + srcBase; }
    default:    { const U16* const table = (const U16*)tableBase; return table[h] + srcBase; }   /* byU16 */
    }
}
469 
LZ4_getPosition(const BYTE * p,void * tableBase,tableType_t tableType,const BYTE * srcBase)470 LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
471 {
472     U32 const h = LZ4_hashPosition(p, tableType);
473     return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
474 }
475 
476 
477 /** LZ4_compress_generic() :
478     inlined, to ensure branches are decided at compilation time */
/* LZ4_compress_generic() :
 * Core compression loop.  Force-inlined so the directive parameters
 * (outputLimited, tableType, dict, dictIssue) become compile-time
 * constants in each instantiation and their branches are resolved at
 * compilation time.
 * Returns the number of bytes written to dest, or 0 on error
 * (input too large, or output buffer too small when outputLimited). */
LZ4_FORCE_INLINE int LZ4_compress_generic(
                 LZ4_stream_t_internal* const cctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 const int maxOutputSize,
                 const limitedOutput_directive outputLimited,
                 const tableType_t tableType,
                 const dict_directive dict,
                 const dictIssue_directive dictIssue,
                 const U32 acceleration)
{
    const BYTE* ip = (const BYTE*) source;
    const BYTE* base;
    const BYTE* lowLimit;
    const BYTE* const lowRefLimit = ip - cctx->dictSize;
    const BYTE* const dictionary = cctx->dictionary;
    const BYTE* const dictEnd = dictionary + cctx->dictSize;
    const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
    const BYTE* anchor = (const BYTE*) source;   /* start of the pending literal run */
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;          /* last position a match may start */
    const BYTE* const matchlimit = iend - LASTLITERALS;  /* matches must stop before the last literals */

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 forwardH;

    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
    switch(dict)
    {
    case noDict:
    default:
        base = (const BYTE*)source;
        lowLimit = (const BYTE*)source;
        break;
    case withPrefix64k:
        base = (const BYTE*)source - cctx->currentOffset;
        lowLimit = (const BYTE*)source - cctx->dictSize;
        break;
    case usingExtDict:
        base = (const BYTE*)source - cctx->currentOffset;
        lowLimit = (const BYTE*)source;
        break;
    }
    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (inputSize<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        ptrdiff_t refDelta = 0;   /* offset to add when the match lies in the external dictionary */
        const BYTE* match;
        BYTE* token;

        /* Find a match */
        {   const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                /* step grows as attempts fail : skip faster through incompressible regions */
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
                if (dict==usingExtDict) {
                    if (match < (const BYTE*)source) {
                        /* candidate lies inside the external dictionary */
                        refDelta = dictDelta;
                        lowLimit = dictionary;
                    } else {
                        refDelta = 0;
                        lowLimit = (const BYTE*)source;
                }   }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

            } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
                || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
        }

        /* Catch up : extend the match backwards over identical preceding bytes */
        while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }

        /* Encode Literals */
        {   unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) &&  /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
                return 0;
            if (litLength >= RUN_MASK) {
                /* literal length >= 15 : token nibble saturates, remainder in 255-byte steps */
                int len = (int)litLength-RUN_MASK;
                *token = (RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op+=litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {   unsigned matchCode;

            if ((dict==usingExtDict) && (lowLimit==dictionary)) {
                /* match starts in the external dictionary and may continue into source */
                const BYTE* limit;
                match += refDelta;
                limit = ip + (dictEnd-match);
                if (limit > matchlimit) limit = matchlimit;
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += MINMATCH + matchCode;
                if (ip==limit) {
                    /* match crossed the dictionary boundary : keep counting from source start */
                    unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
            } else {
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += MINMATCH + matchCode;
            }

            if ( outputLimited &&    /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
                return 0;
            if (matchCode >= ML_MASK) {
                /* match length >= 19 : token nibble saturates, remainder in 255-byte steps */
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255;
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            } else
                *token += (BYTE)(matchCode);
        }

        anchor = ip;

        /* Test end of chunk */
        if (ip > mflimit) break;

        /* Fill table */
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

        /* Test next position : immediately chain another match if possible */
        match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
        if (dict==usingExtDict) {
            if (match < (const BYTE*)source) {
                refDelta = dictDelta;
                lowLimit = dictionary;
            } else {
                refDelta = 0;
                lowLimit = (const BYTE*)source;
        }   }
        LZ4_putPosition(ip, cctx->hashTable, tableType, base);
        if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
            && (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }   /* zero-literal token, straight to offset */

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals : everything from anchor to end is emitted verbatim */
    {   size_t const lastRun = (size_t)(iend - anchor);
        if ( (outputLimited) &&  /* Check output buffer overflow */
            ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
            return 0;
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRun<<ML_BITS);
        }
        memcpy(op, anchor, lastRun);
        op += lastRun;
    }

    /* End */
    return (int) (((char*)op)-dest);
}
677 
678 
/* LZ4_compress_fast_extState() :
 * Compress using caller-provided state memory (must hold an LZ4_stream_t).
 * Resets the state, then dispatches to the LZ4_compress_generic()
 * instantiation matching the input size (byU16 under 64K) and pointer
 * width.  When the output buffer is provably large enough, the cheaper
 * notLimited variant (no per-write bound checks) is selected. */
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
    LZ4_resetStream((LZ4_stream_t*)state);
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        if (inputSize < LZ4_64Klimit)
            return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited,                        byU16, noDict, noDictIssue, acceleration);
        else
            return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
    } else {
        if (inputSize < LZ4_64Klimit)
            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput,                        byU16, noDict, noDictIssue, acceleration);
        else
            return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
    }
}
697 
698 
LZ4_compress_fast(const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)699 int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
700 {
701 #if (LZ4_HEAPMODE)
702     void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
703 #else
704     LZ4_stream_t ctx;
705     void* const ctxPtr = &ctx;
706 #endif
707 
708     int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
709 
710 #if (LZ4_HEAPMODE)
711     FREEMEM(ctxPtr);
712 #endif
713     return result;
714 }
715 
716 
/* LZ4_compress_default() :
 * Standard entry point : LZ4_compress_fast() with acceleration 1. */
int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
}
721 
722 
723 /* hidden debug function */
724 /* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
LZ4_compress_fast_force(const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)725 int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
726 {
727     LZ4_stream_t ctx;
728     LZ4_resetStream(&ctx);
729 
730     if (inputSize < LZ4_64Klimit)
731         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16,                        noDict, noDictIssue, acceleration);
732     else
733         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
734 }
735 
736 
737 /*-******************************
738 *  *_destSize() variant
739 ********************************/
740 
/*! LZ4_compress_destSize_generic() :
 *  Compresses as much of 'src' as possible so that the compressed output fits
 *  into at most 'targetDstSize' bytes of 'dst'.
 *  *srcSizePtr : on input, number of bytes available at 'src' ;
 *                on output, number of source bytes actually consumed.
 * @return : number of bytes written into 'dst' (<= targetDstSize), or 0 on error.
 */
static int LZ4_compress_destSize_generic(
                       LZ4_stream_t_internal* const ctx,
                 const char* const src,
                       char* const dst,
                       int*  const srcSizePtr,
                 const int targetDstSize,
                 const tableType_t tableType)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* base = (const BYTE*) src;
    const BYTE* lowLimit = (const BYTE*) src;
    const BYTE* anchor = ip;                       /* start of the pending literal run */
    const BYTE* const iend = ip + *srcSizePtr;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dst;
    BYTE* const oend = op + targetDstSize;
    /* output positions beyond which a literal run / match / whole sequence can no longer fit */
    BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
    BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
    BYTE* const oMaxSeq = oMaxLit - 1 /* token */;

    U32 forwardH;


    /* Init conditions */
    if (targetDstSize < 1) return 0;                                     /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;            /* Unsupported input size, too large (or negative) */
    if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (*srcSizePtr<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

    /* First Byte */
    *srcSizePtr = 0;
    LZ4_putPosition(ip, ctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;

        /* Find a match */
        {   const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = 1 << LZ4_skipTrigger;

            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                /* step grows as misses accumulate : skip faster over incompressible data */
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);

            /* byU16 tables never exceed the 64K window, so the distance check is skipped */
            } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match) != LZ4_read32(ip)) );
        }

        /* Catch up : extend the match backwards over equal preceding bytes */
        while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

        /* Encode Literal length */
        {   unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if (op + ((litLength+240)/255) + litLength > oMaxLit) {
                /* Not enough space for a last match */
                op--;
                goto _last_literals;
            }
            if (litLength>=RUN_MASK) {
                unsigned len = litLength - RUN_MASK;
                *token=(RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op += litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {   size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);

            if (op + ((matchLength+240)/255) > oMaxMatch) {
                /* Match description too long : reduce it */
                matchLength = (15-1) + (oMaxMatch-op) * 255;
            }
            ip += MINMATCH + matchLength;

            if (matchLength>=ML_MASK) {
                *token += ML_MASK;
                matchLength -= ML_MASK;
                while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of block */
        if (ip > mflimit) break;
        if (op > oMaxSeq) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);

        /* Test next position : immediately chain another match if possible */
        match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
        LZ4_putPosition(ip, ctx->hashTable, tableType, base);
        if ( (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);
        if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize  = (oend-op) - 1;
            lastRunSize -= (lastRunSize+240)/255;
        }
        ip = anchor + lastRunSize;

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize<<ML_BITS);
        }
        memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int) (((const char*)ip)-src);
    return (int) (((char*)op)-dst);
}
895 
896 
/*  LZ4_compress_destSize_extState() :
 *  Same as LZ4_compress_destSize(), using a caller-provided state. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    LZ4_resetStream(state);

    /* If the budget covers the worst case, plain compression cannot overflow :
     * take the regular (faster) path. */
    if (targetDstSize >= LZ4_compressBound(*srcSizePtr))
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);

    /* Otherwise use the destSize-aware variant, with a table width suited to the input size */
    {   tableType_t const tableType = (*srcSizePtr < LZ4_64Klimit) ? byU16
                                    : ((sizeof(void*)==8) ? byU32 : byPtr);
        return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, tableType);
    }
}
910 
911 
LZ4_compress_destSize(const char * src,char * dst,int * srcSizePtr,int targetDstSize)912 int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
913 {
914 #if (LZ4_HEAPMODE)
915     LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
916 #else
917     LZ4_stream_t ctxBody;
918     LZ4_stream_t* ctx = &ctxBody;
919 #endif
920 
921     int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
922 
923 #if (LZ4_HEAPMODE)
924     FREEMEM(ctx);
925 #endif
926     return result;
927 }
928 
929 
930 
931 /*-******************************
932 *  Streaming functions
933 ********************************/
934 
LZ4_createStream(void)935 LZ4_stream_t* LZ4_createStream(void)
936 {
937     LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
938     LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));    /* A compilation error here means LZ4_STREAMSIZE is not large enough */
939     LZ4_resetStream(lz4s);
940     return lz4s;
941 }
942 
/*! LZ4_resetStream() :
 *  Re-initializes a stream state : zeroes the whole structure, which clears
 *  the hash table, offsets, dictionary pointers and the initCheck flag. */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}
947 
/*! LZ4_freeStream() :
 *  Releases a stream created by LZ4_createStream(). NULL is accepted,
 *  mirroring free(NULL) semantics. Always returns 0. */
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    if (LZ4_stream == NULL) return 0;   /* support free on NULL */
    FREEMEM(LZ4_stream);
    return 0;
}
954 
955 
956 #define HASH_UNIT sizeof(reg_t)
/*! LZ4_loadDict() :
 *  Indexes up to the last 64 KB of 'dictionary' into the stream's hash table,
 *  so that subsequent LZ4_compress_fast_continue() calls can reference it.
 * @return : number of dictionary bytes retained (<= 64 KB), 0 if too small.
 */
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    if ((dict->initCheck) || (dict->currentOffset > 1 GB))  /* Uninitialized structure, or reuse overflow */
        LZ4_resetStream(LZ4_dict);

    /* Too small to produce a single hash entry : behave as "no dictionary" */
    if (dictSize < (int)HASH_UNIT) {
        dict->dictionary = NULL;
        dict->dictSize = 0;
        return 0;
    }

    /* Matches can never reach further than 64 KB back : only the tail is useful */
    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    /* Shift currentOffset so new indices cannot collide with previous content */
    dict->currentOffset += 64 KB;
    base = p - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->currentOffset += dict->dictSize;

    /* Index one position every 3 bytes (speed / coverage trade-off) */
    while (p <= dictEnd-HASH_UNIT) {
        LZ4_putPosition(p, dict->hashTable, byU32, base);
        p+=3;
    }

    return dict->dictSize;
}
987 
988 
/*  LZ4_renormDictT() :
 *  Keeps hash-table indices valid when currentOffset grows too large, or when
 *  the next input buffer ('src') sits at a lower address than the accumulated
 *  offset. Rescales all entries so that currentOffset restarts at 64 KB. */
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
{
    if ((LZ4_dict->currentOffset > 0x80000000) ||
        ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {   /* address space overflow */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
            /* entries older than the new 64 KB window become unreachable : drop them */
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}
1006 
1007 
/*! LZ4_compress_fast_continue() :
 *  Compresses 'source' using the history held in 'LZ4_stream'
 *  (previously compressed blocks and/or a dictionary set by LZ4_loadDict()).
 * @return : compressed size, or 0 on failure (including uninitialized stream).
 */
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = (const BYTE*) source;
    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
    /* guard against index overflow before compressing */
    LZ4_renormDictT(streamPtr, smallest);
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    /* Check overlapping input/dictionary space */
    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
            /* input overlaps the dictionary : shrink the dictionary to the part that survives */
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source) {
        int result;
        /* dictSmall variant applies extra checks when the usable history is short */
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
        else
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
        streamPtr->dictSize += (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }

    /* external dictionary mode */
    {   int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
        else
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
        /* the block just compressed becomes the new dictionary for the next call */
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }
}
1053 
1054 
1055 /* Hidden debug function, to force external dictionary mode */
/*  LZ4_compress_forceExtDict() :
 *  Debug variant of LZ4_compress_fast_continue() that always takes the
 *  external-dictionary path, regardless of buffer adjacency. */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
    int result;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = dictEnd;
    if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
    /* guard against index overflow before compressing */
    LZ4_renormDictT(streamPtr, smallest);

    result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);

    /* the block just compressed becomes the new dictionary for the next call */
    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)inputSize;
    streamPtr->currentOffset += (U32)inputSize;

    return result;
}
1074 
1075 
1076 /*! LZ4_saveDict() :
1077  *  If previously compressed data block is not guaranteed to remain available at its memory location,
1078  *  save it into a safer place (char* safeBuffer).
1079  *  Note : you don't need to call LZ4_loadDict() afterwards,
1080  *         dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
1081  *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
1082  */
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
    const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;

    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;   /* cannot save more than is tracked */

    /* memmove : safeBuffer may overlap the current dictionary location */
    memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

    /* future matches will reference the relocated copy */
    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return dictSize;
}
1098 
1099 
1100 
1101 /*-*****************************
1102 *  Decompression functions
1103 *******************************/
1104 /*! LZ4_decompress_generic() :
1105  *  This generic decompression function cover all use cases.
1106  *  It shall be instantiated several times, using different sets of directives
1107  *  Note that it is important this generic function is really inlined,
1108  *  in order to remove useless branches during compilation optimization.
1109  */
/*! LZ4_decompress_generic() :
 *  Single decoder covering all modes (safe/fast, full/partial, dictionary
 *  variants); the mode flags are compile-time constants at each call site,
 *  so forced inlining prunes the dead branches.
 * @return : number of bytes decoded (endOnInput) or read (otherwise) ;
 *           a negative value on error, encoding the failing input position.
 */
LZ4_FORCE_INLINE int LZ4_decompress_generic(
                 const char* const source,
                 char* const dest,
                 int inputSize,
                 int outputSize,         /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */

                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
                 int partialDecoding,    /* full, partial */
                 int targetOutputSize,   /* only used if partialDecoding==partial */
                 int dict,               /* noDict, withPrefix64k, usingExtDict */
                 const BYTE* const lowPrefix,  /* == dest when no prefix */
                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
                 const size_t dictSize         /* note : = 0 if noDict */
                 )
{
    /* Local Variables */
    const BYTE* ip = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;
    const BYTE* const lowLimit = lowPrefix - dictSize;

    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
    /* copy-helper tables for overlapping matches (offset < 8) */
    const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};
    const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};

    const int safeDecode = (endOnInput==endOnInputSize);
    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));


    /* Special cases */
    if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;                        /* targetOutputSize too high => decode everything */
    if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);

    /* Main Loop : decode sequences */
    while (1) {
        size_t length;
        const BYTE* match;
        size_t offset;

        /* get literal length */
        unsigned const token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK) {
            /* length field saturated : keep adding extra bytes (255 means "more follows") */
            unsigned s;
            do {
                s = *ip++;
                length += s;
            } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error;   /* overflow detection */
            if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error;   /* overflow detection */
        }

        /* copy literals */
        cpy = op+length;
        if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
            || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
        {
            /* near the end of a buffer : fall back to an exact, fully-checked copy */
            if (partialDecoding) {
                if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
                if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
            } else {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;       /* Error : block decoding must stop exactly there */
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;     /* Necessarily EOF, due to parsing restrictions */
        }
        LZ4_wildCopy(op, ip, cpy);   /* fast path : may write a few bytes past cpy, guarded above */
        ip += length; op = cpy;

        /* get offset */
        offset = LZ4_readLE16(ip); ip+=2;
        match = op - offset;
        if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error;   /* Error : offset outside buffers */
        LZ4_write32(op, (U32)offset);   /* costs ~1%; silence an msan warning when offset==0 */

        /* get matchlength */
        length = token & ML_MASK;
        if (length == ML_MASK) {
            unsigned s;
            do {
                s = *ip++;
                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
                length += s;
            } while (s==255);
            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
        }
        length += MINMATCH;

        /* check external dictionary */
        if ((dict==usingExtDict) && (match < lowPrefix)) {
            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */

            if (length <= (size_t)(lowPrefix-match)) {
                /* match can be copied as a single segment from external dictionary */
                memmove(op, dictEnd - (lowPrefix-match), length);
                op += length;
            } else {
                /* match encompass external dictionary and current block */
                size_t const copySize = (size_t)(lowPrefix-match);
                size_t const restSize = length - copySize;
                memcpy(op, dictEnd - copySize, copySize);
                op += copySize;
                if (restSize > (size_t)(op-lowPrefix)) {  /* overlap copy */
                    BYTE* const endOfMatch = op + restSize;
                    const BYTE* copyFrom = lowPrefix;
                    while (op < endOfMatch) *op++ = *copyFrom++;
                } else {
                    memcpy(op, lowPrefix, restSize);
                    op += restSize;
            }   }
            continue;
        }

        /* copy match within block */
        cpy = op + length;
        if (unlikely(offset<8)) {
            /* overlapping match : copy the first bytes one at a time, then realign */
            const int dec64 = dec64table[offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[offset];
            memcpy(op+4, match, 4);
            match -= dec64;
        } else { LZ4_copy8(op, match); match+=8; }
        op += 8;

        if (unlikely(cpy>oend-12)) {
            /* close to the end of the output buffer : finish with bounded copies */
            BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
            if (cpy > oend-LASTLITERALS) goto _output_error;    /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
            if (op < oCopyLimit) {
                LZ4_wildCopy(op, match, oCopyLimit);
                match += oCopyLimit - op;
                op = oCopyLimit;
            }
            while (op<cpy) *op++ = *match++;
        } else {
            LZ4_copy8(op, match);
            if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
        }
        op=cpy;   /* correction */
    }

    /* end of decoding */
    if (endOnInput)
       return (int) (((char*)op)-dest);     /* Nb of output bytes decoded */
    else
       return (int) (((const char*)ip)-source);   /* Nb of input bytes read */

    /* Overflow error detected */
_output_error:
    return (int) (-(((const char*)ip)-source))-1;
}
1270 
1271 
LZ4_decompress_safe(const char * source,char * dest,int compressedSize,int maxDecompressedSize)1272 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
1273 {
1274     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
1275 }
1276 
LZ4_decompress_safe_partial(const char * source,char * dest,int compressedSize,int targetOutputSize,int maxDecompressedSize)1277 int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
1278 {
1279     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
1280 }
1281 
LZ4_decompress_fast(const char * source,char * dest,int originalSize)1282 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
1283 {
1284     return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
1285 }
1286 
1287 
1288 /*===== streaming decompression functions =====*/
1289 
LZ4_createStreamDecode(void)1290 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
1291 {
1292     LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
1293     return lz4s;
1294 }
1295 
/*! LZ4_freeStreamDecode() :
 *  Releases a stream created by LZ4_createStreamDecode(). NULL is accepted.
 *  Always returns 0. */
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
    if (LZ4_stream == NULL) return 0;   /* support free on NULL */
    FREEMEM(LZ4_stream);
    return 0;
}
1302 
1303 /*!
1304  * LZ4_setStreamDecode() :
1305  * Use this function to instruct where to find the dictionary.
1306  * This function is not necessary if previous data is still available where it was decoded.
1307  * Loading a size of 0 is allowed (same effect as no dictionary).
1308  * Return : 1 if OK, 0 if error
1309  */
/*!
 * LZ4_setStreamDecode() :
 * Declares where the dictionary (previously decoded data) can be found.
 * Not needed when previous data is still at the position it was decoded to.
 * A dictSize of 0 is allowed (same effect as no dictionary).
 * @return : 1 (always OK).
 */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
    LZ4_streamDecode_t_internal* const sd = &LZ4_streamDecode->internal_donotuse;
    /* no external segment yet : everything is presented as the prefix */
    sd->externalDict = NULL;
    sd->extDictSize  = 0;
    sd->prefixSize = (size_t) dictSize;
    sd->prefixEnd = (const BYTE*) dictionary + dictSize;
    return 1;
}
1319 
1320 /*
1321 *_continue() :
1322     These decoding functions allow decompression of multiple blocks in "streaming" mode.
1323     Previously decoded blocks must still be available at the memory position where they were decoded.
1324     If it's not possible, save the relevant part of decoded data into a safe buffer,
1325     and indicate where it stands using LZ4_setStreamDecode()
1326 */
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        /* output is contiguous with previous block : previous data acts as a prefix */
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += result;
        lz4sd->prefixEnd  += result;
    } else {
        /* output moved : previous data becomes the external dictionary */
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }

    return result;
}
1352 
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        /* output is contiguous with previous block : previous data acts as a prefix */
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        /* fast mode decodes exactly originalSize bytes, so advance by that amount */
        lz4sd->prefixSize += originalSize;
        lz4sd->prefixEnd  += originalSize;
    } else {
        /* output moved : previous data becomes the external dictionary */
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = originalSize;
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
    }

    return result;
}
1378 
1379 
1380 /*
1381 Advanced decoding functions :
1382 *_usingDict() :
1383     These decoding functions work the same as "_continue" ones,
1384     the dictionary must be explicitly provided within parameters
1385 */
1386 
/*  LZ4_decompress_usingDict_generic() :
 *  Dispatches a usingDict decode to the cheapest applicable mode :
 *  no dict, 64 KB prefix, short prefix, or true external dictionary. */
LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
{
    if (dictSize==0)
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
    if (dictStart+dictSize == dest) {
        /* dictionary is contiguous with output : prefix modes apply */
        if (dictSize >= (int)(64 KB - 1))
            return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
    }
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}
1398 
/* Safe (bounds-checked) decompression with an explicit dictionary. */
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize,
                                            1 /* safe */, dictStart, dictSize);
}
1403 
/* Fast (trusted-input) decompression with an explicit dictionary;
 * originalSize is the exact decompressed size. */
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize,
                                            0 /* fast */, dictStart, dictSize);
}
1408 
1409 /* debug function */
LZ4_decompress_safe_forceExtDict(const char * source,char * dest,int compressedSize,int maxOutputSize,const char * dictStart,int dictSize)1410 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
1411 {
1412     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
1413 }
1414 
1415 
1416 /*=*************************************************
1417 *  Obsolete Functions
1418 ***************************************************/
1419 /* obsolete compression functions */
/* Obsolete name, kept for backward compatibility : forwards to LZ4_compress_default(). */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
}
/* Obsolete : compresses assuming dest can hold LZ4_compressBound(inputSize) bytes. */
int LZ4_compress(const char* source, char* dest, int inputSize)
{
    int const worstCaseSize = LZ4_compressBound(inputSize);
    return LZ4_compress_default(source, dest, inputSize, worstCaseSize);
}
/* Obsolete : same as LZ4_compress_fast_extState() at default acceleration (1). */
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
{
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
}
/* Obsolete : external-state compression, assuming a worst-case-sized destination. */
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
{
    int const dstCapacity = LZ4_compressBound(srcSize);
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstCapacity, 1);
}
/* Obsolete : streaming compression, forwards to LZ4_compress_fast_continue() at acceleration 1. */
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize)
{
    return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1);
}
/* Obsolete : streaming compression with a worst-case-sized destination buffer. */
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
{
    int const dstCapacity = LZ4_compressBound(inputSize);
    return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, dstCapacity, 1);
}
1426 
1427 /*
1428 These function names are deprecated and should no longer be used.
1429 They are only provided here for compatibility with older user programs.
1430 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
1431 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
1432 */
/* Deprecated name : identical to LZ4_decompress_fast(). */
int LZ4_uncompress (const char* source, char* dest, int outputSize)
{
    return LZ4_decompress_fast(source, dest, outputSize);
}
/* Deprecated name : identical to LZ4_decompress_safe(). */
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
{
    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}
1435 
1436 
1437 /* Obsolete Streaming functions */
1438 
LZ4_sizeofStreamState()1439 int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
1440 
/* (Re)initialize a streaming state : zero the whole structure,
 * then record the origin of the caller's input buffer. */
static void LZ4_init(LZ4_stream_t* state, BYTE* bufStart)
{
    MEM_INIT(state, 0, sizeof(LZ4_stream_t));
    state->internal_donotuse.bufferStart = bufStart;
}
1446 
LZ4_resetStreamState(void * state,char * inputBuffer)1447 int LZ4_resetStreamState(void* state, char* inputBuffer)
1448 {
1449     if ((((uptrval)state) & 3) != 0) return 1;   /* Error : pointer is not aligned on 4-bytes boundary */
1450     LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
1451     return 0;
1452 }
1453 
LZ4_create(char * inputBuffer)1454 void* LZ4_create (char* inputBuffer)
1455 {
1456     LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
1457     LZ4_init (lz4ds, (BYTE*)inputBuffer);
1458     return lz4ds;
1459 }
1460 
LZ4_slideInputBuffer(void * LZ4_Data)1461 char* LZ4_slideInputBuffer (void* LZ4_Data)
1462 {
1463     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
1464     int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
1465     return (char*)(ctx->bufferStart + dictSize);
1466 }
1467 
1468 /* Obsolete streaming decompression functions */
1469 
LZ4_decompress_safe_withPrefix64k(const char * source,char * dest,int compressedSize,int maxOutputSize)1470 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
1471 {
1472     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1473 }
1474 
LZ4_decompress_fast_withPrefix64k(const char * source,char * dest,int originalSize)1475 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
1476 {
1477     return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1478 }
1479 
1480 #endif   /* LZ4_COMMONDEFS_ONLY */
1481