1 #include "svn_private_config.h"
2 #ifdef SVN_INTERNAL_LZ4
3 /*
4    LZ4 - Fast LZ compression algorithm
5    Copyright (C) 2011-2016, Yann Collet.
6 
7    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
8 
9    Redistribution and use in source and binary forms, with or without
10    modification, are permitted provided that the following conditions are
11    met:
12 
13        * Redistributions of source code must retain the above copyright
14    notice, this list of conditions and the following disclaimer.
15        * Redistributions in binary form must reproduce the above
16    copyright notice, this list of conditions and the following disclaimer
17    in the documentation and/or other materials provided with the
18    distribution.
19 
20    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 
32    You can contact the author at :
33     - LZ4 homepage : http://www.lz4.org
34     - LZ4 source repository : https://github.com/lz4/lz4
35 */
36 
37 
38 /*-************************************
39 *  Tuning parameters
40 **************************************/
41 /*
42  * HEAPMODE :
43  * Select how default compression functions will allocate memory for their hash table,
44  * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
45  */
46 #ifndef HEAPMODE
47 #  define HEAPMODE 0
48 #endif
49 
50 /*
51  * ACCELERATION_DEFAULT :
52  * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
53  */
54 #define ACCELERATION_DEFAULT 1
55 
56 
57 /*-************************************
58 *  CPU Feature Detection
59 **************************************/
60 /* LZ4_FORCE_MEMORY_ACCESS
61  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
62  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
64  * Method 0 (default) : use `memcpy()`. Safe and portable.
65  * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
66  *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
68  *            It can generate buggy code on targets which generate assembly depending on alignment.
69  *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
70  * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
71  * Prefer these methods in priority order (0 > 1 > 2)
72  */
73 #ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
74 #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
75 #    define LZ4_FORCE_MEMORY_ACCESS 2
76 #  elif defined(__INTEL_COMPILER) || \
77   (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
78 #    define LZ4_FORCE_MEMORY_ACCESS 1
79 #  endif
80 #endif
81 
82 /*
83  * LZ4_FORCE_SW_BITCOUNT
84  * Define this parameter if your target system or compiler does not support hardware bit count
85  */
86 #if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
87 #  define LZ4_FORCE_SW_BITCOUNT
88 #endif
89 
90 
91 /*-************************************
92 *  Dependency
93 **************************************/
94 #include "lz4internal.h"
95 /* see also "memory routines" below */
96 
97 /* Silence GCC's -Wmissing-prototypes warning. */
98 int LZ4_compress_fast_force(const char*, char*, int, int, int);
99 int LZ4_compress_forceExtDict (LZ4_stream_t*, const char*, char*, int);
100 int LZ4_decompress_safe_forceExtDict(const char*, char*, int, int, const char*, int);
101 int LZ4_uncompress (const char*, char*, int);
102 int LZ4_uncompress_unknownOutputSize (const char*, char*, int, int);
103 
104 /*-************************************
105 *  Compiler Options
106 **************************************/
107 #ifdef _MSC_VER    /* Visual Studio */
108 #  define FORCE_INLINE static __forceinline
109 #  include <intrin.h>
110 #  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
111 #  pragma warning(disable : 4293)        /* disable: C4293: too large shift (32-bits) */
112 #else
113 #  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */
114 #    if defined(__GNUC__) || defined(__clang__)
115 #      define FORCE_INLINE static inline __attribute__((always_inline))
116 #    else
117 #      define FORCE_INLINE static inline
118 #    endif
119 #  else
120 #    define FORCE_INLINE static
121 #  endif   /* __STDC_VERSION__ */
122 #endif  /* _MSC_VER */
123 
124 #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
125 #  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
126 #else
127 #  define expect(expr,value)    (expr)
128 #endif
129 
130 #define likely(expr)     expect((expr) != 0, 1)
131 #define unlikely(expr)   expect((expr) != 0, 0)
132 
133 
134 /*-************************************
135 *  Memory routines
136 **************************************/
137 #include <stdlib.h>   /* malloc, calloc, free */
138 #define ALLOCATOR(n,s) calloc(n,s)
139 #define FREEMEM        free
140 #include <string.h>   /* memset, memcpy */
141 #define MEM_INIT       memset
142 
143 
144 /*-************************************
145 *  Basic Types
146 **************************************/
147 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
148 # include <stdint.h>
149   typedef  uint8_t BYTE;
150   typedef uint16_t U16;
151   typedef uint32_t U32;
152   typedef  int32_t S32;
153   typedef uint64_t U64;
154   typedef uintptr_t uptrval;
155 #else
156   typedef unsigned char       BYTE;
157   typedef unsigned short      U16;
158   typedef unsigned int        U32;
159   typedef   signed int        S32;
160   typedef unsigned long long  U64;
161   typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
162 #endif
163 
164 #if defined(__x86_64__)
165   typedef U64    reg_t;   /* 64-bits in x32 mode */
166 #else
167   typedef size_t reg_t;   /* 32-bits in x32 mode */
168 #endif
169 
170 /*-************************************
171 *  Reading and writing into memory
172 **************************************/
LZ4_isLittleEndian(void)173 static unsigned LZ4_isLittleEndian(void)
174 {
175     const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
176     return one.c[0];
177 }
178 
179 
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* lie to the compiler about data alignment; use with caution */
/* NOTE(review): these direct dereferences break strict-aliasing/alignment
 * rules; this variant is only selected for targets known to tolerate it
 * (see LZ4_FORCE_MEMORY_ACCESS selection at the top of the file). */

static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }

static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;

static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }

static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }

#else  /* safe and portable access through memcpy() */

/* Default variant : express every unaligned access through memcpy(), which
 * is well-defined C on all targets (see discussion at the top of the file). */
static U16 LZ4_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr)
{
    reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */
231 
232 
LZ4_readLE16(const void * memPtr)233 static U16 LZ4_readLE16(const void* memPtr)
234 {
235     if (LZ4_isLittleEndian()) {
236         return LZ4_read16(memPtr);
237     } else {
238         const BYTE* p = (const BYTE*)memPtr;
239         return (U16)((U16)p[0] + (p[1]<<8));
240     }
241 }
242 
/* Store a 16-bit value in little-endian order, whatever the host byte order. */
static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (LZ4_isLittleEndian()) {
        LZ4_write16(memPtr, value);
        return;
    }

    {   BYTE* out = (BYTE*)memPtr;
        out[0] = (BYTE)(value & 0xFF);
        out[1] = (BYTE)(value >> 8);
    }
}
253 
/* Copy exactly 8 bytes from src to dst (regions must not overlap). */
static void LZ4_copy8(void* dst, const void* src)
{
    memcpy(dst,src,8);
}
258 
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
/* Copies in 8-byte chunks until dstEnd is reached or passed.  Note the
 * do-while : the body always executes at least once, so the destination must
 * always have room for a full 8-byte store, even for tiny copies. */
static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* const e = (BYTE*)dstEnd;

    do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
}
268 
269 
270 /*-************************************
271 *  Common Constants
272 **************************************/
273 #define MINMATCH 4
274 
275 #define WILDCOPYLENGTH 8
276 #define LASTLITERALS 5
277 #define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
278 static const int LZ4_minLength = (MFLIMIT+1);
279 
280 #define KB *(1 <<10)
281 #define MB *(1 <<20)
282 #define GB *(1U<<30)
283 
284 #define MAXD_LOG 16
285 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
286 
287 #define ML_BITS  4
288 #define ML_MASK  ((1U<<ML_BITS)-1)
289 #define RUN_BITS (8-ML_BITS)
290 #define RUN_MASK ((1U<<RUN_BITS)-1)
291 
292 
293 /*-************************************
294 *  Common Utils
295 **************************************/
296 #define LZ4_STATIC_ASSERT(c)    { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
297 
298 
299 /*-************************************
300 *  Common functions
301 **************************************/
LZ4_NbCommonBytes(register reg_t val)302 static unsigned LZ4_NbCommonBytes (register reg_t val)
303 {
304     if (LZ4_isLittleEndian()) {
305         if (sizeof(val)==8) {
306 #       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
307             unsigned long r = 0;
308             _BitScanForward64( &r, (U64)val );
309             return (int)(r>>3);
310 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
311             return (__builtin_ctzll((U64)val) >> 3);
312 #       else
313             static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
314             return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
315 #       endif
316         } else /* 32 bits */ {
317 #       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
318             unsigned long r;
319             _BitScanForward( &r, (U32)val );
320             return (int)(r>>3);
321 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
322             return (__builtin_ctz((U32)val) >> 3);
323 #       else
324             static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
325             return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
326 #       endif
327         }
328     } else   /* Big Endian CPU */ {
329         if (sizeof(val)==8) {
330 #       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
331             unsigned long r = 0;
332             _BitScanReverse64( &r, val );
333             return (unsigned)(r>>3);
334 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
335             return (__builtin_clzll((U64)val) >> 3);
336 #       else
337             unsigned r;
338             if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
339             if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
340             r += (!val);
341             return r;
342 #       endif
343         } else /* 32 bits */ {
344 #       if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
345             unsigned long r = 0;
346             _BitScanReverse( &r, (unsigned long)val );
347             return (unsigned)(r>>3);
348 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
349             return (__builtin_clz((U32)val) >> 3);
350 #       else
351             unsigned r;
352             if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
353             r += (!val);
354             return r;
355 #       endif
356         }
357     }
358 }
359 
#define STEPSIZE sizeof(reg_t)
/* Count how many consecutive bytes match between pIn and pMatch, comparing no
 * byte at or beyond pInLimit.  Works one machine word at a time, then locates
 * the first mismatching byte via LZ4_NbCommonBytes(). */
static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    while (likely(pIn<pInLimit-(STEPSIZE-1))) {
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    /* tail : finish with 4-, 2- and 1-byte comparisons */
    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}
377 
378 
379 #ifndef LZ4_COMMONDEFS_ONLY
380 /*-************************************
381 *  Local Constants
382 **************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));   /* inputs below this limit can use 16-bit hash-table entries (byU16) */
static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression run slower on incompressible data */


/*-************************************
*  Local Structures and types
**************************************/
/* These enums act as compile-time directives for LZ4_compress_generic(). */
typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;   /* bound-check writes to the output buffer? */
typedef enum { byPtr, byU32, byU16 } tableType_t;   /* hash-table entry representation */

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;   /* dictionary mode */
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;   /* extra checks when the dictionary is small */

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;
398 
399 
400 /*-************************************
401 *  Local Utils
402 **************************************/
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }   /* library version, encoded as a single int */
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }   /* library version, human-readable */
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }   /* worst-case compressed size for isize input bytes */
int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }   /* byte size of an externally-allocated compression state */
407 
408 
409 /*-******************************
410 *  Compression functions
411 ********************************/
/* Hash a 4-byte sequence with a Knuth multiplicative hash; byU16 tables are
 * twice as large, so they keep one extra bit of the product. */
static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
    const U32 shift = (tableType == byU16) ? ((MINMATCH*8)-(LZ4_HASHLOG+1))
                                           : ((MINMATCH*8)-LZ4_HASHLOG);
    return (sequence * 2654435761U) >> shift;
}
419 
LZ4_hash5(U64 sequence,tableType_t const tableType)420 static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
421 {
422     static const U64 prime5bytes = 889523592379ULL;
423     static const U64 prime8bytes = 11400714785074694791ULL;
424     const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
425     if (LZ4_isLittleEndian())
426         return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
427     else
428         return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
429 }
430 
LZ4_hashPosition(const void * const p,tableType_t const tableType)431 FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
432 {
433     if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
434     return LZ4_hash4(LZ4_read32(p), tableType);
435 }
436 
/* Record position p in the hash table at slot h, using the entry encoding
 * selected by tableType. */
static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) {
        const BYTE** table = (const BYTE**)tableBase;
        table[h] = p;
    } else if (tableType == byU32) {
        U32* table = (U32*)tableBase;
        table[h] = (U32)(p-srcBase);
    } else {   /* byU16 */
        U16* table = (U16*)tableBase;
        table[h] = (U16)(p-srcBase);
    }
}
446 
LZ4_putPosition(const BYTE * p,void * tableBase,tableType_t tableType,const BYTE * srcBase)447 FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
448 {
449     U32 const h = LZ4_hashPosition(p, tableType);
450     LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
451 }
452 
/* Retrieve the position stored in hash-table slot h, decoding it according
 * to the entry representation selected by tableType. */
static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    switch (tableType)
    {
    case byPtr:
        { const BYTE** table = (const BYTE**) tableBase; return table[h]; }
    case byU32:
        { const U32* const table = (U32*) tableBase; return table[h] + srcBase; }
    default:   /* byU16 */
        { const U16* const table = (U16*) tableBase; return table[h] + srcBase; }
    }
}
459 
LZ4_getPosition(const BYTE * p,void * tableBase,tableType_t tableType,const BYTE * srcBase)460 FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
461 {
462     U32 const h = LZ4_hashPosition(p, tableType);
463     return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
464 }
465 
466 
/** LZ4_compress_generic() :
    inlined, to ensure branches are decided at compilation time.

    Single workhorse behind every compression entry point in this file.
    The directive parameters (outputLimited, tableType, dict, dictIssue) are
    compile-time constants at each call site, so forced inlining lets the
    compiler strip every branch that does not apply to that variant.

    @return number of bytes written to dest, or 0 on error (input too large,
            byU16 table with input >= 64K, or dest too small when
            outputLimited is set). */
FORCE_INLINE int LZ4_compress_generic(
                 LZ4_stream_t_internal* const cctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 const int maxOutputSize,
                 const limitedOutput_directive outputLimited,
                 const tableType_t tableType,
                 const dict_directive dict,
                 const dictIssue_directive dictIssue,
                 const U32 acceleration)
{
    const BYTE* ip = (const BYTE*) source;
    const BYTE* base;
    const BYTE* lowLimit;
    const BYTE* const lowRefLimit = ip - cctx->dictSize;   /* matches older than this fall outside the dictionary */
    const BYTE* const dictionary = cctx->dictionary;
    const BYTE* const dictEnd = dictionary + cctx->dictSize;
    const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;   /* translates ext-dict positions into source space */
    const BYTE* anchor = (const BYTE*) source;   /* start of the pending, not-yet-emitted literal run */
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;            /* no match may start beyond this point */
    const BYTE* const matchlimit = iend - LASTLITERALS;    /* matches stop here, leaving trailing literals */

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 forwardH;

    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
    switch(dict)
    {
    case noDict:
    default:
        base = (const BYTE*)source;
        lowLimit = (const BYTE*)source;
        break;
    case withPrefix64k:
        base = (const BYTE*)source - cctx->currentOffset;
        lowLimit = (const BYTE*)source - cctx->dictSize;
        break;
    case usingExtDict:
        base = (const BYTE*)source - cctx->currentOffset;
        lowLimit = (const BYTE*)source;
        break;
    }
    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (inputSize<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        ptrdiff_t refDelta = 0;
        const BYTE* match;
        BYTE* token;

        /* Find a match : probe the hash table, stepping further ahead the
           longer the data stays incompressible (step grows as searchMatchNb
           exceeds the acceleration << LZ4_skipTrigger threshold). */
        {   const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
                if (dict==usingExtDict) {
                    if (match < (const BYTE*)source) {
                        /* candidate lives inside the external dictionary */
                        refDelta = dictDelta;
                        lowLimit = dictionary;
                    } else {
                        refDelta = 0;
                        lowLimit = (const BYTE*)source;
                }   }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

            } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
                || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
        }

        /* Catch up : extend the match backwards over equal preceding bytes */
        while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }

        /* Encode Literals */
        {   unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) &&  /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
                return 0;
            if (litLength >= RUN_MASK) {
                int len = (int)litLength-RUN_MASK;
                *token = (RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op+=litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {   unsigned matchCode;

            if ((dict==usingExtDict) && (lowLimit==dictionary)) {
                /* match starts in the external dictionary : count within the
                   dictionary first, then continue into the current block */
                const BYTE* limit;
                match += refDelta;
                limit = ip + (dictEnd-match);
                if (limit > matchlimit) limit = matchlimit;
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += MINMATCH + matchCode;
                if (ip==limit) {
                    unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
            } else {
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += MINMATCH + matchCode;
            }

            if ( outputLimited &&    /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
                return 0;
            if (matchCode >= ML_MASK) {
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255;
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            } else
                *token += (BYTE)(matchCode);
        }

        anchor = ip;

        /* Test end of chunk */
        if (ip > mflimit) break;

        /* Fill table */
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);

        /* Test next position : if it matches immediately, emit a zero-literal
           token and jump straight back to offset encoding */
        match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
        if (dict==usingExtDict) {
            if (match < (const BYTE*)source) {
                refDelta = dictDelta;
                lowLimit = dictionary;
            } else {
                refDelta = 0;
                lowLimit = (const BYTE*)source;
        }   }
        LZ4_putPosition(ip, cctx->hashTable, tableType, base);
        if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
            && (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t const lastRun = (size_t)(iend - anchor);
        if ( (outputLimited) &&  /* Check output buffer overflow */
            ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
            return 0;
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRun<<ML_BITS);
        }
        memcpy(op, anchor, lastRun);
        op += lastRun;
    }

    /* End */
    return (int) (((char*)op)-dest);
}
667 
668 
LZ4_compress_fast_extState(void * state,const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)669 int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
670 {
671     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
672     LZ4_resetStream((LZ4_stream_t*)state);
673     if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
674 
675     if (maxOutputSize >= LZ4_compressBound(inputSize)) {
676         if (inputSize < LZ4_64Klimit)
677             return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited,                        byU16, noDict, noDictIssue, acceleration);
678         else
679             return LZ4_compress_generic(ctx, source, dest, inputSize,             0,    notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
680     } else {
681         if (inputSize < LZ4_64Klimit)
682             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput,                        byU16, noDict, noDictIssue, acceleration);
683         else
684             return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
685     }
686 }
687 
688 
LZ4_compress_fast(const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)689 int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
690 {
691 #if (HEAPMODE)
692     void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
693 #else
694     LZ4_stream_t ctx;
695     void* const ctxPtr = &ctx;
696 #endif
697 
698     int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
699 
700 #if (HEAPMODE)
701     FREEMEM(ctxPtr);
702 #endif
703     return result;
704 }
705 
706 
/* Plain one-shot compression : delegate with the neutral acceleration factor. */
int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    const int defaultAcceleration = 1;
    return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, defaultAcceleration);
}
711 
712 
713 /* hidden debug function */
714 /* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
LZ4_compress_fast_force(const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)715 int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
716 {
717     LZ4_stream_t ctx;
718     LZ4_resetStream(&ctx);
719 
720     if (inputSize < LZ4_64Klimit)
721         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16,                        noDict, noDictIssue, acceleration);
722     else
723         return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
724 }
725 
726 
727 /*-******************************
728 *  *_destSize() variant
729 ********************************/
730 
/*! LZ4_compress_destSize_generic() :
 *  Compress as much data from 'src' as possible so that the compressed output
 *  fits within at most 'targetDstSize' bytes of 'dst'.
 *  *srcSizePtr : in = available input size ; out = nb of input bytes consumed.
 * @return : nb of bytes written into 'dst' (<= targetDstSize), or 0 on error.
 *  NOTE(review): this is a hand-maintained mirror of LZ4_compress_generic()
 *  with extra output-budget checks; keep the two in sync when updating.
 */
static int LZ4_compress_destSize_generic(
                       LZ4_stream_t_internal* const ctx,
                 const char* const src,
                       char* const dst,
                       int*  const srcSizePtr,
                 const int targetDstSize,
                 const tableType_t tableType)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* base = (const BYTE*) src;
    const BYTE* lowLimit = (const BYTE*) src;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + *srcSizePtr;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dst;
    BYTE* const oend = op + targetDstSize;
    BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
    BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
    BYTE* const oMaxSeq = oMaxLit - 1 /* token */;

    U32 forwardH;


    /* Init conditions */
    if (targetDstSize < 1) return 0;                                     /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;            /* Unsupported input size, too large (or negative) */
    if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (*srcSizePtr<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */

    /* First Byte */
    *srcSizePtr = 0;
    LZ4_putPosition(ip, ctx->hashTable, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; ) {
        const BYTE* match;
        BYTE* token;

        /* Find a match : hash-chain probe with an accelerating skip
         * (step grows after each 2^LZ4_skipTrigger failed attempts) */
        {   const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = 1 << LZ4_skipTrigger;

            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);

            } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match) != LZ4_read32(ip)) );
        }

        /* Catch up : extend the match backwards over identical bytes */
        while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

        /* Encode Literal length */
        {   unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if (op + ((litLength+240)/255) + litLength > oMaxLit) {
                /* Not enough space for a last match */
                op--;
                goto _last_literals;
            }
            if (litLength>=RUN_MASK) {
                unsigned len = litLength - RUN_MASK;
                *token=(RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op += litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {   size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);

            if (op + ((matchLength+240)/255) > oMaxMatch) {
                /* Match description too long : reduce it to fit the output budget */
                matchLength = (15-1) + (oMaxMatch-op) * 255;
            }
            ip += MINMATCH + matchLength;

            if (matchLength>=ML_MASK) {
                *token += ML_MASK;
                matchLength -= ML_MASK;
                while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of block */
        if (ip > mflimit) break;
        if (op > oMaxSeq) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);

        /* Test next position : immediate re-match at current position */
        match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
        LZ4_putPosition(ip, ctx->hashTable, tableType, base);
        if ( (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals : flush remaining input as literals, shrinking
     * the run if needed so the sequence still fits in 'dst' */
    {   size_t lastRunSize = (size_t)(iend - anchor);
        if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize  = (oend-op) - 1;
            lastRunSize -= (lastRunSize+240)/255;
        }
        ip = anchor + lastRunSize;

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize<<ML_BITS);
        }
        memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int) (((const char*)ip)-src);
    return (int) (((char*)op)-dst);
}
885 
886 
/*! LZ4_compress_destSize_extState() :
 *  destSize compression using a caller-provided state.
 *  Falls back to the regular fast path when the whole input is
 *  guaranteed to fit within 'targetDstSize'. */
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    LZ4_resetStream(state);

    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {
        /* compression success is guaranteed : use the standard fast path */
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
    }

    {   tableType_t const tableType = (*srcSizePtr < LZ4_64Klimit) ? byU16
                                    : (sizeof(void*)==8) ? byU32 : byPtr;
        return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, tableType);
    }
}
900 
901 
LZ4_compress_destSize(const char * src,char * dst,int * srcSizePtr,int targetDstSize)902 int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
903 {
904 #if (HEAPMODE)
905     LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
906 #else
907     LZ4_stream_t ctxBody;
908     LZ4_stream_t* ctx = &ctxBody;
909 #endif
910 
911     int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
912 
913 #if (HEAPMODE)
914     FREEMEM(ctx);
915 #endif
916     return result;
917 }
918 
919 
920 
921 /*-******************************
922 *  Streaming functions
923 ********************************/
924 
LZ4_createStream(void)925 LZ4_stream_t* LZ4_createStream(void)
926 {
927     LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
928     LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));    /* A compilation error here means LZ4_STREAMSIZE is not large enough */
929     LZ4_resetStream(lz4s);
930     return lz4s;
931 }
932 
/*! LZ4_resetStream() :
 *  Zero the entire tracking structure, making it ready for a new stream. */
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    MEM_INIT(LZ4_stream, 0, sizeof(*LZ4_stream));
}
937 
/*! LZ4_freeStream() :
 *  Release a state obtained from LZ4_createStream(). Always returns 0. */
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return 0;
}
943 
944 
945 #define HASH_UNIT sizeof(reg_t)
/*! LZ4_loadDict() :
 *  Reference 'dictionary' content (up to its last 64 KB) into LZ4_dict so that
 *  subsequent *_continue() compressions can find matches inside it.
 *  The dictionary buffer must remain valid and unmodified while in use.
 * @return : loaded (useful) dictionary size, in bytes (possibly 0)
 */
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    if ((dict->initCheck) || (dict->currentOffset > 1 GB))  /* Uninitialized structure, or reuse overflow */
        LZ4_resetStream(LZ4_dict);

    if (dictSize < (int)HASH_UNIT) {
        /* too small to index anything : behave as "no dictionary" */
        dict->dictionary = NULL;
        dict->dictSize = 0;
        return 0;
    }

    /* only the last 64 KB can ever be referenced (MAX_DISTANCE) */
    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    /* shift currentOffset so indexed positions stay >= 64 KB above zero */
    dict->currentOffset += 64 KB;
    base = p - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->currentOffset += dict->dictSize;

    /* index one position every 3 bytes of the dictionary */
    while (p <= dictEnd-HASH_UNIT) {
        LZ4_putPosition(p, dict->hashTable, byU32, base);
        p+=3;
    }

    return dict->dictSize;
}
976 
977 
/*! LZ4_renormDictT() :
 *  When currentOffset nears overflow (or exceeds the address of 'src'),
 *  rescale every hash table entry so that currentOffset restarts at 64 KB.
 *  Entries older than 64 KB are dropped (set to 0) since they can no longer
 *  be referenced anyway. Called before each *_continue() compression.
 */
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
{
    if ((LZ4_dict->currentOffset > 0x80000000) ||
        ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {   /* address space overflow */
        /* rescale hash table */
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;   /* too old : unreachable, drop */
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}
995 
996 
/*! LZ4_compress_fast_continue() :
 *  Compress 'source' using (and then extending) the history tracked in LZ4_stream.
 *  Chooses between prefix mode (source immediately follows previous data)
 *  and external dictionary mode.
 * @return : nb of bytes written into 'dest', or 0 on error
 *           (including an uninitialized stream state).
 */
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = (const BYTE*) source;
    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
    LZ4_renormDictT(streamPtr, smallest);
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    /* Check overlapping input/dictionary space : shrink the dictionary
     * so it does not cover bytes about to be overwritten by 'source' */
    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source) {
        int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
        else
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
        streamPtr->dictSize += (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }

    /* external dictionary mode : after compressing, 'source' itself
     * becomes the dictionary for the next block */
    {   int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
        else
            result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }
}
1042 
1043 
1044 /* Hidden debug function, to force external dictionary mode */
/* Hidden debug function, to force external dictionary mode */
/* Same bookkeeping as the ext-dict branch of LZ4_compress_fast_continue(),
 * but always selects usingExtDict, with no output limit and acceleration 1. */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
{
    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
    int result;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    /* renormalize against the lowest address in play */
    const BYTE* smallest = dictEnd;
    if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
    LZ4_renormDictT(streamPtr, smallest);

    result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);

    /* 'source' becomes the dictionary for the next block */
    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)inputSize;
    streamPtr->currentOffset += (U32)inputSize;

    return result;
}
1063 
1064 
1065 /*! LZ4_saveDict() :
1066  *  If previously compressed data block is not guaranteed to remain available at its memory location,
1067  *  save it into a safer place (char* safeBuffer).
1068  *  Note : you don't need to call LZ4_loadDict() afterwards,
1069  *         dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
1070  *  Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
1071  */
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
    const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;

    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;

    /* When there is nothing to save, callers may pass safeBuffer==NULL;
     * memmove(NULL, src, 0) is undefined behavior, so guard the copy. */
    if (dictSize > 0)
        memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return dictSize;
}
1087 
1088 
1089 
1090 /*-*****************************
1091 *  Decompression functions
1092 *******************************/
1093 /*! LZ4_decompress_generic() :
1094  *  This generic decompression function cover all use cases.
1095  *  It shall be instantiated several times, using different sets of directives
1096  *  Note that it is important this generic function is really inlined,
1097  *  in order to remove useless branches during compilation optimization.
1098  */
/* @return : nb of decoded bytes (endOnInput) or nb of consumed source bytes
 *           (endOnOutputSize); a negative value on error, encoding the
 *           position in 'source' where decoding failed. */
FORCE_INLINE int LZ4_decompress_generic(
                 const char* const source,
                 char* const dest,
                 int inputSize,
                 int outputSize,         /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */

                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
                 int partialDecoding,    /* full, partial */
                 int targetOutputSize,   /* only used if partialDecoding==partial */
                 int dict,               /* noDict, withPrefix64k, usingExtDict */
                 const BYTE* const lowPrefix,  /* == dest when no prefix */
                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
                 const size_t dictSize         /* note : = 0 if noDict */
                 )
{
    /* Local Variables */
    const BYTE* ip = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;
    const BYTE* const lowLimit = lowPrefix - dictSize;

    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
    /* per-offset adjustments used to emulate overlapping copies for offset<8 */
    const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};
    const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};

    const int safeDecode = (endOnInput==endOnInputSize);
    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));


    /* Special cases */
    if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT;                        /* targetOutputSize too high => decode everything */
    if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;  /* Empty output buffer */
    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);

    /* Main Loop : decode sequences */
    while (1) {
        size_t length;
        const BYTE* match;
        size_t offset;

        /* get literal length */
        unsigned const token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK) {
            unsigned s;
            do {
                s = *ip++;
                length += s;
            } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error;   /* overflow detection */
            if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error;   /* overflow detection */
        }

        /* copy literals */
        cpy = op+length;
        if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
            || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
        {
            /* near end of buffers : fall back to exact (non-wild) copy */
            if (partialDecoding) {
                if (cpy > oend) goto _output_error;                           /* Error : write attempt beyond end of output buffer */
                if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
            } else {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;       /* Error : block decoding must stop exactly there */
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;     /* Necessarily EOF, due to parsing restrictions */
        }
        LZ4_wildCopy(op, ip, cpy);
        ip += length; op = cpy;

        /* get offset */
        offset = LZ4_readLE16(ip); ip+=2;
        match = op - offset;
        if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error;   /* Error : offset outside buffers */
        LZ4_write32(op, (U32)offset);   /* costs ~1%; silence an msan warning when offset==0 */

        /* get matchlength */
        length = token & ML_MASK;
        if (length == ML_MASK) {
            unsigned s;
            do {
                s = *ip++;
                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
                length += s;
            } while (s==255);
            if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
        }
        length += MINMATCH;

        /* check external dictionary */
        if ((dict==usingExtDict) && (match < lowPrefix)) {
            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */

            if (length <= (size_t)(lowPrefix-match)) {
                /* match can be copied as a single segment from external dictionary */
                memmove(op, dictEnd - (lowPrefix-match), length);
                op += length;
            } else {
                /* match encompass external dictionary and current block */
                size_t const copySize = (size_t)(lowPrefix-match);
                size_t const restSize = length - copySize;
                memcpy(op, dictEnd - copySize, copySize);
                op += copySize;
                if (restSize > (size_t)(op-lowPrefix)) {  /* overlap copy */
                    BYTE* const endOfMatch = op + restSize;
                    const BYTE* copyFrom = lowPrefix;
                    while (op < endOfMatch) *op++ = *copyFrom++;
                } else {
                    memcpy(op, lowPrefix, restSize);
                    op += restSize;
            }   }
            continue;
        }

        /* copy match within block */
        cpy = op + length;
        if (unlikely(offset<8)) {
            /* overlapping match : copy byte-by-byte for the first 8 output
             * bytes, using the dec32/dec64 tables to realign 'match' */
            const int dec64 = dec64table[offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[offset];
            memcpy(op+4, match, 4);
            match -= dec64;
        } else { LZ4_copy8(op, match); match+=8; }
        op += 8;

        if (unlikely(cpy>oend-12)) {
            BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
            if (cpy > oend-LASTLITERALS) goto _output_error;    /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
            if (op < oCopyLimit) {
                LZ4_wildCopy(op, match, oCopyLimit);
                match += oCopyLimit - op;
                op = oCopyLimit;
            }
            while (op<cpy) *op++ = *match++;
        } else {
            LZ4_copy8(op, match);
            if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
        }
        op=cpy;   /* correction */
    }

    /* end of decoding */
    if (endOnInput)
       return (int) (((char*)op)-dest);     /* Nb of output bytes decoded */
    else
       return (int) (((const char*)ip)-source);   /* Nb of input bytes read */

    /* Overflow error detected */
_output_error:
    return (int) (-(((const char*)ip)-source))-1;
}
1259 
1260 
LZ4_decompress_safe(const char * source,char * dest,int compressedSize,int maxDecompressedSize)1261 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
1262 {
1263     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
1264 }
1265 
LZ4_decompress_safe_partial(const char * source,char * dest,int compressedSize,int targetOutputSize,int maxDecompressedSize)1266 int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
1267 {
1268     return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
1269 }
1270 
LZ4_decompress_fast(const char * source,char * dest,int originalSize)1271 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
1272 {
1273     return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
1274 }
1275 
1276 
1277 /*===== streaming decompression functions =====*/
1278 
1279 /*
1280  * If you prefer dynamic allocation methods,
1281  * LZ4_createStreamDecode()
1282  * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
1283  */
LZ4_createStreamDecode(void)1284 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
1285 {
1286     LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
1287     return lz4s;
1288 }
1289 
/*! LZ4_freeStreamDecode() :
 *  Release a state obtained from LZ4_createStreamDecode(). Always returns 0. */
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return 0;
}
1295 
1296 /*!
1297  * LZ4_setStreamDecode() :
1298  * Use this function to instruct where to find the dictionary.
1299  * This function is not necessary if previous data is still available where it was decoded.
1300  * Loading a size of 0 is allowed (same effect as no dictionary).
1301  * Return : 1 if OK, 0 if error
1302  */
/*!
 * LZ4_setStreamDecode() :
 * Tell the streaming decoder where the dictionary (previously decoded data)
 * resides. Not needed if previous data is still at its decode position.
 * A dictSize of 0 is allowed (same effect as no dictionary).
 * @return : 1 if OK, 0 if error
 */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
    LZ4_streamDecode_t_internal* const lz4sd = &LZ4_streamDecode->internal_donotuse;
    lz4sd->prefixEnd    = (const BYTE*) dictionary + dictSize;
    lz4sd->prefixSize   = (size_t) dictSize;
    lz4sd->externalDict = NULL;
    lz4sd->extDictSize  = 0;
    return 1;
}
1312 
1313 /*
1314 *_continue() :
1315     These decoding functions allow decompression of multiple blocks in "streaming" mode.
1316     Previously decoded blocks must still be available at the memory position where they were decoded.
1317     If it's not possible, save the relevant part of decoded data into a safe buffer,
1318     and indicate where it stands using LZ4_setStreamDecode()
1319 */
/*! LZ4_decompress_safe_continue() :
 *  Decode one block in streaming mode. If 'dest' directly follows the
 *  previously decoded data, the prefix simply grows; otherwise the previous
 *  prefix becomes an external dictionary for this block.
 * @return : nb of decoded bytes, or a negative value on error.
 */
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        /* contiguous with previous block : extend the prefix */
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += result;
        lz4sd->prefixEnd  += result;
    } else {
        /* non-contiguous : previous prefix becomes the external dictionary */
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }

    return result;
}
1345 
/*! LZ4_decompress_fast_continue() :
 *  Streaming variant of LZ4_decompress_fast() : trusted input, decodes
 *  exactly 'originalSize' output bytes. Same prefix/ext-dict switching
 *  logic as LZ4_decompress_safe_continue().
 * @return : nb of consumed source bytes, or a negative value on error.
 */
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest) {
        /* contiguous with previous block : extend the prefix */
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += originalSize;
        lz4sd->prefixEnd  += originalSize;
    } else {
        /* non-contiguous : previous prefix becomes the external dictionary */
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = originalSize;
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
    }

    return result;
}
1371 
1372 
1373 /*
1374 Advanced decoding functions :
1375 *_usingDict() :
1376     These decoding functions work the same as "_continue" ones,
1377     the dictionary must be explicitly provided within parameters
1378 */
1379 
/*! LZ4_decompress_usingDict_generic() :
 *  One-shot decode with an explicitly provided dictionary.
 *  Selects the fastest applicable mode : no dictionary, prefix (dictionary
 *  immediately precedes 'dest'), or external dictionary.
 *  'safe' selects bounds-checked (endOnInputSize) vs trusted decoding.
 */
FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
{
    if (dictSize==0)
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
    if (dictStart+dictSize == dest) {
        /* dictionary is adjacent to output : treat it as a prefix */
        if (dictSize >= (int)(64 KB - 1))
            return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
    }
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}
1391 
/* Safe decode with an explicit dictionary : output is bounded by maxOutputSize.
 * Forwards to the generic dispatcher with safe==1. */
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest,
                                            compressedSize, maxOutputSize,
                                            1 /* safe */, dictStart, dictSize);
}
1396 
/* Fast decode with an explicit dictionary : originalSize is the exact
 * decompressed size. Forwards to the generic dispatcher with safe==0. */
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest,
                                            0, originalSize,
                                            0 /* fast */, dictStart, dictSize);
}
1401 
1402 /* debug function */
LZ4_decompress_safe_forceExtDict(const char * source,char * dest,int compressedSize,int maxOutputSize,const char * dictStart,int dictSize)1403 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
1404 {
1405     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
1406 }
1407 
1408 
1409 /*=*************************************************
1410 *  Obsolete Functions
1411 ***************************************************/
1412 /* obsolete compression functions */
/* Obsolete : same behavior as LZ4_compress_default() */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
}
/* Obsolete : compress with a worst-case-sized output budget */
int LZ4_compress(const char* source, char* dest, int inputSize)
{
    return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize));
}
/* Obsolete : externally-allocated state, acceleration fixed at 1 */
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
{
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
}
/* Obsolete : externally-allocated state with worst-case output budget */
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
{
    return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
}
/* Obsolete : streaming compression, acceleration fixed at 1 */
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize)
{
    return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1);
}
/* Obsolete : streaming compression with worst-case output budget */
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
{
    return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
}
1419 
1420 /*
1421 These function names are deprecated and should no longer be used.
1422 They are only provided here for compatibility with older user programs.
1423 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
1424 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
1425 */
/* Deprecated alias : identical to LZ4_decompress_fast() */
int LZ4_uncompress (const char* source, char* dest, int outputSize)
{
    return LZ4_decompress_fast(source, dest, outputSize);
}
/* Deprecated alias : identical to LZ4_decompress_safe() */
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
{
    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}
1428 
1429 
1430 /* Obsolete Streaming functions */
1431 
LZ4_sizeofStreamState(void)1432 int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
1433 
/* Zero the whole stream state, then record the start of the caller's
 * ring buffer. Helper for the obsolete LZ4_create()/LZ4_resetStreamState(). */
static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)
{
    LZ4_stream_t_internal* const internal = &lz4ds->internal_donotuse;
    MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
    internal->bufferStart = base;
}
1439 
LZ4_resetStreamState(void * state,char * inputBuffer)1440 int LZ4_resetStreamState(void* state, char* inputBuffer)
1441 {
1442     if ((((uptrval)state) & 3) != 0) return 1;   /* Error : pointer is not aligned on 4-bytes boundary */
1443     LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
1444     return 0;
1445 }
1446 
LZ4_create(char * inputBuffer)1447 void* LZ4_create (char* inputBuffer)
1448 {
1449     LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
1450     LZ4_init (lz4ds, (BYTE*)inputBuffer);
1451     return lz4ds;
1452 }
1453 
LZ4_slideInputBuffer(void * LZ4_Data)1454 char* LZ4_slideInputBuffer (void* LZ4_Data)
1455 {
1456     LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
1457     int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
1458     return (char*)(ctx->bufferStart + dictSize);
1459 }
1460 
1461 /* Obsolete streaming decompression functions */
1462 
LZ4_decompress_safe_withPrefix64k(const char * source,char * dest,int compressedSize,int maxOutputSize)1463 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
1464 {
1465     return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1466 }
1467 
LZ4_decompress_fast_withPrefix64k(const char * source,char * dest,int originalSize)1468 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
1469 {
1470     return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1471 }
1472 
1473 #endif   /* LZ4_COMMONDEFS_ONLY */
1474 #else /* !SVN_INTERNAL_LZ4 */
1475 
1476 /* Silence OSX ranlib warnings about object files with no symbols. */
1477 #include <apr.h>
1478 extern const apr_uint32_t svn__fake__lz4internal;
1479 const apr_uint32_t svn__fake__lz4internal = 0xdeadbeef;
1480 
1481 #endif /* SVN_INTERNAL_LZ4 */
1482