/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2017, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
    - LZ4 homepage : http://www.lz4.org
    - LZ4 source repository : https://github.com/lz4/lz4
*/

/*-************************************
 *  Tuning parameters
 **************************************/
/*
 * LZ4_HEAPMODE :
 * Selects how the default compression functions allocate memory for their
 * hash table : on the stack (0: default, fastest) or on the heap
 * (1: requires malloc()).
 */
#ifndef LZ4_HEAPMODE
#define LZ4_HEAPMODE 0
#endif

/*
 * ACCELERATION_DEFAULT :
 * Selects the "acceleration" used by LZ4_compress_fast() when its parameter
 * value is <= 0
 */
#define ACCELERATION_DEFAULT 1
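
/*
 * Usage note (illustrative, not from upstream) : any acceleration value <= 0
 * passed to LZ4_compress_fast() falls back to ACCELERATION_DEFAULT (the
 * entry points below clamp with `if (acceleration < 1)`), so these two calls
 * behave identically :
 *
 *   LZ4_compress_fast(src, dst, srcSize, dstCapacity, 0);
 *   LZ4_compress_fast(src, dst, srcSize, dstCapacity, ACCELERATION_DEFAULT);
 */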

/*-************************************
 *  CPU Feature Detection
 **************************************/
/* LZ4_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable. Unfortunately, on some target/compiler combinations, the
 * generated assembly is sub-optimal. The switch below allows selecting a
 * different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It relies on a compiler extension (i.e.
 * not portable). This method is safe if your compiler supports it, and
 * *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C
 * standard. It can generate buggy code on targets whose assembly generation
 * depends on alignment. But in some circumstances, it's the only known way to
 * get the best performance (e.g. GCC + ARMv6). See
 * https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html
 * for details. Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
#if defined(__GNUC__) &&                                     \
    (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||  \
     defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
     defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
#define LZ4_FORCE_MEMORY_ACCESS 2
#elif defined(__INTEL_COMPILER) || defined(__GNUC__)
#define LZ4_FORCE_MEMORY_ACCESS 1
#endif
#endif
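
/*
 * Example (illustrative) : since the macro can be defined externally, the
 * detection above can be overridden from the build system, e.g. to force the
 * portable memcpy()-based access method :
 *
 *   cc -DLZ4_FORCE_MEMORY_ACCESS=0 -c lz4.c
 */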

/*
 * LZ4_FORCE_SW_BITCOUNT
 * Define this parameter if your target system or compiler does not support
 * hardware bit count
 */
#if defined(_MSC_VER) && \
    defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support \
                           Hardware bit count */
#define LZ4_FORCE_SW_BITCOUNT
#endif

/*-************************************
 *  Dependency
 **************************************/
#include "lz4.h"
/* see also "memory routines" below */

/*-************************************
 *  Compiler Options
 **************************************/
#ifdef _MSC_VER /* Visual Studio */
#include <intrin.h>
#pragma warning( \
    disable : 4127) /* disable: C4127: conditional expression is constant */
#pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
#endif /* _MSC_VER */

#ifndef LZ4_FORCE_INLINE
#ifdef _MSC_VER /* Visual Studio */
#define LZ4_FORCE_INLINE static __forceinline
#else
#if defined(__cplusplus) || \
    (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
#ifdef __GNUC__
#define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
#else
#define LZ4_FORCE_INLINE static inline
#endif
#else
#define LZ4_FORCE_INLINE static
#endif /* __STDC_VERSION__ */
#endif /* _MSC_VER */
#endif /* LZ4_FORCE_INLINE */

/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
 * Gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy,
 * together with a simple 8-byte copy loop as a fall-back path.
 * However, this optimization hurts the decompression speed by >30%,
 * because the execution does not go to the optimized loop
 * for typical compressible data, and all of the preamble checks
 * before going to the fall-back path become useless overhead.
 * This optimization happens only with the -O3 flag, and -O2 generates
 * a simple 8-byte copy loop.
 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy
 * functions are annotated with __attribute__((optimize("O2"))),
 * and also LZ4_wildCopy is forcibly inlined, so that the O2 attribute
 * of LZ4_wildCopy does not affect the compression speed.
 */
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__)
#define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
#define LZ4_FORCE_O2_INLINE_GCC_PPC64LE \
  __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
#else
#define LZ4_FORCE_O2_GCC_PPC64LE
#define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3)) ||                   \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \
    defined(__clang__)
#define expect(expr, value) (__builtin_expect((expr), (value)))
#else
#define expect(expr, value) (expr)
#endif

#define likely(expr) expect((expr) != 0, 1)
#define unlikely(expr) expect((expr) != 0, 0)

/*-************************************
 *  Memory routines
 **************************************/
#include <stdlib.h> /* malloc, calloc, free */
#define ALLOCATOR(n, s) calloc(n, s)
#define FREEMEM free
#include <string.h> /* memset, memcpy */
#define MEM_INIT memset

/*-************************************
 *  Basic Types
 **************************************/
#if defined(__cplusplus) || \
    (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
#include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef size_t uptrval; /* generally true, except OpenVMS-64 */
#endif

#if defined(__x86_64__)
typedef U64 reg_t; /* 64-bits in x32 mode */
#else
typedef size_t reg_t; /* 32-bits in x32 mode */
#endif

/*-************************************
 *  Reading and writing into memory
 **************************************/
static unsigned LZ4_isLittleEndian(void) {
  const union {
    U32 u;
    BYTE c[4];
  } one = {1}; /* don't use static : performance detrimental */
  return one.c[0];
}

#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 2)
/* lie to the compiler about data alignment; use with caution */

static U16 LZ4_read16(const void* memPtr) { return *(const U16*)memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*)memPtr; }
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*)memPtr; }

static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 1)

/* __packed instructions are safer, but compiler specific, hence potentially
 * problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union {
  U16 u16;
  U32 u32;
  reg_t uArch;
} __attribute__((packed)) unalign;

static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static reg_t LZ4_read_ARCH(const void* ptr) {
  return ((const unalign*)ptr)->uArch;
}

static void LZ4_write16(void* memPtr, U16 value) {
  ((unalign*)memPtr)->u16 = value;
}
static void LZ4_write32(void* memPtr, U32 value) {
  ((unalign*)memPtr)->u32 = value;
}

#else /* safe and portable access through memcpy() */

static U16 LZ4_read16(const void* memPtr) {
  U16 val;
  memcpy(&val, memPtr, sizeof(val));
  return val;
}

static U32 LZ4_read32(const void* memPtr) {
  U32 val;
  memcpy(&val, memPtr, sizeof(val));
  return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr) {
  reg_t val;
  memcpy(&val, memPtr, sizeof(val));
  return val;
}

static void LZ4_write16(void* memPtr, U16 value) {
  memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value) {
  memcpy(memPtr, &value, sizeof(value));
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */

static U16 LZ4_readLE16(const void* memPtr) {
  if (LZ4_isLittleEndian()) {
    return LZ4_read16(memPtr);
  } else {
    const BYTE* p = (const BYTE*)memPtr;
    return (U16)((U16)p[0] + (p[1] << 8));
  }
}

static void LZ4_writeLE16(void* memPtr, U16 value) {
  if (LZ4_isLittleEndian()) {
    LZ4_write16(memPtr, value);
  } else {
    BYTE* p = (BYTE*)memPtr;
    p[0] = (BYTE)value;
    p[1] = (BYTE)(value >> 8);
  }
}

static void LZ4_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }

/* customized variant of memcpy, which can overwrite up to 8 bytes beyond
 * dstEnd */
LZ4_FORCE_O2_INLINE_GCC_PPC64LE
void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) {
  BYTE* d = (BYTE*)dstPtr;
  const BYTE* s = (const BYTE*)srcPtr;
  BYTE* const e = (BYTE*)dstEnd;

  do {
    LZ4_copy8(d, s);
    d += 8;
    s += 8;
  } while (d < e);
}
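
/*
 * Illustrative note (not from upstream) : because LZ4_wildCopy() copies in
 * 8-byte strides, a call such as LZ4_wildCopy(op, ip, op + len) may write up
 * to 8 bytes past op + len. This is why the format reserves slack at the end
 * of each block (see MFLIMIT / LASTLITERALS below) instead of using an
 * exact-length memcpy() on the hot path.
 */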

/*-************************************
 *  Common Constants
 **************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
static const int LZ4_minLength = (MFLIMIT + 1);

#define KB *(1 << 10)
#define MB *(1 << 20)
#define GB *(1U << 30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS 4
#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
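
/*
 * Worked example (illustrative) : each sequence starts with a 1-byte token
 * whose high 4 bits hold the literal run length and whose low 4 bits hold
 * (matchLength - MINMATCH), each field saturating at 15 :
 *
 *   5 literals + an 8-byte match  =>  token = (5 << ML_BITS) | (8 - MINMATCH)
 *                                           = 0x54
 *
 * A field value of 15 (RUN_MASK / ML_MASK) means "length continues in extra
 * bytes", each extra byte adding up to 255.
 */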

/*-************************************
 *  Error detection
 **************************************/
#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 1)
#include <assert.h>
#else
#ifndef assert
#define assert(condition) ((void)0)
#endif
#endif

#define LZ4_STATIC_ASSERT(c)                       \
  {                                                \
    enum { LZ4_static_assert = 1 / (int)(!!(c)) }; \
  } /* use only *after* variable declarations */

#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
#include <stdio.h>
static int g_debuglog_enable = 1;
#define DEBUGLOG(l, ...)                           \
  {                                                \
    if ((g_debuglog_enable) && (l <= LZ4_DEBUG)) { \
      fprintf(stderr, __FILE__ ": ");              \
      fprintf(stderr, __VA_ARGS__);                \
      fprintf(stderr, " \n");                      \
    }                                              \
  }
#else
#define DEBUGLOG(l, ...) \
  {} /* disabled */
#endif

/*-************************************
 *  Common functions
 **************************************/
static unsigned LZ4_NbCommonBytes(reg_t val) {
  if (LZ4_isLittleEndian()) {
    if (sizeof(val) == 8) {
#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
      unsigned long r = 0;
      _BitScanForward64(&r, (U64)val);
      return (int)(r >> 3);
#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \
    !defined(LZ4_FORCE_SW_BITCOUNT)
      return (__builtin_ctzll((U64)val) >> 3);
#else
      static const int DeBruijnBytePos[64] = {
          0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
          3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5,
          3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7};
      return DeBruijnBytePos[((U64)((val & -(long long)val) *
                                    0x0218A392CDABBD3FULL)) >>
                             58];
#endif
    } else /* 32 bits */ {
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
      unsigned long r;
      _BitScanForward(&r, (U32)val);
      return (int)(r >> 3);
#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \
    !defined(LZ4_FORCE_SW_BITCOUNT)
      return (__builtin_ctz((U32)val) >> 3);
#else
      static const int DeBruijnBytePos[32] = {0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2,
                                              1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2,
                                              2, 0, 3, 1, 2, 0, 1, 0, 1, 1};
      return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#endif
    }
  } else /* Big Endian CPU */ {
    if (sizeof(val) == 8) { /* 64-bits */
#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
      unsigned long r = 0;
      _BitScanReverse64(&r, val);
      return (unsigned)(r >> 3);
#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \
    !defined(LZ4_FORCE_SW_BITCOUNT)
      return (__builtin_clzll((U64)val) >> 3);
#else
      static const U32 by32 = sizeof(val) * 4; /* 32 on 64 bits (goal), 16 on
           32 bits. Just to avoid some static analyzer complaining about shift
           by 32 on 32-bits target. Note that this code path is never triggered
           in 32-bits mode. */
      unsigned r;
      if (!(val >> by32)) {
        r = 4;
      } else {
        r = 0;
        val >>= by32;
      }
      if (!(val >> 16)) {
        r += 2;
        val >>= 8;
      } else {
        val >>= 24;
      }
      r += (!val);
      return r;
#endif
    } else /* 32 bits */ {
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
      unsigned long r = 0;
      _BitScanReverse(&r, (unsigned long)val);
      return (unsigned)(r >> 3);
#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \
    !defined(LZ4_FORCE_SW_BITCOUNT)
      return (__builtin_clz((U32)val) >> 3);
#else
      unsigned r;
      if (!(val >> 16)) {
        r = 2;
        val >>= 8;
      } else {
        r = 0;
        val >>= 24;
      }
      r += (!val);
      return r;
#endif
    }
  }
}
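
/*
 * Worked example (illustrative) : on a little-endian 64-bit target,
 * LZ4_NbCommonBytes(0x0000000000FF0000ULL) == 2 : when two compared words
 * agree on their first two bytes, the XOR's lowest set bit is bit 16, and
 * ctz(...) >> 3 yields 16 / 8 == 2 common bytes.
 */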

#define STEPSIZE sizeof(reg_t)
LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) {
  const BYTE* const pStart = pIn;

  if (likely(pIn < pInLimit - (STEPSIZE - 1))) {
    reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
    if (!diff) {
      pIn += STEPSIZE;
      pMatch += STEPSIZE;
    } else {
      return LZ4_NbCommonBytes(diff);
    }
  }

  while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
    reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
    if (!diff) {
      pIn += STEPSIZE;
      pMatch += STEPSIZE;
      continue;
    }
    pIn += LZ4_NbCommonBytes(diff);
    return (unsigned)(pIn - pStart);
  }

  if ((STEPSIZE == 8) && (pIn < (pInLimit - 3)) &&
      (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
    pIn += 4;
    pMatch += 4;
  }
  if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
    pIn += 2;
    pMatch += 2;
  }
  if ((pIn < pInLimit) && (*pMatch == *pIn)) pIn++;
  return (unsigned)(pIn - pStart);
}
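
/*
 * Illustrative example : with pIn -> "ABCDEFGH..." and pMatch -> "ABCDXXXX...",
 * and pInLimit far enough away, LZ4_count(pIn, pMatch, pInLimit) returns 4.
 * The function compares register-wide words and, at the first mismatching
 * word, applies LZ4_NbCommonBytes() to the XOR to count the matching prefix
 * bytes exactly; no read ever reaches pInLimit or beyond.
 */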

#ifndef LZ4_COMMONDEFS_ONLY
/*-************************************
 *  Local Constants
 **************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT - 1));
static const U32 LZ4_skipTrigger =
    6; /* Increase this value ==> compression runs slower on
          incompressible data */

/*-************************************
 *  Local Structures and types
 **************************************/
typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;

/*-************************************
 *  Local Utils
 **************************************/
int LZ4_versionNumber(void) { return LZ4_VERSION_NUMBER; }
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState() { return LZ4_STREAMSIZE; }

/*-******************************
 *  Compression functions
 ********************************/
static U32 LZ4_hash4(U32 sequence, tableType_t const tableType) {
  if (tableType == byU16)
    return ((sequence * 2654435761U) >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
  else
    return ((sequence * 2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG));
}

static U32 LZ4_hash5(U64 sequence, tableType_t const tableType) {
  static const U64 prime5bytes = 889523592379ULL;
  static const U64 prime8bytes = 11400714785074694791ULL;
  const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG + 1 : LZ4_HASHLOG;
  if (LZ4_isLittleEndian())
    return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
  else
    return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
}

LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p,
                                      tableType_t const tableType) {
  if ((sizeof(reg_t) == 8) && (tableType != byU16))
    return LZ4_hash5(LZ4_read_ARCH(p), tableType);
  return LZ4_hash4(LZ4_read32(p), tableType);
}

static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase,
                                  tableType_t const tableType,
                                  const BYTE* srcBase) {
  switch (tableType) {
    case byPtr: {
      const BYTE** hashTable = (const BYTE**)tableBase;
      hashTable[h] = p;
      return;
    }
    case byU32: {
      U32* hashTable = (U32*)tableBase;
      hashTable[h] = (U32)(p - srcBase);
      return;
    }
    case byU16: {
      U16* hashTable = (U16*)tableBase;
      hashTable[h] = (U16)(p - srcBase);
      return;
    }
  }
}

LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase,
                                      tableType_t tableType,
                                      const BYTE* srcBase) {
  U32 const h = LZ4_hashPosition(p, tableType);
  LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase,
                                         tableType_t tableType,
                                         const BYTE* srcBase) {
  if (tableType == byPtr) {
    const BYTE** hashTable = (const BYTE**)tableBase;
    return hashTable[h];
  }
  if (tableType == byU32) {
    const U32* const hashTable = (U32*)tableBase;
    return hashTable[h] + srcBase;
  }
  {
    const U16* const hashTable = (U16*)tableBase;
    return hashTable[h] + srcBase;
  } /* default, to ensure a return */
}

LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase,
                                             tableType_t tableType,
                                             const BYTE* srcBase) {
  U32 const h = LZ4_hashPosition(p, tableType);
  return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}

/** LZ4_compress_generic() :
    inlined, to ensure branches are decided at compilation time */
LZ4_FORCE_INLINE int LZ4_compress_generic(
    LZ4_stream_t_internal* const cctx, const char* const source,
    char* const dest, const int inputSize, const int maxOutputSize,
    const limitedOutput_directive outputLimited, const tableType_t tableType,
    const dict_directive dict, const dictIssue_directive dictIssue,
    const U32 acceleration) {
  const BYTE* ip = (const BYTE*)source;
  const BYTE* base;
  const BYTE* lowLimit;
  const BYTE* const lowRefLimit = ip - cctx->dictSize;
  const BYTE* const dictionary = cctx->dictionary;
  const BYTE* const dictEnd = dictionary + cctx->dictSize;
  const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
  const BYTE* anchor = (const BYTE*)source;
  const BYTE* const iend = ip + inputSize;
  const BYTE* const mflimit = iend - MFLIMIT;
  const BYTE* const matchlimit = iend - LASTLITERALS;

  BYTE* op = (BYTE*)dest;
  BYTE* const olimit = op + maxOutputSize;

  U32 forwardH;

  /* Init conditions */
  if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE)
    return 0; /* Unsupported inputSize, too large (or negative) */
  switch (dict) {
    case noDict:
    default:
      base = (const BYTE*)source;
      lowLimit = (const BYTE*)source;
      break;
    case withPrefix64k:
      base = (const BYTE*)source - cctx->currentOffset;
      lowLimit = (const BYTE*)source - cctx->dictSize;
      break;
    case usingExtDict:
      base = (const BYTE*)source - cctx->currentOffset;
      lowLimit = (const BYTE*)source;
      break;
  }
  if ((tableType == byU16) && (inputSize >= LZ4_64Klimit))
    return 0; /* Size too large (not within 64K limit) */
  if (inputSize < LZ4_minLength)
    goto _last_literals; /* Input too small, no compression (all literals) */

  /* First Byte */
  LZ4_putPosition(ip, cctx->hashTable, tableType, base);
  ip++;
  forwardH = LZ4_hashPosition(ip, tableType);

  /* Main Loop */
  for (;;) {
    ptrdiff_t refDelta = 0;
    const BYTE* match;
    BYTE* token;

    /* Find a match */
    {
      const BYTE* forwardIp = ip;
      unsigned step = 1;
      unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
      do {
        U32 const h = forwardH;
        ip = forwardIp;
        forwardIp += step;
        step = (searchMatchNb++ >> LZ4_skipTrigger);

        if (unlikely(forwardIp > mflimit)) goto _last_literals;

        match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
        if (dict == usingExtDict) {
          if (match < (const BYTE*)source) {
            refDelta = dictDelta;
            lowLimit = dictionary;
          } else {
            refDelta = 0;
            lowLimit = (const BYTE*)source;
          }
        }
        forwardH = LZ4_hashPosition(forwardIp, tableType);
        LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);

      } while (((dictIssue == dictSmall) ? (match < lowRefLimit) : 0) ||
               ((tableType == byU16) ? 0 : (match + MAX_DISTANCE < ip)) ||
               (LZ4_read32(match + refDelta) != LZ4_read32(ip)));
    }

    /* Catch up */
    while (((ip > anchor) & (match + refDelta > lowLimit)) &&
           (unlikely(ip[-1] == match[refDelta - 1]))) {
      ip--;
      match--;
    }

    /* Encode Literals */
    {
      unsigned const litLength = (unsigned)(ip - anchor);
      token = op++;
      if ((outputLimited) && /* Check output buffer overflow */
          (unlikely(op + litLength + (2 + 1 + LASTLITERALS) +
                        (litLength / 255) >
                    olimit)))
        return 0;
      if (litLength >= RUN_MASK) {
        int len = (int)litLength - RUN_MASK;
        *token = (RUN_MASK << ML_BITS);
        for (; len >= 255; len -= 255) *op++ = 255;
        *op++ = (BYTE)len;
      } else
        *token = (BYTE)(litLength << ML_BITS);

      /* Copy Literals */
      LZ4_wildCopy(op, anchor, op + litLength);
      op += litLength;
    }

  _next_match:
    /* Encode Offset */
    LZ4_writeLE16(op, (U16)(ip - match));
    op += 2;

    /* Encode MatchLength */
    {
      unsigned matchCode;

      if ((dict == usingExtDict) && (lowLimit == dictionary)) {
        const BYTE* limit;
        match += refDelta;
        limit = ip + (dictEnd - match);
        if (limit > matchlimit) limit = matchlimit;
        matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, limit);
        ip += MINMATCH + matchCode;
        if (ip == limit) {
          unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
          matchCode += more;
          ip += more;
        }
      } else {
        matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit);
        ip += MINMATCH + matchCode;
      }

      if (outputLimited && /* Check output buffer overflow */
          (unlikely(op + (1 + LASTLITERALS) + (matchCode >> 8) > olimit)))
        return 0;
      if (matchCode >= ML_MASK) {
        *token += ML_MASK;
        matchCode -= ML_MASK;
        LZ4_write32(op, 0xFFFFFFFF);
        while (matchCode >= 4 * 255) {
          op += 4;
          LZ4_write32(op, 0xFFFFFFFF);
          matchCode -= 4 * 255;
        }
        op += matchCode / 255;
        *op++ = (BYTE)(matchCode % 255);
      } else
        *token += (BYTE)(matchCode);
    }

    anchor = ip;

    /* Test end of chunk */
    if (ip > mflimit) break;

    /* Fill table */
    LZ4_putPosition(ip - 2, cctx->hashTable, tableType, base);

    /* Test next position */
    match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
    if (dict == usingExtDict) {
      if (match < (const BYTE*)source) {
        refDelta = dictDelta;
        lowLimit = dictionary;
      } else {
        refDelta = 0;
        lowLimit = (const BYTE*)source;
      }
    }
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1) &&
        (match + MAX_DISTANCE >= ip) &&
        (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
      token = op++;
      *token = 0;
      goto _next_match;
    }

    /* Prepare next loop */
    forwardH = LZ4_hashPosition(++ip, tableType);
  }

_last_literals:
  /* Encode Last Literals */
  {
    size_t const lastRun = (size_t)(iend - anchor);
    if ((outputLimited) && /* Check output buffer overflow */
        ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
         (U32)maxOutputSize))
      return 0;
    if (lastRun >= RUN_MASK) {
      size_t accumulator = lastRun - RUN_MASK;
      *op++ = RUN_MASK << ML_BITS;
      for (; accumulator >= 255; accumulator -= 255) *op++ = 255;
      *op++ = (BYTE)accumulator;
    } else {
      *op++ = (BYTE)(lastRun << ML_BITS);
    }
    memcpy(op, anchor, lastRun);
    op += lastRun;
  }

  /* End */
  return (int)(((char*)op) - dest);
}

int LZ4_compress_fast_extState(void* state, const char* source, char* dest,
                               int inputSize, int maxOutputSize,
                               int acceleration) {
  LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
  LZ4_resetStream((LZ4_stream_t*)state);
  if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

  if (maxOutputSize >= LZ4_compressBound(inputSize)) {
    if (inputSize < LZ4_64Klimit)
      return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited,
                                  byU16, noDict, noDictIssue, acceleration);
    else
      return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited,
                                  (sizeof(void*) == 8) ? byU32 : byPtr, noDict,
                                  noDictIssue, acceleration);
  } else {
    if (inputSize < LZ4_64Klimit)
      return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize,
                                  limitedOutput, byU16, noDict, noDictIssue,
                                  acceleration);
    else
      return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize,
                                  limitedOutput,
                                  (sizeof(void*) == 8) ? byU32 : byPtr, noDict,
                                  noDictIssue, acceleration);
  }
}

int LZ4_compress_fast(const char* source, char* dest, int inputSize,
                      int maxOutputSize, int acceleration) {
#if (LZ4_HEAPMODE)
  void* ctxPtr = ALLOCATOR(
      1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
#else
  LZ4_stream_t ctx;
  void* const ctxPtr = &ctx;
#endif

  int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize,
                                                maxOutputSize, acceleration);

#if (LZ4_HEAPMODE)
  FREEMEM(ctxPtr);
#endif
  return result;
}

int LZ4_compress_default(const char* source, char* dest, int inputSize,
                         int maxOutputSize) {
  return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
}
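
/*
 * Minimal usage sketch (illustrative, caller-side code) :
 *
 *   const char* const msg = "example payload";
 *   int const srcSize = (int)strlen(msg);
 *   int const cap = LZ4_compressBound(srcSize);
 *   char* const buf = (char*)malloc((size_t)cap);
 *   int const cSize = buf ? LZ4_compress_default(msg, buf, srcSize, cap) : 0;
 *   // cSize > 0 on success; 0 means the output buffer was too small
 *
 * Sizing the destination with LZ4_compressBound() guarantees compression
 * cannot fail for any input up to LZ4_MAX_INPUT_SIZE.
 */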

/* hidden debug function */
/* strangely enough, gcc generates faster code when this function is
 * uncommented, even if unused */
int LZ4_compress_fast_force(const char* source, char* dest, int inputSize,
                            int maxOutputSize, int acceleration) {
  LZ4_stream_t ctx;
  LZ4_resetStream(&ctx);

  if (inputSize < LZ4_64Klimit)
    return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize,
                                maxOutputSize, limitedOutput, byU16, noDict,
                                noDictIssue, acceleration);
  else
    return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize,
                                maxOutputSize, limitedOutput,
                                sizeof(void*) == 8 ? byU32 : byPtr, noDict,
                                noDictIssue, acceleration);
}

/*-******************************
 *  *_destSize() variant
 ********************************/

static int LZ4_compress_destSize_generic(LZ4_stream_t_internal* const ctx,
                                         const char* const src, char* const dst,
                                         int* const srcSizePtr,
                                         const int targetDstSize,
                                         const tableType_t tableType) {
  const BYTE* ip = (const BYTE*)src;
  const BYTE* base = (const BYTE*)src;
  const BYTE* lowLimit = (const BYTE*)src;
  const BYTE* anchor = ip;
  const BYTE* const iend = ip + *srcSizePtr;
  const BYTE* const mflimit = iend - MFLIMIT;
  const BYTE* const matchlimit = iend - LASTLITERALS;

  BYTE* op = (BYTE*)dst;
  BYTE* const oend = op + targetDstSize;
  BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ -
                        8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
  BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
  BYTE* const oMaxSeq = oMaxLit - 1 /* token */;

  U32 forwardH;

  /* Init conditions */
  if (targetDstSize < 1) return 0; /* Impossible to store anything */
  if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
    return 0; /* Unsupported input size, too large (or negative) */
  if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
    return 0; /* Size too large (not within 64K limit) */
  if (*srcSizePtr < LZ4_minLength)
    goto _last_literals; /* Input too small, no compression (all literals) */

  /* First Byte */
  *srcSizePtr = 0;
  LZ4_putPosition(ip, ctx->hashTable, tableType, base);
  ip++;
  forwardH = LZ4_hashPosition(ip, tableType);

  /* Main Loop */
  for (;;) {
    const BYTE* match;
    BYTE* token;

    /* Find a match */
    {
      const BYTE* forwardIp = ip;
      unsigned step = 1;
      unsigned searchMatchNb = 1 << LZ4_skipTrigger;

      do {
        U32 h = forwardH;
        ip = forwardIp;
        forwardIp += step;
        step = (searchMatchNb++ >> LZ4_skipTrigger);

        if (unlikely(forwardIp > mflimit)) goto _last_literals;

        match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
        forwardH = LZ4_hashPosition(forwardIp, tableType);
        LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);

      } while (((tableType == byU16) ? 0 : (match + MAX_DISTANCE < ip)) ||
               (LZ4_read32(match) != LZ4_read32(ip)));
    }

    /* Catch up */
    while ((ip > anchor) && (match > lowLimit) &&
           (unlikely(ip[-1] == match[-1]))) {
      ip--;
      match--;
    }

    /* Encode Literal length */
    {
      unsigned litLength = (unsigned)(ip - anchor);
      token = op++;
      if (op + ((litLength + 240) / 255) + litLength > oMaxLit) {
        /* Not enough space for a last match */
        op--;
        goto _last_literals;
      }
      if (litLength >= RUN_MASK) {
        unsigned len = litLength - RUN_MASK;
        *token = (RUN_MASK << ML_BITS);
        for (; len >= 255; len -= 255) *op++ = 255;
        *op++ = (BYTE)len;
      } else
        *token = (BYTE)(litLength << ML_BITS);

      /* Copy Literals */
      LZ4_wildCopy(op, anchor, op + litLength);
      op += litLength;
    }

  _next_match:
    /* Encode Offset */
    LZ4_writeLE16(op, (U16)(ip - match));
    op += 2;

    /* Encode MatchLength */
    {
      size_t matchLength =
          LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit);

      if (op + ((matchLength + 240) / 255) > oMaxMatch) {
        /* Match description too long : reduce it */
        matchLength = (15 - 1) + (oMaxMatch - op) * 255;
      }
      ip += MINMATCH + matchLength;

      if (matchLength >= ML_MASK) {
        *token += ML_MASK;
        matchLength -= ML_MASK;
        while (matchLength >= 255) {
          matchLength -= 255;
          *op++ = 255;
        }
        *op++ = (BYTE)matchLength;
      } else
        *token += (BYTE)(matchLength);
    }

    anchor = ip;

    /* Test end of block */
    if (ip > mflimit) break;
    if (op > oMaxSeq) break;

    /* Fill table */
    LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);

    /* Test next position */
    match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
    LZ4_putPosition(ip, ctx->hashTable, tableType, base);
    if ((match + MAX_DISTANCE >= ip) && (LZ4_read32(match) == LZ4_read32(ip))) {
      token = op++;
      *token = 0;
      goto _next_match;
    }

    /* Prepare next loop */
    forwardH = LZ4_hashPosition(++ip, tableType);
  }

_last_literals:
  /* Encode Last Literals */
  {
    size_t lastRunSize = (size_t)(iend - anchor);
    if (op + 1 /* token */ + ((lastRunSize + 240) / 255) /* litLength */ +
            lastRunSize /* literals */
        > oend) {
      /* adapt lastRunSize to fill 'dst' */
      lastRunSize = (oend - op) - 1;
      lastRunSize -= (lastRunSize + 240) / 255;
    }
    ip = anchor + lastRunSize;

    if (lastRunSize >= RUN_MASK) {
      size_t accumulator = lastRunSize - RUN_MASK;
      *op++ = RUN_MASK << ML_BITS;
      for (; accumulator >= 255; accumulator -= 255) *op++ = 255;
      *op++ = (BYTE)accumulator;
    } else {
      *op++ = (BYTE)(lastRunSize << ML_BITS);
    }
    memcpy(op, anchor, lastRunSize);
    op += lastRunSize;
  }

  /* End */
  *srcSizePtr = (int)(((const char*)ip) - src);
  return (int)(((char*)op) - dst);
}

static int LZ4_compress_destSize_extState(LZ4_stream_t* state, const char* src,
                                          char* dst, int* srcSizePtr,
                                          int targetDstSize) {
  LZ4_resetStream(state);

  if (targetDstSize >=
      LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
    return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr,
                                      targetDstSize, 1);
  } else {
    if (*srcSizePtr < LZ4_64Klimit)
      return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst,
                                           srcSizePtr, targetDstSize, byU16);
    else
      return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst,
                                           srcSizePtr, targetDstSize,
                                           sizeof(void*) == 8 ? byU32 : byPtr);
  }
}

int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr,
                          int targetDstSize) {
#if (LZ4_HEAPMODE)
  LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(
      1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
#else
  LZ4_stream_t ctxBody;
  LZ4_stream_t* ctx = &ctxBody;
#endif

  int result =
      LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);

#if (LZ4_HEAPMODE)
  FREEMEM(ctx);
#endif
  return result;
}
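
/*
 * Usage sketch (illustrative) : fill a fixed 512-byte slot with as much of
 * `src` as will fit; LZ4_compress_destSize() reports how much input was
 * actually consumed :
 *
 *   char slot[512];
 *   int consumed = srcSize;  // in : bytes available; out : bytes consumed
 *   int const written =
 *       LZ4_compress_destSize(src, slot, &consumed, (int)sizeof(slot));
 *   // `written` bytes of `slot` decode back to the first `consumed` bytes
 */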

/*-******************************
 *  Streaming functions
 ********************************/

LZ4_stream_t* LZ4_createStream(void) {
  LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
  LZ4_STATIC_ASSERT(
      LZ4_STREAMSIZE >=
      sizeof(LZ4_stream_t_internal)); /* A compilation error here means
                                         LZ4_STREAMSIZE is not large enough */
  LZ4_resetStream(lz4s);
  return lz4s;
}

void LZ4_resetStream(LZ4_stream_t* LZ4_stream) {
  DEBUGLOG(4, "LZ4_resetStream");
  MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}

int LZ4_freeStream(LZ4_stream_t* LZ4_stream) {
  if (!LZ4_stream) return 0; /* support free on NULL */
  FREEMEM(LZ4_stream);
  return (0);
}

#define HASH_UNIT sizeof(reg_t)
int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) {
  LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
  const BYTE* p = (const BYTE*)dictionary;
  const BYTE* const dictEnd = p + dictSize;
  const BYTE* base;

  if ((dict->initCheck) ||
      (dict->currentOffset >
       1 GB)) /* Uninitialized structure, or reuse overflow */
    LZ4_resetStream(LZ4_dict);

  if (dictSize < (int)HASH_UNIT) {
    dict->dictionary = NULL;
    dict->dictSize = 0;
    return 0;
  }

  if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
  dict->currentOffset += 64 KB;
  base = p - dict->currentOffset;
  dict->dictionary = p;
  dict->dictSize = (U32)(dictEnd - p);
  dict->currentOffset += dict->dictSize;

  while (p <= dictEnd - HASH_UNIT) {
    LZ4_putPosition(p, dict->hashTable, byU32, base);
    p += 3;
  }

  return dict->dictSize;
}

static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) {
  if ((LZ4_dict->currentOffset > 0x80000000) ||
      ((uptrval)LZ4_dict->currentOffset >
       (uptrval)src)) { /* address space overflow */
    /* rescale hash table */
    U32 const delta = LZ4_dict->currentOffset - 64 KB;
    const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
    int i;
    for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
      if (LZ4_dict->hashTable[i] < delta)
        LZ4_dict->hashTable[i] = 0;
      else
        LZ4_dict->hashTable[i] -= delta;
    }
    LZ4_dict->currentOffset = 64 KB;
    if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
    LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
  }
}

int LZ4_compress_fast_continue(LZ4_stream_t* LZ4_stream, const char* source,
                               char* dest, int inputSize, int maxOutputSize,
                               int acceleration) {
  LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
  const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

  const BYTE* smallest = (const BYTE*)source;
  if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */
  if ((streamPtr->dictSize > 0) && (smallest > dictEnd)) smallest = dictEnd;
  LZ4_renormDictT(streamPtr, smallest);
  if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

  /* Check overlapping input/dictionary space */
  {
    const BYTE* sourceEnd = (const BYTE*)source + inputSize;
    if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
      streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
      if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
      if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
      streamPtr->dictionary = dictEnd - streamPtr->dictSize;
    }
  }

  /* prefix mode : source data follows dictionary */
  if (dictEnd == (const BYTE*)source) {
    int result;
    if ((streamPtr->dictSize < 64 KB) &&
        (streamPtr->dictSize < streamPtr->currentOffset))
      result = LZ4_compress_generic(streamPtr, source, dest, inputSize,
                                    maxOutputSize, limitedOutput, byU32,
                                    withPrefix64k, dictSmall, acceleration);
    else
      result = LZ4_compress_generic(streamPtr, source, dest, inputSize,
                                    maxOutputSize, limitedOutput, byU32,
                                    withPrefix64k, noDictIssue, acceleration);
    streamPtr->dictSize += (U32)inputSize;
    streamPtr->currentOffset += (U32)inputSize;
    return result;
  }

  /* external dictionary mode */
  {
    int result;
    if ((streamPtr->dictSize < 64 KB) &&
        (streamPtr->dictSize < streamPtr->currentOffset))
      result = LZ4_compress_generic(streamPtr, source, dest, inputSize,
                                    maxOutputSize, limitedOutput, byU32,
                                    usingExtDict, dictSmall, acceleration);
    else
      result = LZ4_compress_generic(streamPtr, source, dest, inputSize,
                                    maxOutputSize, limitedOutput, byU32,
                                    usingExtDict, noDictIssue, acceleration);
    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)inputSize;
    streamPtr->currentOffset += (U32)inputSize;
    return result;
  }
}

/* Hidden debug function, to force external dictionary mode */
int LZ4_compress_forceExtDict(LZ4_stream_t* LZ4_dict, const char* source,
                              char* dest, int inputSize) {
  LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
  int result;
  const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

  const BYTE* smallest = dictEnd;
  if (smallest > (const BYTE*)source) smallest = (const BYTE*)source;
  LZ4_renormDictT(streamPtr, smallest);

  result =
      LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited,
                           byU32, usingExtDict, noDictIssue, 1);

  streamPtr->dictionary = (const BYTE*)source;
  streamPtr->dictSize = (U32)inputSize;
  streamPtr->currentOffset += (U32)inputSize;

  return result;
}

/*! LZ4_saveDict() :
 *  If the previously compressed data block is not guaranteed to remain
 *  available at its current memory location, save it into a safer place
 *  (char* safeBuffer).
 *  Note : you don't need to call LZ4_loadDict() afterwards; the dictionary
 *  is immediately usable, so you can call LZ4_compress_fast_continue().
 *  Return : saved dictionary size in bytes (necessarily <= dictSize),
 *  or 0 if error.
 */
int LZ4_saveDict(LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) {
  LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
  const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;

  if ((U32)dictSize > 64 KB)
    dictSize = 64 KB; /* useless to define a dictionary > 64 KB */
  if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;

  memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

  dict->dictionary = (const BYTE*)safeBuffer;
  dict->dictSize = (U32)dictSize;

  return dictSize;
}
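
/*
 * Streaming usage sketch (illustrative, caller-side code; hasNextChunk(),
 * chunk, out, chunkSize and outCap are hypothetical) : each block may
 * reference up to 64 KB of history from previous blocks :
 *
 *   LZ4_stream_t* const st = LZ4_createStream();
 *   static char history[64 * 1024];
 *   while (hasNextChunk()) {
 *     int const n = LZ4_compress_fast_continue(st, chunk, out,
 *                                              chunkSize, outCap, 1);
 *     LZ4_saveDict(st, history, (int)sizeof(history));  // keep window valid
 *   }
 *   LZ4_freeStream(st);
 *
 * LZ4_loadDict() may be called once up front to prime the stream with a
 * preset dictionary instead.
 */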

/*-*****************************
 *  Decompression functions
 *******************************/
/*! LZ4_decompress_generic() :
 *  This generic decompression function covers all use cases.
 *  It shall be instantiated several times, using different sets of directives.
 *  Note that it is important for performance that this function really get
 *  inlined, in order to remove useless branches during compilation
 *  optimization.
 */
LZ4_FORCE_O2_GCC_PPC64LE
LZ4_FORCE_INLINE int LZ4_decompress_generic(
    const char* const src, char* const dst, int srcSize,
    int outputSize, /* If endOnInput==endOnInputSize, this value is
                       `dstCapacity` */

    int endOnInput,              /* endOnOutputSize, endOnInputSize */
    int partialDecoding,         /* full, partial */
    int targetOutputSize,        /* only used if partialDecoding==partial */
    int dict,                    /* noDict, withPrefix64k, usingExtDict */
    const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
    const BYTE* const dictStart, /* only if dict==usingExtDict */
    const size_t dictSize        /* note : = 0 if noDict */
) {
  const BYTE* ip = (const BYTE*)src;
  const BYTE* const iend = ip + srcSize;

  BYTE* op = (BYTE*)dst;
  BYTE* const oend = op + outputSize;
  BYTE* cpy;
  BYTE* oexit = op + targetOutputSize;

  const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
  const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
  const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
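
  /* Descriptive note (added for clarity) : these two tables support the
   * overlapping-match copy for offsets < 8 further below. After the first 4
   * bytes are copied one at a time, inc32table[offset] advances `match` so
   * the next 4-byte copy continues the repeating pattern, and
   * dec64table[offset] then rewinds `match` so the subsequent 8-byte-stride
   * copies stay aligned with the output already written. */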
1324 
1325   const int safeDecode = (endOnInput == endOnInputSize);
1326   const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
1327 
1328   /* Special cases */
1329   if ((partialDecoding) && (oexit > oend - MFLIMIT))
1330     oexit = oend -
1331             MFLIMIT; /* targetOutputSize too high => just decode everything */
1332   if ((endOnInput) && (unlikely(outputSize == 0)))
1333     return ((srcSize == 1) && (*ip == 0)) ? 0 : -1; /* Empty output buffer */
1334   if ((!endOnInput) && (unlikely(outputSize == 0))) return (*ip == 0 ? 1 : -1);
1335 
1336   /* Main Loop : decode sequences */
1337   while (1) {
1338     size_t length;
1339     const BYTE* match;
1340     size_t offset;
1341 
1342     unsigned const token = *ip++;
1343 
1344     /* shortcut for common case :
1345      * in most circumstances, we expect to decode small matches (<= 18 bytes)
1346      * separated by few literals (<= 14 bytes). this shortcut was tested on x86
1347      * and x64, where it improves decoding speed. it has not yet been
1348      * benchmarked on ARM, Power, mips, etc. */
1349     if (((ip + 14 /*maxLL*/ + 2 /*offset*/ <= iend) &
1350          (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend)) &
1351         ((token < (15 << ML_BITS)) & ((token & ML_MASK) != 15))) {
1352       size_t const ll = token >> ML_BITS;
1353       size_t const off = LZ4_readLE16(ip + ll);
1354       const BYTE* const matchPtr = op + ll - off; /* pointer underflow risk ? */
1355       if ((off >= 18) /* do not deal with overlapping matches */ &
1356           (matchPtr >= lowPrefix)) {
1357         size_t const ml = (token & ML_MASK) + MINMATCH;
1358         memcpy(op, ip, 16);
1359         op += ll;
1360         ip += ll + 2 /*offset*/;
1361         memcpy(op, matchPtr, 18);
1362         op += ml;
1363         continue;
1364       }
1365     }

    /* decode literal length */
    if ((length = (token >> ML_BITS)) == RUN_MASK) {
      unsigned s;
      do {
        s = *ip++;
        length += s;
      } while (likely(endOnInput ? ip < iend - RUN_MASK : 1) & (s == 255));
      if ((safeDecode) && unlikely((uptrval)(op) + length < (uptrval)(op)))
        goto _output_error; /* overflow detection */
      if ((safeDecode) && unlikely((uptrval)(ip) + length < (uptrval)(ip)))
        goto _output_error; /* overflow detection */
    }

    /* copy literals */
    cpy = op + length;
    if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT)) ||
                          (ip + length > iend - (2 + 1 + LASTLITERALS)))) ||
        ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
      if (partialDecoding) {
        if (cpy > oend)
          goto _output_error; /* Error : write attempt beyond end of output
                                 buffer */
        if ((endOnInput) && (ip + length > iend))
          goto _output_error; /* Error : read attempt beyond end of input
                                 buffer */
      } else {
        if ((!endOnInput) && (cpy != oend))
          goto _output_error; /* Error : block decoding must stop exactly
                                 there */
        if ((endOnInput) && ((ip + length != iend) || (cpy > oend)))
          goto _output_error; /* Error : input must be consumed */
      }
      memcpy(op, ip, length);
      ip += length;
      op += length;
      break; /* Necessarily EOF, due to parsing restrictions */
    }
    LZ4_wildCopy(op, ip, cpy);
    ip += length;
    op = cpy;

    /* get offset */
    offset = LZ4_readLE16(ip);
    ip += 2;
    match = op - offset;
    if ((checkOffset) && (unlikely(match + dictSize < lowPrefix)))
      goto _output_error; /* Error : offset outside buffers */
    LZ4_write32(
        op,
        (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */

    /* get matchlength */
    length = token & ML_MASK;
    if (length == ML_MASK) {
      unsigned s;
      do {
        s = *ip++;
        if ((endOnInput) && (ip > iend - LASTLITERALS)) goto _output_error;
        length += s;
      } while (s == 255);
      if ((safeDecode) && unlikely((uptrval)(op) + length < (uptrval)op))
        goto _output_error; /* overflow detection */
    }
    length += MINMATCH;

    /* check external dictionary */
    if ((dict == usingExtDict) && (match < lowPrefix)) {
      if (unlikely(op + length > oend - LASTLITERALS))
        goto _output_error; /* doesn't respect parsing restriction */

      if (length <= (size_t)(lowPrefix - match)) {
        /* match can be copied as a single segment from external dictionary */
        memmove(op, dictEnd - (lowPrefix - match), length);
        op += length;
      } else {
        /* match spans the external dictionary and the current block */
        size_t const copySize = (size_t)(lowPrefix - match);
        size_t const restSize = length - copySize;
        memcpy(op, dictEnd - copySize, copySize);
        op += copySize;
        if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
          BYTE* const endOfMatch = op + restSize;
          const BYTE* copyFrom = lowPrefix;
          while (op < endOfMatch) *op++ = *copyFrom++;
        } else {
          memcpy(op, lowPrefix, restSize);
          op += restSize;
        }
      }
      continue;
    }

    /* copy match within block */
    cpy = op + length;
    if (unlikely(offset < 8)) {
      /* overlapping match : copy the first 8 bytes one step at a time, using
       * the tables to realign `match` so subsequent copies read valid data */
      op[0] = match[0];
      op[1] = match[1];
      op[2] = match[2];
      op[3] = match[3];
      match += inc32table[offset];
      memcpy(op + 4, match, 4);
      match -= dec64table[offset];
    } else {
      LZ4_copy8(op, match);
      match += 8;
    }
    op += 8;

    if (unlikely(cpy > oend - 12)) {
      BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
      if (cpy > oend - LASTLITERALS)
        goto _output_error; /* Error : last LASTLITERALS bytes must be
                               literals (uncompressed) */
      if (op < oCopyLimit) {
        LZ4_wildCopy(op, match, oCopyLimit);
        match += oCopyLimit - op;
        op = oCopyLimit;
      }
      while (op < cpy) *op++ = *match++;
    } else {
      LZ4_copy8(op, match);
      if (length > 16) LZ4_wildCopy(op + 8, match + 8, cpy);
    }
    op = cpy; /* correction */
  }

  /* end of decoding */
  if (endOnInput)
    return (int)(((char*)op) - dst); /* Nb of output bytes decoded */
  else
    return (int)(((const char*)ip) - src); /* Nb of input bytes read */

  /* Overflow error detected */
_output_error:
  return (int)(-(((const char*)ip) - src)) - 1;
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize,
                        int maxDecompressedSize) {
  return LZ4_decompress_generic(source, dest, compressedSize,
                                maxDecompressedSize, endOnInputSize, full, 0,
                                noDict, (BYTE*)dest, NULL, 0);
}
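
/* Usage sketch (illustrative only, not part of the library) : decoding one
 * independent block with LZ4_decompress_safe(). `decodedCapacity` must be the
 * real size of `decoded`; a negative return value signals malformed or
 * truncated input. The guard LZ4_USAGE_EXAMPLES and all names below are
 * hypothetical, so these examples are not compiled by default. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_decode_block(const char* compressed, int compressedSize,
                                char* decoded, int decodedCapacity) {
  int const decodedSize = LZ4_decompress_safe(compressed, decoded,
                                              compressedSize, decodedCapacity);
  if (decodedSize < 0) return -1; /* corrupted input, or dst too small */
  return decodedSize;             /* nb of bytes written into decoded */
}
#endif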

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_partial(const char* source, char* dest,
                                int compressedSize, int targetOutputSize,
                                int maxDecompressedSize) {
  return LZ4_decompress_generic(source, dest, compressedSize,
                                maxDecompressedSize, endOnInputSize, partial,
                                targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
}
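
/* Usage sketch (illustrative only) : partial decoding with
 * LZ4_decompress_safe_partial(). Note that, in this version, the function may
 * write somewhat more than `targetOutputSize` bytes (up to the end of the
 * current sequence), so `decoded` must still provide `decodedCapacity` bytes.
 * Names are hypothetical. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_decode_prefix(const char* compressed, int compressedSize,
                                 char* decoded, int decodedCapacity,
                                 int targetOutputSize) {
  int const decodedSize = LZ4_decompress_safe_partial(
      compressed, decoded, compressedSize, targetOutputSize, decodedCapacity);
  return decodedSize; /* >= 0 : nb bytes decoded (may exceed target), < 0 : error */
}
#endif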

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast(const char* source, char* dest, int originalSize) {
  return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize,
                                full, 0, withPrefix64k, (BYTE*)(dest - 64 KB),
                                NULL, 64 KB);
}
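
/* Usage sketch (illustrative only) : LZ4_decompress_fast() requires the exact
 * size of the original data and does not validate the compressed input, so it
 * should only be fed trusted, well-formed data; prefer LZ4_decompress_safe()
 * otherwise. Names are hypothetical. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_decode_trusted(const char* compressed, char* decoded,
                                  int originalSize) {
  /* returns nb of compressed bytes consumed, or a negative value on error */
  return LZ4_decompress_fast(compressed, decoded, originalSize);
}
#endif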

/*===== streaming decompression functions =====*/

LZ4_streamDecode_t* LZ4_createStreamDecode(void) {
  LZ4_streamDecode_t* lz4s =
      (LZ4_streamDecode_t*)ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
  return lz4s;
}

int LZ4_freeStreamDecode(LZ4_streamDecode_t* LZ4_stream) {
  if (!LZ4_stream) return 0; /* support free on NULL */
  FREEMEM(LZ4_stream);
  return 0;
}

/*!
 * LZ4_setStreamDecode() :
 * Use this function to instruct where to find the dictionary.
 * This function is not necessary if previous data is still available where it
 * was decoded. Loading a size of 0 is allowed (same effect as no dictionary).
 * Return : 1 if OK, 0 if error
 */
int LZ4_setStreamDecode(LZ4_streamDecode_t* LZ4_streamDecode,
                        const char* dictionary, int dictSize) {
  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
  lz4sd->prefixSize = (size_t)dictSize;
  lz4sd->prefixEnd = (const BYTE*)dictionary + dictSize;
  lz4sd->externalDict = NULL;
  lz4sd->extDictSize = 0;
  return 1;
}
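
/* Usage sketch (illustrative only) : allocating a streaming decoder and
 * pointing it at a dictionary held in a separate buffer, before decoding the
 * first block of a stream. Names are hypothetical. */
#if defined(LZ4_USAGE_EXAMPLES)
static LZ4_streamDecode_t* example_start_stream(const char* dictBuffer,
                                                int dictSize) {
  LZ4_streamDecode_t* const stream = LZ4_createStreamDecode();
  if (stream == NULL) return NULL;
  if (!LZ4_setStreamDecode(stream, dictBuffer, dictSize)) {
    LZ4_freeStreamDecode(stream);
    return NULL;
  }
  return stream;
}
#endif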

/*
 * *_continue() :
 * These decoding functions allow decompression of multiple blocks in
 * "streaming" mode. Previously decoded blocks must still be available at the
 * memory position where they were decoded. If that's not possible, save the
 * relevant part of decoded data into a safe buffer, and indicate where it
 * stands using LZ4_setStreamDecode().
 */
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_continue(LZ4_streamDecode_t* LZ4_streamDecode,
                                 const char* source, char* dest,
                                 int compressedSize, int maxOutputSize) {
  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
  int result;

  if (lz4sd->prefixEnd == (BYTE*)dest) {
    result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                    endOnInputSize, full, 0, usingExtDict,
                                    lz4sd->prefixEnd - lz4sd->prefixSize,
                                    lz4sd->externalDict, lz4sd->extDictSize);
    if (result <= 0) return result;
    lz4sd->prefixSize += result;
    lz4sd->prefixEnd += result;
  } else {
    lz4sd->extDictSize = lz4sd->prefixSize;
    lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
    result = LZ4_decompress_generic(
        source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0,
        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
    if (result <= 0) return result;
    lz4sd->prefixSize = result;
    lz4sd->prefixEnd = (BYTE*)dest + result;
  }

  return result;
}
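
/* Usage sketch (illustrative only) : decoding a sequence of dependent blocks
 * with LZ4_decompress_safe_continue(), alternating between two decode buffers
 * so the previous block stays addressable while the next one is decoded. Each
 * buffer must hold one full decoded block (64 KB here); `readBlock` is a
 * hypothetical callback returning the next compressed block size (0 = done). */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_decode_stream(LZ4_streamDecode_t* stream,
                                 int (*readBlock)(char* cmpBuf, int cmpCapacity),
                                 char decBuf[2][65536], char* cmpBuf,
                                 int cmpCapacity) {
  int bufIndex = 0;
  for (;;) {
    int const cmpSize = readBlock(cmpBuf, cmpCapacity);
    if (cmpSize == 0) return 0; /* end of stream */
    {
      int const decSize = LZ4_decompress_safe_continue(
          stream, cmpBuf, decBuf[bufIndex], cmpSize, 65536);
      if (decSize < 0) return -1; /* malformed block */
      /* consume decBuf[bufIndex][0..decSize-1] here */
    }
    bufIndex ^= 1; /* previous block remains valid in the other buffer */
  }
}
#endif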

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_continue(LZ4_streamDecode_t* LZ4_streamDecode,
                                 const char* source, char* dest,
                                 int originalSize) {
  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
  int result;

  if (lz4sd->prefixEnd == (BYTE*)dest) {
    result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                    endOnOutputSize, full, 0, usingExtDict,
                                    lz4sd->prefixEnd - lz4sd->prefixSize,
                                    lz4sd->externalDict, lz4sd->extDictSize);
    if (result <= 0) return result;
    lz4sd->prefixSize += originalSize;
    lz4sd->prefixEnd += originalSize;
  } else {
    lz4sd->extDictSize = lz4sd->prefixSize;
    lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
    result = LZ4_decompress_generic(
        source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict,
        (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
    if (result <= 0) return result;
    lz4sd->prefixSize = originalSize;
    lz4sd->prefixEnd = (BYTE*)dest + originalSize;
  }

  return result;
}

/*
 * Advanced decoding functions :
 * *_usingDict() :
 * These decoding functions work the same as the "_continue" ones, except that
 * the dictionary must be provided explicitly as a parameter.
 */

LZ4_FORCE_O2_GCC_PPC64LE
LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(
    const char* source, char* dest, int compressedSize, int maxOutputSize,
    int safe, const char* dictStart, int dictSize) {
  if (dictSize == 0)
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                  safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
  if (dictStart + dictSize == dest) {
    if (dictSize >= (int)(64 KB - 1))
      return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                    safe, full, 0, withPrefix64k,
                                    (BYTE*)dest - 64 KB, NULL, 0);
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                  safe, full, 0, noDict, (BYTE*)dest - dictSize,
                                  NULL, 0);
  }
  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                safe, full, 0, usingExtDict, (BYTE*)dest,
                                (const BYTE*)dictStart, dictSize);
}

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_usingDict(const char* source, char* dest,
                                  int compressedSize, int maxOutputSize,
                                  const char* dictStart, int dictSize) {
  return LZ4_decompress_usingDict_generic(
      source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
}
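
/* Usage sketch (illustrative only) : decoding independent blocks that were all
 * compressed against the same dictionary, using
 * LZ4_decompress_safe_usingDict(). Unlike the "_continue" variants, no
 * streaming state is needed; the dictionary is passed on each call. Names are
 * hypothetical. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_decode_with_dict(const char* compressed, int compressedSize,
                                    char* decoded, int decodedCapacity,
                                    const char* dict, int dictSize) {
  return LZ4_decompress_safe_usingDict(compressed, decoded, compressedSize,
                                       decodedCapacity, dict, dictSize);
}
#endif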

LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_fast_usingDict(const char* source, char* dest,
                                  int originalSize, const char* dictStart,
                                  int dictSize) {
  return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0,
                                          dictStart, dictSize);
}

/* debug function */
LZ4_FORCE_O2_GCC_PPC64LE
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
                                     int compressedSize, int maxOutputSize,
                                     const char* dictStart, int dictSize) {
  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                endOnInputSize, full, 0, usingExtDict,
                                (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}

/*=*************************************************
 *  Obsolete Functions
 ***************************************************/
/* obsolete compression functions */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize,
                               int maxOutputSize) {
  return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
}
int LZ4_compress(const char* source, char* dest, int inputSize) {
  return LZ4_compress_default(source, dest, inputSize,
                              LZ4_compressBound(inputSize));
}
int LZ4_compress_limitedOutput_withState(void* state, const char* src,
                                         char* dst, int srcSize, int dstSize) {
  return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
}
int LZ4_compress_withState(void* state, const char* src, char* dst,
                           int srcSize) {
  return LZ4_compress_fast_extState(state, src, dst, srcSize,
                                    LZ4_compressBound(srcSize), 1);
}
int LZ4_compress_limitedOutput_continue(LZ4_stream_t* LZ4_stream,
                                        const char* src, char* dst, int srcSize,
                                        int maxDstSize) {
  return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize,
                                    1);
}
int LZ4_compress_continue(LZ4_stream_t* LZ4_stream, const char* source,
                          char* dest, int inputSize) {
  return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize,
                                    LZ4_compressBound(inputSize), 1);
}

/*
 * These function names are deprecated and should no longer be used.
 * They are only provided here for compatibility with older user programs.
 * - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
 * - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
 */
int LZ4_uncompress(const char* source, char* dest, int outputSize) {
  return LZ4_decompress_fast(source, dest, outputSize);
}
int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize,
                                     int maxOutputSize) {
  return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}
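
/* Migration sketch (illustrative only) : replacing the deprecated entry points
 * with their modern equivalents, as stated in the comment above. Names are
 * hypothetical. */
#if defined(LZ4_USAGE_EXAMPLES)
static int example_migrated_uncompress(const char* src, char* dst,
                                       int originalSize) {
  /* was : LZ4_uncompress(src, dst, originalSize); */
  return LZ4_decompress_fast(src, dst, originalSize);
}
#endif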

/* Obsolete Streaming functions */

int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }

static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base) {
  MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
  lz4ds->internal_donotuse.bufferStart = base;
}

int LZ4_resetStreamState(void* state, char* inputBuffer) {
  if ((((uptrval)state) & 3) != 0)
    return 1; /* Error : pointer is not aligned on 4-bytes boundary */
  LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
  return 0;
}
void* LZ4_create(char* inputBuffer) {
  LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
  if (lz4ds == NULL) return NULL; /* allocation failure */
  LZ4_init(lz4ds, (BYTE*)inputBuffer);
  return lz4ds;
}

char* LZ4_slideInputBuffer(void* LZ4_Data) {
  LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
  int dictSize =
      LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
  return (char*)(ctx->bufferStart + dictSize);
}

/* Obsolete streaming decompression functions */

int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest,
                                      int compressedSize, int maxOutputSize) {
  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                endOnInputSize, full, 0, withPrefix64k,
                                (BYTE*)dest - 64 KB, NULL, 64 KB);
}

int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest,
                                      int originalSize) {
  return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize,
                                full, 0, withPrefix64k, (BYTE*)dest - 64 KB,
                                NULL, 64 KB);
}

#endif /* LZ4_COMMONDEFS_ONLY */