/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This header contains definitions
 * that shall **only** be used by modules within lib/compress.
 */

#ifndef ZSTD_COMPRESS_H
#define ZSTD_COMPRESS_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_internal.h"
#include "zstd_cwksp.h"
#ifdef ZSTD_MULTITHREAD
#  include "zstdmt_compress.h"
#endif

#if defined (__cplusplus)
extern "C" {
#endif


/*-*************************************
*  Constants
***************************************/
#define kSearchStrength      8
#define HASH_READ_SIZE       8
#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
                                       It's not a big deal though: the candidate will just be sorted again.
                                       Additionally, candidate position 1 will be lost.
                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy.
                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal(). */


/*-*************************************
*  Context memory management
***************************************/
typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;

typedef struct ZSTD_prefixDict_s {
    const void* dict;
    size_t dictSize;
    ZSTD_dictContentType_e dictContentType;
} ZSTD_prefixDict;

typedef struct {
    void* dictBuffer;
    void const* dict;
    size_t dictSize;
    ZSTD_dictContentType_e dictContentType;
    ZSTD_CDict* cdict;
} ZSTD_localDict;

typedef struct {
    U32 CTable[HUF_CTABLE_SIZE_U32(255)];
    HUF_repeat repeatMode;
} ZSTD_hufCTables_t;

typedef struct {
    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
    FSE_repeat offcode_repeatMode;
    FSE_repeat matchlength_repeatMode;
    FSE_repeat litlength_repeatMode;
} ZSTD_fseCTables_t;

typedef struct {
    ZSTD_hufCTables_t huf;
    ZSTD_fseCTables_t fse;
} ZSTD_entropyCTables_t;

typedef struct {
    U32 off;
    U32 len;
} ZSTD_match_t;

typedef struct {
    int price;
    U32 off;
    U32 mlen;
    U32 litlen;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;

typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;

typedef struct {
    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
    unsigned* litFreq;           /* table of literals statistics, of size 256 */
    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */
    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */
    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */
    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */

    U32  litSum;                 /* nb of literals */
    U32  litLengthSum;           /* nb of litLength codes */
    U32  matchLengthSum;         /* nb of matchLength codes */
    U32  offCodeSum;             /* nb of offset codes */
    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
    ZSTD_literalCompressionMode_e literalCompressionMode;
} optState_t;

typedef struct {
  ZSTD_entropyCTables_t entropy;
  U32 rep[ZSTD_REP_NUM];
} ZSTD_compressedBlockState_t;

typedef struct {
    BYTE const* nextSrc;    /* next block here to continue on current prefix */
    BYTE const* base;       /* All regular indexes relative to this position */
    BYTE const* dictBase;   /* extDict indexes relative to this position */
    U32 dictLimit;          /* below that point, need extDict */
    U32 lowLimit;           /* below that point, no more valid data */
} ZSTD_window_t;

typedef struct ZSTD_matchState_t ZSTD_matchState_t;
struct ZSTD_matchState_t {
    ZSTD_window_t window;   /* State for window round buffer management */
    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential.
                             * When loadedDictEnd != 0, a dictionary is in use, and still valid.
                             * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
                             * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
                             * When dict referential is copied into active context (i.e. not attached),
                             * loadedDictEnd == dictSize, since referential starts from zero.
                             */
    U32 nextToUpdate;       /* index from which to continue table update */
    U32 hashLog3;           /* dispatch table for matches of len==3 : larger == faster, more memory */
    U32* hashTable;
    U32* hashTable3;
    U32* chainTable;
    optState_t opt;         /* optimal parser state */
    const ZSTD_matchState_t* dictMatchState;
    ZSTD_compressionParameters cParams;
};

typedef struct {
    ZSTD_compressedBlockState_t* prevCBlock;
    ZSTD_compressedBlockState_t* nextCBlock;
    ZSTD_matchState_t matchState;
} ZSTD_blockState_t;

typedef struct {
    U32 offset;
    U32 checksum;
} ldmEntry_t;

typedef struct {
    ZSTD_window_t window;   /* State for the window round buffer management */
    ldmEntry_t* hashTable;
    U32 loadedDictEnd;
    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
    U64 hashPower;          /* Used to compute the rolling hash.
                             * Depends on ldmParams.minMatchLength */
} ldmState_t;

typedef struct {
    U32 enableLdm;          /* 1 to enable long distance matching */
    U32 hashLog;            /* Log size of hashTable */
    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
    U32 minMatchLength;     /* Minimum match length */
    U32 hashRateLog;        /* Log number of entries to skip */
    U32 windowLog;          /* Window log for the LDM */
} ldmParams_t;

typedef struct {
    U32 offset;
    U32 litLength;
    U32 matchLength;
} rawSeq;

typedef struct {
  rawSeq* seq;     /* The start of the sequences */
  size_t pos;      /* The position where reading stopped. <= size. */
  size_t size;     /* The number of sequences. <= capacity. */
  size_t capacity; /* The capacity starting from `seq` pointer */
} rawSeqStore_t;

typedef struct {
    int collectSequences;
    ZSTD_Sequence* seqStart;
    size_t seqIndex;
    size_t maxSequences;
} SeqCollector;

struct ZSTD_CCtx_params_s {
    ZSTD_format_e format;
    ZSTD_compressionParameters cParams;
    ZSTD_frameParameters fParams;

    int compressionLevel;
    int forceWindow;           /* force back-references to respect limit of
                                * 1<<wLog, even for dictionary */
    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
                                * No target when targetCBlockSize == 0.
                                * There is no guarantee on compressed block size */
    int srcSizeHint;           /* User's best guess of source size.
                                * Hint is not valid when srcSizeHint == 0.
                                * There is no guarantee that hint is close to actual source size */

    ZSTD_dictAttachPref_e attachDictPref;
    ZSTD_literalCompressionMode_e literalCompressionMode;

    /* Multithreading: used to pass parameters to mtctx */
    int nbWorkers;
    size_t jobSize;
    int overlapLog;
    int rsyncable;

    /* Long distance matching parameters */
    ldmParams_t ldmParams;

    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
    ZSTD_customMem customMem;
};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */

struct ZSTD_CCtx_s {
    ZSTD_compressionStage_e stage;
    int cParamsChanged;                  /* == 1 if cParams (except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
    ZSTD_CCtx_params requestedParams;
    ZSTD_CCtx_params appliedParams;
    U32   dictID;

    ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
    size_t blockSize;
    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
    unsigned long long consumedSrcSize;
    unsigned long long producedCSize;
    XXH64_state_t xxhState;
    ZSTD_customMem customMem;
    size_t staticSize;
    SeqCollector seqCollector;
    int isFirstBlock;
    int initialized;

    seqStore_t seqStore;      /* sequences storage ptrs */
    ldmState_t ldmState;      /* long distance matching state */
    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
    size_t maxNbLdmSequences;
    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
    ZSTD_blockState_t blockState;
    U32* entropyWorkspace;  /* entropy workspace of HUF_WORKSPACE_SIZE bytes */

    /* streaming */
    char*  inBuff;
    size_t inBuffSize;
    size_t inToCompress;
    size_t inBuffPos;
    size_t inBuffTarget;
    char*  outBuff;
    size_t outBuffSize;
    size_t outBuffContentSize;
    size_t outBuffFlushedSize;
    ZSTD_cStreamStage streamStage;
    U32    frameEnded;

    /* Dictionary */
    ZSTD_localDict localDict;
    const ZSTD_CDict* cdict;
    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */

    /* Multi-threading */
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_CCtx* mtctx;
#endif
};

typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;

typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;


typedef size_t (*ZSTD_blockCompressor) (
        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);


MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
{
    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
                                       8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 16, 17, 17, 18, 18, 19, 19,
                                      20, 20, 20, 20, 21, 21, 21, 21,
                                      22, 22, 22, 22, 22, 22, 22, 22,
                                      23, 23, 23, 23, 23, 23, 23, 23,
                                      24, 24, 24, 24, 24, 24, 24, 24,
                                      24, 24, 24, 24, 24, 24, 24, 24 };
    static const U32 LL_deltaCode = 19;
    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
}
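
/* Worked example, for clarity :
 * literal lengths 0-63 map directly through LL_Code[] (e.g. 48-63 all give code 24);
 * above 63, the code is derived from the highest set bit :
 * litLength==100 => ZSTD_highbit32(100)==6 => code 6+19 == 25. */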

/* ZSTD_MLcode() :
 * note : mlBase = matchLength - MINMATCH;
 *        because that's the format stored in seqStore->sequences */
MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
{
    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
    static const U32 ML_deltaCode = 36;
    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
}
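
/* Worked example, for clarity :
 * a match of length 200 is stored as mlBase = 200 - MINMATCH = 197;
 * since 197 > 127, code == ZSTD_highbit32(197) + 36 == 7 + 36 == 43. */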

typedef struct repcodes_s {
    U32 rep[3];
} repcodes_t;

MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
    repcodes_t newReps;
    if (offset >= ZSTD_REP_NUM) {  /* full offset */
        newReps.rep[2] = rep[1];
        newReps.rep[1] = rep[0];
        newReps.rep[0] = offset - ZSTD_REP_MOVE;
    } else {   /* repcode */
        U32 const repCode = offset + ll0;
        if (repCode > 0) {  /* note : if repCode==0, no change */
            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            newReps.rep[1] = rep[0];
            newReps.rep[0] = currentOffset;
        } else {   /* repCode == 0 */
            memcpy(&newReps, rep, sizeof(newReps));
        }
    }
    return newReps;
}
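
/* Example trace, for clarity :
 * with rep == {8, 16, 24}, offset==1 and ll0==0 :
 * repCode == 1, so currentOffset == rep[1] == 16,
 * and the updated history becomes {16, 8, 24}. */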

/* ZSTD_cParam_withinBounds:
 * @return 1 if value is within cParam bounds,
 * 0 otherwise */
MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
{
    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
    if (ZSTD_isError(bounds.error)) return 0;
    if (value < bounds.lowerBound) return 0;
    if (value > bounds.upperBound) return 0;
    return 1;
}

/* ZSTD_noCompressBlock() :
 * Writes uncompressed block to dst buffer from given src.
 * Returns the size of the block */
MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
{
    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
                    dstSize_tooSmall, "dst buf too small for uncompressed block");
    MEM_writeLE24(dst, cBlockHeader24);
    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
    return ZSTD_blockHeaderSize + srcSize;
}
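
/* Note : the 3-byte header written above packs, from bit 0 upward,
 * the lastBlock flag (1 bit), the block type (2 bits, bt_raw here),
 * and the block size (remaining bits), per the Zstandard block header format. */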

MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
{
    BYTE* const op = (BYTE*)dst;
    U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
    RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
    MEM_writeLE24(op, cBlockHeader);
    op[3] = src;
    return 4;
}


/* ZSTD_minGain() :
 * minimum compression required
 * to generate a compressed block or a compressed literals section.
 * note : use same formula for both situations */
MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
{
    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
    return (srcSize >> minlog) + 2;
}
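
/* Worked example, for clarity :
 * for strategies below ZSTD_btultra, minlog == 6, so a 64 KB block is only
 * emitted in compressed form if it saves at least (65536 >> 6) + 2 == 1026 bytes. */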

MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
{
    switch (cctxParams->literalCompressionMode) {
    case ZSTD_lcm_huffman:
        return 0;
    case ZSTD_lcm_uncompressed:
        return 1;
    default:
        assert(0 /* impossible: pre-validated */);
        /* fall-through */
    case ZSTD_lcm_auto:
        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
    }
}

/*! ZSTD_safecopyLiterals() :
 *  memcpy() variant that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
 *  Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
 *  large copies.
 */
static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
    assert(iend > ilimit_w);
    if (ip <= ilimit_w) {
        ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
        op += ilimit_w - ip;
        ip = ilimit_w;
    }
    while (ip < iend) *op++ = *ip++;
}

/*! ZSTD_storeSeq() :
 *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
 *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
 *  `mlBase` : matchLength - MINMATCH
 *  Allowed to overread literals up to litLimit.
*/
HINT_INLINE UNUSED_ATTR
void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
{
    BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
    BYTE const* const litEnd = literals + litLength;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
    static const BYTE* g_start = NULL;
    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
    }
#endif
    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
    /* copy Literals */
    assert(seqStorePtr->maxNbLit <= 128 KB);
    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
    assert(literals + litLength <= litLimit);
    if (litEnd <= litLimit_w) {
        /* Common case : we can use wildcopy.
         * First copy 16 bytes, because literals are likely short.
         */
        assert(WILDCOPY_OVERLENGTH >= 16);
        ZSTD_copy16(seqStorePtr->lit, literals);
        if (litLength > 16) {
            ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
        }
    } else {
        ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
    }
    seqStorePtr->lit += litLength;

    /* literal Length */
    if (litLength>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
        seqStorePtr->longLengthID = 1;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].litLength = (U16)litLength;

    /* match offset */
    seqStorePtr->sequences[0].offset = offCode + 1;

    /* match Length */
    if (mlBase>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
        seqStorePtr->longLengthID = 2;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].matchLength = (U16)mlBase;

    seqStorePtr->sequences++;
}
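
/* Usage sketch (illustrative, not an actual call site) :
 * recording 5 literals followed by a 7-byte match at distance 1024 would look like
 *   ZSTD_storeSeq(seqStore, 5, literals, litLimit, 1024 + ZSTD_REP_MOVE, 7 - MINMATCH);
 * repcodes are passed directly as offCode values <= ZSTD_REP_MOVE. */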


/*-*************************************
*  Match length counter
***************************************/
static unsigned ZSTD_NbCommonBytes (size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
                                                     0, 3, 1, 3, 1, 4, 2, 7,
                                                     0, 2, 3, 6, 1, 5, 3, 5,
                                                     1, 3, 4, 4, 2, 5, 6, 7,
                                                     7, 0, 1, 2, 3, 3, 4, 6,
                                                     2, 6, 5, 5, 3, 4, 5, 6,
                                                     7, 1, 2, 4, 6, 4, 4, 5,
                                                     7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r=0;
            return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
                                                     3, 2, 2, 1, 3, 2, 0, 1,
                                                     3, 3, 1, 2, 2, 2, 2, 0,
                                                     3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else {  /* Big Endian CPU */
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            return _BitScanReverse64( &r, val ) ? (unsigned)(r >> 3) : 0;
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r = 0;
            return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
    }   }
}


MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
    const BYTE* const pStart = pIn;
    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);

    if (pIn < pInLoopLimit) {
        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
          if (diff) return ZSTD_NbCommonBytes(diff); }
        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
        while (pIn < pInLoopLimit) {
            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
            pIn += ZSTD_NbCommonBytes(diff);
            return (size_t)(pIn - pStart);
    }   }
    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (size_t)(pIn - pStart);
}
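
/* Example, for clarity : with pIn pointing at "abcXYZ" and pMatch at "abcxyz",
 * ZSTD_count() returns 3, the length of the common prefix;
 * counting never proceeds past pInLimit. */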

/** ZSTD_count_2segments() :
 *  can count match length with `ip` & `match` in 2 different segments.
 *  convention : on reaching mEnd, match count continues from iStart
 */
MEM_STATIC size_t
ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
{
    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
    size_t const matchLength = ZSTD_count(ip, match, vEnd);
    if (match + matchLength != mEnd) return matchLength;
    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
    DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
    DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
}


/*-*************************************
 *  Hashes
 ***************************************/
static const U32 prime3bytes = 506832829U;
static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */

static const U32 prime4bytes = 2654435761U;
static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }

MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
    switch(mls)
    {
    default:
    case 4: return ZSTD_hash4Ptr(p, hBits);
    case 5: return ZSTD_hash5Ptr(p, hBits);
    case 6: return ZSTD_hash6Ptr(p, hBits);
    case 7: return ZSTD_hash7Ptr(p, hBits);
    case 8: return ZSTD_hash8Ptr(p, hBits);
    }
}
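
/* Example, for clarity : ZSTD_hashPtr(ip, hBits, 6) hashes the 6 bytes at ip
 * (read as a little-endian word) into an hBits-bit value,
 * suitable as an index into a hash table of (1 << hBits) entries. */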

/** ZSTD_ipow() :
 * Return base^exponent.
 */
static U64 ZSTD_ipow(U64 base, U64 exponent)
{
    U64 power = 1;
    while (exponent) {
      if (exponent & 1) power *= base;
      exponent >>= 1;
      base *= base;
    }
    return power;
}
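
/* Note : this is exponentiation by squaring, O(log(exponent)) multiplications,
 * with results wrapping modulo 2^64 (U64 arithmetic),
 * which is what the rolling hash computations below rely on. */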

#define ZSTD_ROLL_HASH_CHAR_OFFSET 10

/** ZSTD_rollingHash_append() :
 * Add the buffer to the hash value.
 */
static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
{
    BYTE const* istart = (BYTE const*)buf;
    size_t pos;
    for (pos = 0; pos < size; ++pos) {
        hash *= prime8bytes;
        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
    }
    return hash;
}

/** ZSTD_rollingHash_compute() :
 * Compute the rolling hash value of the buffer.
 */
MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
{
    return ZSTD_rollingHash_append(0, buf, size);
}

/** ZSTD_rollingHash_primePower() :
 * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
 * over a window of length bytes.
 */
MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
{
    return ZSTD_ipow(prime8bytes, length - 1);
}

/** ZSTD_rollingHash_rotate() :
 * Rotate the rolling hash by one byte.
 */
MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
{
    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
    hash *= prime8bytes;
    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
    return hash;
}
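
/* Invariant tying the helpers above together (illustrative) :
 * for a window of `length` bytes starting at buf,
 *   U64 const pp = ZSTD_rollingHash_primePower(length);
 *   U64 const h  = ZSTD_rollingHash_compute(buf, length);
 *   assert( ZSTD_rollingHash_rotate(h, buf[0], buf[length], pp)
 *        == ZSTD_rollingHash_compute(buf+1, length) );
 * i.e. rotating removes the oldest byte and appends the next one in O(1). */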
699*c03c5b1cSMartin Matuska 
700*c03c5b1cSMartin Matuska /*-*************************************
701*c03c5b1cSMartin Matuska *  Round buffer management
702*c03c5b1cSMartin Matuska ***************************************/
703*c03c5b1cSMartin Matuska #if (ZSTD_WINDOWLOG_MAX_64 > 31)
704*c03c5b1cSMartin Matuska # error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
705*c03c5b1cSMartin Matuska #endif
706*c03c5b1cSMartin Matuska /* Max current allowed */
707*c03c5b1cSMartin Matuska #define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
708*c03c5b1cSMartin Matuska /* Maximum chunk size before overflow correction needs to be called again */
709*c03c5b1cSMartin Matuska #define ZSTD_CHUNKSIZE_MAX                                                     \
710*c03c5b1cSMartin Matuska     ( ((U32)-1)                  /* Maximum ending current index */            \
711*c03c5b1cSMartin Matuska     - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */
712*c03c5b1cSMartin Matuska 
713*c03c5b1cSMartin Matuska /**
714*c03c5b1cSMartin Matuska  * ZSTD_window_clear():
715*c03c5b1cSMartin Matuska  * Clears the window containing the history by simply setting it to empty.
716*c03c5b1cSMartin Matuska  */
ZSTD_window_clear(ZSTD_window_t * window)717*c03c5b1cSMartin Matuska MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
718*c03c5b1cSMartin Matuska {
719*c03c5b1cSMartin Matuska     size_t const endT = (size_t)(window->nextSrc - window->base);
720*c03c5b1cSMartin Matuska     U32 const end = (U32)endT;
721*c03c5b1cSMartin Matuska 
722*c03c5b1cSMartin Matuska     window->lowLimit = end;
723*c03c5b1cSMartin Matuska     window->dictLimit = end;
724*c03c5b1cSMartin Matuska }
725*c03c5b1cSMartin Matuska 
726*c03c5b1cSMartin Matuska /**
727*c03c5b1cSMartin Matuska  * ZSTD_window_hasExtDict():
728*c03c5b1cSMartin Matuska  * Returns non-zero if the window has a non-empty extDict.
729*c03c5b1cSMartin Matuska  */
ZSTD_window_hasExtDict(ZSTD_window_t const window)730*c03c5b1cSMartin Matuska MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
731*c03c5b1cSMartin Matuska {
732*c03c5b1cSMartin Matuska     return window.lowLimit < window.dictLimit;
733*c03c5b1cSMartin Matuska }
734*c03c5b1cSMartin Matuska 
735*c03c5b1cSMartin Matuska /**
736*c03c5b1cSMartin Matuska  * ZSTD_matchState_dictMode():
737*c03c5b1cSMartin Matuska  * Inspects the provided matchState and figures out what dictMode should be
738*c03c5b1cSMartin Matuska  * passed to the compressor.
739*c03c5b1cSMartin Matuska  */
ZSTD_matchState_dictMode(const ZSTD_matchState_t * ms)740*c03c5b1cSMartin Matuska MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
741*c03c5b1cSMartin Matuska {
742*c03c5b1cSMartin Matuska     return ZSTD_window_hasExtDict(ms->window) ?
743*c03c5b1cSMartin Matuska         ZSTD_extDict :
744*c03c5b1cSMartin Matuska         ms->dictMatchState != NULL ?
745*c03c5b1cSMartin Matuska             ZSTD_dictMatchState :
746*c03c5b1cSMartin Matuska             ZSTD_noDict;
747*c03c5b1cSMartin Matuska }
748*c03c5b1cSMartin Matuska 
749*c03c5b1cSMartin Matuska /**
750*c03c5b1cSMartin Matuska  * ZSTD_window_needOverflowCorrection():
751*c03c5b1cSMartin Matuska  * Returns non-zero if the indices are getting too large and need overflow
752*c03c5b1cSMartin Matuska  * protection.
753*c03c5b1cSMartin Matuska  */
ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,void const * srcEnd)754*c03c5b1cSMartin Matuska MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
755*c03c5b1cSMartin Matuska                                                   void const* srcEnd)
756*c03c5b1cSMartin Matuska {
757*c03c5b1cSMartin Matuska     U32 const current = (U32)((BYTE const*)srcEnd - window.base);
758*c03c5b1cSMartin Matuska     return current > ZSTD_CURRENT_MAX;
759*c03c5b1cSMartin Matuska }
760*c03c5b1cSMartin Matuska 
761*c03c5b1cSMartin Matuska /**
762*c03c5b1cSMartin Matuska  * ZSTD_window_correctOverflow():
763*c03c5b1cSMartin Matuska  * Reduces the indices to protect from index overflow.
764*c03c5b1cSMartin Matuska  * Returns the correction made to the indices, which must be applied to every
765*c03c5b1cSMartin Matuska  * stored index.
766*c03c5b1cSMartin Matuska  *
767*c03c5b1cSMartin Matuska  * The least significant cycleLog bits of the indices must remain the same,
768*c03c5b1cSMartin Matuska  * which may be 0. Every index up to maxDist in the past must be valid.
769*c03c5b1cSMartin Matuska  * NOTE: (maxDist & cycleMask) must be zero.
770*c03c5b1cSMartin Matuska  */
ZSTD_window_correctOverflow(ZSTD_window_t * window,U32 cycleLog,U32 maxDist,void const * src)771*c03c5b1cSMartin Matuska MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
772*c03c5b1cSMartin Matuska                                            U32 maxDist, void const* src)
773*c03c5b1cSMartin Matuska {
774*c03c5b1cSMartin Matuska     /* preemptive overflow correction:
775*c03c5b1cSMartin Matuska      * 1. correction is large enough:
776*c03c5b1cSMartin Matuska      *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
777*c03c5b1cSMartin Matuska      *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
778*c03c5b1cSMartin Matuska      *
779*c03c5b1cSMartin Matuska      *    current - newCurrent
780*c03c5b1cSMartin Matuska      *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
781*c03c5b1cSMartin Matuska      *    > (3<<29) - (1<<chainLog)
782*c03c5b1cSMartin Matuska      *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
783*c03c5b1cSMartin Matuska      *    > 1<<29
784*c03c5b1cSMartin Matuska      *
785*c03c5b1cSMartin Matuska      * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
786*c03c5b1cSMartin Matuska      *    After correction, current is less than (1<<chainLog + 1<<windowLog).
787*c03c5b1cSMartin Matuska      *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
788*c03c5b1cSMartin Matuska      *    In 32-bit mode we are safe, because (chainLog <= 29), so
789*c03c5b1cSMartin Matuska      *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
790*c03c5b1cSMartin Matuska      * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
791*c03c5b1cSMartin Matuska      *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
792*c03c5b1cSMartin Matuska      */
793*c03c5b1cSMartin Matuska     U32 const cycleMask = (1U << cycleLog) - 1;
794*c03c5b1cSMartin Matuska     U32 const current = (U32)((BYTE const*)src - window->base);
795*c03c5b1cSMartin Matuska     U32 const currentCycle0 = current & cycleMask;
796*c03c5b1cSMartin Matuska     /* Exclude zero so that newCurrent - maxDist >= 1. */
797*c03c5b1cSMartin Matuska     U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
798*c03c5b1cSMartin Matuska     U32 const newCurrent = currentCycle1 + maxDist;
799*c03c5b1cSMartin Matuska     U32 const correction = current - newCurrent;
800*c03c5b1cSMartin Matuska     assert((maxDist & cycleMask) == 0);
801*c03c5b1cSMartin Matuska     assert(current > newCurrent);
802*c03c5b1cSMartin Matuska     /* Loose bound, should be around 1<<29 (see above) */
803*c03c5b1cSMartin Matuska     assert(correction > 1<<28);
804*c03c5b1cSMartin Matuska 
805*c03c5b1cSMartin Matuska     window->base += correction;
806*c03c5b1cSMartin Matuska     window->dictBase += correction;
807*c03c5b1cSMartin Matuska     if (window->lowLimit <= correction) window->lowLimit = 1;
808*c03c5b1cSMartin Matuska     else window->lowLimit -= correction;
809*c03c5b1cSMartin Matuska     if (window->dictLimit <= correction) window->dictLimit = 1;
810*c03c5b1cSMartin Matuska     else window->dictLimit -= correction;
811*c03c5b1cSMartin Matuska 
812*c03c5b1cSMartin Matuska     /* Ensure we can still reference the full window. */
813*c03c5b1cSMartin Matuska     assert(newCurrent >= maxDist);
814*c03c5b1cSMartin Matuska     assert(newCurrent - maxDist >= 1);
815*c03c5b1cSMartin Matuska     /* Ensure that lowLimit and dictLimit didn't underflow. */
816*c03c5b1cSMartin Matuska     assert(window->lowLimit <= newCurrent);
817*c03c5b1cSMartin Matuska     assert(window->dictLimit <= newCurrent);
818*c03c5b1cSMartin Matuska 
819*c03c5b1cSMartin Matuska     DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
820*c03c5b1cSMartin Matuska              window->lowLimit);
821*c03c5b1cSMartin Matuska     return correction;
822*c03c5b1cSMartin Matuska }
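
/* Illustrative sketch (editor's note, not part of the upstream sources):
 * how a caller might apply the returned correction to its stored indices.
 * The names ms, table, tableSize, cycleLog, maxDist and ip are hypothetical;
 * in the real code path the table reduction is handled separately
 * (cf. ZSTD_reduceTable_internal() in zstd_compress.c), which also preserves
 * special markers. Simplified version:
 *
 *     U32 const correction = ZSTD_window_correctOverflow(&ms->window,
 *                                                        cycleLog, maxDist, ip);
 *     U32 i;
 *     for (i = 0; i < tableSize; i++) {
 *         table[i] = (table[i] < correction) ? 0 : table[i] - correction;
 *     }
 */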
823*c03c5b1cSMartin Matuska 
824*c03c5b1cSMartin Matuska /**
825*c03c5b1cSMartin Matuska  * ZSTD_window_enforceMaxDist():
826*c03c5b1cSMartin Matuska  * Updates lowLimit so that:
827*c03c5b1cSMartin Matuska  *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
828*c03c5b1cSMartin Matuska  *
829*c03c5b1cSMartin Matuska  * It ensures index is valid as long as index >= lowLimit.
830*c03c5b1cSMartin Matuska  * This must be called before a block compression call.
831*c03c5b1cSMartin Matuska  *
832*c03c5b1cSMartin Matuska  * loadedDictEnd is only defined if a dictionary is in use for the current compression.
833*c03c5b1cSMartin Matuska  * As the name implies, loadedDictEnd represents the index at the end of the dictionary.
834*c03c5b1cSMartin Matuska  * The value lies within the context's referential, so it can be directly compared to blockEndIdx.
835*c03c5b1cSMartin Matuska  *
836*c03c5b1cSMartin Matuska  * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
837*c03c5b1cSMartin Matuska  * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
838*c03c5b1cSMartin Matuska  * This is because dictionaries are allowed to be referenced fully
839*c03c5b1cSMartin Matuska  * as long as the last byte of the dictionary is in the window.
840*c03c5b1cSMartin Matuska  * Once the input has progressed beyond the window size, the dictionary can no longer be referenced.
841*c03c5b1cSMartin Matuska  *
842*c03c5b1cSMartin Matuska  * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
843*c03c5b1cSMartin Matuska  * In dictMatchState mode, lowLimit and dictLimit are the same,
844*c03c5b1cSMartin Matuska  * and the dictionary is below them.
845*c03c5b1cSMartin Matuska  * forceWindow and dictMatchState are therefore incompatible.
846*c03c5b1cSMartin Matuska  */
847*c03c5b1cSMartin Matuska MEM_STATIC void
848*c03c5b1cSMartin Matuska ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
849*c03c5b1cSMartin Matuska                      const void* blockEnd,
850*c03c5b1cSMartin Matuska                            U32   maxDist,
851*c03c5b1cSMartin Matuska                            U32*  loadedDictEndPtr,
852*c03c5b1cSMartin Matuska                      const ZSTD_matchState_t** dictMatchStatePtr)
853*c03c5b1cSMartin Matuska {
854*c03c5b1cSMartin Matuska     U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
855*c03c5b1cSMartin Matuska     U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
856*c03c5b1cSMartin Matuska     DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
857*c03c5b1cSMartin Matuska                 (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
858*c03c5b1cSMartin Matuska 
859*c03c5b1cSMartin Matuska     /* - When there is no dictionary : loadedDictEnd == 0.
860*c03c5b1cSMartin Matuska          In which case, the test (blockEndIdx > maxDist) is merely to avoid
861*c03c5b1cSMartin Matuska          overflowing the next operation `newLowLimit = blockEndIdx - maxDist`.
862*c03c5b1cSMartin Matuska        - When there is a standard dictionary :
863*c03c5b1cSMartin Matuska          Index referential is copied from the dictionary,
864*c03c5b1cSMartin Matuska          which means it starts from 0.
865*c03c5b1cSMartin Matuska          In which case, loadedDictEnd == dictSize,
866*c03c5b1cSMartin Matuska          and it makes sense to compare `blockEndIdx > maxDist + dictSize`
867*c03c5b1cSMartin Matuska          since `blockEndIdx` also starts from zero.
868*c03c5b1cSMartin Matuska        - When there is an attached dictionary :
869*c03c5b1cSMartin Matuska          loadedDictEnd is expressed within the referential of the context,
870*c03c5b1cSMartin Matuska          so it can be directly compared against blockEndIdx.
871*c03c5b1cSMartin Matuska     */
872*c03c5b1cSMartin Matuska     if (blockEndIdx > maxDist + loadedDictEnd) {
873*c03c5b1cSMartin Matuska         U32 const newLowLimit = blockEndIdx - maxDist;
874*c03c5b1cSMartin Matuska         if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
875*c03c5b1cSMartin Matuska         if (window->dictLimit < window->lowLimit) {
876*c03c5b1cSMartin Matuska             DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
877*c03c5b1cSMartin Matuska                         (unsigned)window->dictLimit, (unsigned)window->lowLimit);
878*c03c5b1cSMartin Matuska             window->dictLimit = window->lowLimit;
879*c03c5b1cSMartin Matuska         }
880*c03c5b1cSMartin Matuska         /* On reaching window size, dictionaries are invalidated */
881*c03c5b1cSMartin Matuska         if (loadedDictEndPtr) *loadedDictEndPtr = 0;
882*c03c5b1cSMartin Matuska         if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
883*c03c5b1cSMartin Matuska     }
884*c03c5b1cSMartin Matuska }
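
/* Illustrative sketch (editor's note): a hypothetical call site, invoked once
 * per block before compression. ms, cParams and iend are assumed names here;
 * loadedDictEnd and dictMatchState are fields of the match state declared in
 * this header.
 *
 *     U32 const maxDist = 1U << cParams->windowLog;
 *     ZSTD_window_enforceMaxDist(&ms->window, iend, maxDist,
 *                                &ms->loadedDictEnd, &ms->dictMatchState);
 */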
885*c03c5b1cSMartin Matuska 
886*c03c5b1cSMartin Matuska /* Similar to ZSTD_window_enforceMaxDist(),
887*c03c5b1cSMartin Matuska  * but only invalidates dictionary
888*c03c5b1cSMartin Matuska  * when input progresses beyond window size.
889*c03c5b1cSMartin Matuska  * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
890*c03c5b1cSMartin Matuska  *              loadedDictEnd uses same referential as window->base
891*c03c5b1cSMartin Matuska  *              maxDist is the window size */
892*c03c5b1cSMartin Matuska MEM_STATIC void
893*c03c5b1cSMartin Matuska ZSTD_checkDictValidity(const ZSTD_window_t* window,
894*c03c5b1cSMartin Matuska                        const void* blockEnd,
895*c03c5b1cSMartin Matuska                              U32   maxDist,
896*c03c5b1cSMartin Matuska                              U32*  loadedDictEndPtr,
897*c03c5b1cSMartin Matuska                        const ZSTD_matchState_t** dictMatchStatePtr)
898*c03c5b1cSMartin Matuska {
899*c03c5b1cSMartin Matuska     assert(loadedDictEndPtr != NULL);
900*c03c5b1cSMartin Matuska     assert(dictMatchStatePtr != NULL);
901*c03c5b1cSMartin Matuska     {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
902*c03c5b1cSMartin Matuska         U32 const loadedDictEnd = *loadedDictEndPtr;
903*c03c5b1cSMartin Matuska         DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
904*c03c5b1cSMartin Matuska                     (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
905*c03c5b1cSMartin Matuska         assert(blockEndIdx >= loadedDictEnd);
906*c03c5b1cSMartin Matuska 
907*c03c5b1cSMartin Matuska         if (blockEndIdx > loadedDictEnd + maxDist) {
908*c03c5b1cSMartin Matuska             /* On reaching window size, dictionaries are invalidated.
909*c03c5b1cSMartin Matuska              * For simplification, if the window size is reached anywhere within the next block,
910*c03c5b1cSMartin Matuska              * the dictionary is invalidated for the full block.
911*c03c5b1cSMartin Matuska              */
912*c03c5b1cSMartin Matuska             DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
913*c03c5b1cSMartin Matuska             *loadedDictEndPtr = 0;
914*c03c5b1cSMartin Matuska             *dictMatchStatePtr = NULL;
915*c03c5b1cSMartin Matuska         } else {
916*c03c5b1cSMartin Matuska             if (*loadedDictEndPtr != 0) {
917*c03c5b1cSMartin Matuska                 DEBUGLOG(6, "dictionary considered valid for current block");
918*c03c5b1cSMartin Matuska     }   }   }
919*c03c5b1cSMartin Matuska }
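
/* Illustrative sketch (editor's note): unlike ZSTD_window_enforceMaxDist(),
 * this helper never moves lowLimit (the window is const), so it suits callers
 * that only need to decide whether the dictionary is still reachable.
 * A hypothetical per-block sequence, where ms, ip, blockSize and windowSize
 * are assumed names:
 *
 *     ZSTD_window_update(&ms->window, ip, blockSize);
 *     ZSTD_checkDictValidity(&ms->window, ip + blockSize, windowSize,
 *                            &ms->loadedDictEnd, &ms->dictMatchState);
 */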
920*c03c5b1cSMartin Matuska 
921*c03c5b1cSMartin Matuska MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
922*c03c5b1cSMartin Matuska     memset(window, 0, sizeof(*window));
923*c03c5b1cSMartin Matuska     window->base = (BYTE const*)"";
924*c03c5b1cSMartin Matuska     window->dictBase = (BYTE const*)"";
925*c03c5b1cSMartin Matuska     window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
926*c03c5b1cSMartin Matuska     window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
927*c03c5b1cSMartin Matuska     window->nextSrc = window->base + 1;   /* see issue #1241 */
928*c03c5b1cSMartin Matuska }
929*c03c5b1cSMartin Matuska 
930*c03c5b1cSMartin Matuska /**
931*c03c5b1cSMartin Matuska  * ZSTD_window_update():
932*c03c5b1cSMartin Matuska  * Updates the window by appending [src, src + srcSize) to the window.
933*c03c5b1cSMartin Matuska  * If it is not contiguous, the current prefix becomes the extDict, and we
934*c03c5b1cSMartin Matuska  * forget about the old extDict. Handles overlap of the prefix and extDict.
935*c03c5b1cSMartin Matuska  * Returns non-zero if the segment is contiguous.
936*c03c5b1cSMartin Matuska  */
937*c03c5b1cSMartin Matuska MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
938*c03c5b1cSMartin Matuska                                   void const* src, size_t srcSize)
939*c03c5b1cSMartin Matuska {
940*c03c5b1cSMartin Matuska     BYTE const* const ip = (BYTE const*)src;
941*c03c5b1cSMartin Matuska     U32 contiguous = 1;
942*c03c5b1cSMartin Matuska     DEBUGLOG(5, "ZSTD_window_update");
943*c03c5b1cSMartin Matuska     if (srcSize == 0)
944*c03c5b1cSMartin Matuska         return contiguous;
945*c03c5b1cSMartin Matuska     assert(window->base != NULL);
946*c03c5b1cSMartin Matuska     assert(window->dictBase != NULL);
947*c03c5b1cSMartin Matuska     /* Check if blocks follow each other */
948*c03c5b1cSMartin Matuska     if (src != window->nextSrc) {
949*c03c5b1cSMartin Matuska         /* not contiguous */
950*c03c5b1cSMartin Matuska         size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
951*c03c5b1cSMartin Matuska         DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
952*c03c5b1cSMartin Matuska         window->lowLimit = window->dictLimit;
953*c03c5b1cSMartin Matuska         assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
954*c03c5b1cSMartin Matuska         window->dictLimit = (U32)distanceFromBase;
955*c03c5b1cSMartin Matuska         window->dictBase = window->base;
956*c03c5b1cSMartin Matuska         window->base = ip - distanceFromBase;
957*c03c5b1cSMartin Matuska         /* ms->nextToUpdate = window->dictLimit; */
958*c03c5b1cSMartin Matuska         if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
959*c03c5b1cSMartin Matuska         contiguous = 0;
960*c03c5b1cSMartin Matuska     }
961*c03c5b1cSMartin Matuska     window->nextSrc = ip + srcSize;
962*c03c5b1cSMartin Matuska     /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
963*c03c5b1cSMartin Matuska     if ( (ip+srcSize > window->dictBase + window->lowLimit)
964*c03c5b1cSMartin Matuska        & (ip < window->dictBase + window->dictLimit)) {
965*c03c5b1cSMartin Matuska         ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
966*c03c5b1cSMartin Matuska         U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
967*c03c5b1cSMartin Matuska         window->lowLimit = lowLimitMax;
968*c03c5b1cSMartin Matuska         DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
969*c03c5b1cSMartin Matuska     }
970*c03c5b1cSMartin Matuska     return contiguous;
971*c03c5b1cSMartin Matuska }
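
/* Illustrative sketch (editor's note): a hypothetical caller reacting to a
 * discontinuity. When the new segment is not contiguous, previously hashed
 * positions now point into the extDict, so a caller would typically restart
 * its table updates from the new prefix start. ms, src and srcSize are
 * assumed names; nextToUpdate is a field of the match state (see also the
 * commented-out line in the function body above).
 *
 *     if (!ZSTD_window_update(&ms->window, src, srcSize)) {
 *         ms->nextToUpdate = ms->window.dictLimit;
 *     }
 */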
972*c03c5b1cSMartin Matuska 
973*c03c5b1cSMartin Matuska /**
974*c03c5b1cSMartin Matuska  * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
975*c03c5b1cSMartin Matuska  */
976*c03c5b1cSMartin Matuska MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
977*c03c5b1cSMartin Matuska {
978*c03c5b1cSMartin Matuska     U32    const maxDistance = 1U << windowLog;
979*c03c5b1cSMartin Matuska     U32    const lowestValid = ms->window.lowLimit;
980*c03c5b1cSMartin Matuska     U32    const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
981*c03c5b1cSMartin Matuska     U32    const isDictionary = (ms->loadedDictEnd != 0);
982*c03c5b1cSMartin Matuska     U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
983*c03c5b1cSMartin Matuska     return matchLowest;
984*c03c5b1cSMartin Matuska }
985*c03c5b1cSMartin Matuska 
986*c03c5b1cSMartin Matuska /**
987*c03c5b1cSMartin Matuska  * Returns the lowest allowed match index in the prefix.
988*c03c5b1cSMartin Matuska  */
989*c03c5b1cSMartin Matuska MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
990*c03c5b1cSMartin Matuska {
991*c03c5b1cSMartin Matuska     U32    const maxDistance = 1U << windowLog;
992*c03c5b1cSMartin Matuska     U32    const lowestValid = ms->window.dictLimit;
993*c03c5b1cSMartin Matuska     U32    const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
994*c03c5b1cSMartin Matuska     U32    const isDictionary = (ms->loadedDictEnd != 0);
995*c03c5b1cSMartin Matuska     U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
996*c03c5b1cSMartin Matuska     return matchLowest;
997*c03c5b1cSMartin Matuska }
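
/* Illustrative sketch (editor's note): how a match finder might use these
 * helpers to bound its candidate search. matchIndex, ms, current and cParams
 * are assumed names. Searchers able to follow matches into the extDict would
 * use ZSTD_getLowestMatchIndex(); prefix-only searchers would use
 * ZSTD_getLowestPrefixIndex().
 *
 *     U32 const lowestIndex = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
 *     while (matchIndex >= lowestIndex) {
 *         ... test the candidate at matchIndex, then follow the chain ...
 *     }
 */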
998*c03c5b1cSMartin Matuska 
999*c03c5b1cSMartin Matuska 
1000*c03c5b1cSMartin Matuska 
1001*c03c5b1cSMartin Matuska /* debug functions */
1002*c03c5b1cSMartin Matuska #if (DEBUGLEVEL>=2)
1003*c03c5b1cSMartin Matuska 
1004*c03c5b1cSMartin Matuska MEM_STATIC double ZSTD_fWeight(U32 rawStat)
1005*c03c5b1cSMartin Matuska {
1006*c03c5b1cSMartin Matuska     U32 const fp_accuracy = 8;
1007*c03c5b1cSMartin Matuska     U32 const fp_multiplier = (1 << fp_accuracy);
1008*c03c5b1cSMartin Matuska     U32 const newStat = rawStat + 1;
1009*c03c5b1cSMartin Matuska     U32 const hb = ZSTD_highbit32(newStat);
1010*c03c5b1cSMartin Matuska     U32 const BWeight = hb * fp_multiplier;
1011*c03c5b1cSMartin Matuska     U32 const FWeight = (newStat << fp_accuracy) >> hb;
1012*c03c5b1cSMartin Matuska     U32 const weight = BWeight + FWeight;
1013*c03c5b1cSMartin Matuska     assert(hb + fp_accuracy < 31);
1014*c03c5b1cSMartin Matuska     return (double)weight / fp_multiplier;
1015*c03c5b1cSMartin Matuska }
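
/* Editor's note: ZSTD_fWeight(s) approximates log2(s+1) + 1, with fp_accuracy
 * fractional bits. Worked example: rawStat = 255 gives newStat = 256, hb = 8,
 * BWeight = 8*256 = 2048, FWeight = (256<<8)>>8 = 256, so the result is
 * 2304/256 = 9.0 (= log2(256) + 1).
 * Consequently, ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) in ZSTD_debugTable()
 * approximates log2(sum/table[u]), i.e. the predicted bit cost of symbol u.
 */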
1016*c03c5b1cSMartin Matuska 
1017*c03c5b1cSMartin Matuska /* display a table's content,
1018*c03c5b1cSMartin Matuska  * listing each element, its frequency, and its predicted bit cost */
1019*c03c5b1cSMartin Matuska MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
1020*c03c5b1cSMartin Matuska {
1021*c03c5b1cSMartin Matuska     unsigned u, sum;
1022*c03c5b1cSMartin Matuska     for (u=0, sum=0; u<=max; u++) sum += table[u];
1023*c03c5b1cSMartin Matuska     DEBUGLOG(2, "total nb elts: %u", sum);
1024*c03c5b1cSMartin Matuska     for (u=0; u<=max; u++) {
1025*c03c5b1cSMartin Matuska         DEBUGLOG(2, "%2u: %5u  (%.2f)",
1026*c03c5b1cSMartin Matuska                 u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
1027*c03c5b1cSMartin Matuska     }
1028*c03c5b1cSMartin Matuska }
1029*c03c5b1cSMartin Matuska 
1030*c03c5b1cSMartin Matuska #endif
1031*c03c5b1cSMartin Matuska 
1032*c03c5b1cSMartin Matuska 
1033*c03c5b1cSMartin Matuska #if defined (__cplusplus)
1034*c03c5b1cSMartin Matuska }
1035*c03c5b1cSMartin Matuska #endif
1036*c03c5b1cSMartin Matuska 
1037*c03c5b1cSMartin Matuska /* ===============================================================
1038*c03c5b1cSMartin Matuska  * Shared internal declarations
1039*c03c5b1cSMartin Matuska  * These prototypes may be called from sources not in lib/compress
1040*c03c5b1cSMartin Matuska  * =============================================================== */
1041*c03c5b1cSMartin Matuska 
1042*c03c5b1cSMartin Matuska /* ZSTD_loadCEntropy() :
1043*c03c5b1cSMartin Matuska  * dict : must point at beginning of a valid zstd dictionary.
1044*c03c5b1cSMartin Matuska  * return : size of dictionary header (size of magic number + dict ID + entropy tables)
1045*c03c5b1cSMartin Matuska  * assumptions : magic number already checked by the caller,
1046*c03c5b1cSMartin Matuska  *               and dictSize >= 8 */
1047*c03c5b1cSMartin Matuska size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
1048*c03c5b1cSMartin Matuska                          short* offcodeNCount, unsigned* offcodeMaxValue,
1049*c03c5b1cSMartin Matuska                          const void* const dict, size_t dictSize);
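
/* Illustrative sketch (editor's note): a hypothetical call, mirroring how a
 * dictionary loader might use this declaration. MaxOff comes from
 * zstd_internal.h; bs, workspace, dict and dictSize are assumed names.
 *
 *     short offcodeNCount[MaxOff+1];
 *     unsigned offcodeMaxValue = MaxOff;
 *     size_t const hSize = ZSTD_loadCEntropy(bs, workspace,
 *                                            offcodeNCount, &offcodeMaxValue,
 *                                            dict, dictSize);
 *     if (ZSTD_isError(hSize)) return hSize;
 */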
1050*c03c5b1cSMartin Matuska 
1051*c03c5b1cSMartin Matuska void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
1052*c03c5b1cSMartin Matuska 
1053*c03c5b1cSMartin Matuska /* ==============================================================
1054*c03c5b1cSMartin Matuska  * Private declarations
1055*c03c5b1cSMartin Matuska  * These prototypes shall only be called from within lib/compress
1056*c03c5b1cSMartin Matuska  * ============================================================== */
1057*c03c5b1cSMartin Matuska 
1058*c03c5b1cSMartin Matuska /* ZSTD_getCParamsFromCCtxParams() :
1059*c03c5b1cSMartin Matuska  * cParams are built depending on compressionLevel, src size hints,
1060*c03c5b1cSMartin Matuska  * LDM and manually set compression parameters.
1061*c03c5b1cSMartin Matuska  * Note: srcSizeHint == 0 means a source size of 0 (it is not interpreted as "unknown").
1062*c03c5b1cSMartin Matuska  */
1063*c03c5b1cSMartin Matuska ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
1064*c03c5b1cSMartin Matuska         const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);
1065*c03c5b1cSMartin Matuska 
1066*c03c5b1cSMartin Matuska /*! ZSTD_initCStream_internal() :
1067*c03c5b1cSMartin Matuska  *  Private use only. Init streaming operation.
1068*c03c5b1cSMartin Matuska  *  expects params to be valid.
1069*c03c5b1cSMartin Matuska  *  must receive dict, or cdict, or none, but not both.
1070*c03c5b1cSMartin Matuska  *  @return : 0, or an error code */
1071*c03c5b1cSMartin Matuska size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
1072*c03c5b1cSMartin Matuska                      const void* dict, size_t dictSize,
1073*c03c5b1cSMartin Matuska                      const ZSTD_CDict* cdict,
1074*c03c5b1cSMartin Matuska                      const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
1075*c03c5b1cSMartin Matuska 
1076*c03c5b1cSMartin Matuska void ZSTD_resetSeqStore(seqStore_t* ssPtr);
1077*c03c5b1cSMartin Matuska 
1078*c03c5b1cSMartin Matuska /*! ZSTD_getCParamsFromCDict() :
1079*c03c5b1cSMartin Matuska  *  as the name implies */
1080*c03c5b1cSMartin Matuska ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
1081*c03c5b1cSMartin Matuska 
1082*c03c5b1cSMartin Matuska /* ZSTD_compressBegin_advanced_internal() :
1083*c03c5b1cSMartin Matuska  * Private use only. To be called from zstdmt_compress.c. */
1084*c03c5b1cSMartin Matuska size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
1085*c03c5b1cSMartin Matuska                                     const void* dict, size_t dictSize,
1086*c03c5b1cSMartin Matuska                                     ZSTD_dictContentType_e dictContentType,
1087*c03c5b1cSMartin Matuska                                     ZSTD_dictTableLoadMethod_e dtlm,
1088*c03c5b1cSMartin Matuska                                     const ZSTD_CDict* cdict,
1089*c03c5b1cSMartin Matuska                                     const ZSTD_CCtx_params* params,
1090*c03c5b1cSMartin Matuska                                     unsigned long long pledgedSrcSize);
1091*c03c5b1cSMartin Matuska 
1092*c03c5b1cSMartin Matuska /* ZSTD_compress_advanced_internal() :
1093*c03c5b1cSMartin Matuska  * Private use only. To be called from zstdmt_compress.c. */
1094*c03c5b1cSMartin Matuska size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
1095*c03c5b1cSMartin Matuska                                        void* dst, size_t dstCapacity,
1096*c03c5b1cSMartin Matuska                                  const void* src, size_t srcSize,
1097*c03c5b1cSMartin Matuska                                  const void* dict,size_t dictSize,
1098*c03c5b1cSMartin Matuska                                  const ZSTD_CCtx_params* params);
1099*c03c5b1cSMartin Matuska 
1100*c03c5b1cSMartin Matuska 
1101*c03c5b1cSMartin Matuska /* ZSTD_writeLastEmptyBlock() :
1102*c03c5b1cSMartin Matuska  * output an empty Block with end-of-frame mark to complete a frame
1103*c03c5b1cSMartin Matuska  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
1104*c03c5b1cSMartin Matuska  *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
1105*c03c5b1cSMartin Matuska  */
1106*c03c5b1cSMartin Matuska size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
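
/* Illustrative sketch (editor's note): a hypothetical caller closing a frame
 * with an empty last block. op and oend are assumed output-pointer names;
 * per the comment above, a successful return equals ZSTD_blockHeaderSize
 * (from zstd_internal.h).
 *
 *     size_t const lastBlockSize = ZSTD_writeLastEmptyBlock(op, (size_t)(oend - op));
 *     if (ZSTD_isError(lastBlockSize)) return lastBlockSize;
 *     op += lastBlockSize;
 */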
1107*c03c5b1cSMartin Matuska 
1108*c03c5b1cSMartin Matuska 
1109*c03c5b1cSMartin Matuska /* ZSTD_referenceExternalSequences() :
1110*c03c5b1cSMartin Matuska  * Must be called before starting a compression operation.
1111*c03c5b1cSMartin Matuska  * The sequences in `seq` must parse a prefix of the source.
1112*c03c5b1cSMartin Matuska  * This cannot be used when long range matching is enabled.
1113*c03c5b1cSMartin Matuska  * Zstd will use these sequences, and pass the literals to a secondary block
1114*c03c5b1cSMartin Matuska  * compressor.
1115*c03c5b1cSMartin Matuska  * @return : An error code on failure.
1116*c03c5b1cSMartin Matuska  * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
1117*c03c5b1cSMartin Matuska  * access and data corruption.
1118*c03c5b1cSMartin Matuska  */
1119*c03c5b1cSMartin Matuska size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
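
/* Illustrative sketch (editor's note): a hypothetical caller handing
 * pre-computed sequences to the compressor. cctx, seqs, nbSeq, src and
 * srcSize are assumed names; the sequences must already parse a prefix of
 * (src, srcSize) and are presumed valid, since this function performs no
 * verification. Compression of that input then proceeds through the regular
 * entry points.
 *
 *     {   size_t const err = ZSTD_referenceExternalSequences(cctx, seqs, nbSeq);
 *         if (ZSTD_isError(err)) return err;
 *     }
 */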
1120*c03c5b1cSMartin Matuska 
1121*c03c5b1cSMartin Matuska /** ZSTD_cycleLog() :
1122*c03c5b1cSMartin Matuska  *  condition for correct operation : hashLog > 1 */
1123*c03c5b1cSMartin Matuska U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
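
/* Illustrative sketch (editor's note): cycleLog is typically derived from the
 * chain log and the strategy, and maxDist is kept a multiple of
 * (1U << cycleLog) so that ZSTD_window_correctOverflow() above accepts it.
 * cParams and maxDist are assumed names.
 *
 *     U32 const cycleLog = ZSTD_cycleLog(cParams->chainLog, cParams->strategy);
 *     assert((maxDist & ((1U << cycleLog) - 1)) == 0);
 */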
1124*c03c5b1cSMartin Matuska 
1125*c03c5b1cSMartin Matuska #endif /* ZSTD_COMPRESS_H */