1 /*
2  * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
3  * All rights reserved.
4  *
5  * This source code is licensed under both the BSD-style license (found in the
6  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7  * in the COPYING file in the root directory of this source tree).
8  * You may select, at your option, one of the above-listed licenses.
9  */
10 
11 /*-*************************************
12 *  Dependencies
13 ***************************************/
14 #include <limits.h>         /* INT_MAX */
15 #include <string.h>         /* memset */
16 #include "cpu.h"
17 #include "mem.h"
18 #include "hist.h"           /* HIST_countFast_wksp */
19 #define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
20 #include "fse.h"
21 #define HUF_STATIC_LINKING_ONLY
22 #include "huf.h"
23 #include "zstd_compress_internal.h"
24 #include "zstd_fast.h"
25 #include "zstd_double_fast.h"
26 #include "zstd_lazy.h"
27 #include "zstd_opt.h"
28 #include "zstd_ldm.h"
29 
30 
31 /*-*************************************
32 *  Helper functions
33 ***************************************/
34 size_t ZSTD_compressBound(size_t srcSize) {
35     return ZSTD_COMPRESSBOUND(srcSize);
36 }
37 
38 
39 /*-*************************************
40 *  Context memory management
41 ***************************************/
struct ZSTD_CDict_s {
    void* dictBuffer;               /* locally-owned copy of the dictionary; presumably NULL when referenced externally — confirm against CDict creation code */
    const void* dictContent;        /* read-only view of the dictionary bytes */
    size_t dictContentSize;         /* size of dictContent, in bytes */
    void* workspace;                /* scratch buffer owned by this CDict */
    size_t workspaceSize;           /* size of workspace, in bytes */
    ZSTD_matchState_t matchState;   /* match-finder state pre-seeded with the dictionary */
    ZSTD_compressedBlockState_t cBlockState;   /* block state (entropy tables) for this dictionary */
    ZSTD_customMem customMem;       /* allocator used to create/free this CDict */
    U32 dictID;                     /* dictionary ID read from the dictionary header, if any */
};  /* typedef'd to ZSTD_CDict within "zstd.h" */
53 
54 ZSTD_CCtx* ZSTD_createCCtx(void)
55 {
56     return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
57 }
58 
59 static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
60 {
61     assert(cctx != NULL);
62     memset(cctx, 0, sizeof(*cctx));
63     cctx->customMem = memManager;
64     cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
65     {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
66         assert(!ZSTD_isError(err));
67         (void)err;
68     }
69 }
70 
71 ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
72 {
73     ZSTD_STATIC_ASSERT(zcss_init==0);
74     ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
75     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
76     {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
77         if (!cctx) return NULL;
78         ZSTD_initCCtx(cctx, customMem);
79         return cctx;
80     }
81 }
82 
/* Build a CCtx inside a caller-provided buffer (no dynamic allocation).
 * The CCtx struct sits at the start of `workspace`; the remainder becomes
 * the context's internal workSpace.
 * @return : the CCtx, or NULL if the buffer is too small or misaligned. */
ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
{
    ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
    memset(workspace, 0, workspaceSize);   /* may be a bit generous, could memset be smaller ? */
    cctx->staticSize = workspaceSize;   /* non-zero staticSize marks the context as static */
    cctx->workSpace = (void*)(cctx+1);
    cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);

    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
    if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
    assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
    /* carve out the two block states, then the entropy workspace, from the front of workSpace */
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
    cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
    {
        void* const ptr = cctx->blockState.nextCBlock + 1;
        cctx->entropyWorkspace = (U32*)ptr;
    }
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    return cctx;
}
105 
106 static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
107 {
108     assert(cctx != NULL);
109     assert(cctx->staticSize == 0);
110     ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
111     ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
112 #ifdef ZSTD_MULTITHREAD
113     ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
114 #endif
115 }
116 
117 size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
118 {
119     if (cctx==NULL) return 0;   /* support free on NULL */
120     if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
121     ZSTD_freeCCtxContent(cctx);
122     ZSTD_free(cctx, cctx->customMem);
123     return 0;
124 }
125 
126 
127 static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
128 {
129 #ifdef ZSTD_MULTITHREAD
130     return ZSTDMT_sizeof_CCtx(cctx->mtctx);
131 #else
132     (void)cctx;
133     return 0;
134 #endif
135 }
136 
137 
138 size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
139 {
140     if (cctx==NULL) return 0;   /* support sizeof on NULL */
141     return sizeof(*cctx) + cctx->workSpaceSize
142            + ZSTD_sizeof_CDict(cctx->cdictLocal)
143            + ZSTD_sizeof_mtctx(cctx);
144 }
145 
146 size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
147 {
148     return ZSTD_sizeof_CCtx(zcs);  /* same object */
149 }
150 
151 /* private API call, for dictBuilder only */
152 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
153 
154 static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
155         ZSTD_compressionParameters cParams)
156 {
157     ZSTD_CCtx_params cctxParams;
158     memset(&cctxParams, 0, sizeof(cctxParams));
159     cctxParams.cParams = cParams;
160     cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */
161     assert(!ZSTD_checkCParams(cParams));
162     cctxParams.fParams.contentSizeFlag = 1;
163     return cctxParams;
164 }
165 
166 static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
167         ZSTD_customMem customMem)
168 {
169     ZSTD_CCtx_params* params;
170     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
171     params = (ZSTD_CCtx_params*)ZSTD_calloc(
172             sizeof(ZSTD_CCtx_params), customMem);
173     if (!params) { return NULL; }
174     params->customMem = customMem;
175     params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
176     params->fParams.contentSizeFlag = 1;
177     return params;
178 }
179 
180 ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
181 {
182     return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
183 }
184 
185 size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
186 {
187     if (params == NULL) { return 0; }
188     ZSTD_free(params, params->customMem);
189     return 0;
190 }
191 
192 size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
193 {
194     return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
195 }
196 
197 size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
198     if (!cctxParams) { return ERROR(GENERIC); }
199     memset(cctxParams, 0, sizeof(*cctxParams));
200     cctxParams->compressionLevel = compressionLevel;
201     cctxParams->fParams.contentSizeFlag = 1;
202     return 0;
203 }
204 
205 size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
206 {
207     if (!cctxParams) { return ERROR(GENERIC); }
208     CHECK_F( ZSTD_checkCParams(params.cParams) );
209     memset(cctxParams, 0, sizeof(*cctxParams));
210     cctxParams->cParams = params.cParams;
211     cctxParams->fParams = params.fParams;
212     cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
213     assert(!ZSTD_checkCParams(params.cParams));
214     return 0;
215 }
216 
217 /* ZSTD_assignParamsToCCtxParams() :
218  * params is presumed valid at this stage */
219 static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
220         ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
221 {
222     ZSTD_CCtx_params ret = cctxParams;
223     ret.cParams = params.cParams;
224     ret.fParams = params.fParams;
225     ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
226     assert(!ZSTD_checkCParams(params.cParams));
227     return ret;
228 }
229 
/*! ZSTD_cParam_getBounds() :
 *  @return : the inclusive [lowerBound, upperBound] range of accepted values
 *            for `param`, or a bounds struct carrying an error code
 *            (parameter_unsupported) for unknown parameters. */
ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
    ZSTD_bounds bounds = { 0, 0, 0 };

    switch(param)
    {
    case ZSTD_c_compressionLevel:
        bounds.lowerBound = ZSTD_minCLevel();
        bounds.upperBound = ZSTD_maxCLevel();
        return bounds;

    case ZSTD_c_windowLog:
        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
        return bounds;

    case ZSTD_c_hashLog:
        bounds.lowerBound = ZSTD_HASHLOG_MIN;
        bounds.upperBound = ZSTD_HASHLOG_MAX;
        return bounds;

    case ZSTD_c_chainLog:
        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
        bounds.upperBound = ZSTD_CHAINLOG_MAX;
        return bounds;

    case ZSTD_c_searchLog:
        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
        return bounds;

    case ZSTD_c_minMatch:
        bounds.lowerBound = ZSTD_MINMATCH_MIN;
        bounds.upperBound = ZSTD_MINMATCH_MAX;
        return bounds;

    case ZSTD_c_targetLength:
        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
        return bounds;

    case ZSTD_c_strategy:
        bounds.lowerBound = ZSTD_STRATEGY_MIN;
        bounds.upperBound = ZSTD_STRATEGY_MAX;
        return bounds;

    /* boolean flags : accepted values are 0 or 1 */
    case ZSTD_c_contentSizeFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_checksumFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_dictIDFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    /* multi-threading parameters : upper bound collapses to 0 without ZSTD_MULTITHREAD */
    case ZSTD_c_nbWorkers:
        bounds.lowerBound = 0;
#ifdef ZSTD_MULTITHREAD
        bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
#else
        bounds.upperBound = 0;
#endif
        return bounds;

    case ZSTD_c_jobSize:
        bounds.lowerBound = 0;
#ifdef ZSTD_MULTITHREAD
        bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
#else
        bounds.upperBound = 0;
#endif
        return bounds;

    case ZSTD_c_overlapLog:
        bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
        bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
        return bounds;

    case ZSTD_c_enableLongDistanceMatching:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_ldmHashLog:
        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
        return bounds;

    case ZSTD_c_ldmMinMatch:
        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
        return bounds;

    case ZSTD_c_ldmBucketSizeLog:
        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
        return bounds;

    case ZSTD_c_ldmHashRateLog:
        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
        return bounds;

    /* experimental parameters */
    case ZSTD_c_rsyncable:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_forceMaxWindow :
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_format:
        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
        bounds.lowerBound = ZSTD_f_zstd1;
        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_forceAttachDict:
        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);
        bounds.lowerBound = ZSTD_dictDefaultAttach;
        bounds.upperBound = ZSTD_dictForceCopy;       /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    default:
        {   ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
            return boundError;
        }
    }
}
368 
369 /* ZSTD_cParam_withinBounds:
370  * @return 1 if value is within cParam bounds,
371  * 0 otherwise */
372 static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
373 {
374     ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
375     if (ZSTD_isError(bounds.error)) return 0;
376     if (value < bounds.lowerBound) return 0;
377     if (value > bounds.upperBound) return 0;
378     return 1;
379 }
380 
/* BOUNDCHECK() :
 * returns parameter_outOfBound from the *enclosing* function
 * when `val` is outside the authorized range for `cParam` */
#define BOUNDCHECK(cParam, val) {                  \
    if (!ZSTD_cParam_withinBounds(cParam,val)) {   \
        return ERROR(parameter_outOfBound);        \
}   }
385 
386 
/* ZSTD_isUpdateAuthorized() :
 * @return 1 if `param` may still be modified after compression has started
 * (streamStage != zcss_init), 0 otherwise. */
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
    switch(param)
    {
    /* compression parameters : updatable mid-stream */
    case ZSTD_c_compressionLevel:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
        return 1;

    /* everything else is frozen once a frame is in flight */
    case ZSTD_c_format:
    case ZSTD_c_windowLog:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow :
    case ZSTD_c_nbWorkers:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_forceAttachDict:
    default:
        return 0;
    }
}
420 
/*! ZSTD_CCtx_setParameter() :
 *  Set one compression parameter on `cctx`, delegating the actual write
 *  to ZSTD_CCtxParam_setParameter() on cctx->requestedParams.
 *  Refuses (stage_wrong) when compression has started and the parameter
 *  is not updatable, or when a cdict is attached and the parameter would
 *  conflict with it. */
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
    if (cctx->streamStage != zcss_init) {
        /* mid-stream update : only allowed for a subset of parameters */
        if (ZSTD_isUpdateAuthorized(param)) {
            cctx->cParamsChanged = 1;
        } else {
            return ERROR(stage_wrong);
    }   }

    switch(param)
    {
    case ZSTD_c_format :
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_compressionLevel:
        if (cctx->cdict) return ERROR(stage_wrong);
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    /* cParams conflict with an attached dictionary : refuse while cdict is set */
    case ZSTD_c_windowLog:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
        if (cctx->cdict) return ERROR(stage_wrong);
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_forceMaxWindow :  /* Force back-references to remain < windowSize,
                                   * even when referencing into Dictionary content.
                                   * default : 0 when using a CDict, 1 when using a Prefix */
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_forceAttachDict:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_nbWorkers:
        if ((value!=0) && cctx->staticSize) {
            return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
        }
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_ldmHashRateLog:
        if (cctx->cdict) return ERROR(stage_wrong);
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    default: return ERROR(parameter_unsupported);
    }
}
485 
/*! ZSTD_CCtxParam_setParameter() :
 *  Write one parameter directly into `CCtxParams` after bound-checking.
 *  For most cParams, value 0 means "use default" and skips the bound check.
 * @return : the value effectively stored, or an error code
 *           (parameter_outOfBound / parameter_unsupported). */
size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
                                   ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%i, %i)", (int)param, value);
    switch(param)
    {
    case ZSTD_c_format :
        BOUNDCHECK(ZSTD_c_format, value);
        CCtxParams->format = (ZSTD_format_e)value;
        return (size_t)CCtxParams->format;

    case ZSTD_c_compressionLevel : {
        /* level is clamped (not rejected) to the supported range */
        int cLevel = value;
        if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
        if (cLevel < ZSTD_minCLevel()) cLevel = ZSTD_minCLevel();
        if (cLevel) {  /* 0 : does not change current level */
            CCtxParams->compressionLevel = cLevel;
        }
        if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
        return 0;  /* return type (size_t) cannot represent negative values */
    }

    case ZSTD_c_windowLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_windowLog, value);
        CCtxParams->cParams.windowLog = value;
        return CCtxParams->cParams.windowLog;

    case ZSTD_c_hashLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_hashLog, value);
        CCtxParams->cParams.hashLog = value;
        return CCtxParams->cParams.hashLog;

    case ZSTD_c_chainLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_chainLog, value);
        CCtxParams->cParams.chainLog = value;
        return CCtxParams->cParams.chainLog;

    case ZSTD_c_searchLog :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_searchLog, value);
        CCtxParams->cParams.searchLog = value;
        return value;

    case ZSTD_c_minMatch :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_minMatch, value);
        CCtxParams->cParams.minMatch = value;
        return CCtxParams->cParams.minMatch;

    case ZSTD_c_targetLength :
        BOUNDCHECK(ZSTD_c_targetLength, value);
        CCtxParams->cParams.targetLength = value;
        return CCtxParams->cParams.targetLength;

    case ZSTD_c_strategy :
        if (value!=0)   /* 0 => use default */
            BOUNDCHECK(ZSTD_c_strategy, value);
        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
        return (size_t)CCtxParams->cParams.strategy;

    case ZSTD_c_contentSizeFlag :
        /* Content size written in frame header _when known_ (default:1) */
        DEBUGLOG(4, "set content size flag = %u", (value!=0));
        CCtxParams->fParams.contentSizeFlag = value != 0;
        return CCtxParams->fParams.contentSizeFlag;

    case ZSTD_c_checksumFlag :
        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
        CCtxParams->fParams.checksumFlag = value != 0;
        return CCtxParams->fParams.checksumFlag;

    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
        CCtxParams->fParams.noDictIDFlag = !value;
        return !CCtxParams->fParams.noDictIDFlag;

    case ZSTD_c_forceMaxWindow :
        CCtxParams->forceWindow = (value != 0);
        return CCtxParams->forceWindow;

    case ZSTD_c_forceAttachDict : {
        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
        CCtxParams->attachDictPref = pref;
        return CCtxParams->attachDictPref;
    }

    /* multi-threading parameters : only nbWorkers==0 is accepted without ZSTD_MULTITHREAD */
    case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
        if (value!=0) return ERROR(parameter_unsupported);
        return 0;
#else
        return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
#endif

    case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
#endif

    case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapLog, value);
#endif

    case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_rsyncable, value);
#endif

    case ZSTD_c_enableLongDistanceMatching :
        CCtxParams->ldmParams.enableLdm = (value!=0);
        return CCtxParams->ldmParams.enableLdm;

    case ZSTD_c_ldmHashLog :
        if (value!=0)   /* 0 ==> auto */
            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
        CCtxParams->ldmParams.hashLog = value;
        return CCtxParams->ldmParams.hashLog;

    case ZSTD_c_ldmMinMatch :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
        CCtxParams->ldmParams.minMatchLength = value;
        return CCtxParams->ldmParams.minMatchLength;

    case ZSTD_c_ldmBucketSizeLog :
        if (value!=0)   /* 0 ==> default */
            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
        CCtxParams->ldmParams.bucketSizeLog = value;
        return CCtxParams->ldmParams.bucketSizeLog;

    case ZSTD_c_ldmHashRateLog :
        if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
            return ERROR(parameter_outOfBound);
        CCtxParams->ldmParams.hashRateLog = value;
        return CCtxParams->ldmParams.hashRateLog;

    default: return ERROR(parameter_unsupported);
    }
}
636 
637 size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
638 {
639     return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value);
640 }
641 
/*! ZSTD_CCtxParam_getParameter() :
 *  Read the current setting of `param` from `CCtxParams` into `*value`.
 * @return : 0, or an error code when `param` is unsupported
 *           (including MT parameters in single-thread builds). */
size_t ZSTD_CCtxParam_getParameter(
        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
{
    switch(param)
    {
    case ZSTD_c_format :
        *value = CCtxParams->format;
        break;
    case ZSTD_c_compressionLevel :
        *value = CCtxParams->compressionLevel;
        break;
    case ZSTD_c_windowLog :
        *value = CCtxParams->cParams.windowLog;
        break;
    case ZSTD_c_hashLog :
        *value = CCtxParams->cParams.hashLog;
        break;
    case ZSTD_c_chainLog :
        *value = CCtxParams->cParams.chainLog;
        break;
    case ZSTD_c_searchLog :
        *value = CCtxParams->cParams.searchLog;
        break;
    case ZSTD_c_minMatch :
        *value = CCtxParams->cParams.minMatch;
        break;
    case ZSTD_c_targetLength :
        *value = CCtxParams->cParams.targetLength;
        break;
    case ZSTD_c_strategy :
        *value = (unsigned)CCtxParams->cParams.strategy;
        break;
    case ZSTD_c_contentSizeFlag :
        *value = CCtxParams->fParams.contentSizeFlag;
        break;
    case ZSTD_c_checksumFlag :
        *value = CCtxParams->fParams.checksumFlag;
        break;
    case ZSTD_c_dictIDFlag :
        /* stored inverted (noDictIDFlag), exposed as positive flag */
        *value = !CCtxParams->fParams.noDictIDFlag;
        break;
    case ZSTD_c_forceMaxWindow :
        *value = CCtxParams->forceWindow;
        break;
    case ZSTD_c_forceAttachDict :
        *value = CCtxParams->attachDictPref;
        break;
    case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
        assert(CCtxParams->nbWorkers == 0);
#endif
        *value = CCtxParams->nbWorkers;
        break;
    case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        assert(CCtxParams->jobSize <= INT_MAX);
        *value = (int)CCtxParams->jobSize;
        break;
#endif
    case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        *value = CCtxParams->overlapLog;
        break;
#endif
    case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        *value = CCtxParams->rsyncable;
        break;
#endif
    case ZSTD_c_enableLongDistanceMatching :
        *value = CCtxParams->ldmParams.enableLdm;
        break;
    case ZSTD_c_ldmHashLog :
        *value = CCtxParams->ldmParams.hashLog;
        break;
    case ZSTD_c_ldmMinMatch :
        *value = CCtxParams->ldmParams.minMatchLength;
        break;
    case ZSTD_c_ldmBucketSizeLog :
        *value = CCtxParams->ldmParams.bucketSizeLog;
        break;
    case ZSTD_c_ldmHashRateLog :
        *value = CCtxParams->ldmParams.hashRateLog;
        break;
    default: return ERROR(parameter_unsupported);
    }
    return 0;
}
736 
737 /** ZSTD_CCtx_setParametersUsingCCtxParams() :
738  *  just applies `params` into `cctx`
739  *  no action is performed, parameters are merely stored.
740  *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
741  *    This is possible even if a compression is ongoing.
742  *    In which case, new parameters will be applied on the fly, starting with next compression job.
743  */
744 size_t ZSTD_CCtx_setParametersUsingCCtxParams(
745         ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
746 {
747     DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
748     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
749     if (cctx->cdict) return ERROR(stage_wrong);
750 
751     cctx->requestedParams = *params;
752     return 0;
753 }
754 
755 ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
756 {
757     DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
758     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
759     cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
760     return 0;
761 }
762 
763 size_t ZSTD_CCtx_loadDictionary_advanced(
764         ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
765         ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
766 {
767     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
768     if (cctx->staticSize) return ERROR(memory_allocation);  /* no malloc for static CCtx */
769     DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
770     ZSTD_freeCDict(cctx->cdictLocal);  /* in case one already exists */
771     if (dict==NULL || dictSize==0) {   /* no dictionary mode */
772         cctx->cdictLocal = NULL;
773         cctx->cdict = NULL;
774     } else {
775         ZSTD_compressionParameters const cParams =
776                 ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
777         cctx->cdictLocal = ZSTD_createCDict_advanced(
778                                 dict, dictSize,
779                                 dictLoadMethod, dictContentType,
780                                 cParams, cctx->customMem);
781         cctx->cdict = cctx->cdictLocal;
782         if (cctx->cdictLocal == NULL)
783             return ERROR(memory_allocation);
784     }
785     return 0;
786 }
787 
788 ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
789       ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
790 {
791     return ZSTD_CCtx_loadDictionary_advanced(
792             cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
793 }
794 
795 ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
796 {
797     return ZSTD_CCtx_loadDictionary_advanced(
798             cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
799 }
800 
801 
802 size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
803 {
804     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
805     cctx->cdict = cdict;
806     memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* exclusive */
807     return 0;
808 }
809 
810 size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
811 {
812     return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
813 }
814 
815 size_t ZSTD_CCtx_refPrefix_advanced(
816         ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
817 {
818     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
819     cctx->cdict = NULL;   /* prefix discards any prior cdict */
820     cctx->prefixDict.dict = prefix;
821     cctx->prefixDict.dictSize = prefixSize;
822     cctx->prefixDict.dictContentType = dictContentType;
823     return 0;
824 }
825 
826 /*! ZSTD_CCtx_reset() :
827  *  Also dumps dictionary */
828 size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
829 {
830     if ( (reset == ZSTD_reset_session_only)
831       || (reset == ZSTD_reset_session_and_parameters) ) {
832         cctx->streamStage = zcss_init;
833         cctx->pledgedSrcSizePlusOne = 0;
834     }
835     if ( (reset == ZSTD_reset_parameters)
836       || (reset == ZSTD_reset_session_and_parameters) ) {
837         if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
838         cctx->cdict = NULL;
839         return ZSTD_CCtxParams_reset(&cctx->requestedParams);
840     }
841     return 0;
842 }
843 
844 
/** ZSTD_checkCParams() :
    control CParam values remain within authorized range.
    Note : checks run in declaration order; the first out-of-range field
    determines the (single) error reported.
    @return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    BOUNDCHECK(ZSTD_c_windowLog, cParams.windowLog);
    BOUNDCHECK(ZSTD_c_chainLog,  cParams.chainLog);
    BOUNDCHECK(ZSTD_c_hashLog,   cParams.hashLog);
    BOUNDCHECK(ZSTD_c_searchLog, cParams.searchLog);
    BOUNDCHECK(ZSTD_c_minMatch,  cParams.minMatch);
    BOUNDCHECK(ZSTD_c_targetLength,cParams.targetLength);
    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
    return 0;
}
859 
/** ZSTD_clampCParams() :
 *  make CParam values within valid range.
 *  @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
    /* clamp `val` into the authorized range of `cParam`,
     * casting the bound back to the field's own type */
#   define CLAMP_TYPE(cParam, val, type) {                                \
        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
    }
#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, int)
    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
    /* strategy is an enum : clamp with its own type to avoid conversion warnings */
    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
    return cParams;
}
881 
882 /** ZSTD_cycleLog() :
883  *  condition for correct operation : hashLog > 1 */
884 static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
885 {
886     U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
887     return hashLog - btScale;
888 }
889 
/** ZSTD_adjustCParams_internal() :
    optimize `cPar` for a given input (`srcSize` and `dictSize`).
    mostly downsizing to reduce memory consumption and initialization latency.
    Both `srcSize` and `dictSize` are optional (use 0 if unknown).
    Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                            unsigned long long srcSize,
                            size_t dictSize)
{
    static const U64 minSrcSize = 513; /* (1<<9) + 1 */
    static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    /* srcSize+1<2 catches both srcSize==0 and srcSize==(U64)-1
     * (i.e. ZSTD_CONTENTSIZE_UNKNOWN after the unsigned wrap) */
    if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
        srcSize = minSrcSize;  /* presumed small when there is a dictionary */
    else if (srcSize == 0)
        srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize < maxWindowResize)
      && (dictSize < maxWindowResize) )  {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        /* smallest power-of-2 window that covers the whole input */
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                            ZSTD_highbit32(tSize-1) + 1;
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    /* keep the hash table no larger than needed for the window */
    if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        /* keep chainLog consistent with the (possibly reduced) windowLog;
         * see ZSTD_cycleLog() for the strategy-dependent adjustment */
        if (cycleLog > cPar.windowLog)
            cPar.chainLog -= (cycleLog - cPar.windowLog);
    }

    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */

    return cPar;
}
929 
930 ZSTD_compressionParameters
931 ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
932                    unsigned long long srcSize,
933                    size_t dictSize)
934 {
935     cPar = ZSTD_clampCParams(cPar);
936     return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
937 }
938 
/* ZSTD_getCParamsFromCCtxParams() :
 * start from the defaults for the requested compression level,
 * then apply each explicitly-set (non-zero) cParams field as an override,
 * and finally adjust the result for the given src/dict sizes. */
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;  /* LDM has its own default windowLog */
    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
    if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;
    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
    assert(!ZSTD_checkCParams(cParams));
    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
}
954 
/* ZSTD_sizeof_matchState() :
 * total bytes required for a match state's tables.
 * Must stay in sync with the layout performed by ZSTD_reset_matchState(). */
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
                       const U32 forCCtx)
{
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);  /* fast strategy has no chain table */
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    /* 3-byte-match hash table only exists for a compressing ctx with minMatch==3 */
    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = ((size_t)1) << hashLog3;
    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
    size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
                          + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
    /* optimal-parser tables only reserved for btopt+ strategies in a cctx */
    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                                ? optPotentialSpace
                                : 0;
    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
                (U32)chainSize, (U32)hSize, (U32)h3Size);
    return tableSpace + optSpace;
}
973 
/* ZSTD_estimateCCtxSize_usingCCtxParams() :
 * upper bound of the memory a one-shot CCtx would need for these params.
 * The components mirror the workspace budget computed in
 * ZSTD_resetCCtx_internal() (minus the streaming in/out buffers). */
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    /* Estimate CCtx size is supported for single-threaded compression only. */
    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
        U32    const divider = (cParams.minMatch==3) ? 3 : 4;  /* worst case : one sequence every minMatch bytes */
        size_t const maxNbSeq = blockSize / divider;
        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
        size_t const entropySpace = HUF_WORKSPACE_SIZE;
        size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);  /* prev + next block states */
        size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);

        size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
        size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);

        size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +
                                   matchStateSize + ldmSpace + ldmSeqSpace;

        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
        DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
        return sizeof(ZSTD_CCtx) + neededSpace;
    }
}
999 
1000 size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
1001 {
1002     ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
1003     return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
1004 }
1005 
1006 static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
1007 {
1008     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
1009     return ZSTD_estimateCCtxSize_usingCParams(cParams);
1010 }
1011 
1012 size_t ZSTD_estimateCCtxSize(int compressionLevel)
1013 {
1014     int level;
1015     size_t memBudget = 0;
1016     for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
1017         size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
1018         if (newMB > memBudget) memBudget = newMB;
1019     }
1020     return memBudget;
1021 }
1022 
/* ZSTD_estimateCStreamSize_usingCCtxParams() :
 * streaming estimate = one-shot CCtx estimate + streaming in/out buffers.
 * Single-threaded compression only. */
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
    {   size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
        /* input buffer must hold a full window plus one block */
        size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
        /* +1 spare byte, matching buffOutSize in ZSTD_resetCCtx_internal() */
        size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
        size_t const streamingSize = inBuffSize + outBuffSize;

        return CCtxSize + streamingSize;
    }
}
1035 
1036 size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
1037 {
1038     ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
1039     return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
1040 }
1041 
1042 static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
1043 {
1044     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
1045     return ZSTD_estimateCStreamSize_usingCParams(cParams);
1046 }
1047 
1048 size_t ZSTD_estimateCStreamSize(int compressionLevel)
1049 {
1050     int level;
1051     size_t memBudget = 0;
1052     for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
1053         size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
1054         if (newMB > memBudget) memBudget = newMB;
1055     }
1056     return memBudget;
1057 }
1058 
/* ZSTD_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads (non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    /* multi-threaded mode : delegate to the MT context */
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_getFrameProgression(cctx->mtctx);
    }
#endif
    {   ZSTD_frameProgression fp;
        /* bytes sitting in the input buffer, ingested but not yet compressed */
        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
                                cctx->inBuffPos - cctx->inToCompress;
        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
        fp.ingested = cctx->consumedSrcSize + buffered;
        fp.consumed = cctx->consumedSrcSize;
        fp.produced = cctx->producedCSize;
        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
        fp.currentJobID = 0;      /* jobs only exist in multi-threaded mode */
        fp.nbActiveWorkers = 0;   /* single-threaded here by construction */
        return fp;
}   }
1083 
/*! ZSTD_toFlushNow()
 *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
 */
size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_toFlushNow(cctx->mtctx);
    }
#endif
    (void)cctx;   /* silence unused-parameter warning in single-threaded builds */
    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
}
1097 
1098 
1099 
1100 static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
1101                                   ZSTD_compressionParameters cParams2)
1102 {
1103     return (cParams1.hashLog  == cParams2.hashLog)
1104          & (cParams1.chainLog == cParams2.chainLog)
1105          & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
1106          & ((cParams1.minMatch==3) == (cParams2.minMatch==3));  /* hashlog3 space */
1107 }
1108 
/* ZSTD_assertEqualCParams() :
 * debug-build check that two cParams sets are identical field-by-field.
 * Compiles to nothing when asserts are disabled. */
static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
                                    ZSTD_compressionParameters cParams2)
{
    (void)cParams1;   /* parameters unused when asserts are disabled */
    (void)cParams2;
    assert(cParams1.windowLog    == cParams2.windowLog);
    assert(cParams1.chainLog     == cParams2.chainLog);
    assert(cParams1.hashLog      == cParams2.hashLog);
    assert(cParams1.searchLog    == cParams2.searchLog);
    assert(cParams1.minMatch     == cParams2.minMatch);
    assert(cParams1.targetLength == cParams2.targetLength);
    assert(cParams1.strategy     == cParams2.strategy);
}
1122 
1123 /** The parameters are equivalent if ldm is not enabled in both sets or
1124  *  all the parameters are equivalent. */
1125 static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
1126                                     ldmParams_t ldmParams2)
1127 {
1128     return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
1129            (ldmParams1.enableLdm == ldmParams2.enableLdm &&
1130             ldmParams1.hashLog == ldmParams2.hashLog &&
1131             ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
1132             ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
1133             ldmParams1.hashRateLog == ldmParams2.hashRateLog);
1134 }
1135 
/* buffering policy for a compression context :
 * buffered for streaming usage, not_buffered for one-shot */
typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;

/* ZSTD_sufficientBuff() :
 * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
 * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
                            size_t maxNbLit1,
                            ZSTD_buffered_policy_e buffPol2,
                            ZSTD_compressionParameters cParams2,
                            U64 pledgedSrcSize)
{
    /* recompute the sizes the new parameters would require... */
    size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
    size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
    size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4);
    size_t const maxNbLit2 = blockSize2;
    size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
    DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
                (U32)neededBufferSize2, (U32)bufferSize1);
    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
                (U32)maxNbSeq2, (U32)maxNbSeq1);
    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
                (U32)maxNbLit2, (U32)maxNbLit1);
    /* ...and check they all fit within the existing allocations */
    return (maxNbLit2 <= maxNbLit1)
         & (maxNbSeq2 <= maxNbSeq1)
         & (neededBufferSize2 <= bufferSize1);
}
1162 
/** Equivalence for resetCCtx purposes :
 *  returns 1 when the existing context (params1 + current buffer sizes)
 *  can be re-used as-is for the new request (params2), 0 otherwise. */
static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
                                 ZSTD_CCtx_params params2,
                                 size_t buffSize1,
                                 size_t maxNbSeq1, size_t maxNbLit1,
                                 ZSTD_buffered_policy_e buffPol2,
                                 U64 pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
    /* all three checks must pass : table sizing, ldm sizing, buffer sizing */
    if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
      DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
      return 0;
    }
    if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
      DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
      return 0;
    }
    if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
                             params2.cParams, pledgedSrcSize)) {
      DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
      return 0;
    }
    return 1;
}
1187 
1188 static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
1189 {
1190     int i;
1191     for (i = 0; i < ZSTD_REP_NUM; ++i)
1192         bs->rep[i] = repStartValue[i];
1193     bs->entropy.huf.repeatMode = HUF_repeat_none;
1194     bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
1195     bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
1196     bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
1197 }
1198 
/*! ZSTD_invalidateMatchState()
 * Invalidate all the matches in the match finder tables.
 * Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
    ZSTD_window_clear(&ms->window);

    /* after the window clear, dictLimit marks the new start-of-valid-data */
    ms->nextToUpdate = ms->window.dictLimit;
    ms->nextToUpdate3 = ms->window.dictLimit;
    ms->loadedDictEnd = 0;
    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
    ms->dictMatchState = NULL;
}
1213 
/*! ZSTD_continueCCtx() :
 *  reuse CCtx without reset (note : requires no dictionary)
 *  Fast path taken when ZSTD_equivalentParams()==1 : workspace layout
 *  is kept, only per-frame state is re-initialized. */
static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
{
    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
    DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");

    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
    cctx->appliedParams = params;
    cctx->blockState.matchState.cParams = params.cParams;
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;  /* +1 so that 0 can mean "unknown" */
    cctx->consumedSrcSize = 0;
    cctx->producedCSize = 0;
    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
        cctx->appliedParams.fParams.contentSizeFlag = 0;  /* cannot write a size we do not know */
    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
    cctx->stage = ZSTDcs_init;
    cctx->dictID = 0;
    if (params.ldmParams.enableLdm)
        ZSTD_window_clear(&cctx->ldmState.window);
    ZSTD_referenceExternalSequences(cctx, NULL, 0);   /* drop any external sequences */
    ZSTD_invalidateMatchState(&cctx->blockState.matchState);
    ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
    XXH64_reset(&cctx->xxhState, 0);   /* restart content checksum */
    return 0;
}
1242 
/* reset policy : ZSTDcrp_noMemset skips zeroing the match tables */
typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;

/* ZSTD_reset_matchState() :
 * lay out the match state's tables inside the workspace starting at `ptr`,
 * and re-initialize the state. Returns the first byte past the reserved area.
 * The amount consumed must stay in sync with ZSTD_sizeof_matchState(). */
static void*
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
                      void* ptr,
                const ZSTD_compressionParameters* cParams,
                      ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
{
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = ((size_t)1) << hashLog3;
    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);

    assert(((size_t)ptr & 3) == 0);

    ms->hashLog3 = hashLog3;
    memset(&ms->window, 0, sizeof(ms->window));
    ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
    ms->window.lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
    ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
    ZSTD_invalidateMatchState(ms);

    /* opt parser space */
    if (forCCtx && (cParams->strategy >= ZSTD_btopt)) {
        DEBUGLOG(4, "reserving optimal parser space");
        /* frequency tables first, then match/price tables, carved
         * sequentially out of the workspace */
        ms->opt.litFreq = (unsigned*)ptr;
        ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
        ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
        ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
        ptr = ms->opt.offCodeFreq + (MaxOff+1);
        ms->opt.matchTable = (ZSTD_match_t*)ptr;
        ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
        ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
        ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
    }

    /* table Space */
    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
    assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
    if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
    ms->hashTable = (U32*)(ptr);
    ms->chainTable = ms->hashTable + hSize;
    ms->hashTable3 = ms->chainTable + chainSize;
    ptr = ms->hashTable3 + h3Size;

    ms->cParams = *cParams;

    assert(((size_t)ptr & 3) == 0);
    return ptr;
}
1294 
1295 #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
1296 #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when workspace is continuously too large
1297                                          * during at least this number of times,
1298                                          * context's memory usage is considered wasteful,
1299                                          * because it's sized to handle a worst case scenario which rarely happens.
1300                                          * In which case, resize it down to free some memory */
1301 
/*! ZSTD_resetCCtx_internal() :
    note : `params` are assumed fully validated at this stage */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
                                      ZSTD_CCtx_params params,
                                      U64 pledgedSrcSize,
                                      ZSTD_compResetPolicy_e const crp,
                                      ZSTD_buffered_policy_e const zbuff)
{
    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
                (U32)pledgedSrcSize, params.cParams.windowLog);
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));

    /* fast path : if new params are compatible with the current workspace,
     * re-use the context in place without reallocating anything */
    if (crp == ZSTDcrp_continue) {
        if (ZSTD_equivalentParams(zc->appliedParams, params,
                                  zc->inBuffSize,
                                  zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
                                  zbuff, pledgedSrcSize)) {
            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)",
                        zc->appliedParams.cParams.windowLog, zc->blockSize);
            zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION)
                return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
    }   }
    DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");

    if (params.ldmParams.enableLdm) {
        /* Adjust long distance matching parameters */
        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
        assert(params.ldmParams.hashRateLog < 32);
        zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
    }

    /* compute the per-component space budget for the new parameters */
    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
        U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;  /* worst case : one sequence every minMatch bytes */
        size_t const maxNbSeq = blockSize / divider;
        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
        size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
        size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
        size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
        void* ptr;   /* used to partition workSpace */

        /* Check if workSpace is large enough, alloc a new one if needed */
        {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
            size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
            size_t const bufferSpace = buffInSize + buffOutSize;
            size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
            size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);

            size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
                                       ldmSeqSpace + matchStateSize + tokenSpace +
                                       bufferSpace;

            /* workspace is also resized DOWN when it has stayed oversized
             * for ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive resets */
            int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
            int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
            int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
            zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;

            DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
                        neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);

            if (workSpaceTooSmall || workSpaceWasteful) {
                DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
                            zc->workSpaceSize >> 10,
                            neededSpace >> 10);
                /* static cctx : no resize, error out */
                if (zc->staticSize) return ERROR(memory_allocation);

                zc->workSpaceSize = 0;   /* keep state consistent if malloc fails below */
                ZSTD_free(zc->workSpace, zc->customMem);
                zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
                if (zc->workSpace == NULL) return ERROR(memory_allocation);
                zc->workSpaceSize = neededSpace;
                zc->workSpaceOversizedDuration = 0;

                /* Statically sized space.
                 * entropyWorkspace never moves,
                 * though prev/next block swap places */
                assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
                assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
                zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
                ptr = zc->blockState.nextCBlock + 1;
                zc->entropyWorkspace = (U32*)ptr;
        }   }

        /* init params */
        zc->appliedParams = params;
        zc->blockState.matchState.cParams = params.cParams;
        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;   /* +1 so that 0 can mean "unknown" */
        zc->consumedSrcSize = 0;
        zc->producedCSize = 0;
        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            zc->appliedParams.fParams.contentSizeFlag = 0;   /* cannot write a size we do not know */
        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
        zc->blockSize = blockSize;

        XXH64_reset(&zc->xxhState, 0);
        zc->stage = ZSTDcs_init;
        zc->dictID = 0;

        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);

        /* partition the workspace : carve each region out sequentially,
         * in the same order as the neededSpace sum above */
        ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32;

        /* ldm hash table */
        /* initialize bucketOffsets table later for pointer alignment */
        if (params.ldmParams.enableLdm) {
            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
            memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
            assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
            zc->ldmState.hashTable = (ldmEntry_t*)ptr;
            ptr = zc->ldmState.hashTable + ldmHSize;
            zc->ldmSequences = (rawSeq*)ptr;
            ptr = zc->ldmSequences + maxNbLdmSeq;
            zc->maxNbLdmSequences = maxNbLdmSeq;

            memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
        }
        assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */

        ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);

        /* sequences storage */
        zc->seqStore.maxNbSeq = maxNbSeq;
        zc->seqStore.sequencesStart = (seqDef*)ptr;
        ptr = zc->seqStore.sequencesStart + maxNbSeq;
        zc->seqStore.llCode = (BYTE*) ptr;
        zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
        zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
        zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
        /* ZSTD_wildcopy() is used to copy into the literals buffer,
         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
         */
        zc->seqStore.maxNbLit = blockSize;
        ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;

        /* ldm bucketOffsets table */
        if (params.ldmParams.enableLdm) {
            size_t const ldmBucketSize =
                  ((size_t)1) << (params.ldmParams.hashLog -
                                  params.ldmParams.bucketSizeLog);
            memset(ptr, 0, ldmBucketSize);
            zc->ldmState.bucketOffsets = (BYTE*)ptr;
            ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
            ZSTD_window_clear(&zc->ldmState.window);
        }
        ZSTD_referenceExternalSequences(zc, NULL, 0);

        /* buffers */
        zc->inBuffSize = buffInSize;
        zc->inBuff = (char*)ptr;
        zc->outBuffSize = buffOutSize;
        zc->outBuff = zc->inBuff + buffInSize;

        return 0;
    }
}
1464 
1465 /* ZSTD_invalidateRepCodes() :
1466  * ensures next compression will not use repcodes from previous block.
1467  * Note : only works with regular variant;
1468  *        do not use with extDict variant ! */
1469 void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
1470     int i;
1471     for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
1472     assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
1473 }
1474 
/* These are the approximate sizes for each strategy past which copying the
 * dictionary tables into the working context is faster than using them
 * in-place.
 * Indexed directly by ZSTD_strategy value; entry 0 exists only because
 * strategy values start at 1. */
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
    8 KB,  /* unused */
    8 KB,  /* ZSTD_fast */
    16 KB, /* ZSTD_dfast */
    32 KB, /* ZSTD_greedy */
    32 KB, /* ZSTD_lazy */
    32 KB, /* ZSTD_lazy2 */
    32 KB, /* ZSTD_btlazy2 */
    32 KB, /* ZSTD_btopt */
    8 KB,  /* ZSTD_btultra */
    8 KB   /* ZSTD_btultra2 */
};
1491 
1492 static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
1493                                  ZSTD_CCtx_params params,
1494                                  U64 pledgedSrcSize)
1495 {
1496     size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
1497     return ( pledgedSrcSize <= cutoff
1498           || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
1499           || params.attachDictPref == ZSTD_dictForceAttach )
1500         && params.attachDictPref != ZSTD_dictForceCopy
1501         && !params.forceWindow; /* dictMatchState isn't correctly
1502                                  * handled in _enforceMaxDist */
1503 }
1504 
/* ZSTD_resetCCtx_byAttachingCDict() :
 * Reset `cctx` for a new compression, referencing the cdict's match state
 * in place (no table copy). The working context keeps its own (smaller)
 * tables, sized for the input only.
 * @return : 0 */
static size_t ZSTD_resetCCtx_byAttachingCDict(
    ZSTD_CCtx* cctx,
    const ZSTD_CDict* cdict,
    ZSTD_CCtx_params params,
    U64 pledgedSrcSize,
    ZSTD_buffered_policy_e zbuff)
{
    {
        const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
        unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Resize working context table params for input only, since the dict
         * has its own tables. */
        params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
        params.cParams.windowLog = windowLog;
        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                ZSTDcrp_continue, zbuff);
        /* strategy must match so the working context searches the dict tables correctly */
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
    }

    {   /* compute the span of the dictionary inside its own window */
        const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
                                  - cdict->matchState.window.base);
        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
        if (cdictLen == 0) {
            /* don't even attach dictionaries with no contents */
            DEBUGLOG(4, "skipping attaching empty dictionary");
        } else {
            DEBUGLOG(4, "attaching dictionary into context");
            cctx->blockState.matchState.dictMatchState = &cdict->matchState;

            /* prep working match state so dict matches never have negative indices
             * when they are translated to the working context's index space. */
            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
                cctx->blockState.matchState.window.nextSrc =
                    cctx->blockState.matchState.window.base + cdictEnd;
                ZSTD_window_clear(&cctx->blockState.matchState.window);
            }
            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
        }
    }

    cctx->dictID = cdict->dictID;

    /* copy block state (entropy tables, repcodes) from the cdict */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
1554 
/* ZSTD_resetCCtx_byCopyingCDict() :
 * Reset `cctx` for a new compression, duplicating the cdict's hash/chain
 * tables, window offsets and entropy state into the working context.
 * The working context's table sizes must match the cdict's exactly.
 * @return : 0 */
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            ZSTD_CCtx_params params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;

    DEBUGLOG(4, "copying dictionary into context");

    {   unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Copy only compression parameters related to tables. */
        params.cParams = *cdict_cParams;
        params.cParams.windowLog = windowLog;
        /* ZSTDcrp_noMemset : tables are about to be overwritten by the copy below */
        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                ZSTDcrp_noMemset, zbuff);
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
    }

    /* copy tables */
    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
        size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
        assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
        assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
        memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
    }

    /* Zero the hashTable3, since the cdict never fills it */
    {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
        assert(cdict->matchState.hashLog3 == 0);
        memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
    }

    /* copy dictionary offsets */
    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }

    cctx->dictID = cdict->dictID;

    /* copy block state (entropy tables, repcodes) from the cdict */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
1610 
1611 /* We have a choice between copying the dictionary context into the working
1612  * context, or referencing the dictionary context from the working context
1613  * in-place. We decide here which strategy to use. */
1614 static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
1615                             const ZSTD_CDict* cdict,
1616                             ZSTD_CCtx_params params,
1617                             U64 pledgedSrcSize,
1618                             ZSTD_buffered_policy_e zbuff)
1619 {
1620 
1621     DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
1622                 (unsigned)pledgedSrcSize);
1623 
1624     if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
1625         return ZSTD_resetCCtx_byAttachingCDict(
1626             cctx, cdict, params, pledgedSrcSize, zbuff);
1627     } else {
1628         return ZSTD_resetCCtx_byCopyingCDict(
1629             cctx, cdict, params, pledgedSrcSize, zbuff);
1630     }
1631 }
1632 
/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @return : 0, or an error code (stage_wrong if srcCCtx already started compressing) */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);

    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        params.fParams = fParams;
        /* ZSTDcrp_noMemset : tables are about to be overwritten by the copy below */
        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
                                ZSTDcrp_noMemset, zbuff);
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    /* copy tables */
    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
        assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
        memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
    }

    /* copy dictionary offsets */
    {
        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;

    /* copy block state (entropy tables, repcodes) */
    memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}
1689 
1690 /*! ZSTD_copyCCtx() :
1691  *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
1692  *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
1693  *  pledgedSrcSize==0 means "unknown".
1694 *   @return : 0, or an error code */
1695 size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
1696 {
1697     ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
1698     ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
1699     ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
1700     if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
1701     fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
1702 
1703     return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
1704                                 fParams, pledgedSrcSize,
1705                                 zbuff);
1706 }
1707 
1708 
1709 #define ZSTD_ROWSIZE 16
1710 /*! ZSTD_reduceTable() :
1711  *  reduce table indexes by `reducerValue`, or squash to zero.
1712  *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
1713  *  It must be set to a clear 0/1 value, to remove branch during inlining.
1714  *  Presume table size is a multiple of ZSTD_ROWSIZE
1715  *  to help auto-vectorization */
1716 FORCE_INLINE_TEMPLATE void
1717 ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
1718 {
1719     int const nbRows = (int)size / ZSTD_ROWSIZE;
1720     int cellNb = 0;
1721     int rowNb;
1722     assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
1723     assert(size < (1U<<31));   /* can be casted to int */
1724     for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
1725         int column;
1726         for (column=0; column<ZSTD_ROWSIZE; column++) {
1727             if (preserveMark) {
1728                 U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
1729                 table[cellNb] += adder;
1730             }
1731             if (table[cellNb] < reducerValue) table[cellNb] = 0;
1732             else table[cellNb] -= reducerValue;
1733             cellNb++;
1734     }   }
1735 }
1736 
/* ZSTD_reduceTable() :
 * regular variant : does not preserve the btlazy2 unsorted mark */
static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}
1741 
/* ZSTD_reduceTable_btlazy2() :
 * variant for btlazy2 : preserves the "unsorted mark" sentinel value */
static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}
1746 
1747 /*! ZSTD_reduceIndex() :
1748 *   rescale all indexes to avoid future overflow (indexes are U32) */
1749 static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
1750 {
1751     ZSTD_matchState_t* const ms = &zc->blockState.matchState;
1752     {   U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
1753         ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
1754     }
1755 
1756     if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
1757         U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
1758         if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
1759             ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
1760         else
1761             ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
1762     }
1763 
1764     if (ms->hashLog3) {
1765         U32 const h3Size = (U32)1 << ms->hashLog3;
1766         ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
1767     }
1768 }
1769 
1770 
1771 /*-*******************************************************
1772 *  Block entropic compression
1773 *********************************************************/
1774 
1775 /* See doc/zstd_compression_format.md for detailed format description */
1776 
1777 static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
1778 {
1779     U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
1780     if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
1781     MEM_writeLE24(dst, cBlockHeader24);
1782     memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
1783     return ZSTD_blockHeaderSize + srcSize;
1784 }
1785 
1786 static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1787 {
1788     BYTE* const ostart = (BYTE* const)dst;
1789     U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
1790 
1791     if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
1792 
1793     switch(flSize)
1794     {
1795         case 1: /* 2 - 1 - 5 */
1796             ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
1797             break;
1798         case 2: /* 2 - 2 - 12 */
1799             MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
1800             break;
1801         case 3: /* 2 - 2 - 20 */
1802             MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
1803             break;
1804         default:   /* not necessary : flSize is {1,2,3} */
1805             assert(0);
1806     }
1807 
1808     memcpy(ostart + flSize, src, srcSize);
1809     return srcSize + flSize;
1810 }
1811 
1812 static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1813 {
1814     BYTE* const ostart = (BYTE* const)dst;
1815     U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
1816 
1817     (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
1818 
1819     switch(flSize)
1820     {
1821         case 1: /* 2 - 1 - 5 */
1822             ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
1823             break;
1824         case 2: /* 2 - 2 - 12 */
1825             MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
1826             break;
1827         case 3: /* 2 - 2 - 20 */
1828             MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
1829             break;
1830         default:   /* not necessary : flSize is {1,2,3} */
1831             assert(0);
1832     }
1833 
1834     ostart[flSize] = *(const BYTE*)src;
1835     return flSize+1;
1836 }
1837 
1838 
1839 /* ZSTD_minGain() :
1840  * minimum compression required
1841  * to generate a compress block or a compressed literals section.
1842  * note : use same formula for both situations */
1843 static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
1844 {
1845     U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
1846     ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
1847     assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
1848     return (srcSize >> minlog) + 2;
1849 }
1850 
/* ZSTD_compressLiterals() :
 * Compress the literals section of a block with Huffman, falling back to
 * raw (set_basic) or RLE storage when compression doesn't pay.
 * Updates `nextHuf` with the table actually used, so the next block may
 * reuse it (set_repeat).
 * @return : size of the literals section written at `dst` (header included),
 *           or an error code */
static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                                     ZSTD_hufCTables_t* nextHuf,
                                     ZSTD_strategy strategy, int disableLiteralCompression,
                                     void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                                     void* workspace, size_t wkspSize,
                               const int bmi2)
{
    size_t const minGain = ZSTD_minGain(srcSize, strategy);
    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
    BYTE*  const ostart = (BYTE*)dst;
    U32 singleStream = srcSize < 256;   /* small inputs use a single Huffman stream */
    symbolEncodingType_e hType = set_compressed;
    size_t cLitSize;

    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
                disableLiteralCompression);

    /* Prepare nextEntropy assuming reusing the existing table */
    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (disableLiteralCompression)
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    /* small ? don't even attempt compression (speed opt) */
#   define COMPRESS_LITERALS_SIZE_MIN 63
    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }

    if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
    {   HUF_repeat repeat = prevHuf->repeatMode;
        /* fast strategies on small inputs prefer reusing the previous table over rebuilding */
        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
        if (repeat != HUF_repeat_none) {
            /* reused the existing table */
            hType = set_repeat;
        }
    }

    /* not compressible, or gain too small : revert to raw literals (and restore entropy state) */
    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }
    /* compressed to a single byte : RLE encoding is cheaper */
    if (cLitSize==1) {
        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
    }

    if (hType == set_compressed) {
        /* using a newly constructed table */
        nextHuf->repeatMode = HUF_repeat_check;
    }

    /* Build header : sizes of regenerated and compressed literals */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default:  /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    return lhSize+cLitSize;
}
1933 
1934 
1935 void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
1936 {
1937     const seqDef* const sequences = seqStorePtr->sequencesStart;
1938     BYTE* const llCodeTable = seqStorePtr->llCode;
1939     BYTE* const ofCodeTable = seqStorePtr->ofCode;
1940     BYTE* const mlCodeTable = seqStorePtr->mlCode;
1941     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
1942     U32 u;
1943     assert(nbSeq <= seqStorePtr->maxNbSeq);
1944     for (u=0; u<nbSeq; u++) {
1945         U32 const llv = sequences[u].litLength;
1946         U32 const mlv = sequences[u].matchLength;
1947         llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
1948         ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
1949         mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
1950     }
1951     if (seqStorePtr->longLengthID==1)
1952         llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
1953     if (seqStorePtr->longLengthID==2)
1954         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
1955 }
1956 
1957 
1958 /**
1959  * -log2(x / 256) lookup table for x in [0, 256).
1960  * If x == 0: Return 0
1961  * Else: Return floor(-log2(x / 256) * 256)
1962  */
1963 static unsigned const kInverseProbabiltyLog256[256] = {
1964     0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
1965     1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
1966     874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
1967     724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
1968     618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
1969     535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
1970     468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
1971     411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
1972     362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
1973     318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
1974     279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
1975     244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
1976     212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
1977     182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
1978     155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
1979     130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
1980     106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
1981     83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
1982     62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
1983     42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
1984     23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
1985     5,    4,    2,    1,
1986 };
1987 
1988 
1989 /**
1990  * Returns the cost in bits of encoding the distribution described by count
1991  * using the entropy bound.
1992  */
1993 static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
1994 {
1995     unsigned cost = 0;
1996     unsigned s;
1997     for (s = 0; s <= max; ++s) {
1998         unsigned norm = (unsigned)((256 * count[s]) / total);
1999         if (count[s] != 0 && norm == 0)
2000             norm = 1;
2001         assert(count[s] < total);
2002         cost += count[s] * kInverseProbabiltyLog256[norm];
2003     }
2004     return cost >> 8;
2005 }
2006 
2007 
2008 /**
2009  * Returns the cost in bits of encoding the distribution in count using the
2010  * table described by norm. The max symbol support by norm is assumed >= max.
2011  * norm must be valid for every symbol with non-zero probability in count.
2012  */
2013 static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
2014                                     unsigned const* count, unsigned const max)
2015 {
2016     unsigned const shift = 8 - accuracyLog;
2017     size_t cost = 0;
2018     unsigned s;
2019     assert(accuracyLog <= 8);
2020     for (s = 0; s <= max; ++s) {
2021         unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
2022         unsigned const norm256 = normAcc << shift;
2023         assert(norm256 > 0);
2024         assert(norm256 < 256);
2025         cost += count[s] * kInverseProbabiltyLog256[norm256];
2026     }
2027     return cost >> 8;
2028 }
2029 
2030 
2031 static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
2032   void const* ptr = ctable;
2033   U16 const* u16ptr = (U16 const*)ptr;
2034   U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
2035   return maxSymbolValue;
2036 }
2037 
2038 
2039 /**
2040  * Returns the cost in bits of encoding the distribution in count using ctable.
2041  * Returns an error if ctable cannot represent all the symbols in count.
2042  */
2043 static size_t ZSTD_fseBitCost(
2044     FSE_CTable const* ctable,
2045     unsigned const* count,
2046     unsigned const max)
2047 {
2048     unsigned const kAccuracyLog = 8;
2049     size_t cost = 0;
2050     unsigned s;
2051     FSE_CState_t cstate;
2052     FSE_initCState(&cstate, ctable);
2053     if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
2054         DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
2055                     ZSTD_getFSEMaxSymbolValue(ctable), max);
2056         return ERROR(GENERIC);
2057     }
2058     for (s = 0; s <= max; ++s) {
2059         unsigned const tableLog = cstate.stateLog;
2060         unsigned const badCost = (tableLog + 1) << kAccuracyLog;
2061         unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
2062         if (count[s] == 0)
2063             continue;
2064         if (bitCost >= badCost) {
2065             DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
2066             return ERROR(GENERIC);
2067         }
2068         cost += count[s] * bitCost;
2069     }
2070     return cost >> kAccuracyLog;
2071 }
2072 
2073 /**
2074  * Returns the cost in bytes of encoding the normalized count header.
2075  * Returns an error if any of the helper functions return an error.
2076  */
2077 static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
2078                               size_t const nbSeq, unsigned const FSELog)
2079 {
2080     BYTE wksp[FSE_NCOUNTBOUND];
2081     S16 norm[MaxSeq + 1];
2082     const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
2083     CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
2084     return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
2085 }
2086 
2087 
/* Policy deciding whether the predefined (default) distribution
 * is an acceptable choice for the current symbol type. */
typedef enum {
    ZSTD_defaultDisallowed = 0,   /* default tables may not be selected */
    ZSTD_defaultAllowed = 1       /* set_basic (default tables) is a legal outcome */
} ZSTD_defaultPolicy_e;
2092 
/* ZSTD_selectEncodingType() :
 * Choose how a symbol stream (literal lengths, offsets, or match lengths)
 * will be encoded : RLE, default tables, repeat of previous tables, or a
 * freshly built FSE table. Fast strategies use cheap heuristics; stronger
 * strategies estimate actual bit costs. Updates *repeatMode accordingly. */
MEM_STATIC symbolEncodingType_e
ZSTD_selectEncodingType(
        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
        FSE_CTable const* prevCTable,
        short const* defaultNorm, U32 defaultNormLog,
        ZSTD_defaultPolicy_e const isDefaultAllowed,
        ZSTD_strategy const strategy)
{
    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
    /* single distinct symbol : RLE (or set_basic when even cheaper) */
    if (mostFrequent == nbSeq) {
        *repeatMode = FSE_repeat_none;
        if (isDefaultAllowed && nbSeq <= 2) {
            /* Prefer set_basic over set_rle when there are 2 or less symbols,
             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
             * If basic encoding isn't possible, always choose RLE.
             */
            DEBUGLOG(5, "Selected set_basic");
            return set_basic;
        }
        DEBUGLOG(5, "Selected set_rle");
        return set_rle;
    }
    if (strategy < ZSTD_lazy) {
        /* fast path : heuristic thresholds instead of exact cost estimation */
        if (isDefaultAllowed) {
            size_t const staticFse_nbSeq_max = 1000;
            size_t const mult = 10 - strategy;
            size_t const baseLog = 3;
            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
            assert(mult <= 9 && mult >= 7);
            if ( (*repeatMode == FSE_repeat_valid)
              && (nbSeq < staticFse_nbSeq_max) ) {
                DEBUGLOG(5, "Selected set_repeat");
                return set_repeat;
            }
            if ( (nbSeq < dynamicFse_nbSeq_min)
              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
                DEBUGLOG(5, "Selected set_basic");
                /* The format allows default tables to be repeated, but it isn't useful.
                 * When using simple heuristics to select encoding type, we don't want
                 * to confuse these tables with dictionaries. When running more careful
                 * analysis, we don't need to waste time checking both repeating tables
                 * and default tables.
                 */
                *repeatMode = FSE_repeat_none;
                return set_basic;
            }
        }
    } else {
        /* strong strategies : estimate each option's bit cost and pick the cheapest */
        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);

        if (isDefaultAllowed) {
            assert(!ZSTD_isError(basicCost));
            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
        }
        assert(!ZSTD_isError(NCountCost));
        assert(compressedCost < ERROR(maxCode));   /* error codes (huge values) lose every comparison below */
        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
        if (basicCost <= repeatCost && basicCost <= compressedCost) {
            DEBUGLOG(5, "Selected set_basic");
            assert(isDefaultAllowed);
            *repeatMode = FSE_repeat_none;
            return set_basic;
        }
        if (repeatCost <= compressedCost) {
            DEBUGLOG(5, "Selected set_repeat");
            assert(!ZSTD_isError(repeatCost));
            return set_repeat;
        }
        assert(compressedCost < basicCost && compressedCost < repeatCost);
    }
    DEBUGLOG(5, "Selected set_compressed");
    *repeatMode = FSE_repeat_check;
    return set_compressed;
}
2173 
/* ZSTD_buildCTable() :
 * Builds the FSE compression table `nextCTable` for one symbol stream
 * (literal lengths, offsets, or match lengths), according to the encoding
 * `type` previously chosen by ZSTD_selectEncodingType().
 * When `type` is set_compressed, also serializes the FSE table description
 * (NCount header) into `dst`; when set_rle, writes the single symbol.
 * `count` : symbol histogram; modified in the set_compressed path (see below).
 * @return : number of bytes written into `dst`
 *           (0 for set_repeat / set_basic), or an error code */
MEM_STATIC size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
                unsigned* count, U32 max,
                const BYTE* codeTable, size_t nbSeq,
                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                const FSE_CTable* prevCTable, size_t prevCTableSize,
                void* workspace, size_t workspaceSize)
{
    BYTE* op = (BYTE*)dst;
    const BYTE* const oend = op + dstCapacity;
    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);

    switch (type) {
    case set_rle:
        /* every sequence uses the same symbol : emit it raw (exactly 1 byte) */
        CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
        if (dstCapacity==0) return ERROR(dstSize_tooSmall);
        *op = codeTable[0];
        return 1;
    case set_repeat:
        /* reuse previous block's table : copy it over unchanged, nothing emitted */
        memcpy(nextCTable, prevCTable, prevCTableSize);
        return 0;
    case set_basic:
        CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
        return 0;
    case set_compressed: {
        S16 norm[MaxSeq + 1];
        size_t nbSeq_1 = nbSeq;
        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
        /* The last symbol is transmitted through the initial FSE state
         * (see FSE_initCState2 in ZSTD_encodeSequences_body), so remove one
         * occurrence of it from the histogram before normalization — but only
         * if it appears more than once, to keep its normalized frequency > 0. */
        if (count[codeTable[nbSeq-1]] > 1) {
            count[codeTable[nbSeq-1]]--;
            nbSeq_1--;
        }
        assert(nbSeq_1 > 1);
        CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
            if (FSE_isError(NCountSize)) return NCountSize;
            CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
            return NCountSize;
        }
    }
    default: return assert(0), ERROR(GENERIC);   /* unreachable : all enum values handled above */
    }
}
2218 
/* ZSTD_encodeSequences_body() :
 * Writes all sequences into the output bitstream : FSE-encodes the
 * LL/ML/OF codes with the provided CTables and appends the extra
 * literal-length / match-length / offset bits raw.
 * Sequences are processed in reverse order (FSE decoding regenerates
 * symbols in the opposite direction of encoding); the last sequence's
 * symbols are carried by the initial FSE states instead of the bitstream.
 * The numeric comments on the right track the worst-case number of bits
 * accumulated in the bit container on 32-bit / 64-bit targets, showing
 * that each flush happens before the container can overflow.
 * @return : size of the bitstream written into `dst`, or an error code */
FORCE_INLINE_TEMPLATE size_t
ZSTD_encodeSequences_body(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    BIT_CStream_t blockStream;
    FSE_CState_t  stateMatchLength;
    FSE_CState_t  stateOffsetBits;
    FSE_CState_t  stateLitLength;

    CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
                (int)(blockStream.endPtr - blockStream.startPtr),
                (unsigned)dstCapacity);

    /* first symbols */
    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    /* long-offsets mode : the offset's extra bits may exceed the bit container's
     * guaranteed headroom (STREAM_ACCUMULATOR_MIN), so split them in two with a
     * flush in between */
    if (longOffsets) {
        U32 const ofBits = ofCodeTable[nbSeq-1];
        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
        if (extraBits) {
            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
            BIT_flushBits(&blockStream);
        }
        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
                    ofBits - extraBits);
    } else {
        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
    }
    BIT_flushBits(&blockStream);

    {   size_t n;
        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
            BYTE const llCode = llCodeTable[n];
            BYTE const ofCode = ofCodeTable[n];
            BYTE const mlCode = mlCodeTable[n];
            U32  const llBits = LL_bits[llCode];
            U32  const ofBits = ofCode;        /* offset code == number of extra offset bits */
            U32  const mlBits = ML_bits[mlCode];
            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                        (unsigned)sequences[n].litLength,
                        (unsigned)sequences[n].matchLength + MINMATCH,
                        (unsigned)sequences[n].offset);
                                                                            /* 32b*/  /* 64b*/
                                                                            /* (7)*/  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
                BIT_flushBits(&blockStream);                                /* (7)*/
            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
            if (longOffsets) {
                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                if (extraBits) {
                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
                    BIT_flushBits(&blockStream);                            /* (7)*/
                }
                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
                            ofBits - extraBits);                            /* 31 */
            } else {
                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
            }
            BIT_flushBits(&blockStream);                                    /* (7)*/
            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
    }   }

    /* terminate each FSE state, writing the final state values */
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
    FSE_flushCState(&blockStream, &stateMatchLength);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
    FSE_flushCState(&blockStream, &stateOffsetBits);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
    FSE_flushCState(&blockStream, &stateLitLength);

    {   size_t const streamSize = BIT_closeCStream(&blockStream);
        if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
        return streamSize;
    }
}
2310 
2311 static size_t
2312 ZSTD_encodeSequences_default(
2313             void* dst, size_t dstCapacity,
2314             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
2315             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
2316             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
2317             seqDef const* sequences, size_t nbSeq, int longOffsets)
2318 {
2319     return ZSTD_encodeSequences_body(dst, dstCapacity,
2320                                     CTable_MatchLength, mlCodeTable,
2321                                     CTable_OffsetBits, ofCodeTable,
2322                                     CTable_LitLength, llCodeTable,
2323                                     sequences, nbSeq, longOffsets);
2324 }
2325 
2326 
2327 #if DYNAMIC_BMI2
2328 
2329 static TARGET_ATTRIBUTE("bmi2") size_t
2330 ZSTD_encodeSequences_bmi2(
2331             void* dst, size_t dstCapacity,
2332             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
2333             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
2334             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
2335             seqDef const* sequences, size_t nbSeq, int longOffsets)
2336 {
2337     return ZSTD_encodeSequences_body(dst, dstCapacity,
2338                                     CTable_MatchLength, mlCodeTable,
2339                                     CTable_OffsetBits, ofCodeTable,
2340                                     CTable_LitLength, llCodeTable,
2341                                     sequences, nbSeq, longOffsets);
2342 }
2343 
2344 #endif
2345 
2346 static size_t ZSTD_encodeSequences(
2347             void* dst, size_t dstCapacity,
2348             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
2349             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
2350             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
2351             seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
2352 {
2353     DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
2354 #if DYNAMIC_BMI2
2355     if (bmi2) {
2356         return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
2357                                          CTable_MatchLength, mlCodeTable,
2358                                          CTable_OffsetBits, ofCodeTable,
2359                                          CTable_LitLength, llCodeTable,
2360                                          sequences, nbSeq, longOffsets);
2361     }
2362 #endif
2363     (void)bmi2;
2364     return ZSTD_encodeSequences_default(dst, dstCapacity,
2365                                         CTable_MatchLength, mlCodeTable,
2366                                         CTable_OffsetBits, ofCodeTable,
2367                                         CTable_LitLength, llCodeTable,
2368                                         sequences, nbSeq, longOffsets);
2369 }
2370 
/* ZSTD_compressSequences_internal() :
 * Actually compresses both literals and sequences into `dst` :
 * literals section first, then the sequences header (nbSeq + the seqHead
 * byte carrying the three encoding-type flags), the FSE table descriptions,
 * and finally the interleaved bitstream.
 * Entropy statistics are read from `prevEntropy`; the tables selected for
 * this block are written into `nextEntropy` for use by the next block.
 * @return : compressed size written into `dst`,
 *           0 to request an uncompressed block (decoder-compatibility
 *           workaround, see below), or an error code */
MEM_STATIC size_t
ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
                          const ZSTD_entropyCTables_t* prevEntropy,
                                ZSTD_entropyCTables_t* nextEntropy,
                          const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                void* workspace, size_t wkspSize,
                          const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    unsigned count[MaxSeq+1];
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    BYTE* seqHead;
    BYTE* lastNCount = NULL;   /* position of the last FSE table description written, if any (see workaround below) */

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    DEBUGLOG(5, "ZSTD_compressSequences_internal");

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const litSize = seqStorePtr->lit - literals;
        /* heuristic : ZSTD_fast with a non-zero targetLength requests raw (uncompressed) literals */
        int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy, disableLiteralCompression,
                                    op, dstCapacity,
                                    literals, litSize,
                                    workspace, wkspSize,
                                    bmi2);
        if (ZSTD_isError(cSize))
          return cSize;
        assert(cSize <= dstCapacity);
        op += cSize;
    }

    /* Sequences Header */
    if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
    if (nbSeq < 0x7F)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        /* Copy the old tables over as if we repeated them */
        memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
        return op - ostart;
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    /* build CTable for Literal Lengths */
    {   unsigned max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        LLFSELog, prevEntropy->fse.litlengthCTable,
                                        LL_defaultNorm, LL_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
                                                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                                    prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
                                                    workspace, wkspSize);
            if (ZSTD_isError(countSize)) return countSize;
            if (LLtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }
    /* build CTable for Offsets */
    {   unsigned max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        OffFSELog, prevEntropy->fse.offcodeCTable,
                                        OF_defaultNorm, OF_defaultNormLog,
                                        defaultPolicy, strategy);
        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
                                                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                                    prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
                                                    workspace, wkspSize);
            if (ZSTD_isError(countSize)) return countSize;
            if (Offtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }
    /* build CTable for MatchLengths */
    {   unsigned max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
                                        ML_defaultNorm, ML_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
                                                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
                                                    prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
                                                    workspace, wkspSize);
            if (ZSTD_isError(countSize)) return countSize;
            if (MLtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }

    /* pack the three 2-bit encoding types into the seqHead byte reserved above */
    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, oend - op,
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
        if (lastNCount && (op - lastNCount) < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(op - lastNCount == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
    }

    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
    return op - ostart;
}
2534 
/* ZSTD_compressSequences() :
 * entropy-compression front-end for one block.
 * Wraps ZSTD_compressSequences_internal() and converts "not worth
 * compressing" outcomes into a 0 return, which callers interpret as
 * "emit a raw (uncompressed) block instead".
 * @return : compressed size, 0 if the block should be left uncompressed,
 *           or an error code */
MEM_STATIC size_t
ZSTD_compressSequences(seqStore_t* seqStorePtr,
                       const ZSTD_entropyCTables_t* prevEntropy,
                             ZSTD_entropyCTables_t* nextEntropy,
                       const ZSTD_CCtx_params* cctxParams,
                             void* dst, size_t dstCapacity,
                             size_t srcSize,
                             void* workspace, size_t wkspSize,
                             int bmi2)
{
    size_t const cSize = ZSTD_compressSequences_internal(
                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,
                            dst, dstCapacity,
                            workspace, wkspSize, bmi2);
    if (cSize == 0) return 0;
    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
     * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
     */
    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
        return 0;  /* block not compressed */
    if (ZSTD_isError(cSize)) return cSize;

    /* Check compressibility */
    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);   /* compression must gain at least minGain bytes to be worthwhile */
        if (cSize >= maxCSize) return 0;  /* block not compressed */
    }

    return cSize;
}
2564 
/* ZSTD_selectBlockCompressor() :
 * Returns the block-compressor function matching the requested strategy
 * and dictionary mode.
 * Not static, but internal use only (used by long distance matcher)
 * assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
{
    /* rows indexed by dictMode (no dict / extDict / dictMatchState, per the
     * variant suffixes below), columns indexed by strategy; index 0 falls
     * back to the fast compressor.
     * Note : the btultra2 column of the extDict and dictMatchState rows
     * reuses the btultra variants. */
    static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {
        { ZSTD_compressBlock_fast  /* default for 0 */,
          ZSTD_compressBlock_fast,
          ZSTD_compressBlock_doubleFast,
          ZSTD_compressBlock_greedy,
          ZSTD_compressBlock_lazy,
          ZSTD_compressBlock_lazy2,
          ZSTD_compressBlock_btlazy2,
          ZSTD_compressBlock_btopt,
          ZSTD_compressBlock_btultra,
          ZSTD_compressBlock_btultra2 },
        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
          ZSTD_compressBlock_fast_extDict,
          ZSTD_compressBlock_doubleFast_extDict,
          ZSTD_compressBlock_greedy_extDict,
          ZSTD_compressBlock_lazy_extDict,
          ZSTD_compressBlock_lazy2_extDict,
          ZSTD_compressBlock_btlazy2_extDict,
          ZSTD_compressBlock_btopt_extDict,
          ZSTD_compressBlock_btultra_extDict,
          ZSTD_compressBlock_btultra_extDict },
        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
          ZSTD_compressBlock_fast_dictMatchState,
          ZSTD_compressBlock_doubleFast_dictMatchState,
          ZSTD_compressBlock_greedy_dictMatchState,
          ZSTD_compressBlock_lazy_dictMatchState,
          ZSTD_compressBlock_lazy2_dictMatchState,
          ZSTD_compressBlock_btlazy2_dictMatchState,
          ZSTD_compressBlock_btopt_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState }
    };
    ZSTD_blockCompressor selectedCompressor;
    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);

    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
    assert(selectedCompressor != NULL);
    return selectedCompressor;
}
2610 
2611 static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
2612                                    const BYTE* anchor, size_t lastLLSize)
2613 {
2614     memcpy(seqStorePtr->lit, anchor, lastLLSize);
2615     seqStorePtr->lit += lastLLSize;
2616 }
2617 
2618 void ZSTD_resetSeqStore(seqStore_t* ssPtr)
2619 {
2620     ssPtr->lit = ssPtr->litStart;
2621     ssPtr->sequences = ssPtr->sequencesStart;
2622     ssPtr->longLengthID = 0;
2623 }
2624 
/* ZSTD_compressBlock_internal() :
 * Compresses one block : runs the selected block compressor (or the long
 * distance matcher path) to fill the seqStore, then entropy-compresses
 * literals + sequences into `dst`.
 * On a successful non-zero result, swaps prev/next block states so the new
 * repcodes and entropy tables become the reference for the next block.
 * @return : compressed size, 0 if the block is not compressible (caller
 *           should then emit a raw block), or an error code */
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    size_t cSize;
    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
                (unsigned)dstCapacity, (unsigned)ms->window.dictLimit, (unsigned)ms->nextToUpdate);
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);

    /* Assert that we have correctly flushed the ctx params into the ms's copy */
    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);

    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        /* still consume any externally-provided LDM sequences covering this input */
        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
        cSize = 0;
        goto out;  /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;   /* required for optimal parser to read stats from dictionary */

    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent,
     * and when that stops being the case, the dict must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 current = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        /* cap how far back table updates resume, bounding worst-case work */
        if (current > ms->nextToUpdate + 384)
            ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        /* seed this block's repcodes from the previous block's confirmed values */
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            /* sequences were provided externally (e.g. by a multithreaded LDM pass) */
            assert(!zc->appliedParams.ldmParams.enableLdm);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm) {
            /* long distance matching : generate LDM sequences, then compress around them */
            rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                               &zc->appliedParams.ldmParams,
                                               src, srcSize));
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
        }
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }

    /* encode sequences and literals */
    cSize = ZSTD_compressSequences(&zc->seqStore,
            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
            &zc->appliedParams,
            dst, dstCapacity,
            srcSize,
            zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
            zc->bmi2);

out:
    if (!ZSTD_isError(cSize) && cSize != 0) {
        /* confirm repcodes and entropy tables when emitting a compressed block */
        ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
        zc->blockState.prevCBlock = zc->blockState.nextCBlock;
        zc->blockState.nextCBlock = tmp;
    }
    /* We check that dictionaries have offset codes available for the first
     * block. After the first block, the offcode table might not have large
     * enough codes to represent the offsets in the data.
     */
    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}
2725 
2726 
/*! ZSTD_compress_frameChunk() :
*   Compress a chunk of data into one or multiple blocks.
*   All blocks will be terminated, all input will be consumed.
*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
*   The frame is assumed to be already started (its header has already been produced).
*   @return : compressed size, or an error code
*/
static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                                     U32 lastFrameChunk)
{
    size_t blockSize = cctx->blockSize;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    /* maximum allowed match distance, derived from the window size */
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
    assert(cctx->appliedParams.cParams.windowLog <= 31);

    DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
    /* the frame checksum covers the raw (uncompressed) input */
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        XXH64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
        /* the "last block" flag is set only on the final block of the final chunk */
        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);

        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
            return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
        if (remaining < blockSize) blockSize = remaining;   /* final, possibly shorter block */

        /* when window indices come close to overflowing U32, rescale them downward */
        if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
            /* shift all table indices down by `correction` to match the rescaled window */
            ZSTD_reduceIndex(cctx, correction);
            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
            else ms->nextToUpdate -= correction;
            /* the correction invalidates dictionary-relative indices : detach the dictionary */
            ms->loadedDictEnd = 0;
            ms->dictMatchState = NULL;
        }
        ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize = ZSTD_compressBlock_internal(cctx,
                                op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                ip, blockSize);
            if (ZSTD_isError(cSize)) return cSize;

            if (cSize == 0) {  /* block is not compressible : store it raw */
                cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                if (ZSTD_isError(cSize)) return cSize;
            } else {
                /* block header layout : 1 bit lastBlock + 2 bits block type + 21 bits size */
                U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                MEM_writeLE24(op, cBlockHeader24);
                cSize += ZSTD_blockHeaderSize;
            }

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                        (unsigned)cSize);
    }   }

    /* remember that the last block has been emitted, so the epilogue can skip it */
    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    return op-ostart;
}
2801 
2802 
2803 static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
2804                                     ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
2805 {   BYTE* const op = (BYTE*)dst;
2806     U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
2807     U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
2808     U32   const checksumFlag = params.fParams.checksumFlag>0;
2809     U32   const windowSize = (U32)1 << params.cParams.windowLog;
2810     U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
2811     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
2812     U32   const fcsCode = params.fParams.contentSizeFlag ?
2813                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
2814     BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
2815     size_t pos=0;
2816 
2817     assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
2818     if (dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX) return ERROR(dstSize_tooSmall);
2819     DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
2820                 !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
2821 
2822     if (params.format == ZSTD_f_zstd1) {
2823         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
2824         pos = 4;
2825     }
2826     op[pos++] = frameHeaderDecriptionByte;
2827     if (!singleSegment) op[pos++] = windowLogByte;
2828     switch(dictIDSizeCode)
2829     {
2830         default:  assert(0); /* impossible */
2831         case 0 : break;
2832         case 1 : op[pos] = (BYTE)(dictID); pos++; break;
2833         case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
2834         case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
2835     }
2836     switch(fcsCode)
2837     {
2838         default:  assert(0); /* impossible */
2839         case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
2840         case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
2841         case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
2842         case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
2843     }
2844     return pos;
2845 }
2846 
2847 /* ZSTD_writeLastEmptyBlock() :
2848  * output an empty Block with end-of-frame mark to complete a frame
2849  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
2850  *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
2851  */
2852 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
2853 {
2854     if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
2855     {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
2856         MEM_writeLE24(dst, cBlockHeader24);
2857         return ZSTD_blockHeaderSize;
2858     }
2859 }
2860 
2861 size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
2862 {
2863     if (cctx->stage != ZSTDcs_init)
2864         return ERROR(stage_wrong);
2865     if (cctx->appliedParams.ldmParams.enableLdm)
2866         return ERROR(parameter_unsupported);
2867     cctx->externSeqStore.seq = seq;
2868     cctx->externSeqStore.size = nbSeq;
2869     cctx->externSeqStore.capacity = nbSeq;
2870     cctx->externSeqStore.pos = 0;
2871     return 0;
2872 }
2873 
2874 
/*! ZSTD_compressContinue_internal() :
 *  Core streaming entry point : writes the frame header on first call
 *  (frame mode), updates the match-state window, then compresses `src`
 *  as one frame chunk (frame mode) or as a single block.
 * @return : nb of bytes written into `dst`, or an error code */
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                               U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (unsigned)srcSize);
    if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */

    /* first call in frame mode : emit the frame header */
    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        if (ZSTD_isError(fhSize)) return fhSize;
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    /* NOTE(review): ZSTD_window_update() appears to return 0 when `src` is not
     * contiguous with the previous segment — confirm in zstd_compress_internal.h */
    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm) {
        /* LDM keeps its own window, updated in lockstep */
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
    }

    if (!frame) {
        /* overflow check and correction for block mode
         * (frame mode performs this inside ZSTD_compress_frameChunk) */
        if (ZSTD_window_needOverflowCorrection(ms->window, (const char*)src + srcSize)) {
            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, 1 << cctx->appliedParams.cParams.windowLog, src);
            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
            ZSTD_reduceIndex(cctx, correction);
            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
            else ms->nextToUpdate -= correction;
            /* the correction invalidates dictionary-relative indices : detach the dictionary */
            ms->loadedDictEnd = 0;
            ms->dictMatchState = NULL;
        }
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
    {   size_t const cSize = frame ?
                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
        if (ZSTD_isError(cSize)) return cSize;
        cctx->consumedSrcSize += srcSize;
        cctx->producedCSize += (cSize + fhSize);
        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
            /* error out as soon as more input than pledged has been consumed */
            if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
                DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                    (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
                return ERROR(srcSize_wrong);
            }
        }
        return cSize + fhSize;
    }
}
2940 
2941 size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
2942                               void* dst, size_t dstCapacity,
2943                         const void* src, size_t srcSize)
2944 {
2945     DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
2946     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
2947 }
2948 
2949 
2950 size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
2951 {
2952     ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
2953     assert(!ZSTD_checkCParams(cParams));
2954     return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
2955 }
2956 
2957 size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
2958 {
2959     size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
2960     if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
2961 
2962     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
2963 }
2964 
/*! ZSTD_loadDictionaryContent() :
 *  Feeds dictionary content into the match-state tables, so that later
 *  compression can find matches inside the dictionary.
 *  @return : 0, or an error code
 */
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
                                         ZSTD_CCtx_params const* params,
                                         const void* src, size_t srcSize,
                                         ZSTD_dictTableLoadMethod_e dtlm)
{
    const BYTE* const ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;

    ZSTD_window_update(&ms->window, src, srcSize);
    /* forceWindow == 0 : remember where the dictionary ends, to allow
     * dictionary-relative match offsets */
    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);

    /* Assert that the ms params match the params we're being given */
    ZSTD_assertEqualCParams(params->cParams, ms->cParams);

    /* content too small to populate any table entry */
    if (srcSize <= HASH_READ_SIZE) return 0;

    /* each strategy family fills its own search structure */
    switch(params->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, dtlm);
        break;
    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, dtlm);
        break;

    case ZSTD_greedy:   /* fallthrough : chain-based strategies share one loader */
    case ZSTD_lazy:
    case ZSTD_lazy2:
        if (srcSize >= HASH_READ_SIZE)
            ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
        break;

    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        if (srcSize >= HASH_READ_SIZE)
            ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
        break;

    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}
3015 
3016 
3017 /* Dictionaries that assign zero probability to symbols that show up causes problems
3018    when FSE encoding.  Refuse dictionaries that assign zero probability to symbols
3019    that we may encounter during compression.
3020    NOTE: This behavior is not standard and could be improved in the future. */
3021 static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
3022     U32 s;
3023     if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
3024     for (s = 0; s <= maxSymbolValue; ++s) {
3025         if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
3026     }
3027     return 0;
3028 }
3029 
3030 
/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 *  Parses a full zstd-format dictionary : Huffman literal table, the three
 *  FSE tables (offsets, match lengths, literal lengths), the starting
 *  repcodes, then loads the remaining raw content into the match state.
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed > 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    size_t dictID;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize > 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictPtr += 4;   /* skip magic number */
    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
    dictPtr += 4;

    /* Huffman table for literals : must cover all 256 byte values */
    {   unsigned maxSymbolValue = 255;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
        if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
        if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
        dictPtr += hufHeaderSize;
    }

    /* FSE table for offset codes */
    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        /* fill all offset symbols to avoid garbage at end of table */
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable,
                                    offcodeNCount, MaxOff, offcodeLog,
                                    workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += offcodeHeaderSize;
    }

    /* FSE table for match lengths */
    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
        /* Every match length code must have non-zero probability */
        CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable,
                                    matchlengthNCount, matchlengthMaxValue, matchlengthLog,
                                    workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += matchlengthHeaderSize;
    }

    /* FSE table for literal lengths */
    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
        /* Every literal length code must have non-zero probability */
        CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable,
                                    litlengthNCount, litlengthMaxValue, litlengthLog,
                                    workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += litlengthHeaderSize;
    }

    /* starting repcodes : 3 x 4 bytes, little-endian */
    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable */
        CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
        /* All repCodes must be <= dictContentSize and != 0*/
        {   U32 u;
            for (u=0; u<3; u++) {
                if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
                if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
        }   }

        /* all tables validated : mark them directly reusable for the next block */
        bs->entropy.huf.repeatMode = HUF_repeat_valid;
        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
        CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
        return dictID;
    }
}
3138 
3139 /** ZSTD_compress_insertDictionary() :
3140 *   @return : dictID, or an error code */
3141 static size_t
3142 ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
3143                                ZSTD_matchState_t* ms,
3144                          const ZSTD_CCtx_params* params,
3145                          const void* dict, size_t dictSize,
3146                                ZSTD_dictContentType_e dictContentType,
3147                                ZSTD_dictTableLoadMethod_e dtlm,
3148                                void* workspace)
3149 {
3150     DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
3151     if ((dict==NULL) || (dictSize<=8)) return 0;
3152 
3153     ZSTD_reset_compressedBlockState(bs);
3154 
3155     /* dict restricted modes */
3156     if (dictContentType == ZSTD_dct_rawContent)
3157         return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
3158 
3159     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
3160         if (dictContentType == ZSTD_dct_auto) {
3161             DEBUGLOG(4, "raw content dictionary detected");
3162             return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
3163         }
3164         if (dictContentType == ZSTD_dct_fullDict)
3165             return ERROR(dictionary_wrong);
3166         assert(0);   /* impossible */
3167     }
3168 
3169     /* dict as full zstd dictionary */
3170     return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
3171 }
3172 
3173 /*! ZSTD_compressBegin_internal() :
3174  * @return : 0, or an error code */
3175 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
3176                                     const void* dict, size_t dictSize,
3177                                     ZSTD_dictContentType_e dictContentType,
3178                                     ZSTD_dictTableLoadMethod_e dtlm,
3179                                     const ZSTD_CDict* cdict,
3180                                     ZSTD_CCtx_params params, U64 pledgedSrcSize,
3181                                     ZSTD_buffered_policy_e zbuff)
3182 {
3183     DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
3184     /* params are supposed to be fully validated at this point */
3185     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
3186     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
3187 
3188     if (cdict && cdict->dictContentSize>0) {
3189         return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
3190     }
3191 
3192     CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
3193                                      ZSTDcrp_continue, zbuff) );
3194     {
3195         size_t const dictID = ZSTD_compress_insertDictionary(
3196                 cctx->blockState.prevCBlock, &cctx->blockState.matchState,
3197                 &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
3198         if (ZSTD_isError(dictID)) return dictID;
3199         assert(dictID <= (size_t)(U32)-1);
3200         cctx->dictID = (U32)dictID;
3201     }
3202     return 0;
3203 }
3204 
3205 size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
3206                                     const void* dict, size_t dictSize,
3207                                     ZSTD_dictContentType_e dictContentType,
3208                                     ZSTD_dictTableLoadMethod_e dtlm,
3209                                     const ZSTD_CDict* cdict,
3210                                     ZSTD_CCtx_params params,
3211                                     unsigned long long pledgedSrcSize)
3212 {
3213     DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
3214     /* compression parameters verification and optimization */
3215     CHECK_F( ZSTD_checkCParams(params.cParams) );
3216     return ZSTD_compressBegin_internal(cctx,
3217                                        dict, dictSize, dictContentType, dtlm,
3218                                        cdict,
3219                                        params, pledgedSrcSize,
3220                                        ZSTDb_not_buffered);
3221 }
3222 
3223 /*! ZSTD_compressBegin_advanced() :
3224 *   @return : 0, or an error code */
3225 size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
3226                              const void* dict, size_t dictSize,
3227                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
3228 {
3229     ZSTD_CCtx_params const cctxParams =
3230             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
3231     return ZSTD_compressBegin_advanced_internal(cctx,
3232                                             dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
3233                                             NULL /*cdict*/,
3234                                             cctxParams, pledgedSrcSize);
3235 }
3236 
3237 size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
3238 {
3239     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
3240     ZSTD_CCtx_params const cctxParams =
3241             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
3242     DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
3243     return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
3244                                        cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
3245 }
3246 
3247 size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
3248 {
3249     return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
3250 }
3251 
3252 
/*! ZSTD_writeEpilogue() :
*   Ends a frame : writes the frame header if nothing was emitted yet
*   (empty-frame case), a final empty block if one was not already written,
*   and the optional content checksum.
*   @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;

    DEBUGLOG(4, "ZSTD_writeEpilogue");
    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */

    /* special case : empty frame */
    if (cctx->stage == ZSTDcs_init) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
        if (ZSTD_isError(fhSize)) return fhSize;
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    /* ZSTDcs_ending means a block flagged "last" was already emitted */
    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        /* LE32 writes 4 bytes (the 4th is zero, and gets overwritten or ignored);
         * only ZSTD_blockHeaderSize (3) are accounted, hence the <4 capacity check */
        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag) {
        /* checksum is the low 32 bits of the running XXH64 digest */
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }

    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    return op-ostart;
}
3294 
/*! ZSTD_compressEnd() :
 *  Compresses the last chunk of a frame, then writes the epilogue
 *  (last block + optional checksum), and verifies that the total input
 *  matches the pledged source size exactly.
 * @return : total nb of bytes written into `dst`, or an error code */
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    if (ZSTD_isError(cSize)) return cSize;
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    if (ZSTD_isError(endResult)) return endResult;
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        /* at frame end, the consumed size must match the pledge exactly
         * (mid-frame, only an excess is detectable) */
        if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
            DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
                (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
            return ERROR(srcSize_wrong);
    }   }
    return cSize + endResult;
}
3317 
3318 
3319 static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
3320                                       void* dst, size_t dstCapacity,
3321                                 const void* src, size_t srcSize,
3322                                 const void* dict,size_t dictSize,
3323                                       ZSTD_parameters params)
3324 {
3325     ZSTD_CCtx_params const cctxParams =
3326             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
3327     DEBUGLOG(4, "ZSTD_compress_internal");
3328     return ZSTD_compress_advanced_internal(cctx,
3329                                            dst, dstCapacity,
3330                                            src, srcSize,
3331                                            dict, dictSize,
3332                                            cctxParams);
3333 }
3334 
3335 size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
3336                                void* dst, size_t dstCapacity,
3337                          const void* src, size_t srcSize,
3338                          const void* dict,size_t dictSize,
3339                                ZSTD_parameters params)
3340 {
3341     DEBUGLOG(4, "ZSTD_compress_advanced");
3342     CHECK_F(ZSTD_checkCParams(params.cParams));
3343     return ZSTD_compress_internal(cctx,
3344                                   dst, dstCapacity,
3345                                   src, srcSize,
3346                                   dict, dictSize,
3347                                   params);
3348 }
3349 
3350 /* Internal */
3351 size_t ZSTD_compress_advanced_internal(
3352         ZSTD_CCtx* cctx,
3353         void* dst, size_t dstCapacity,
3354         const void* src, size_t srcSize,
3355         const void* dict,size_t dictSize,
3356         ZSTD_CCtx_params params)
3357 {
3358     DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
3359     CHECK_F( ZSTD_compressBegin_internal(cctx,
3360                          dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
3361                          params, srcSize, ZSTDb_not_buffered) );
3362     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
3363 }
3364 
3365 size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
3366                                void* dst, size_t dstCapacity,
3367                          const void* src, size_t srcSize,
3368                          const void* dict, size_t dictSize,
3369                                int compressionLevel)
3370 {
3371     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
3372     ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
3373     assert(params.fParams.contentSizeFlag == 1);
3374     return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
3375 }
3376 
3377 size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
3378                          void* dst, size_t dstCapacity,
3379                    const void* src, size_t srcSize,
3380                          int compressionLevel)
3381 {
3382     DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
3383     assert(cctx != NULL);
3384     return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
3385 }
3386 
3387 size_t ZSTD_compress(void* dst, size_t dstCapacity,
3388                const void* src, size_t srcSize,
3389                      int compressionLevel)
3390 {
3391     size_t result;
3392     ZSTD_CCtx ctxBody;
3393     ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
3394     result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
3395     ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */
3396     return result;
3397 }
3398 
3399 
3400 /* =====  Dictionary API  ===== */
3401 
3402 /*! ZSTD_estimateCDictSize_advanced() :
3403  *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
3404 size_t ZSTD_estimateCDictSize_advanced(
3405         size_t dictSize, ZSTD_compressionParameters cParams,
3406         ZSTD_dictLoadMethod_e dictLoadMethod)
3407 {
3408     DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
3409     return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
3410            + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
3411 }
3412 
3413 size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
3414 {
3415     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3416     return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
3417 }
3418 
3419 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
3420 {
3421     if (cdict==NULL) return 0;   /* support sizeof on NULL */
3422     DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
3423     return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
3424 }
3425 
/*! ZSTD_initCDict_internal() :
 *  Fill an already-allocated cdict : capture (or reference) the dictionary content,
 *  reset the match state inside cdict->workspace, then digest the dictionary.
 *  Note : caller must have already set cdict->workspace, cdict->workspaceSize
 *         and cdict->customMem before calling.
 * @return : 0 on success, or an error code (allocation failure, invalid dictionary). */
static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_compressionParameters cParams)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
    assert(!ZSTD_checkCParams(cParams));
    cdict->matchState.cParams = cParams;
    /* byRef (or empty dictionary) : just reference the caller's buffer ;
     * otherwise take a private copy, owned via cdict->dictBuffer */
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictBuffer = NULL;   /* NULL dictBuffer == nothing owned, nothing to free */
        cdict->dictContent = dictBuffer;
    } else {
        void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
        cdict->dictBuffer = internalBuffer;
        cdict->dictContent = internalBuffer;
        if (!internalBuffer) return ERROR(memory_allocation);
        memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    {   void* const end = ZSTD_reset_matchState(
                &cdict->matchState,
                (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,   /* match state is laid out right after the HUF workspace */
                &cParams, ZSTDcrp_continue, /* forCCtx */ 0);
        assert(end == (char*)cdict->workspace + cdict->workspaceSize);   /* match state must exactly fill the workspace */
        (void)end;
    }
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is <= 8 bytes.
     */
    {   ZSTD_CCtx_params params;
        memset(&params, 0, sizeof(params));
        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        params.cParams = cParams;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, &params,
                    cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->workspace);
            if (ZSTD_isError(dictID)) return dictID;
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}
3477 
/*! ZSTD_createCDict_advanced() :
 *  Allocate a cdict and its workspace using the provided custom allocator,
 *  then delegate initialization to ZSTD_initCDict_internal().
 * @return : the digested dictionary, or NULL on failure
 *           (invalid customMem, allocation failure, or dictionary loading error). */
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
    DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
    /* customAlloc and customFree must be provided together (or both left NULL) */
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
        size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
        void* const workspace = ZSTD_malloc(workspaceSize, customMem);

        /* ZSTD_free(NULL) is a no-op, so a partial allocation failure unwinds safely */
        if (!cdict || !workspace) {
            ZSTD_free(cdict, customMem);
            ZSTD_free(workspace, customMem);
            return NULL;
        }
        /* set the fields ZSTD_initCDict_internal() expects to be pre-filled */
        cdict->customMem = customMem;
        cdict->workspace = workspace;
        cdict->workspaceSize = workspaceSize;
        if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                        dictBuffer, dictSize,
                                        dictLoadMethod, dictContentType,
                                        cParams) )) {
            ZSTD_freeCDict(cdict);   /* releases workspace and any internal dict copy too */
            return NULL;
        }

        return cdict;
    }
}
3509 
3510 ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
3511 {
3512     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3513     return ZSTD_createCDict_advanced(dict, dictSize,
3514                                      ZSTD_dlm_byCopy, ZSTD_dct_auto,
3515                                      cParams, ZSTD_defaultCMem);
3516 }
3517 
3518 ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
3519 {
3520     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3521     return ZSTD_createCDict_advanced(dict, dictSize,
3522                                      ZSTD_dlm_byRef, ZSTD_dct_auto,
3523                                      cParams, ZSTD_defaultCMem);
3524 }
3525 
3526 size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
3527 {
3528     if (cdict==NULL) return 0;   /* support free on NULL */
3529     {   ZSTD_customMem const cMem = cdict->customMem;
3530         ZSTD_free(cdict->workspace, cMem);
3531         ZSTD_free(cdict->dictBuffer, cMem);
3532         ZSTD_free(cdict, cMem);
3533         return 0;
3534     }
3535 }
3536 
/*! ZSTD_initStaticCDict_advanced() :
 *  Generate a digested dictionary in provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             The provided pointer must be 8-byte aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateCDictSize()
 *                 to determine how large workspace must be.
 *  cParams : use ZSTD_getCParams() to transform a compression level
 *            into its relevant cParams.
 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
    /* workspace layout : [ZSTD_CDict][optional dict copy][HUF workspace + match state] */
    size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
                            + HUF_WORKSPACE_SIZE + matchStateSize;
    ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
    void* ptr;
    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;

    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        /* copy dictionary content right after the ZSTD_CDict header, then reference the copy */
        memcpy(cdict+1, dict, dictSize);
        dict = cdict+1;
        ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
    } else {
        ptr = cdict+1;
    }
    cdict->workspace = ptr;
    cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;

    /* content already resides inside workspace (or is caller-owned) : always load byRef */
    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              ZSTD_dlm_byRef, dictContentType,
                                              cParams) ))
        return NULL;

    return cdict;
}
3585 
3586 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
3587 {
3588     assert(cdict != NULL);
3589     return cdict->matchState.cParams;
3590 }
3591 
/* ZSTD_compressBegin_usingCDict_advanced() :
 * Start a new frame using compression parameters from `cdict`
 * and the caller-provided frame parameters.
 * cdict must be != NULL
 * @return : 0 on success, or an error code */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
    if (cdict==NULL) return ERROR(dictionary_wrong);
    {   ZSTD_CCtx_params params = cctx->requestedParams;
        params.cParams = ZSTD_getCParamsFromCDict(cdict);
        /* Increase window log to fit the entire dictionary and source if the
         * source size is known. Limit the increase to 19, which is the
         * window log for compression level 1 with the largest source size.
         */
        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
            /* smallest power-of-2 exponent covering limitedSrcSize (ceil(log2)), min 1 */
            U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
        }
        params.fParams = fParams;
        return ZSTD_compressBegin_internal(cctx,
                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                           cdict,
                                           params, pledgedSrcSize,
                                           ZSTDb_not_buffered);
    }
}
3619 
3620 /* ZSTD_compressBegin_usingCDict() :
3621  * pledgedSrcSize=0 means "unknown"
3622  * if pledgedSrcSize>0, it will enable contentSizeFlag */
3623 size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
3624 {
3625     ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
3626     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
3627     return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
3628 }
3629 
3630 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
3631                                 void* dst, size_t dstCapacity,
3632                                 const void* src, size_t srcSize,
3633                                 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
3634 {
3635     CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
3636     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
3637 }
3638 
3639 /*! ZSTD_compress_usingCDict() :
3640  *  Compression using a digested Dictionary.
3641  *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
3642  *  Note that compression parameters are decided at CDict creation time
3643  *  while frame parameters are hardcoded */
3644 size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
3645                                 void* dst, size_t dstCapacity,
3646                                 const void* src, size_t srcSize,
3647                                 const ZSTD_CDict* cdict)
3648 {
3649     ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
3650     return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
3651 }
3652 
3653 
3654 
3655 /* ******************************************************************
3656 *  Streaming
3657 ********************************************************************/
3658 
3659 ZSTD_CStream* ZSTD_createCStream(void)
3660 {
3661     DEBUGLOG(3, "ZSTD_createCStream");
3662     return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
3663 }
3664 
3665 ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
3666 {
3667     return ZSTD_initStaticCCtx(workspace, workspaceSize);
3668 }
3669 
3670 ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
3671 {   /* CStream and CCtx are now same object */
3672     return ZSTD_createCCtx_advanced(customMem);
3673 }
3674 
3675 size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
3676 {
3677     return ZSTD_freeCCtx(zcs);   /* same object */
3678 }
3679 
3680 
3681 
3682 /*======   Initialization   ======*/
3683 
3684 size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
3685 
3686 size_t ZSTD_CStreamOutSize(void)
3687 {
3688     return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
3689 }
3690 
/*! ZSTD_resetCStream_internal() :
 *  Start a new frame in buffered streaming mode : finalize compression parameters,
 *  run the begin-stage, then reset the streaming buffer positions.
 *  Note : `dict` and `cdict` are mutually exclusive.
 * @return : 0 on success, or an error code. */
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
                    const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
                    const ZSTD_CDict* const cdict,
                    ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_resetCStream_internal");
    /* Finalize the compression parameters */
    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    CHECK_F( ZSTD_compressBegin_internal(cctx,
                                         dict, dictSize, dictContentType, ZSTD_dtlm_fast,
                                         cdict,
                                         params, pledgedSrcSize,
                                         ZSTDb_buffered) );

    /* reset streaming positions : nothing buffered, nothing flushed yet */
    cctx->inToCompress = 0;
    cctx->inBuffPos = 0;
    cctx->inBuffTarget = cctx->blockSize
                      + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */
    cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
    cctx->streamStage = zcss_load;
    cctx->frameEnded = 0;
    return 0;   /* ready to go */
}
3718 
3719 /* ZSTD_resetCStream():
3720  * pledgedSrcSize == 0 means "unknown" */
3721 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
3722 {
3723     ZSTD_CCtx_params params = zcs->requestedParams;
3724     DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
3725     if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
3726     params.fParams.contentSizeFlag = 1;
3727     return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
3728 }
3729 
/*! ZSTD_initCStream_internal() :
 *  Note : for lib/compress only. Used by zstdmt_compress.c.
 *  Assumption 1 : params are valid
 *  Assumption 2 : either dict, or cdict, is defined, not both
 *  A raw dict (>= 8 bytes) is digested into an internally-owned cdict (zcs->cdictLocal);
 *  otherwise the provided cdict (possibly NULL) is referenced. */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_internal");
    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    if (dict && dictSize >= 8) {
        DEBUGLOG(4, "loading dictionary of size %u", (unsigned)dictSize);
        if (zcs->staticSize) {   /* static CCtx : never uses malloc */
            /* incompatible with internal cdict creation */
            return ERROR(memory_allocation);
        }
        ZSTD_freeCDict(zcs->cdictLocal);   /* drop any previously-owned digested dictionary */
        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
                                            ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                            params.cParams, zcs->customMem);
        zcs->cdict = zcs->cdictLocal;
        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
    } else {
        if (cdict) {
            params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict; it includes windowLog */
        }
        ZSTD_freeCDict(zcs->cdictLocal);
        zcs->cdictLocal = NULL;
        zcs->cdict = cdict;   /* may be NULL : no dictionary */
    }

    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
}
3766 
3767 /* ZSTD_initCStream_usingCDict_advanced() :
3768  * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
3769 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
3770                                             const ZSTD_CDict* cdict,
3771                                             ZSTD_frameParameters fParams,
3772                                             unsigned long long pledgedSrcSize)
3773 {
3774     DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
3775     if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
3776     {   ZSTD_CCtx_params params = zcs->requestedParams;
3777         params.cParams = ZSTD_getCParamsFromCDict(cdict);
3778         params.fParams = fParams;
3779         return ZSTD_initCStream_internal(zcs,
3780                                 NULL, 0, cdict,
3781                                 params, pledgedSrcSize);
3782     }
3783 }
3784 
3785 /* note : cdict must outlive compression session */
3786 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
3787 {
3788     ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
3789     DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
3790     return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  /* note : will check that cdict != NULL */
3791 }
3792 
3793 
3794 /* ZSTD_initCStream_advanced() :
3795  * pledgedSrcSize must be exact.
3796  * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
3797  * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
3798 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
3799                                  const void* dict, size_t dictSize,
3800                                  ZSTD_parameters params, unsigned long long pledgedSrcSize)
3801 {
3802     DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
3803                 (unsigned)pledgedSrcSize, params.fParams.contentSizeFlag);
3804     CHECK_F( ZSTD_checkCParams(params.cParams) );
3805     if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
3806     zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
3807     return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize);
3808 }
3809 
3810 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
3811 {
3812     ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
3813     return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, zcs->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN);
3814 }
3815 
3816 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
3817 {
3818     U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;  /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
3819     ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
3820     return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, zcs->requestedParams, pledgedSrcSize);
3821 }
3822 
3823 size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
3824 {
3825     DEBUGLOG(4, "ZSTD_initCStream");
3826     return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
3827 }
3828 
3829 /*======   Compression   ======*/
3830 
3831 static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
3832 {
3833     size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
3834     if (hintInSize==0) hintInSize = cctx->blockSize;
3835     return hintInSize;
3836 }
3837 
/*! ZSTD_limitCopy() :
 *  Copy as many bytes as fit into dst ; tolerates NULL pointers when nothing is copied.
 * @return : number of bytes actually copied. */
static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
                       const void* src, size_t srcSize)
{
    size_t const toCopy = (srcSize < dstCapacity) ? srcSize : dstCapacity;
    if (toCopy > 0) memcpy(dst, src, toCopy);
    return toCopy;
}
3845 
3846 /** ZSTD_compressStream_generic():
3847  *  internal function for all *compressStream*() variants
3848  *  non-static, because can be called from zstdmt_compress.c
3849  * @return : hint size for next input */
3850 size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
3851                                    ZSTD_outBuffer* output,
3852                                    ZSTD_inBuffer* input,
3853                                    ZSTD_EndDirective const flushMode)
3854 {
3855     const char* const istart = (const char*)input->src;
3856     const char* const iend = istart + input->size;
3857     const char* ip = istart + input->pos;
3858     char* const ostart = (char*)output->dst;
3859     char* const oend = ostart + output->size;
3860     char* op = ostart + output->pos;
3861     U32 someMoreWork = 1;
3862 
3863     /* check expectations */
3864     DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
3865     assert(zcs->inBuff != NULL);
3866     assert(zcs->inBuffSize > 0);
3867     assert(zcs->outBuff !=  NULL);
3868     assert(zcs->outBuffSize > 0);
3869     assert(output->pos <= output->size);
3870     assert(input->pos <= input->size);
3871 
3872     while (someMoreWork) {
3873         switch(zcs->streamStage)
3874         {
3875         case zcss_init:
3876             /* call ZSTD_initCStream() first ! */
3877             return ERROR(init_missing);
3878 
3879         case zcss_load:
3880             if ( (flushMode == ZSTD_e_end)
3881               && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
3882               && (zcs->inBuffPos == 0) ) {
3883                 /* shortcut to compression pass directly into output buffer */
3884                 size_t const cSize = ZSTD_compressEnd(zcs,
3885                                                 op, oend-op, ip, iend-ip);
3886                 DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
3887                 if (ZSTD_isError(cSize)) return cSize;
3888                 ip = iend;
3889                 op += cSize;
3890                 zcs->frameEnded = 1;
3891                 ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
3892                 someMoreWork = 0; break;
3893             }
3894             /* complete loading into inBuffer */
3895             {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
3896                 size_t const loaded = ZSTD_limitCopy(
3897                                         zcs->inBuff + zcs->inBuffPos, toLoad,
3898                                         ip, iend-ip);
3899                 zcs->inBuffPos += loaded;
3900                 ip += loaded;
3901                 if ( (flushMode == ZSTD_e_continue)
3902                   && (zcs->inBuffPos < zcs->inBuffTarget) ) {
3903                     /* not enough input to fill full block : stop here */
3904                     someMoreWork = 0; break;
3905                 }
3906                 if ( (flushMode == ZSTD_e_flush)
3907                   && (zcs->inBuffPos == zcs->inToCompress) ) {
3908                     /* empty */
3909                     someMoreWork = 0; break;
3910                 }
3911             }
3912             /* compress current block (note : this stage cannot be stopped in the middle) */
3913             DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
3914             {   void* cDst;
3915                 size_t cSize;
3916                 size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
3917                 size_t oSize = oend-op;
3918                 unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
3919                 if (oSize >= ZSTD_compressBound(iSize))
3920                     cDst = op;   /* compress into output buffer, to skip flush stage */
3921                 else
3922                     cDst = zcs->outBuff, oSize = zcs->outBuffSize;
3923                 cSize = lastBlock ?
3924                         ZSTD_compressEnd(zcs, cDst, oSize,
3925                                     zcs->inBuff + zcs->inToCompress, iSize) :
3926                         ZSTD_compressContinue(zcs, cDst, oSize,
3927                                     zcs->inBuff + zcs->inToCompress, iSize);
3928                 if (ZSTD_isError(cSize)) return cSize;
3929                 zcs->frameEnded = lastBlock;
3930                 /* prepare next block */
3931                 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
3932                 if (zcs->inBuffTarget > zcs->inBuffSize)
3933                     zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
3934                 DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
3935                          (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
3936                 if (!lastBlock)
3937                     assert(zcs->inBuffTarget <= zcs->inBuffSize);
3938                 zcs->inToCompress = zcs->inBuffPos;
3939                 if (cDst == op) {  /* no need to flush */
3940                     op += cSize;
3941                     if (zcs->frameEnded) {
3942                         DEBUGLOG(5, "Frame completed directly in outBuffer");
3943                         someMoreWork = 0;
3944                         ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
3945                     }
3946                     break;
3947                 }
3948                 zcs->outBuffContentSize = cSize;
3949                 zcs->outBuffFlushedSize = 0;
3950                 zcs->streamStage = zcss_flush; /* pass-through to flush stage */
3951             }
3952 	    /* fall-through */
3953         case zcss_flush:
3954             DEBUGLOG(5, "flush stage");
3955             {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
3956                 size_t const flushed = ZSTD_limitCopy(op, oend-op,
3957                             zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
3958                 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
3959                             (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
3960                 op += flushed;
3961                 zcs->outBuffFlushedSize += flushed;
3962                 if (toFlush!=flushed) {
3963                     /* flush not fully completed, presumably because dst is too small */
3964                     assert(op==oend);
3965                     someMoreWork = 0;
3966                     break;
3967                 }
3968                 zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
3969                 if (zcs->frameEnded) {
3970                     DEBUGLOG(5, "Frame completed on flush");
3971                     someMoreWork = 0;
3972                     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
3973                     break;
3974                 }
3975                 zcs->streamStage = zcss_load;
3976                 break;
3977             }
3978 
3979         default: /* impossible */
3980             assert(0);
3981         }
3982     }
3983 
3984     input->pos = ip - istart;
3985     output->pos = op - ostart;
3986     if (zcs->frameEnded) return 0;
3987     return ZSTD_nextInputSizeHint(zcs);
3988 }
3989 
/*! ZSTD_nextInputSizeHint_MTorST() :
 *  Dispatch the next-input-size hint to the multi-threaded context when workers
 *  are active, otherwise fall back to the single-threaded buffered hint. */
static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers >= 1) {
        assert(cctx->mtctx != NULL);
        return ZSTDMT_nextInputSizeHint(cctx->mtctx);
    }
#endif
    return ZSTD_nextInputSizeHint(cctx);

}
4001 
4002 size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
4003 {
4004     CHECK_F( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
4005     return ZSTD_nextInputSizeHint_MTorST(zcs);
4006 }
4007 
4008 
/*! ZSTD_compressStream2() :
 *  Main streaming entry point of the advanced API.
 *  On the first call of a frame (zcss_init), applies pending parameters and
 *  (when compiled in and requested) sets up multi-threaded compression,
 *  then feeds / flushes data according to `endOp`.
 * @return : remaining bytes to flush (0 when fully flushed), or an error code. */
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                             ZSTD_outBuffer* output,
                             ZSTD_inBuffer* input,
                             ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
    /* check conditions */
    if (output->pos > output->size) return ERROR(GENERIC);
    if (input->pos  > input->size)  return ERROR(GENERIC);
    assert(cctx!=NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        ZSTD_CCtx_params params = cctx->requestedParams;
        ZSTD_prefixDict const prefixDict = cctx->prefixDict;
        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
        assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
        DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
        if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);


#ifdef ZSTD_MULTITHREAD
        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
            params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
        }
        if (params.nbWorkers > 0) {
            /* mt context creation */
            if (cctx->mtctx == NULL) {
                DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
                            params.nbWorkers);
                cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
                if (cctx->mtctx == NULL) return ERROR(memory_allocation);
            }
            /* mt compression */
            DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
            CHECK_F( ZSTDMT_initCStream_internal(
                        cctx->mtctx,
                        prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
                        cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
            cctx->streamStage = zcss_load;
            cctx->appliedParams.nbWorkers = params.nbWorkers;
        } else
#endif
        {   /* single-threaded path */
            CHECK_F( ZSTD_resetCStream_internal(cctx,
                            prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                            cctx->cdict,
                            params, cctx->pledgedSrcSizePlusOne-1) );
            assert(cctx->streamStage == zcss_load);
            assert(cctx->appliedParams.nbWorkers == 0);
    }   }
    /* end of transparent initialization stage */

    /* compression stage */
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        /* forward any parameter change requested mid-compression to the MT context */
        if (cctx->cParamsChanged) {
            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
            cctx->cParamsChanged = 0;
        }
        {   size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
            if ( ZSTD_isError(flushMin)
              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
                ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
            }
            DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
            return flushMin;
    }   }
#endif
    CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
    DEBUGLOG(5, "completed ZSTD_compressStream2");
    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
4083 
4084 size_t ZSTD_compressStream2_simpleArgs (
4085                             ZSTD_CCtx* cctx,
4086                             void* dst, size_t dstCapacity, size_t* dstPos,
4087                       const void* src, size_t srcSize, size_t* srcPos,
4088                             ZSTD_EndDirective endOp)
4089 {
4090     ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
4091     ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
4092     /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
4093     size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
4094     *dstPos = output.pos;
4095     *srcPos = input.pos;
4096     return cErr;
4097 }
4098 
4099 size_t ZSTD_compress2(ZSTD_CCtx* cctx,
4100                       void* dst, size_t dstCapacity,
4101                       const void* src, size_t srcSize)
4102 {
4103     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
4104     {   size_t oPos = 0;
4105         size_t iPos = 0;
4106         size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
4107                                         dst, dstCapacity, &oPos,
4108                                         src, srcSize, &iPos,
4109                                         ZSTD_e_end);
4110         if (ZSTD_isError(result)) return result;
4111         if (result != 0) {  /* compression not completed, due to lack of output space */
4112             assert(oPos == dstCapacity);
4113             return ERROR(dstSize_tooSmall);
4114         }
4115         assert(iPos == srcSize);   /* all input is expected consumed */
4116         return oPos;
4117     }
4118 }
4119 
4120 /*======   Finalize   ======*/
4121 
4122 /*! ZSTD_flushStream() :
4123  * @return : amount of data remaining to flush */
4124 size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
4125 {
4126     ZSTD_inBuffer input = { NULL, 0, 0 };
4127     return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
4128 }
4129 
4130 
4131 size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
4132 {
4133     ZSTD_inBuffer input = { NULL, 0, 0 };
4134     size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
4135     CHECK_F( remainingToFlush );
4136     if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
4137     /* single thread mode : attempt to calculate remaining to flush more precisely */
4138     {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
4139         size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
4140         size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
4141         DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
4142         return toFlush;
4143     }
4144 }
4145 
4146 
4147 /*-=====  Pre-defined compression levels  =====-*/
4148 
#define ZSTD_MAX_CLEVEL     22

/*! ZSTD_maxCLevel() :
 *  largest compression level supported by this build */
int ZSTD_maxCLevel(void)
{
    return ZSTD_MAX_CLEVEL;
}
4151 int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
4152 
/* Pre-defined compression parameters, indexed as [tableID][level] :
 * tableID selects a source-size class (0 = default/large, 1 = <=256 KB,
 * 2 = <=128 KB, 3 = <=16 KB), matching tableID computed in ZSTD_getCParams().
 * Column legend (NOTE(review): inferred from ZSTD_compressionParameters
 * field order — confirm against zstd.h) :
 *   W = windowLog, C = chainLog, H = hashLog, S = searchLog,
 *   L = search length (minMatch), TL/T = targetLength, strat = strategy. */
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{   /* "default" - guarantees a monotonically increasing memory budget */
    /* W,  C,  H,  S,  L, TL, strat */
    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
    { 21, 16, 17,  1,  5,  1, ZSTD_dfast   },  /* level  3 */
    { 21, 18, 18,  1,  5,  1, ZSTD_dfast   },  /* level  4 */
    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
},
{   /* for srcSize <= 256 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 18, 14, 14,  1,  5,  1, ZSTD_dfast   },  /* level  2 */
    { 18, 16, 16,  1,  4,  1, ZSTD_dfast   },  /* level  3 */
    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 128 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
    { 17, 15, 16,  2,  5,  1, ZSTD_dfast   },  /* level  3 */
    { 17, 17, 17,  2,  4,  1, ZSTD_dfast   },  /* level  4 */
    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 16 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
    { 14, 14, 15,  2,  4,  1, ZSTD_dfast   },  /* level  3 */
    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
},
};
4259 
4260 /*! ZSTD_getCParams() :
4261 *  @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
4262 *   Size values are optional, provide 0 if not known or unused */
4263 ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
4264 {
4265     size_t const addedSize = srcSizeHint ? 0 : 500;
4266     U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
4267     U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
4268     int row = compressionLevel;
4269     DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
4270     if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
4271     if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
4272     if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
4273     {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
4274         if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */
4275         return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);
4276     }
4277 }
4278 
4279 /*! ZSTD_getParams() :
4280 *   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
4281 *   All fields of `ZSTD_frameParameters` are set to default (0) */
4282 ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
4283     ZSTD_parameters params;
4284     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
4285     DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
4286     memset(&params, 0, sizeof(params));
4287     params.cParams = cParams;
4288     params.fParams.contentSizeFlag = 1;
4289     return params;
4290 }
4291