/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't be, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_free{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned
 * 4. Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    int allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;
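
/**
 * A minimal usage sketch (illustrative only, not part of zstd's API
 * contract): a caller brings its own buffer and walks the reserve phases
 * in the required order. The buffer and reserve sizes below are made-up
 * placeholders.
 *
 *     ZSTD_cwksp ws;
 *     void* mem = malloc(1 << 20);
 *     ZSTD_cwksp_init(&ws, mem, 1 << 20);                   // take ownership
 *     {
 *         void* obj = ZSTD_cwksp_reserve_object(&ws, 64);   // 1. objects
 *         BYTE* buf = ZSTD_cwksp_reserve_buffer(&ws, 4096); // 2. buffers
 *         void* alg = ZSTD_cwksp_reserve_aligned(&ws, 256); // 3. aligned
 *         void* tbl = ZSTD_cwksp_reserve_table(&ws, 1024);  // 4. tables
 *         assert(!ZSTD_cwksp_reserve_failed(&ws));
 *     }
 *     ZSTD_cwksp_clear(&ws);  // invalidates everything except objects
 *     free(mem);              // the caller still owns the buffer it provided
 */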

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
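
/* A worked example of the computation above: ZSTD_cwksp_align(10, 4)
 * evaluates (10 + 3) & ~(size_t)3 == 12, while a size that is already a
 * multiple of the alignment passes through: ZSTD_cwksp_align(12, 4) == 12. */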

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables; everything
 * else should be sized through this function, though.
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}
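
/* A sizing sketch built on the function above: when precomputing how large a
 * caller-provided workspace must be, sum ZSTD_cwksp_alloc_size() over each
 * non-table allocation so that any ASAN redzone padding is accounted for. The
 * names here (objectSize, bufferSize, tableSize) are hypothetical
 * placeholders.
 *
 *     size_t const neededSpace =
 *           ZSTD_cwksp_alloc_size(objectSize)
 *         + ZSTD_cwksp_alloc_size(bufferSize)
 *         + tableSize;  // tables aren't redzoned: use the raw size
 */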

MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            /* If unaligned allocations down from a too-large top have left us
             * unaligned, we need to realign our alloc ptr. Technically, this
             * can consume space that is unaccounted for in the neededSpace
             * calculation. However, I believe this can only happen when the
             * workspace is too large, and specifically when it is too large
             * by a larger margin than the space that will be consumed. */
            /* TODO: cleaner, compiler warning friendly way to do this??? */
            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
            if (ws->allocStart < ws->tableValidEnd) {
                ws->tableValidEnd = ws->allocStart;
            }
        }
        ws->phase = phase;
    }
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    void* bottom = ws->tableEnd;
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    alloc = (BYTE *)ws->allocStart - bytes;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}
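
/* Under ASAN, the region just reserved is laid out as below (an illustrative
 * diagram, assuming the surrounding free space was poisoned by
 * ZSTD_cwksp_clear()): only the middle span is unpoisoned and returned to the
 * caller, so reads or writes that stray into either redzone trip ASAN.
 *
 *     new allocStart                                  old allocStart
 *     v                                               v
 *     [ redzone ][ bytes returned to the caller ][ redzone ]
 *       poisoned            unpoisoned             poisoned
 */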

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized and aligned on sizeof(unsigned).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    assert((bytes & (sizeof(U32)-1)) == 0);
    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}

/**
 * Aligned on sizeof(unsigned). These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc = ws->tableEnd;
    void* end = (BYTE *)alloc + bytes;
    void* top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

/**
 * Aligned on sizeof(void*).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}
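
/* A sketch of the intended dirty/clean protocol around table re-use, assuming
 * a caller that cannot always guarantee the index bound described at the top
 * of this file:
 *
 *     ZSTD_cwksp_mark_tables_dirty(ws); // table values may now exceed the bound
 *     ...
 *     ZSTD_cwksp_clean_tables(ws);      // zeroes only the not-yet-clean span
 *     // the tables can now be used without any further memset() work
 */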

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_malloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size);
    return 0;
}
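
/* A minimal create/free pairing, assuming the caller uses zstd's default
 * allocator (ZSTD_defaultCMem, defined in zstd_internal.h):
 *
 *     ZSTD_cwksp ws;
 *     size_t const err = ZSTD_cwksp_create(&ws, 1 << 20, ZSTD_defaultCMem);
 *     if (ZSTD_isError(err)) return err;
 *     ...
 *     ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);  // frees the owned buffer
 */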

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_free(ptr, customMem);
}

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used
 * again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
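
/* A sketch of how a caller might drive the shrink heuristic across repeated
 * compressions (loosely modeled on zstd's CCtx reset path; the surrounding
 * control flow is hypothetical):
 *
 *     ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
 *     if (ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
 *         // the workspace has been oversized for too long: free it and
 *         // reallocate at the smaller neededSpace
 *     }
 */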

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */