/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Set our tables and aligneds to align by 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used. These
 *   buffers are each aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned/Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 * (A usage sketch follows the struct definition below.)
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
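
/* A minimal, illustrative usage sketch (not part of the API). The buffer
 * `mem`, its size `memSize`, and the reservation sizes here are hypothetical;
 * the reserve functions themselves are defined below in this file.
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
 *     obj = ZSTD_cwksp_reserve_object(&ws, objectBytes);   (phase 1: objects)
 *     buf = ZSTD_cwksp_reserve_buffer(&ws, bufferBytes);   (phase 2: buffers)
 *     tbl = ZSTD_cwksp_reserve_table(&ws, tableBytes);     (phase 3: aligned/tables)
 *
 * Reserving another object after this point would fail, since the objects
 * phase has already passed.
 */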

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}

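/* For example (illustrative values only): ZSTD_cwksp_align(10, 8) == 16 and
 * ZSTD_cwksp_align(16, 8) == 16, since (size + 7) & ~7 rounds up to the next
 * multiple of 8 while leaving exact multiples unchanged. */
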
/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else should be sized through this function, though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}
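
/* For example (illustrative sizes): without ASAN poisoning,
 * ZSTD_cwksp_alloc_size(100) is simply 100; with ASAN poisoning enabled, it is
 * 100 + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE == 356, one redzone on each side of
 * the allocation. A size of 0 always maps to 0, so empty reservations consume
 * no redzone space. */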

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}
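
/* For example (illustrative size): ZSTD_cwksp_aligned_alloc_size(100) first
 * rounds 100 up to 128 (the next multiple of ZSTD_CWKSP_ALIGNMENT_BYTES), then
 * applies ZSTD_cwksp_alloc_size(), which adds redzones only under ASAN. */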

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
     * to align the beginning of the aligned section, as well as another n_2=[0, 63]
     * bytes to align the beginning of the tables section.
     *
     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
     * aligneds being sized in multiples of 64 bytes.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
    return bytes;
}
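
/* Worked example (hypothetical addresses): with alignBytes == 64, a pointer at
 * address 0x1001 needs (64 - (0x1001 & 63)) & 63 == 63 additional bytes, while
 * an already 64-byte-aligned pointer needs (64 - 0) & 63 == 0 bytes. The outer
 * mask is what maps the already-aligned case to 0 rather than 64. */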

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the
 * wksp, which counts from the end of the wksp (as opposed to the object/table
 * segment, which counts from the beginning).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) {
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * Returns 0 on success, or a zstd error code on failure.
 */
MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating buffers */
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }

        /* Going from allocating buffers to allocating aligneds/tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
                size_t const bytesToAlign =
                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
                                memory_allocation, "aligned phase - alignment initial allocation failed!");
            }
            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void* const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void* const end = (BYTE*)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = end;
                ws->tableEnd = end;
                ws->tableValidEnd = end;
            }
        }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            __asan_unpoison_memory_region(alloc, bytes);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory whose size is rounded up to, and whose address is
 * aligned on, ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc;
    void* end;
    void* top;

    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
        return NULL;
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}

/**
 * Aligned on sizeof(void*).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}
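
/* A minimal lifecycle sketch (illustrative; the 1 MB size is arbitrary).
 * ZSTD_defaultCMem (declared in zstd_internal.h) selects the default allocator.
 *
 *     ZSTD_cwksp ws;
 *     size_t const err = ZSTD_cwksp_create(&ws, 1 << 20, ZSTD_defaultCMem);
 *     if (!ZSTD_isError(err)) {
 *         ...reserve and use allocations, possibly across compressions...
 *         ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
 *     }
 */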

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable
 * limit of the actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
                                                        size_t const estimatedSpace, int resizedWorkspace) {
    if (resizedWorkspace) {
        /* Resized/newly allocated wksp should have exact bounds */
        return ZSTD_cwksp_used(ws) == estimatedSpace;
    } else {
        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
         * than estimatedSpace. See the comments in ZSTD_cwksp_slack_space_required() above for details.
         */
        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
    }
}
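
/* For example (illustrative numbers): with estimatedSpace == 1000 on a reused,
 * non-resized workspace, any actual usage in [937, 1063] is accepted, since
 * alignment slack can shift consumption by up to 63 bytes in either direction;
 * a freshly resized workspace must match exactly. */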

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
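
/* Illustrative flow (the caller shown is hypothetical): a compression context
 * calls ZSTD_cwksp_bump_oversized_duration() once per compression. Only after
 * the workspace has been at least ZSTD_WORKSPACETOOLARGE_FACTOR times larger
 * than needed for more than ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive
 * compressions does ZSTD_cwksp_check_wasteful() report it as wasteful, at
 * which point the caller may choose to shrink it. */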

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */