/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3

/* When the workspace is continuously too large for at least this number of
 * invocations, the context's memory usage is considered wasteful, because it
 * is sized to handle a worst-case scenario which rarely happens.
 * In that case, resize it down to free some memory. */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement: they don't need
 *     their memory to be totally cleared, but they do need the memory to be
 *     bounded, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't be, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_free{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned
 * 4. Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    int allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;
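
/*
 * Illustrative sketch, not part of the API surface: a typical allocation
 * sequence respecting the phase order above. The sizes and the 1 MB buffer
 * are made up for the example; `buffer` is a caller-provided, pointer-aligned
 * region.
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, buffer, 1 << 20);
 *     {   void* const obj   = ZSTD_cwksp_reserve_object(&ws, 64);    // 1. objects
 *         BYTE* const buf   = ZSTD_cwksp_reserve_buffer(&ws, 4096);  // 2. buffers
 *         void* const algn  = ZSTD_cwksp_reserve_aligned(&ws, 256);  // 3. aligned
 *         void* const table = ZSTD_cwksp_reserve_table(&ws, 1024);   // 4. tables
 *         // Reserving in a different order (e.g. a buffer after a table)
 *         // violates the phase contract and makes the allocation fail.
 *     }
 */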

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
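
/*
 * A quick sanity sketch of the rounding behavior (illustrative only):
 *
 *     assert(ZSTD_cwksp_align(21, 4) == 24);  // (21 + 3) & ~3
 *     assert(ZSTD_cwksp_align(24, 4) == 24);  // already aligned: unchanged
 *     assert(ZSTD_cwksp_align( 1, 8) ==  8);
 */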

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}
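
/*
 * Illustrative sketch: when sizing a workspace up front, sum
 * ZSTD_cwksp_alloc_size() over the non-table allocations and add table sizes
 * directly (tables carry no redzone). The size variables are hypothetical.
 *
 *     size_t const neededSpace =
 *           2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t))
 *         + ZSTD_cwksp_alloc_size(entropyWorkspaceSize)  // redzoned under ASAN
 *         + hashTableSize + chainTableSize;              // tables: added as-is
 */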

MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            /* If unaligned allocations down from a too-large top have left us
             * unaligned, we need to realign our alloc ptr. Technically, this
             * can consume space that is unaccounted for in the neededSpace
             * calculation. However, I believe this can only happen when the
             * workspace is too large, and specifically when it is too large
             * by a larger margin than the space that will be consumed. */
            /* TODO: cleaner, compiler warning friendly way to do this??? */
            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
            if (ws->allocStart < ws->tableValidEnd) {
                ws->tableValidEnd = ws->allocStart;
            }
        }
        ws->phase = phase;
    }
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    void* bottom = ws->tableEnd;
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    alloc = (BYTE *)ws->allocStart - bytes;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory that is sized and aligned on sizeof(unsigned).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    assert((bytes & (sizeof(U32)-1)) == 0);
    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}

/**
 * Aligned on sizeof(unsigned). These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc = ws->tableEnd;
    void* end = (BYTE *)alloc + bytes;
    void* top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

/**
 * Aligned on sizeof(void*).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}
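
/*
 * Illustrative sketch of the intended dirty/clean cycle (hypothetical caller):
 *
 *     ZSTD_cwksp_mark_tables_dirty(ws);  // previous contents can no longer
 *                                        // serve as a valid index bound
 *     // ... later, before the tables are next read:
 *     ZSTD_cwksp_clean_tables(ws);       // zeroes only the not-yet-clean part
 */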

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}
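
/*
 * Illustrative sketch, assuming unchanged compression parameters: re-using
 * the workspace for a new compression. Objects survive the clear; everything
 * else is re-reserved in phase order. The names below are hypothetical.
 *
 *     ZSTD_cwksp_clear(ws);
 *     seqBuf = ZSTD_cwksp_reserve_buffer(ws, seqBufSize);  // 2. buffers
 *     opt    = ZSTD_cwksp_reserve_aligned(ws, optSize);    // 3. aligned
 *     hash   = ZSTD_cwksp_reserve_table(ws, hashSize);     // 4. tables
 */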

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_malloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation);
    ZSTD_cwksp_init(ws, workspace, size);
    return 0;
}
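
/*
 * Illustrative sketch: a heap-backed workspace using zstd's default allocator
 * (ZSTD_defaultCMem). neededSpace is a hypothetical, precomputed size.
 *
 *     ZSTD_cwksp ws;
 *     if (!ZSTD_isError(ZSTD_cwksp_create(&ws, neededSpace, ZSTD_defaultCMem))) {
 *         // ... reserve and use the workspace ...
 *         ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
 *     }
 */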

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_free(ptr, customMem);
}

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
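
/*
 * Illustrative sketch of how these checks are meant to combine (hypothetical
 * caller): a workspace gets resized when it is too small, or when it has been
 * at least ZSTD_WORKSPACETOOLARGE_FACTOR times too large for more than
 * ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses.
 *
 *     ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
 *     if (!ZSTD_cwksp_check_available(ws, neededSpace)
 *         || ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
 *         // free and reallocate the workspace at exactly neededSpace
 *     }
 */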

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */
536