/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate the process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't be, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_free{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned
 * 4. Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
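/* An illustrative sketch of the ordering above (not part of this header's API;
 * the sizes objSize, bufSize, alignedSize, and tableSize are hypothetical
 * placeholders, each assumed to satisfy the alignment asserted by its reserve
 * function):
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, memSize);                          // caller-provided memory
 *     void* obj   = ZSTD_cwksp_reserve_object(&ws, objSize);       // 1. objects
 *     BYTE* buf   = ZSTD_cwksp_reserve_buffer(&ws, bufSize);       // 2. buffers
 *     void* algn  = ZSTD_cwksp_reserve_aligned(&ws, alignedSize);  // 3. aligned
 *     void* table = ZSTD_cwksp_reserve_table(&ws, tableSize);      // 4. tables
 *     if (ZSTD_cwksp_reserve_failed(&ws)) {
 *         // the workspace was too small; allocate a larger one and retry
 *     }
 */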
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    int allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
} ZSTD_cwksp;

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
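/* For example, ZSTD_cwksp_align(10, 8) == 16, while ZSTD_cwksp_align(16, 8) == 16:
 * a size already on an alignment boundary is returned unchanged. */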

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

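/* Illustrative sketch (the component sizes named below are hypothetical
 * placeholders, not values defined in this header): a caller estimating how
 * large a workspace must be would sum the redzone-adjusted size of each
 * non-table reservation it plans to make, plus the raw table space:
 *
 *     size_t const neededSpace =
 *           2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t))
 *         + ZSTD_cwksp_alloc_size(entropyWorkspaceSize)
 *         + ZSTD_cwksp_alloc_size(outBufferSize)
 *         + tableSpace;   // tables aren't redzoned, so no adjustment is applied
 */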
MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            /* If unaligned allocations down from a too-large top have left us
             * unaligned, we need to realign our alloc ptr. Technically, this
             * can consume space that is unaccounted for in the neededSpace
             * calculation. However, I believe this can only happen when the
             * workspace is too large, and specifically when it is too large
             * by a larger margin than the space that will be consumed. */
            /* TODO: cleaner, compiler warning friendly way to do this??? */
            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
            if (ws->allocStart < ws->tableValidEnd) {
                ws->tableValidEnd = ws->allocStart;
            }
        }
        ws->phase = phase;
    }
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    void* bottom = ws->tableEnd;
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    alloc = (BYTE *)ws->allocStart - bytes;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on sizeof(unsigned).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    assert((bytes & (sizeof(U32)-1)) == 0);
    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}

/**
 * Aligned on sizeof(unsigned). These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc = ws->tableEnd;
    void* end = (BYTE *)alloc + bytes;
    void* top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

/**
 * Aligned on sizeof(void*).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    __asan_unpoison_memory_region(alloc, bytes);
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

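/* Illustrative sketch (an assumption about how a caller might coordinate the
 * dirty/clean calls, not a contract stated by this header): when the contents
 * of the table area can no longer be trusted to satisfy the index bound, mark
 * the tables dirty; later, zero only the still-dirty portion before reusing
 * the tables without a full reset:
 *
 *     ZSTD_cwksp_mark_tables_dirty(ws);   // table contents are no longer bounded
 *     ...
 *     ZSTD_cwksp_clean_tables(ws);        // zeroes only [tableValidEnd, tableEnd)
 */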
/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

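/* Illustrative sketch (the buffer management shown is an assumption about a
 * typical caller, not a requirement of this header): a workspace can wrap
 * memory the caller already owns, and ZSTD_cwksp_clear() lets the same memory
 * be reused for subsequent compressions without reallocating:
 *
 *     void* const mem = malloc(wkspSize);   // malloc memory is pointer-aligned
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, wkspSize);
 *     ...                                   // reserve and use allocations
 *     ZSTD_cwksp_clear(&ws);                // invalidate non-object allocations, reuse buffer
 *     ...
 *     free(mem);                            // release with the allocator that provided it
 */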
MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_malloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_free(ptr, customMem);
}

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}

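/* Illustrative sketch (an assumption about typical caller behavior, not a
 * contract of this header): before reusing a workspace, a caller can track how
 * long it has been oversized and recreate it once it has been wasteful for too
 * long:
 *
 *     ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
 *     if (ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
 *         ZSTD_cwksp_free(ws, customMem);
 *         {   size_t const err = ZSTD_cwksp_create(ws, neededSpace, customMem);
 *             if (ZSTD_isError(err)) return err;
 *         }
 *     }
 */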
#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */