/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ======   Dependencies   ======= */
#include "zstd_deps.h" /* size_t */
#include "debug.h"     /* assert */
#include "zstd_internal.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "pool.h"

/* ======   Compiler specifics   ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
#endif


#ifdef ZSTD_MULTITHREAD

#include "threading.h"   /* pthread adaptation */
/* A job is a function and an opaque argument */
typedef struct POOL_job_s {
    POOL_function function;
    void *opaque;
} POOL_job;

struct POOL_ctx_s {
    ZSTD_customMem customMem;
    /* Keep track of the threads */
    ZSTD_pthread_t* threads;
    size_t threadCapacity;
    size_t threadLimit;

    /* The queue is a circular buffer */
    POOL_job *queue;
    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The number of threads working on jobs */
    size_t numThreadsBusy;
    /* Indicates if the queue is empty */
    int queueEmpty;

    /* The mutex protects the queue */
    ZSTD_pthread_mutex_t queueMutex;
    /* Condition variable for pushers to wait on when the queue is full */
    ZSTD_pthread_cond_t queuePushCond;
    /* Condition variable for poppers to wait on when the queue is empty */
    ZSTD_pthread_cond_t queuePopCond;
    /* Indicates if the queue is shutting down */
    int shutdown;
};
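
/* Ring-buffer convention (informal sketch, derived from the code below) :
 * jobs are pushed at queueTail and popped at queueHead, each index advancing
 * modulo queueSize.  One slot is deliberately left unused, so
 *   queueHead == queueTail                    means no job is waiting, and
 *   queueHead == (queueTail + 1) % queueSize  means the buffer is full
 * (see isQueueFull()).  This is why POOL_create_advanced() allocates one slot
 * more than the requested queue size.  The explicit queueEmpty flag also
 * covers the degenerate queueSize == 1 case, where head and tail are always
 * equal.
 */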

/* POOL_thread() :
 * Work thread for the thread pool.
 * Waits for jobs and executes them.
 * @returns : NULL on failure else non-null.
 */
static void* POOL_thread(void* opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)opaque;
    if (!ctx) { return NULL; }
    for (;;) {
        /* Lock the mutex and wait for a non-empty queue or until shutdown */
        ZSTD_pthread_mutex_lock(&ctx->queueMutex);

        while ( ctx->queueEmpty
            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
            if (ctx->shutdown) {
                /* even if !queueEmpty (possible when numThreadsBusy >= threadLimit),
                 * a few threads may shut down while the queue is still non-empty,
                 * but enough threads will remain active to finish the queue */
                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                return opaque;
            }
            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
        }
        /* Pop a job off the queue */
        {   POOL_job const job = ctx->queue[ctx->queueHead];
            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
            ctx->numThreadsBusy++;
            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
            /* Unlock the mutex, signal a pusher, and run the job */
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);

            job.function(job.opaque);

            /* If the intended queue size was 0, signal after finishing job */
            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
            ctx->numThreadsBusy--;
            if (ctx->queueSize == 1) {
                ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            }
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        }
    }  /* for (;;) */
    assert(0);  /* Unreachable */
}

POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
    return POOL_create (numThreads, 0);
}

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
                               ZSTD_customMem customMem) {
    POOL_ctx* ctx;
    /* Check parameters */
    if (!numThreads) { return NULL; }
    /* Allocate the context and zero initialize */
    ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
    if (!ctx) { return NULL; }
    /* Initialize the job queue.
     * It needs one extra space since one space is wasted to differentiate
     * empty and full queues.
     */
    ctx->queueSize = queueSize + 1;
    ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem);
    ctx->queueHead = 0;
    ctx->queueTail = 0;
    ctx->numThreadsBusy = 0;
    ctx->queueEmpty = 1;
    {
        int error = 0;
        error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
        if (error) { POOL_free(ctx); return NULL; }
    }
    ctx->shutdown = 0;
    /* Allocate space for the thread handles */
    ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
    ctx->threadCapacity = 0;
    ctx->customMem = customMem;
    /* Check for errors */
    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
    /* Initialize the threads */
    {   size_t i;
        for (i = 0; i < numThreads; ++i) {
            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
                ctx->threadCapacity = i;
                POOL_free(ctx);
                return NULL;
        }   }
        ctx->threadCapacity = numThreads;
        ctx->threadLimit = numThreads;
    }
    return ctx;
}
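
/* Usage sketch (illustrative only; myJob and counter below are hypothetical
 * names, not part of this library) :
 *
 *     static void myJob(void* opaque) {
 *         int* const counter = (int*)opaque;
 *         *counter += 1;
 *     }
 *
 *     POOL_ctx* const pool = POOL_create(4, 8);   // 4 worker threads, room for 8 queued jobs
 *     if (pool != NULL) {
 *         int counter = 0;
 *         POOL_add(pool, myJob, &counter);        // blocks only while the queue is full
 *         POOL_free(pool);                        // joins every worker, then releases memory
 *     }
 */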

/*! POOL_join() :
    Shut down the queue, wake any sleeping threads, and join all of the threads.
*/
static void POOL_join(POOL_ctx* ctx) {
    /* Shut down the queue */
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    ctx->shutdown = 1;
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    /* Wake up sleeping threads */
    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    /* Join all of the threads */
    {   size_t i;
        for (i = 0; i < ctx->threadCapacity; ++i) {
            ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */
    }   }
}

void POOL_free(POOL_ctx *ctx) {
    if (!ctx) { return; }
    POOL_join(ctx);
    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
    ZSTD_customFree(ctx->queue, ctx->customMem);
    ZSTD_customFree(ctx->threads, ctx->customMem);
    ZSTD_customFree(ctx, ctx->customMem);
}

void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
  POOL_free (pool);
}

size_t POOL_sizeof(POOL_ctx *ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    return sizeof(*ctx)
        + ctx->queueSize * sizeof(POOL_job)
        + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
}


/* @return : 0 on success, 1 on error */
static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
{
    if (numThreads <= ctx->threadCapacity) {
        if (!numThreads) return 1;
        ctx->threadLimit = numThreads;
        return 0;
    }
    /* numThreads > threadCapacity */
    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
        if (!threadPool) return 1;
        /* replace existing thread pool */
        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
        ZSTD_customFree(ctx->threads, ctx->customMem);
        ctx->threads = threadPool;
        /* Initialize additional threads */
        {   size_t threadId;
            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
                    ctx->threadCapacity = threadId;
                    return 1;
            }   }
    }   }
    /* successfully expanded */
    ctx->threadCapacity = numThreads;
    ctx->threadLimit = numThreads;
    return 0;
}

/* @return : 0 on success, 1 on error */
int POOL_resize(POOL_ctx* ctx, size_t numThreads)
{
    int result;
    if (ctx==NULL) return 1;
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    result = POOL_resize_internal(ctx, numThreads);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return result;
}
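
/* Note on resize semantics, as implemented above : shrinking never destroys
 * threads; it only lowers threadLimit, so surplus workers park inside
 * POOL_thread() until the limit is raised again or the pool is freed.
 * Growing reallocates the handle array and spawns only the additional
 * threads.  A hypothetical call, assuming an existing pool :
 *
 *     if (POOL_resize(pool, 8)) {
 *         // non-zero means failure (numThreads == 0, allocation failure,
 *         // or thread creation failure); the pool remains usable with
 *         // however many threads it currently has.
 *     }
 */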

/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * When queueSize is 1 (the pool was created with an intended queueSize of 0),
 * the queue is considered empty only when a thread is free _and_ no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return (ctx->numThreadsBusy == ctx->threadLimit) ||
               !ctx->queueEmpty;
    }
}


static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
{
    POOL_job const job = {function, opaque};
    assert(ctx != NULL);
    if (ctx->shutdown) return;

    ctx->queueEmpty = 0;
    ctx->queue[ctx->queueTail] = job;
    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    /* Wait until there is space in the queue for the new job */
    while (isQueueFull(ctx) && (!ctx->shutdown)) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}


int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    if (isQueueFull(ctx)) {
        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        return 0;
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return 1;
}
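
/* POOL_add() blocks the calling thread while the queue is full, whereas
 * POOL_tryAdd() never blocks and reports whether the job was queued.
 * A common caller-side pattern (illustrative sketch, not taken from this
 * file) is to fall back to running the job synchronously :
 *
 *     if (!POOL_tryAdd(pool, myJob, jobData))
 *         myJob(jobData);    // queue full : do the work on the current thread
 */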


#else  /* ZSTD_MULTITHREAD  not defined */

/* ========================== */
/* No multi-threading support */
/* ========================== */


/* We don't need any data, but if it is empty, malloc() might return NULL. */
struct POOL_ctx_s {
    int dummy;
};
static POOL_ctx g_poolCtx;

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
    (void)numThreads;
    (void)queueSize;
    (void)customMem;
    return &g_poolCtx;
}

void POOL_free(POOL_ctx* ctx) {
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}

int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
    (void)ctx; (void)numThreads;
    return 0;
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
}

int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
    return 1;
}
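
/* In single-threaded builds both POOL_add() and POOL_tryAdd() simply run the
 * job synchronously on the calling thread, so callers can use the same API
 * whether or not ZSTD_MULTITHREAD is defined. */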

size_t POOL_sizeof(POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    assert(ctx == &g_poolCtx);
    return sizeof(*ctx);
}

#endif  /* ZSTD_MULTITHREAD */