/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2008 by Blender Foundation.
 * All rights reserved.
 */

/** \file
 * \ingroup bli
 *
 * Simple, fast memory allocator for allocating many elements of the same size.
 *
 * Supports:
 *
 * - Freeing chunks.
 * - Iterating over allocated chunks
 *   (optionally when using the #BLI_MEMPOOL_ALLOW_ITER flag).
 */

#include <stdlib.h>
#include <string.h>

#include "atomic_ops.h"

#include "BLI_utildefines.h"

#include "BLI_mempool.h" /* own include */

#include "MEM_guardedalloc.h"

#include "BLI_strict_flags.h" /* keep last */

#ifdef WITH_MEM_VALGRIND
#  include "valgrind/memcheck.h"
#endif

/* note: copied from BLO_blend_defs.h, don't use here because we're in BLI */
#ifdef __BIG_ENDIAN__
/* Big Endian */
#  define MAKE_ID(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
    ((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
     (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h))
#else
/* Little Endian */
#  define MAKE_ID(a, b, c, d) ((int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a))
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
    ((int64_t)(h) << 56 | (int64_t)(g) << 48 | (int64_t)(f) << 40 | (int64_t)(e) << 32 | \
     (int64_t)(d) << 24 | (int64_t)(c) << 16 | (int64_t)(b) << 8 | (a))
#endif
/**
 * Important that this value is _not_ aligned with ``sizeof(void *)``.
 * So having a pointer to 2/4/8... aligned memory is enough to ensure
 * the freeword will never be used.
 * To be safe, use a word that's the same in both directions.
 */
#define FREEWORD \
  ((sizeof(void *) > sizeof(int32_t)) ? MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
                                        MAKE_ID('e', 'f', 'f', 'e'))

/**
 * The 'used' word just needs to be set to something besides FREEWORD.
 */
#define USEDWORD MAKE_ID('u', 's', 'e', 'd')
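
/* Illustration (not part of the API): the low byte of FREEWORD is 'e' (0x65) on
 * both endians, so the value is odd and can never equal a pointer to 2/4/8...
 * aligned memory. A free element is therefore detectable with a check like:
 *
 *   if (((BLI_freenode *)elem)->freeword == FREEWORD) {
 *     // 'elem' is on the free list, skip it.
 *   }
 *
 * which is what the iteration code below relies on. */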

/* currently totalloc isn't used */
// #define USE_TOTALLOC

/* optimize pool size */
#define USE_CHUNK_POW2

#ifndef NDEBUG
static bool mempool_debug_memset = false;
#endif

/**
 * A free element from #BLI_mempool_chunk. Data is cast to this type and stored in
 * #BLI_mempool.free as a singly linked list, each item #BLI_mempool.esize large.
 *
 * Each element represents a block which BLI_mempool_alloc may return.
 */
typedef struct BLI_freenode {
  struct BLI_freenode *next;
  /** Used to identify this as a freed node. */
  intptr_t freeword;
} BLI_freenode;

/**
 * A chunk of memory in the mempool stored in
 * #BLI_mempool.chunks as a singly linked list.
 */
typedef struct BLI_mempool_chunk {
  struct BLI_mempool_chunk *next;
} BLI_mempool_chunk;

/**
 * The mempool, which stores and tracks the memory \a chunks
 * and the free elements within those chunks (\a free).
 */
struct BLI_mempool {
  /** Singly linked list of allocated chunks. */
  BLI_mempool_chunk *chunks;
  /** Keep a pointer to the last chunk so new chunks can be appended there;
   * this is needed for iteration, so chunks can be looped over in the order added. */
  BLI_mempool_chunk *chunk_tail;

  /** Element size in bytes. */
  uint esize;
  /** Chunk size in bytes. */
  uint csize;
  /** Number of elements per chunk. */
  uint pchunk;
  uint flag;
  /* keeps aligned to 16 bytes */

  /** Free element list. Interleaved into chunk data. */
  BLI_freenode *free;
  /** Used to know how many chunks to keep for #BLI_mempool_clear. */
  uint maxchunks;
  /** Number of elements currently in use. */
  uint totused;
#ifdef USE_TOTALLOC
  /** Number of elements allocated in total. */
  uint totalloc;
#endif
};

#define MEMPOOL_ELEM_SIZE_MIN (sizeof(void *) * 2)

#define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1))

#define NODE_STEP_NEXT(node) ((void *)((char *)(node) + esize))
#define NODE_STEP_PREV(node) ((void *)((char *)(node)-esize))

/** Extra bytes implicitly used for every chunk alloc. */
#define CHUNK_OVERHEAD (uint)(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))

#ifdef USE_CHUNK_POW2
static uint power_of_2_max_u(uint x)
{
  x -= 1;
  x = x | (x >> 1);
  x = x | (x >> 2);
  x = x | (x >> 4);
  x = x | (x >> 8);
  x = x | (x >> 16);
  return x + 1;
}
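
/* A quick sanity check of the rounding behavior (illustrative only):
 *   power_of_2_max_u(1)  -> 1
 *   power_of_2_max_u(3)  -> 4
 *   power_of_2_max_u(64) -> 64 (already a power of 2, returned unchanged)
 * The bit-smearing above sets every bit below the highest set bit,
 * so adding 1 yields the next power of two at or above 'x'. */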
#endif

BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint index)
{
  while (index-- && head) {
    head = head->next;
  }
  return head;
}

/**
 * \return the number of chunks to allocate based on how many elements are needed.
 *
 * \note for small pools 1 is a good default: the elements need to be initialized,
 * adding overhead on creation which is redundant if they aren't used.
 */
BLI_INLINE uint mempool_maxchunks(const uint totelem, const uint pchunk)
{
  return (totelem <= pchunk) ? 1 : ((totelem / pchunk) + 1);
}
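
/* For example (illustrative): with pchunk = 512, requesting totelem = 1200 elements
 * gives (1200 / 512) + 1 = 3 chunks, while any totelem <= 512 gives a single chunk. */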

static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
{
  return MEM_mallocN(sizeof(BLI_mempool_chunk) + (size_t)pool->csize, "BLI_Mempool Chunk");
}

/**
 * Initialize a chunk and add it into \a pool->chunks
 *
 * \param pool: The pool to add the chunk into.
 * \param mpchunk: The new uninitialized chunk (can be malloc'd)
 * \param last_tail: The last element of the previous chunk
 * (used when building free chunks initially)
 * \return The last free element of the new chunk.
 */
static BLI_freenode *mempool_chunk_add(BLI_mempool *pool,
                                       BLI_mempool_chunk *mpchunk,
                                       BLI_freenode *last_tail)
{
  const uint esize = pool->esize;
  BLI_freenode *curnode = CHUNK_DATA(mpchunk);
  uint j;

  /* append */
  if (pool->chunk_tail) {
    pool->chunk_tail->next = mpchunk;
  }
  else {
    BLI_assert(pool->chunks == NULL);
    pool->chunks = mpchunk;
  }

  mpchunk->next = NULL;
  pool->chunk_tail = mpchunk;

  if (UNLIKELY(pool->free == NULL)) {
    pool->free = curnode;
  }

  /* loop through the allocated data, building the pointer structures */
  j = pool->pchunk;
  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode->freeword = FREEWORD;
      curnode = curnode->next;
    }
  }
  else {
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode = curnode->next;
    }
  }

  /* terminate the list (rewind one)
   * will be overwritten if 'curnode' gets passed in again as 'last_tail' */
  curnode = NODE_STEP_PREV(curnode);
  curnode->next = NULL;

#ifdef USE_TOTALLOC
  pool->totalloc += pool->pchunk;
#endif

  /* the final pointer in the previously allocated chunk is wrong */
  if (last_tail) {
    last_tail->next = CHUNK_DATA(mpchunk);
  }

  return curnode;
}
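
/* Resulting chunk layout (illustrative sketch, not to scale):
 *
 *   [BLI_mempool_chunk header][elem 0][elem 1] ... [elem pchunk-1]
 *                               next --> elem 1 ...  next --> NULL
 *                                                    (or the first element of
 *                                                    the next chunk, once
 *                                                    'last_tail' is re-linked)
 *
 * Each free element doubles as a BLI_freenode, so the free list costs no
 * memory beyond the elements themselves. */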

static void mempool_chunk_free(BLI_mempool_chunk *mpchunk)
{
  MEM_freeN(mpchunk);
}

static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
{
  BLI_mempool_chunk *mpchunk_next;

  for (; mpchunk; mpchunk = mpchunk_next) {
    mpchunk_next = mpchunk->next;
    mempool_chunk_free(mpchunk);
  }
}

BLI_mempool *BLI_mempool_create(uint esize, uint totelem, uint pchunk, uint flag)
{
  BLI_mempool *pool;
  BLI_freenode *last_tail = NULL;
  uint i, maxchunks;

  /* allocate the pool structure */
  pool = MEM_mallocN(sizeof(BLI_mempool), "memory pool");

  /* set the elem size */
  if (esize < (int)MEMPOOL_ELEM_SIZE_MIN) {
    esize = (int)MEMPOOL_ELEM_SIZE_MIN;
  }

  if (flag & BLI_MEMPOOL_ALLOW_ITER) {
    esize = MAX2(esize, (uint)sizeof(BLI_freenode));
  }

  maxchunks = mempool_maxchunks(totelem, pchunk);

  pool->chunks = NULL;
  pool->chunk_tail = NULL;
  pool->esize = esize;

  /* Optimize chunk size to powers of 2, accounting for slop-space. */
#ifdef USE_CHUNK_POW2
  {
    BLI_assert(power_of_2_max_u(pchunk * esize) > CHUNK_OVERHEAD);
    pchunk = (power_of_2_max_u(pchunk * esize) - CHUNK_OVERHEAD) / esize;
  }
#endif

  pool->csize = esize * pchunk;

  /* Ensure this is a power of 2, minus the rounding by element size. */
#if defined(USE_CHUNK_POW2) && !defined(NDEBUG)
  {
    uint final_size = (uint)MEM_SIZE_OVERHEAD + (uint)sizeof(BLI_mempool_chunk) + pool->csize;
    BLI_assert(((uint)power_of_2_max_u(final_size) - final_size) < pool->esize);
  }
#endif

  pool->pchunk = pchunk;
  pool->flag = flag;
  pool->free = NULL; /* mempool_chunk_add assigns */
  pool->maxchunks = maxchunks;
#ifdef USE_TOTALLOC
  pool->totalloc = 0;
#endif
  pool->totused = 0;

  if (totelem) {
    /* Allocate the actual chunks. */
    for (i = 0; i < maxchunks; i++) {
      BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
      last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
    }
  }

#ifdef WITH_MEM_VALGRIND
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  return pool;
}
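
/* A minimal usage sketch (illustrative; 'MyElem' is a hypothetical caller-side type):
 *
 *   BLI_mempool *pool = BLI_mempool_create(
 *       sizeof(MyElem), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
 *
 *   MyElem *elem = BLI_mempool_alloc(pool);
 *   // ... use 'elem' ...
 *   BLI_mempool_free(pool, elem);
 *
 *   BLI_mempool_destroy(pool);
 */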

void *BLI_mempool_alloc(BLI_mempool *pool)
{
  BLI_freenode *free_pop;

  if (UNLIKELY(pool->free == NULL)) {
    /* Need to allocate a new chunk. */
    BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
    mempool_chunk_add(pool, mpchunk, NULL);
  }

  free_pop = pool->free;

  BLI_assert(pool->chunk_tail->next == NULL);

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    free_pop->freeword = USEDWORD;
  }

  pool->free = free_pop->next;
  pool->totused++;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_ALLOC(pool, free_pop, pool->esize);
#endif

  return (void *)free_pop;
}

void *BLI_mempool_calloc(BLI_mempool *pool)
{
  void *retval = BLI_mempool_alloc(pool);
  memset(retval, 0, (size_t)pool->esize);
  return retval;
}

/**
 * Free an element from the mempool.
 *
 * \note doesn't protect against double frees, take care!
 */
void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
  BLI_freenode *newhead = addr;

#ifndef NDEBUG
  {
    BLI_mempool_chunk *chunk;
    bool found = false;
    for (chunk = pool->chunks; chunk; chunk = chunk->next) {
      if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
        found = true;
        break;
      }
    }
    if (!found) {
      BLI_assert(!"Attempt to free data which is not in pool.\n");
    }
  }

  /* Enable for debugging. */
  if (UNLIKELY(mempool_debug_memset)) {
    memset(addr, 255, pool->esize);
  }
#endif

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
#ifndef NDEBUG
    /* This will detect double frees. */
    BLI_assert(newhead->freeword != FREEWORD);
#endif
    newhead->freeword = FREEWORD;
  }

  newhead->next = pool->free;
  pool->free = newhead;

  pool->totused--;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_FREE(pool, addr);
#endif

  /* Nothing is in use; free all the chunks except the first. */
  if (UNLIKELY(pool->totused == 0) && (pool->chunks->next)) {
    const uint esize = pool->esize;
    BLI_freenode *curnode;
    uint j;
    BLI_mempool_chunk *first;

    first = pool->chunks;
    mempool_chunk_free_all(first->next);
    first->next = NULL;
    pool->chunk_tail = first;

#ifdef USE_TOTALLOC
    pool->totalloc = pool->pchunk;
#endif

    /* Temp alloc so valgrind doesn't complain when setting freed blocks' 'next'. */
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
#endif

    curnode = CHUNK_DATA(first);
    pool->free = curnode;

    j = pool->pchunk;
    while (j--) {
      curnode->next = NODE_STEP_NEXT(curnode);
      curnode = curnode->next;
    }
    curnode = NODE_STEP_PREV(curnode);
    curnode->next = NULL; /* terminate the list */

#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
#endif
  }
}

int BLI_mempool_len(BLI_mempool *pool)
{
  return (int)pool->totused;
}

void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  if (index < pool->totused) {
    /* We could have some faster mem chunk stepping code inline. */
    BLI_mempool_iter iter;
    void *elem;
    BLI_mempool_iternew(pool, &iter);
    for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
      /* pass */
    }
    return elem;
  }

  return NULL;
}

/**
 * Fill in \a data with pointers to each element of the mempool,
 * to create a lookup table.
 *
 * \param pool: Pool to create the table from.
 * \param data: Array of pointers at least the size of 'pool->totused'.
 */
void BLI_mempool_as_table(BLI_mempool *pool, void **data)
{
  BLI_mempool_iter iter;
  void *elem;
  void **p = data;
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
  BLI_mempool_iternew(pool, &iter);
  while ((elem = BLI_mempool_iterstep(&iter))) {
    *p++ = elem;
  }
  BLI_assert((uint)(p - data) == pool->totused);
}

/**
 * A version of #BLI_mempool_as_table that allocates and returns the data.
 */
void **BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
{
  void **data = MEM_mallocN((size_t)pool->totused * sizeof(void *), allocstr);
  BLI_mempool_as_table(pool, data);
  return data;
}

/**
 * Fill in \a data with the contents of the mempool.
 */
void BLI_mempool_as_array(BLI_mempool *pool, void *data)
{
  const uint esize = pool->esize;
  BLI_mempool_iter iter;
  char *elem, *p = data;
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
  BLI_mempool_iternew(pool, &iter);
  while ((elem = BLI_mempool_iterstep(&iter))) {
    memcpy(p, elem, (size_t)esize);
    p = NODE_STEP_NEXT(p);
  }
  BLI_assert((uint)(p - (char *)data) == pool->totused * esize);
}

/**
 * A version of #BLI_mempool_as_array that allocates and returns the data.
 */
void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
{
  char *data = MEM_malloc_arrayN(pool->totused, pool->esize, allocstr);
  BLI_mempool_as_array(pool, data);
  return data;
}

/**
 * Initialize a new mempool iterator, #BLI_MEMPOOL_ALLOW_ITER flag must be set.
 */
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  iter->pool = pool;
  iter->curchunk = pool->chunks;
  iter->curindex = 0;

  iter->curchunk_threaded_shared = NULL;
}
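
/* Typical iteration (a sketch; the pool must have been created with
 * BLI_MEMPOOL_ALLOW_ITER):
 *
 *   BLI_mempool_iter iter;
 *   BLI_mempool_iternew(pool, &iter);
 *   for (void *elem = BLI_mempool_iterstep(&iter); elem; elem = BLI_mempool_iterstep(&iter)) {
 *     // ... process 'elem' ...
 *   }
 */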

/**
 * Initialize an array of mempool iterators, #BLI_MEMPOOL_ALLOW_ITER flag must be set.
 *
 * This is used in threaded code, to generate as many iterators as needed
 * (each task should have its own),
 * such that each iterator goes over its own single chunk,
 * and only getting the next chunk to iterate over has to be
 * protected against concurrency (which can be done in a lockless way).
 *
 * To be used when creating a task for each single item in the pool would be overkill.
 *
 * See the BLI_task_parallel_mempool implementation for a detailed usage example.
 */
BLI_mempool_iter *BLI_mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t num_iter)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  BLI_mempool_iter *iter_arr = MEM_mallocN(sizeof(*iter_arr) * num_iter, __func__);
  BLI_mempool_chunk **curchunk_threaded_shared = MEM_mallocN(sizeof(void *), __func__);

  BLI_mempool_iternew(pool, iter_arr);

  *curchunk_threaded_shared = iter_arr->curchunk;
  iter_arr->curchunk_threaded_shared = curchunk_threaded_shared;

  for (size_t i = 1; i < num_iter; i++) {
    iter_arr[i] = iter_arr[0];
    *curchunk_threaded_shared = iter_arr[i].curchunk = ((*curchunk_threaded_shared) ?
                                                            (*curchunk_threaded_shared)->next :
                                                            NULL);
  }

  return iter_arr;
}
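
/* A rough usage sketch (illustrative; 'ntasks' and the per-task index 'i' are
 * hypothetical caller-side names):
 *
 *   BLI_mempool_iter *iters = BLI_mempool_iter_threadsafe_create(pool, ntasks);
 *   // Each task 'i' iterates with its own iterator:
 *   //   for (void *elem = BLI_mempool_iterstep(&iters[i]); elem;
 *   //        elem = BLI_mempool_iterstep(&iters[i])) { ... }
 *   // Iterators claim whole chunks from the shared pointer via atomic CAS,
 *   // so no lock is needed.
 *   BLI_mempool_iter_threadsafe_free(iters);
 */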

void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr)
{
  BLI_assert(iter_arr->curchunk_threaded_shared != NULL);

  MEM_freeN(iter_arr->curchunk_threaded_shared);
  MEM_freeN(iter_arr);
}

#if 0
/* unoptimized, more readable */

static void *bli_mempool_iternext(BLI_mempool_iter *iter)
{
  void *ret = NULL;

  if (iter->curchunk == NULL || !iter->pool->totused) {
    return ret;
  }

  ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);

  iter->curindex++;

  if (iter->curindex == iter->pool->pchunk) {
    iter->curindex = 0;
    if (iter->curchunk_threaded_shared) {
      while (1) {
        iter->curchunk = *iter->curchunk_threaded_shared;
        if (iter->curchunk == NULL) {
          return ret;
        }
        if (atomic_cas_ptr((void **)iter->curchunk_threaded_shared,
                           iter->curchunk,
                           iter->curchunk->next) == iter->curchunk) {
          break;
        }
      }
    }
    iter->curchunk = iter->curchunk->next;
  }

  return ret;
}

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
  BLI_freenode *ret;

  do {
    ret = bli_mempool_iternext(iter);
  } while (ret && ret->freeword == FREEWORD);

  return ret;
}

#else

/* optimized version of code above */

/**
 * Step over the iterator, returning the mempool item or NULL.
 */
void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
  if (UNLIKELY(iter->curchunk == NULL)) {
    return NULL;
  }

  const uint esize = iter->pool->esize;
  BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
  BLI_freenode *ret;
  do {
    ret = curnode;

    if (++iter->curindex != iter->pool->pchunk) {
      curnode = POINTER_OFFSET(curnode, esize);
    }
    else {
      iter->curindex = 0;
      if (iter->curchunk_threaded_shared) {
        for (iter->curchunk = *iter->curchunk_threaded_shared;
             (iter->curchunk != NULL) && (atomic_cas_ptr((void **)iter->curchunk_threaded_shared,
                                                         iter->curchunk,
                                                         iter->curchunk->next) != iter->curchunk);
             iter->curchunk = *iter->curchunk_threaded_shared) {
          /* pass. */
        }

        if (UNLIKELY(iter->curchunk == NULL)) {
          return (ret->freeword == FREEWORD) ? NULL : ret;
        }
      }
      iter->curchunk = iter->curchunk->next;
      if (UNLIKELY(iter->curchunk == NULL)) {
        return (ret->freeword == FREEWORD) ? NULL : ret;
      }
      curnode = CHUNK_DATA(iter->curchunk);
    }
  } while (ret->freeword == FREEWORD);

  return ret;
}

#endif

/**
 * Empty the pool, as if it were just created.
 *
 * \param pool: The pool to clear.
 * \param totelem_reserve: Optionally reserve how many items should be kept from clearing
 * (-1 to use the pool's default).
 */
void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
{
  BLI_mempool_chunk *mpchunk;
  BLI_mempool_chunk *mpchunk_next;
  uint maxchunks;

  BLI_mempool_chunk *chunks_temp;
  BLI_freenode *last_tail = NULL;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  if (totelem_reserve == -1) {
    maxchunks = pool->maxchunks;
  }
  else {
    maxchunks = mempool_maxchunks((uint)totelem_reserve, pool->pchunk);
  }

  /* Free all chunks after 'maxchunks'. */
  mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
  if (mpchunk && mpchunk->next) {
    /* terminate */
    mpchunk_next = mpchunk->next;
    mpchunk->next = NULL;
    mpchunk = mpchunk_next;

    do {
      mpchunk_next = mpchunk->next;
      mempool_chunk_free(mpchunk);
    } while ((mpchunk = mpchunk_next));
  }

  /* re-initialize */
  pool->free = NULL;
  pool->totused = 0;
#ifdef USE_TOTALLOC
  pool->totalloc = 0;
#endif

  chunks_temp = pool->chunks;
  pool->chunks = NULL;
  pool->chunk_tail = NULL;

  while ((mpchunk = chunks_temp)) {
    chunks_temp = mpchunk->next;
    last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
  }
}

/**
 * Wrap #BLI_mempool_clear_ex with no reserve set.
 */
void BLI_mempool_clear(BLI_mempool *pool)
{
  BLI_mempool_clear_ex(pool, -1);
}

/**
 * Free the mempool itself (and all elements).
 */
void BLI_mempool_destroy(BLI_mempool *pool)
{
  mempool_chunk_free_all(pool->chunks);

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
#endif

  MEM_freeN(pool);
}

#ifndef NDEBUG
void BLI_mempool_set_memory_debug(void)
{
  mempool_debug_memset = true;
}
#endif