1 /*---------------------------------------------------------------------------
2 * Specialised Memory Allocators.
3 *
4 *---------------------------------------------------------------------------
5 * Memory buffers provide memory for functions which repeatedly allocate
6 * large chunks of temporary memory (restore_object() for example). Instead
7 * of deallocating the memory after its use, the memory buffer keeps it
8 * around for the next time. This way any interim fragmentation of the free
9 * large memory pool won't affect the next use.
10 *
11 * The driver implements a buffer each for dedicated purposes. Each of
12 * these buffers is identified by a number from membuffer_e.
13 *---------------------------------------------------------------------------
14 * Purpose of memory pools is to provide fast allocation methods for
15 * certain allocation patterns. They are most useful for scratchpad
16 * purposes where a largish number of small allocations need to be
17 * deallocated at once.
18 *
19 * Mempools: allocation of objects with identical time of death.
20 * Attempts to deallocate single objects have no effect.
21 *
 * Lifopools: allocation/deallocation of objects follows (more or less)
23 * a lifo pattern.
24 *
25 * TODO: A small-block pool, to manage lots of small blocks of equal size
26 * TODO:: without the overhead of smalloc. Initialized with the block size,
27 * TODO:: the number of initial blocks, and the number of blocks each
28 * TODO:: the pool has to grow. These blocks can be freed and are then
29 * TODO:: kept in a freelist for later reuse. To help compacting the pool
30 * TODO:: a method alloc_before(void *) would allocate a free small block
31 * TODO:: before the given one - if there is no such free block, the
32 * TODO:: method would return NULL. With this method a user of the
 * TODO:: pool can compact its data at the front of the pool blocks.
34 * TODO:: The last unused pool block is freed only if either another small
35 * TODO:: block is freed, or the method flush_free_pool_blocks() (or so)
36 * TODO:: is called.
37 *
 * Memory pools can be made dependent on other pools. This means that
39 * whenever the 'superior' pool is reset or deleted, all pools depending
40 * on this superior one are also reset or deleted. This is a
41 * 1:n relationship: one pool can have several pools depending on it, but
 * each pool can itself depend on only one superior pool.
43 *
44 * The implementation uses one 'mempool' structure to hold the organisational
45 * data of the mempool, and several 'memblock' structures providing the
46 * actual memory. Memory pool users only get the pointer to their pool (the
47 * internals of the mempool structure are private) and have to use it
48 * as a handle in all function calls. The memblock structures are used
49 * internally only.
50 *
51 * Every memory pool is created with an 'allocation size' as parameter.
52 * This size determines how much memory is available in every memblock.
53 * Since Mempools uses an immediate-fit strategy when allocating memory from
 * a pool, it is the responsibility of the caller to choose a block allocation
55 * size substantially larger than a typical allocation from the pool.
56 * It is possible to allocate memory larger than the allocation size from
 * the pool, in which case the mempool behaves like a malloc() with
58 * (semi)automatic free().
59 *
60 * The size_xxxpool() utility functions calculate a somewhat optimum
61 * pool allocation size based on the element size. The function tries to
62 * keep the allocation size under a certain limit in order to avoid running
63 * into large block fragmentation (because if that happens, we would be
64 * better off not using a memory pool at all).
65 *
66 * The memory allocated from a mempool is aligned to the size of union align
67 * (which is assumed to be a power of 2).
68 *---------------------------------------------------------------------------
69 */
70
71 #include "driver.h"
72
73 #include <assert.h>
74 #include <stddef.h>
75 #include <stdlib.h>
76 #include <sys/types.h>
77
78 #include "mempools.h"
79 #include "gcollect.h"
80 #ifdef DEBUG
81 #include "simulate.h"
82 #endif
83 #include "strfuns.h"
84 #include "svalue.h"
85 #include "xalloc.h"
86
87 #include "../mudlib/sys/debug_info.h"
88
89 /*=========================================================================*/
90 /* M E M B U F F E R S */
91
92 /*-------------------------------------------------------------------------*/
93
/* -- struct membuffer_s: one memory buffer -- */

typedef struct membuffer_s {
    void * mem;   /* The allocated memory (NULL when empty) */
    size_t size;  /* Size of the allocated memory in bytes */
} membuffer_t;

static membuffer_t membuffers[mbMax];
  /* The memory buffers, indexed by membuffer_e. Each buffer keeps its
   * memory around between uses so repeated large temporary allocations
   * don't fragment the free large-block pool.
   */
104
105 /*-------------------------------------------------------------------------*/
106 void
mb_init(void)107 mb_init (void)
108
109 /* Initialize the memory buffers.
110 */
111
112 {
113 int i;
114
115 for (i = 0; i < mbMax; i++)
116 {
117 membuffers[i].mem = NULL;
118 membuffers[i].size = 0;
119 }
120 } /* mb_init() */
121
122 /*-------------------------------------------------------------------------*/
123 void
mb_release(void)124 mb_release (void)
125
126 /* Free all memory buffers.
127 */
128
129 {
130 int i;
131
132 for (i = 0; i < mbMax; i++)
133 {
134 if (membuffers[i].mem != NULL)
135 xfree(membuffers[i].mem);
136 membuffers[i].mem = NULL;
137 membuffers[i].size = 0;
138 }
139 } /* mb_release() */
140
141 /*-------------------------------------------------------------------------*/
142 void *
mb_alloc(membuffer_e buf,size_t size)143 mb_alloc (membuffer_e buf, size_t size)
144
145 /* Allocate 'size' bytes of memory for buffer <buf>.
146 * Returns NULL when out of memory.
147 */
148
149 {
150 #ifdef DEBUG
151 if (buf < 0 || buf >= mbMax)
152 fatal("mb_alloc: Illegal buf# %d\n", buf);
153 #endif
154
155 if (membuffers[buf].size >= size)
156 return membuffers[buf].mem;
157
158 if (membuffers[buf].mem != NULL)
159 xfree(membuffers[buf].mem);
160
161 membuffers[buf].mem = xalloc(size);
162
163 if (membuffers[buf].mem != NULL)
164 membuffers[buf].size = size;
165 else
166 membuffers[buf].size = 0;
167
168 return membuffers[buf].mem;
169 } /* mb_alloc() */
170
171 /*-------------------------------------------------------------------------*/
172 void *
mb_realloc(membuffer_e buf,size_t size)173 mb_realloc (membuffer_e buf, size_t size)
174
175 /* Realloate the memory of buffer <buf> to hold <size> bytes, without
176 * losing the current content, and return the new pointer.
177 * Returns NULL when out of memory (the old memory block will be unaffected
178 * then).
179 */
180
181 {
182 void * mem;
183
184 #ifdef DEBUG
185 if (buf < 0 || buf >= mbMax)
186 fatal("mb_alloc: Illegal buf# %d\n", buf);
187 #endif
188
189 if (membuffers[buf].size >= size)
190 return membuffers[buf].mem;
191
192 if (membuffers[buf].mem != NULL)
193 mem = rexalloc(membuffers[buf].mem, size);
194 else
195 mem = xalloc(size);
196
197 if (mem != NULL)
198 {
199 membuffers[buf].mem = mem;
200 membuffers[buf].size = size;
201 }
202
203 return mem;
204 } /* mb_realloc() */
205
206 /*-------------------------------------------------------------------------*/
207 #ifdef GC_SUPPORT
208
209 void
mb_clear_refs(void)210 mb_clear_refs (void)
211
212 /* GC Support: Clear the refs of all memory associated with the
213 * memory buffers.
214 */
215
216 {
217 int i;
218
219 for (i = 0; i < mbMax; i++)
220 {
221 if (membuffers[i].mem != NULL)
222 clear_memory_reference(membuffers[i].mem);
223 }
224 } /* mb_clear_refs() */
225
226 void
mb_note_refs(void)227 mb_note_refs (void)
228
229 /* GC Support: Note the refs of all memory associated with the
230 * memory buffers.
231 */
232
233 {
234 int i;
235
236 for (i = 0; i < mbMax; i++)
237 {
238 if (membuffers[i].mem != NULL)
239 note_malloced_block_ref(membuffers[i].mem);
240 }
241 } /* mb_note_refs() */
242
243 #endif /* GC_SUPPORT */
244
245 /*-------------------------------------------------------------------------*/
246 size_t
mb_status(strbuf_t * sbuf,Bool verbose)247 mb_status (strbuf_t * sbuf, Bool verbose)
248
249 /* Gather (and optionally print) the statistics from the membuffers.
250 * Return the amount of memory used.
251 */
252
253 {
254 size_t res;
255 int i;
256
257 for (i = 0, res = 0; i < mbMax; i++)
258 res += membuffers[i].size;
259
260 #if defined(__MWERKS__) && !defined(WARN_ALL)
261 # pragma warn_largeargs off
262 #endif
263
264 /* In verbose mode, print the statistics */
265 if (verbose)
266 {
267 strbuf_add(sbuf, "\nMemory Buffers:\n");
268 strbuf_add(sbuf, "---------------\n");
269 strbuf_addf(sbuf, "File data: %8zu\n", membuffers[mbFile].size);
270 strbuf_addf(sbuf, "Swap buffer: %8zu\n", membuffers[mbSwap].size);
271 }
272 else
273 {
274 strbuf_addf(sbuf, "Memory buffers:\t\t\t\t %9zu\n", res);
275 }
276
277 return res;
278
279 #if defined(__MWERKS__)
280 # pragma warn_largeargs reset
281 #endif
282
283 } /* mb_status() */
284
285 /*-------------------------------------------------------------------------*/
void
mb_dinfo_status (svalue_t *svp, int value)

/* Return the memory buffer information for debug_info(DINFO_DATA, DID_STATUS).
 * <svp> points to the svalue block for the result, this function fills in
 * the spots for the memory buffers.
 * If <value> is -1, <svp> points indeed to a value block; otherwise it is
 * the index of the desired value and <svp> points to a single svalue.
 */

{
    /* Store <code> into result spot <which>, honouring the single-value
     * query mode described above.
     */
#define ST_NUMBER(which,code) \
    if (value == -1) svp[which].u.number = code; \
    else if (value == which) svp->u.number = code

    ST_NUMBER(DID_ST_MB_FILE, membuffers[mbFile].size);
    ST_NUMBER(DID_ST_MB_SWAP, membuffers[mbSwap].size);

#undef ST_NUMBER
} /* mb_dinfo_status() */
306
307 /*=========================================================================*/
308 /* M E M P O O L S */
309
310 /*-------------------------------------------------------------------------*/
311
/* --- union align: alignment structure ---
 * Combines several basic types in order to determine
 * the basic alignment size. This size must be a power of 2, assert()s will
 * check for that.
 */

union align {
    int i;
    long l;
    double d;
    void *p;
};

typedef union align align_t;

/* Round the value x up to the next integral of sizeof(union align).
 * The bitmask trick requires sizeof(align_t) to be a power of 2.
 */

#define ROUND(x) (((x)+sizeof(align_t)-1) & ~(sizeof(align_t)-1))
331
/* --- struct memblock_s: manages one block of memory ---
 * Used for the low-level allocation of memory, the various allocators
 * then work within the allocated arena.
 *
 * Mempools fill the arena from the end. pMark marks the break between
 * the (low) unused and (high) used memory, pointing to the first used
 * byte.
 *
 * Lifopools are like Mempools, with the addition that every used
 * block is prepended by a lifo_t structure (allocated to a multiple
 * of the align_t type). pMark points therefore at this structure of the
 * first used block. If a lifo-allocation is freed, it is just marked
 * as free (negative length) unless it is the block pMark points to.
 * In that case, pMark is moved up until it points to a used block,
 * retrieving previously freed blocks as well. When a memblock is
 * totally free, the pool will put it into a freelist for later re-use.
 *
 * The union construct makes sure that data[] is aligned properly.
 */

typedef struct memblock_s * Memblock;

struct memblock_s {
    Memblock pNext;   /* Next memblock in list */
    char *pMark;      /* End of unused arena (first used byte) */
    size_t length;    /* Total size of this memblock, header included */
    union {
        align_t a;    /* Forces proper alignment of data[] */
        char data[1]; /* Placeholder for data arena */
    } u;
};

#define MEMBLOCK_LIMIT (128)
  /* Maximum size for the userspace of a memory block.
   * Using blocks larger than this is likely to run into fragmentation
   * of the large block heap.
   */

/* --- struct lifo_s: headblock for a lifo-type allocation ---
 *
 * One of these structures, allocated to a multiple of align_t, is
 * prepended to every lifopool allocation. Additionally, the end
 * of every memory block is marked with a 'used' structure as sentinel.
 */

typedef struct lifo_s lifo_t;

struct lifo_s {
    ssize_t length;  /* Size of this block, including this structure.
                      * Positive for allocated blocks, negative for free ones.
                      * (The sentinel uses the pseudo-used length 1.)
                      */
    Memblock pBlock; /* Backpointer to the memoryblock holding this block */
};

/* Size of a lifo_t header, rounded up to the alignment unit. */
#define SIZEOF_LIFO_T ROUND(sizeof(lifo_t))
387
388 /*=========================================================================*/
389
390 /*-------------------------------------------------------------------------*/
/* Memory pool types
 */

enum pooltype_u {
    MEMPOOL = 0,  /* All allocations die together; single frees are no-ops */
    LIFOPOOL      /* Allocations are freed in (roughly) lifo order */
};

typedef enum pooltype_u pooltype_t;

/*-------------------------------------------------------------------------*/
/* struct mempool_s: the organisational structure of every memory pool.
 */

struct mempool_s {
    pooltype_t type;    /* The type of this pool */
    size_t iAllocSize;  /* Typical alloc size of a memory block */
    Memblock pBlocks;   /* List of memory blocks
                         * It is guaranteed that at least one memory block
                         * exists. */
    Memblock pFree;     /* Lifopools: List of unused memory blocks */
    Mempool pSuper;     /* The pool this one depends on */
    Mempool pSubPools;  /* List of depending pools */
    Mempool pNextSub;   /* Next pool in the dependee list */
};
416
417 /*-------------------------------------------------------------------------*/
418 static INLINE size_t
size_pool(size_t elemsize,size_t o_size)419 size_pool (size_t elemsize, size_t o_size)
420
421 /* Return the userspace size for a memblock suitable to hold objects of
422 * size <elemsize>, taking into account an per-element overhead of <o_size>
423 * and the maximum memblock size.
424 * The result can be passed as 'size' parameter to new_pool() functions.
425 */
426
427 {
428 size_t esize = (ROUND(elemsize) + o_size);
429 unsigned int num;
430
431 num = MEMBLOCK_LIMIT / esize;
432 if (num < 1)
433 num = 1;
434 return num * esize;
435 } /* size_pool() */
436
437 /*-------------------------------------------------------------------------*/
size_t
size_mempool (size_t elemsize)

/* Return the userspace size for a mempool suitable to hold objects of
 * size <elemsize>, taking into account the maximum memblock size.
 * (Mempool allocations carry no per-element overhead.)
 * The result can be passed as 'size' parameter to new_mempool().
 */

{
    return size_pool(elemsize, 0);
} /* size_mempool() */
450
451 /*-------------------------------------------------------------------------*/
size_t
size_lifopool (size_t elemsize)

/* Return the userspace size for a lifopool suitable to hold objects of
 * size <elemsize>, taking into account the per-element overhead of the
 * lifo_t header and the maximum memblock size.
 * The result can be passed as 'size' parameter to new_lifopool().
 */

{
    return size_pool(elemsize, SIZEOF_LIFO_T);
} /* size_lifopool() */
464
465 /*-------------------------------------------------------------------------*/
466 static INLINE Mempool
new_pool(size_t iSize,pooltype_t type)467 new_pool (size_t iSize, pooltype_t type)
468
469 /* Create a new memory pool of <type> for a typical allocation size of <iSize>
470 * bytes per memory block and prepare
471 * Result is the pointer to the mempool structure, or NULL if an error
472 * occurs.
473 */
474
475 {
476 Mempool pPool;
477 struct memblock_s * pBlock;
478
479 assert(iSize > 0);
480 assert(sizeof(union align) == (sizeof(union align) & ~(sizeof(union align)-1)));
481 /* If this assert fails, sizeof(union align) is not a power of 2 */
482
483 /* Round iSize up to the next integral of sizeof(union align) */
484 iSize = ROUND(iSize);
485
486 pPool = xalloc(sizeof(*pPool));
487 if (!pPool)
488 return NULL;
489 /* There must be at least one memblock in the list! */
490
491 pBlock = xalloc(sizeof(*pBlock) - sizeof(pBlock->u) + iSize);
492 if (!pBlock)
493 {
494 xfree(pPool);
495 return NULL;
496 }
497 pBlock->pNext = NULL;
498 pBlock->length = sizeof(*pBlock) - sizeof(pBlock->u) + iSize;
499 pBlock->pMark = pBlock->u.data + iSize;
500
501 /* Setup the pool */
502 pPool->type = type;
503 pPool->iAllocSize = iSize;
504 pPool->pBlocks = pBlock;
505 pPool->pFree = NULL;
506 pPool->pSuper = NULL;
507 pPool->pSubPools = NULL;
508
509 return pPool;
510 } /* new_pool() */
511
512 /*-------------------------------------------------------------------------*/
Mempool
new_mempool (size_t iSize)

/* Create a new Mempool for a typical allocation size of <iSize>
 * bytes per memory block, with its first memblock already allocated.
 * Result is the pointer to the mempool structure, or NULL if an error
 * occurs.
 */

{
    return new_pool(iSize, MEMPOOL);
} /* new_mempool() */
525
526 /*-------------------------------------------------------------------------*/
527 Mempool
new_lifopool(size_t iSize)528 new_lifopool (size_t iSize)
529
530 /* Create a new Lifopool for a typical allocation size of <iSize>
531 * bytes per memory block and prepare
532 * Result is the pointer to the mempool structure, or NULL if an error
533 * occurs.
534 */
535
536 {
537 Mempool pPool;
538
539 iSize += SIZEOF_LIFO_T; /* Include space for the sentinel block */
540
541 pPool = new_pool(iSize, LIFOPOOL);
542 if (pPool)
543 {
544 /* Add a sentinel (pseudo-used block) at the end of the arena.
545 */
546 struct memblock_s * pBlock = pPool->pBlocks;
547 lifo_t *p = (lifo_t *)(pBlock->pMark - SIZEOF_LIFO_T);
548
549 p->length = 1;
550 p->pBlock = pBlock;
551
552 /* Update the pMark pointer */
553 pBlock->pMark = (char *)p;
554 }
555 return pPool;
556 } /* new_lifopool() */
557
558 /*-------------------------------------------------------------------------*/
559 void
mempool_depend_on(Mempool pSub,Mempool pSuper)560 mempool_depend_on (Mempool pSub, Mempool pSuper)
561
562 /* Memory pool pSub is made a dependee of pool pSuper.
563 * If pSub is a dependee of some pool already or if the pooltypes differ,
564 * an assertion is raised.
565 */
566
567 {
568 assert(pSub->pSuper == NULL);
569 assert(pSub->type == pSuper->type);
570
571 pSub->pSuper = pSuper;
572 pSub->pNextSub = pSuper->pSubPools;
573 pSuper->pSubPools = pSub;
574 } /* mempool_depend_on() */
575
576 /*-------------------------------------------------------------------------*/
static INLINE void *
alloc_from_pool (Mempool pPool, size_t iSize)

/* Allocate <iSize> bytes of memory from the mempool <pPool>.
 * Return a pointer to the allocated memory (it is at least aligned to
 * the size of a ALIGNTYPE), or NULL on failure.
 *
 * Within the memblocks, the memory is allocated from the end of
 * the allocated memory.
 */

{
    Memblock pBlock;

    /* Round iSize up to the next integral of sizeof(ALIGNTYPE) */
    iSize = ROUND(iSize);

    /* If it is a big block, allocate it directly and insert
     * it directly _after_ the current 'normal sized' memblock,
     * so the current memblock keeps receiving the normal allocations.
     */

    if (iSize >= pPool->iAllocSize)
    {
        assert(pPool->pBlocks != NULL); /* just in case */

        pBlock = xalloc(sizeof(*pBlock)-sizeof(pBlock->u)+iSize);
        if (pBlock == NULL)
            return NULL;
        pBlock->length = sizeof(*pBlock)-sizeof(pBlock->u)+iSize;
        /* The whole arena is used at once: pMark == start of data */
        pBlock->pMark = pBlock->u.data;
        pBlock->pNext = pPool->pBlocks->pNext;
        pPool->pBlocks->pNext = pBlock;
        return (void *)(pBlock->u.data);
    }

    /* Normal iSizes are always allocated from the first memblock
     * in the pBlock list. If the current memblock has not enough
     * memory left, a new one is allocated.
     */

    pBlock = pPool->pBlocks;
    if ((ptrdiff_t)iSize > pBlock->pMark - pBlock->u.data)
    {
        pBlock = xalloc( sizeof(*pBlock)-sizeof(pBlock->u)
                        + pPool->iAllocSize);
        if (pBlock == NULL)
            return NULL;
        pBlock->length = sizeof(*pBlock)-sizeof(pBlock->u)
                         + pPool->iAllocSize;
        pBlock->pMark = pBlock->u.data + pPool->iAllocSize;
        pBlock->pNext = pPool->pBlocks;
        pPool->pBlocks = pBlock;
    }

    /* pBlock now points to a memblock with enough memory left.
     * Allocate the desired chunk from the end of the .data[] array.
     */

    pBlock->pMark -= iSize;
    return (void *)(pBlock->pMark);

} /* alloc_from_pool() */
639
640 /*-------------------------------------------------------------------------*/
static INLINE void *
alloc_from_lifo (Mempool pPool, size_t iSize)

/* Allocate <iSize> bytes of memory from the lifopool <pPool>.
 * Return a pointer to the allocated memory (it is at least aligned to
 * the size of a ALIGNTYPE), or NULL on failure.
 *
 * Within the memblocks, the memory is allocated from the end of
 * the allocated memory. Every returned chunk is preceded by a lifo_t
 * header recording its length and owning memblock.
 */

{
    Memblock pBlock;
    lifo_t *pLifo;

    /* Round iSize up to the next integral of sizeof(ALIGNTYPE);
     * from here on iSize includes the space for the lifo_t header.
     */
    iSize = ROUND(iSize + SIZEOF_LIFO_T);

    /* If it is a big block, allocate it directly and insert
     * it directly _after_ the current 'normal sized' memblock.
     */

    if (iSize >= pPool->iAllocSize)
    {
        assert(pPool->pBlocks != NULL); /* just in case */

        /* The extra SIZEOF_LIFO_T makes room for the sentinel. */
        pBlock = xalloc(sizeof(*pBlock)-sizeof(pBlock->u)
                        +iSize+SIZEOF_LIFO_T);
        if (pBlock == NULL)
            return NULL;
        pBlock->length = sizeof(*pBlock)-sizeof(pBlock->u)
                         +iSize+SIZEOF_LIFO_T;
        pBlock->pMark = pBlock->u.data;
        pBlock->pNext = pPool->pBlocks->pNext;
        pPool->pBlocks->pNext = pBlock;

        /* Write the lifo_t for the allocated block */
        pLifo = (lifo_t *)pBlock->pMark;
        pLifo->length = (ssize_t)iSize;
        pLifo->pBlock = pBlock;

        /* Write the sentinel */
        pLifo = (lifo_t *)(pBlock->pMark+iSize);
        pLifo->length = 1;
        pLifo->pBlock = pBlock;

        /* Return the address after the header */
        return (void *)(pBlock->u.data+SIZEOF_LIFO_T);
    }

    /* Normal iSizes are always allocated from the first memblock
     * in the pBlock list. If the current memblock has not enough
     * memory left, a new one is allocated.
     */
    pBlock = pPool->pBlocks;
    if ((ptrdiff_t)iSize > pBlock->pMark - pBlock->u.data)
    {
        /* If there are blocks in the freelist, use those first;
         * otherwise allocate a new one.
         */
        if (pPool->pFree)
        {
            pBlock = pPool->pFree;
            pPool->pFree = pBlock->pNext;
            /* A freelist block still carries its sentinel, and its
             * pMark already points at it.
             */
        }
        else
        {
            pBlock = xalloc( sizeof(*pBlock)-sizeof(pBlock->u)
                            + pPool->iAllocSize);
            if (pBlock == NULL)
                return NULL;
            pBlock->length = sizeof(*pBlock)-sizeof(pBlock->u)
                             + pPool->iAllocSize;
            pBlock->pMark = pBlock->u.data + pPool->iAllocSize;

            /* For lifopools, add a sentinel (pseudo-used block) at the end
             * of the arena.
             */
            pLifo = (lifo_t *)(pBlock->pMark-SIZEOF_LIFO_T);

            pLifo->length = 1;
            pLifo->pBlock = pBlock;

            /* Update the pMark pointer */
            pBlock->pMark = (char *)pLifo;
        }

        /* Link the block into the list of used blocks */
        pBlock->pNext = pPool->pBlocks;
        pPool->pBlocks = pBlock;
    }

    /* pBlock now points to a memblock with enough memory left.
     * Allocate the desired chunk from the end of the .data[] array.
     */
    pBlock->pMark -= iSize;

    /* Put in the lifo_t structure and
     * return the address after the structure.
     */
    pLifo = (lifo_t *)pBlock->pMark;

    pLifo->length = (ssize_t)iSize;
    pLifo->pBlock = pBlock;

    return (void *)(pBlock->pMark + SIZEOF_LIFO_T);

} /* alloc_from_lifo() */
748
749 /*-------------------------------------------------------------------------*/
750 void *
mempool_alloc(Mempool pPool,size_t iSize)751 mempool_alloc (Mempool pPool, size_t iSize)
752
753 /* Allocate <iSize> bytes of memory from the pool <pPool>.
754 * Return a pointer to the allocated memory (it is at least aligned to
755 * the size of a ALIGNTYPE), or NULL on failure.
756 *
757 * Within the memblocks, the memory is allocated from the end of
758 * the allocated memory.
759 */
760
761 {
762 assert(pPool != NULL);
763 assert(iSize < LONG_MAX);
764
765 if (pPool->type == LIFOPOOL)
766 return alloc_from_lifo(pPool, iSize);
767
768 return alloc_from_pool(pPool, iSize);
769 } /* mempool_alloc() */
770
771 /*-------------------------------------------------------------------------*/
void
mempool_free (Mempool pPool, void * adr)

/* Return the block allocated at <adr> to the pool <pPool>.
 * This is a noop for mempools, but (lazily) returns memory to a lifopool.
 */

{
    Memblock pBlock;
    lifo_t * pLifo;
    ssize_t length;

    assert(pPool != NULL);
    assert(adr != NULL);

    /* Plain mempools don't support freeing single allocations. */
    if (LIFOPOOL != pPool->type)
        return;

    /* Get the lifo_t structure and its data */
    pLifo = (lifo_t *)((char *)adr - SIZEOF_LIFO_T);
    assert(pLifo->length > 1); /* must be a used block, not the sentinel */
    pBlock = pLifo->pBlock;
    length = pLifo->length;

    /* Mark the block as unused */
    pLifo->length = -length;

    /* If this newly freed block happens to be the first free block in the
     * memblock, return it and all following free blocks to the free
     * arena of the memblock.
     */
    if ((char *)pLifo == pBlock->pMark)
    {
        /* Loop invariant: pMark == pLifo.
         * Since free lengths are negative, subtracting them moves
         * pMark upwards. The loop stops at a used block, at latest
         * at the sentinel (length == 1) at the arena end.
         */
        while (pLifo->length < 0)
        {
            pBlock->pMark = pBlock->pMark - pLifo->length;
            pLifo = (lifo_t *)pBlock->pMark;
        }
    }

    /* If the leading memblock(s) of the pool are completely free,
     * move them over into the free list.
     */
    if (pBlock == pPool->pBlocks)
    {
        while (pBlock->pNext != NULL
            && pBlock->pMark - (char*)&(pBlock->u)
               >= (ptrdiff_t)(pPool->iAllocSize - SIZEOF_LIFO_T))
        {
            pPool->pBlocks = pBlock->pNext;
            pBlock->pNext = pPool->pFree;
            pPool->pFree = pBlock;
            pBlock = pPool->pBlocks;
        }
    }

    /* That's it */
} /* mempool_free() */
831
832 /*-------------------------------------------------------------------------*/
void
mempool_reset (Mempool pPool)

/* Free all memory allocated from the pool <pPool>, but leave the pool
 * around for further usage. If the pool has dependees, they are reset
 * recursively.
 */

{
    Memblock pBlock;

    assert(pPool != NULL);
    assert(pPool->pBlocks != NULL); /* just in case */


    /* Deallocate all memblocks but the first one */

    pBlock = pPool->pBlocks->pNext;
    while (pBlock != NULL)
    {
        Memblock pThis = pBlock;

        pBlock = pBlock->pNext;
        xfree(pThis);
    }

    /* Deallocate the memblocks kept in the freelist (lifopools only) */
    pBlock = pPool->pFree;
    while (pBlock != NULL)
    {
        Memblock pThis = pBlock;

        pBlock = pBlock->pNext;
        xfree(pThis);
    }
    pPool->pFree = NULL;

    /* Reinitialise the first (and now only) memblock.
     * For lifopools, pMark is set back onto the sentinel written at
     * creation time, which is still intact at the arena end.
     */

    pBlock = pPool->pBlocks;
    pBlock->pMark = pBlock->u.data + pPool->iAllocSize
                    - (LIFOPOOL == pPool->type ? SIZEOF_LIFO_T : 0);
    pBlock->pNext = NULL;

    /* Reset all depending pools */

    for (pPool = pPool->pSubPools; pPool != NULL; pPool = pPool->pNextSub)
    {
        mempool_reset(pPool);
    }
} /* mempool_reset() */
883
884 /*-------------------------------------------------------------------------*/
void
mempool_delete (Mempool pPool)

/* Delete the pool <pPool>, all memory allocated from it, and all
 * depending pools. <pPool> is invalid after the function returns.
 */

{
    Memblock pBlock;
    Mempool pSubPool;

    assert(pPool != NULL);

    /* Free all memblocks */

    pBlock = pPool->pBlocks;
    while (pBlock != NULL)
    {
        Memblock pThis = pBlock;

        pBlock = pBlock->pNext;
        xfree(pThis);
    }

    /* Free the memblocks kept in the freelist (lifopools only) */
    pBlock = pPool->pFree;
    while (pBlock != NULL)
    {
        Memblock pThis = pBlock;

        pBlock = pBlock->pNext;
        xfree(pThis);
    }

    /* If this pool is a dependee, delete it from the list */

    if (pPool->pSuper != NULL)
    {
        Mempool pPrev, pNext;

        pPrev = pPool->pSuper;
        pNext = pPrev->pSubPools;

        if (pPrev->pSubPools == pPool)
        {
            /* This pool heads the subpool list: unlink directly */
            pPrev->pSubPools = pPool->pNextSub;
        }
        else
        {
            /* Scan the subpool list for this pool and unlink it */
            for (; pNext != NULL && pNext != pPool
                 ; pPrev = pNext, pNext = pNext->pNextSub
                ) /* SKIP */;
            if (pNext == pPool)
                pPrev->pNextSub = pPool->pNextSub;
        }
    }

    /* Delete all depending pools, but take care that those
     * pools don't start to scan our subpool list.
     */

    for (pSubPool = pPool->pSubPools; pSubPool != NULL; )
    {
        Mempool pThis = pSubPool;
        pSubPool = pSubPool->pNextSub;
        pThis->pSuper = NULL; /* ! keeps pThis from unlinking itself
                               * from our (doomed) subpool list */
        mempool_delete(pThis);
    }

    /* Finally, deallocate this pool */

    xfree(pPool);
} /* mempool_delete() */
957
958 /*-------------------------------------------------------------------------*/
959 size_t
mempool_size(Mempool pPool)960 mempool_size (Mempool pPool)
961
962 /* Return the total size of the mempool <pPool>.
963 */
964
965 {
966 Memblock pBlock;
967 size_t size;
968
969 size = sizeof(*pPool);
970
971 for (pBlock = pPool->pBlocks; pBlock; pBlock = pBlock->pNext)
972 size += pBlock->length;
973 for (pBlock = pPool->pFree; pBlock; pBlock = pBlock->pNext)
974 size += pBlock->length;
975
976 return size;
977
978 } /* mempool_size() */
979
980 /*-------------------------------------------------------------------------*/
981 #ifdef GC_SUPPORT
982
983 void
mempool_clear_refs(Mempool pPool)984 mempool_clear_refs (Mempool pPool)
985
986 /* GC Support: Clear the refs of all memory associated with <pPool> and
987 * its dependees.
988 */
989
990 {
991 Memblock pBlock;
992
993 clear_memory_reference(pPool);
994
995 for (pBlock = pPool->pBlocks; pBlock; pBlock = pBlock->pNext)
996 clear_memory_reference(pBlock);
997 for (pBlock = pPool->pFree; pBlock; pBlock = pBlock->pNext)
998 clear_memory_reference(pBlock);
999
1000 for (pPool = pPool->pSubPools; pPool != NULL; pPool = pPool->pNextSub)
1001 mempool_clear_refs(pPool);
1002
1003 } /* mempool_clear_refs() */
1004
1005 void
mempool_note_refs(Mempool pPool)1006 mempool_note_refs (Mempool pPool)
1007
1008 /* GC Support: Note the refs of all memory associated with <pPool> and
1009 * its dependees.
1010 */
1011
1012 {
1013 Memblock pBlock;
1014
1015 note_malloced_block_ref(pPool);
1016
1017 for (pBlock = pPool->pBlocks; pBlock; pBlock = pBlock->pNext)
1018 note_malloced_block_ref(pBlock);
1019 for (pBlock = pPool->pFree; pBlock; pBlock = pBlock->pNext)
1020 note_malloced_block_ref(pBlock);
1021
1022 for (pPool = pPool->pSubPools; pPool != NULL; pPool = pPool->pNextSub)
1023 mempool_note_refs(pPool);
1024
1025 } /* mempool_note_refs() */
1026
1027 #endif /* GC_SUPPORT */
1028
1029 /*-------------------------------------------------------------------------*/
1030 /*=========================================================================*/
1031
1032 /***************************************************************************/
1033
1034