/* $Id: ncbi_heapmgr.c 604960 2020-04-05 16:23:08Z lavr $
 * ===========================================================================
 *
 *                            PUBLIC DOMAIN NOTICE
 *               National Center for Biotechnology Information
 *
 * This software/database is a "United States Government Work" under the
 * terms of the United States Copyright Act.  It was written as part of
 * the author's official duties as a United States Government employee and
 * thus cannot be copyrighted.  This software/database is freely available
 * to the public for use.  The National Library of Medicine and the U.S.
 * Government have not placed any restriction on its use or reproduction.
 *
 * Although all reasonable efforts have been taken to ensure the accuracy
 * and reliability of the software and data, the NLM and the U.S.
 * Government do not and cannot warrant the performance or results that
 * may be obtained by using this software or data.  The NLM and the U.S.
 * Government disclaim all warranties, express or implied, including
 * warranties of performance, merchantability or fitness for any particular
 * purpose.
 *
 * Please cite the author in any work or product based on this material.
 *
 * ===========================================================================
 *
 * Author:  Anton Lavrentiev
 *
 * Abstract:
 *
 * This is a simple heap manager with a primitive garbage collection.
 *
 * The heap contains blocks of data, stored in a common contiguous pool, each
 * block preceded with a SHEAP_Block structure.  The value of 'flag' is odd
 * (LSB is set) when the block is in use, or even (LSB is 0) when the block
 * is vacant.  'Size' shows the length of the block in bytes, whose
 * (uninterpreted) data field extends past the header (the header size IS
 * counted in the size of the block).
 *
 * When 'HEAP_Alloc' is called, the return value is either a heap pointer,
 * which points to the block header, marked as allocated and guaranteed to
 * have enough space to hold the requested data size;  or 0, meaning that the
 * heap has no more room to provide such a block (possible reasons:  the heap
 * is corrupt, the heap has no provision to be expanded, expansion failed, or
 * the heap was attached read-only).
 *
 * An application program can then use the data field as needed, provided
 * that it does not exceed the size limit.  The current block header can be
 * used to find the next heap block with the use of the 'size' member
 * (note, however, some restrictions below).
 *
 * The application program should NOT keep the returned block pointer, as
 * garbage collection can occur on the next allocation attempt, thus making
 * any heap pointers invalid.  Instead, the application program can keep
 * track of the heap base (the header of the very first heap block - see
 * 'HEAP_Create') and the size of the heap, and can traverse the heap by
 * this means, or with a call to 'HEAP_Walk' (described below).
 *
 * While traversing, if the block found is no longer needed, it can be freed
 * with a 'HEAP_Free' call, supplying the address of the block header as an
 * argument.
 *
 * Prior to the heap use, initialization is required, which comprises a call
 * to either 'HEAP_Create' or 'HEAP_Attach' with the information about the
 * base heap pointer.  'HEAP_Create' also takes the size of the initial heap
 * area (if there is one), and the size of the chunk (usually, a page size)
 * to be used in heap expansions (defaults to alignment if provided as 0).
 * Additionally (but optionally) the application program can provide the
 * heap manager with a 'resize' routine, which gets called when no more room
 * is available in the heap, or when the heap has not been preallocated
 * (base = 0 in 'HEAP_Create');  the routine is given the arguments:
 * - current heap base address (or 0 if this is the very first heap alloc),
 * - new required heap size (or 0 if this is the last call to deallocate
 *   the entire heap).
 * If successful, the resize routine must return the new heap base address
 * (if any) of the expanded heap area, at which an exact copy of the current
 * heap contents has been preserved.
 *
 * Note that all heap base pointers must be aligned on a 'double' boundary.
 * Please also be warned not to store pointers into the heap area, as
 * garbage collection can clobber them.  Within a block, however, it is
 * possible to use local pointers (offsets), which remain the same
 * regardless of garbage collections.
 *
 * For automatic traversal purposes there is a 'HEAP_Walk' call, which
 * returns the next block (either free or used) from the heap.  Given a
 * NULL pointer, this function returns the very first block, whereas all
 * subsequent calls with the argument being the last observed block result
 * in the next block returned.  NULL comes back when no more blocks exist
 * in the heap.
 *
 * Note that for proper heap operations no allocation(s) should happen
 * between successive calls to 'HEAP_Walk', whereas deallocation of the
 * last seen block is okay.
 *
 * Explicit heap traversal must not go past the heap limit, as any
 * information outside is not maintained by the heap manager.  Every heap
 * operation guarantees that there are no adjacent free blocks;  only used
 * blocks can follow each other sequentially.
 *
 * To discontinue use of the heap, 'HEAP_Destroy' or 'HEAP_Detach' can be
 * called.  The former deallocates the heap (by means of a call to 'resize');
 * the latter just removes the heap handle, retaining the heap data intact.
 * Later, such a heap can be used again if attached with 'HEAP_Attach'.
 *
 * Note that an attached heap is always in read-only mode;  that is, nothing
 * can be allocated and/or freed in that heap, and an attempt to call
 * 'HEAP_Destroy' will not actually touch any heap data (but destroy the
 * handle only).
 *
 * Note also that 'HEAP_Create' always resets the heap;  that is, the memory
 * area pointed to by 'base' (if not 0) gets reformatted and loses all of its
 * previous contents.
 *
 */
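
/* Example (an illustrative sketch, not part of the original module):  basic
 * use of the API described above, with a plain realloc(3)-backed expansion
 * callback.  The names "s_Resize" and "x_DemoHeap" are hypothetical, and the
 * FHEAP_Resize signature is assumed as used by this module.
 *
 *    static void* s_Resize(void* base, TNCBI_Size size, void* unused)
 *    {
 *        if (!size) {
 *            free(base);               // final call:  drop the entire heap
 *            return 0;
 *        }
 *        return realloc(base, size);   // expand, preserving heap contents
 *    }
 *
 *    static void x_DemoHeap(void)
 *    {
 *        HEAP heap = HEAP_Create(0, 0, 4096, s_Resize, 0);
 *        SHEAP_Block* b;
 *        if (!heap)
 *            return;
 *        if ((b = HEAP_Alloc(heap, 100, 0)) != 0)
 *            memset((char*) b + sizeof(*b), 0, 100);  // use the data field
 *        // Traverse the heap (NB: no allocations in-between!)
 *        for (b = HEAP_Walk(heap, 0);  b;  b = HEAP_Walk(heap, b)) {
 *            if (b->flag & 1)          // LSB set:  block is in use
 *                HEAP_Free(heap, b);   // freeing the seen block is okay
 *        }
 *        verify(HEAP_Destroy(heap) == 0);
 *    }
 */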

#include "ncbi_priv.h"
#include <connect/ncbi_heapmgr.h>
#include <stdlib.h>
#include <string.h>

#define NCBI_USE_ERRCODE_X   Connect_HeapMgr

#if defined(NCBI_OS_MSWIN)  &&  defined(_WIN64)
/* Disable ptr->long conversion warning (even on explicit cast!) */
#  pragma warning (disable : 4311)
#endif /*NCBI_OS_MSWIN && _WIN64*/

#ifdef   abs
#  undef abs
#endif
#define  abs(a)  ((a) < 0 ? (a) : -(a))

#ifdef NCBI_OS_LINUX
#  if NCBI_PLATFORM_BITS == 64
#    ifdef __GNUC__
#      define HEAP_PACKED  __attribute__((packed))
#    elif defined(_CRAYC)
#      define HEAP_PACKED /* */
#    else
#      error "Don't know how to pack on this 64-bit platform"
#    endif
#  else
#    define HEAP_PACKED /* */
#  endif
#else
#  define HEAP_PACKED /* */
#endif
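
/* NB: the packing above (where supported) is presumably to pin
 * sizeof(SHEAP_HeapBlock) at exactly HEAP_EXTENT(1) == 16 bytes, which
 * HEAP_Create() and HEAP_AttachFast() assert below. */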


/* Heap's own block view */
typedef struct HEAP_PACKED {
    SHEAP_Block head;      /* Block head, a free block has the following: */
    TNCBI_Size  prevfree;  /* Heap index for prev (smaller) free block    */
    TNCBI_Size  nextfree;  /* Heap index for next (bigger) free block     */
} SHEAP_HeapBlock;


struct SHEAP_tag {
    SHEAP_HeapBlock* base;    /* Base of heap extent:         !base == !size */
    TNCBI_Size       size;    /* # blocks in the heap extent: !base == !size */
    TNCBI_Size       used;    /* # of blocks used (size - used = free blocks)*/
    TNCBI_Size       free;    /* Index of the largest free block (OOB=none)  */
    TNCBI_Size       last;    /* Index of the last block (RW heap, else OOB) */
    TNCBI_Size       chunk;   /* Aligned (bytes);  0 when the heap is RO     */
    FHEAP_Resize     resize;  /* != NULL when resizeable (RW heap only)      */
    void*            auxarg;  /* Auxiliary argument to pass to "resize"      */
    unsigned int     refcnt;  /* Reference count (for heap copy, 0=original) */
    int              serial;  /* Serial number as assigned by (Attach|Copy)  */
};


static int/*bool*/ s_HEAP_fast = 1/*true*/;


#define _HEAP_ALIGN_EX(a, b)  ((((unsigned long)(a) + ((b) - 1)) / (b)) * (b))
#define _HEAP_ALIGN_2(a, b)   (( (unsigned long)(a) + ((b) - 1))            \
                               & (unsigned long)(~((b) - 1)))
#define _HEAP_SIZESHIFT       4
#define HEAP_BLOCKS(s)        ((s) >> _HEAP_SIZESHIFT)
#define HEAP_EXTENT(b)        ((b) << _HEAP_SIZESHIFT)
#define HEAP_ALIGN(a)         _HEAP_ALIGN_2(a, HEAP_EXTENT(1))
#define HEAP_MASK             (~(HEAP_EXTENT(1) - 1))
#define HEAP_PREV_BIT         8
#define HEAP_NEXT_BIT         4
#define HEAP_LAST             2
#define HEAP_USED             1
#define HEAP_SIZE(s)          ((s) & (unsigned long) HEAP_MASK)
#define HEAP_FLAG(s)          ((s) & (unsigned long)(~HEAP_MASK))
#define HEAP_NEXT(b)          ((SHEAP_HeapBlock*)                           \
                               ((char*)(b) + (b)->head.size))
#define HEAP_PREV(b)          ((SHEAP_HeapBlock*)                           \
                               ((char*)(b) - HEAP_SIZE((b)->head.flag)))
#define HEAP_INDEX(b, base)   ((TNCBI_Size)((b) - (base)))
#define HEAP_ISLAST(b)        ((b)->head.flag & HEAP_LAST)
#define HEAP_ISUSED(b)        ((b)->head.flag & HEAP_USED)
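
/* Worked example of the block layout:  a used block holding 5 bytes of user
 * data occupies HEAP_ALIGN(sizeof(SHEAP_Block) + 5) == 16 bytes, so its
 * "size" member is 16;  if it is also the heap's last block, its "flag" has
 * (HEAP_USED | HEAP_LAST) == 3 set in the low nibble. */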

#define HEAP_CHECK(heap)                                            \
    assert(!heap->base == !heap->size);                             \
    assert(heap->used <= heap->size);                               \
    assert(heap->free <= heap->size);                               \
    assert(heap->last <= heap->size);                               \
    assert(heap->used == heap->size  ||  heap->free < heap->size)

#if ~HEAP_MASK != (HEAP_PREV_BIT | HEAP_NEXT_BIT | HEAP_LAST | HEAP_USED)  \
    ||  HEAP_BLOCKS(~HEAP_MASK) != 0
#  error "HEAP_MASK is invalid!"
#endif


#if 0 /*FIXME*/
/* Performance / integrity improvements:
 * 1. flag is to keep the byte size of the previous block (instead of the
 *    magic);
 * 2. since sizes always have the last nibble zero, use it in the flag field
 *    as follows:
 *    bit 0 -- set for used, unset for a free block;
 *    bit 1 -- set for the last block in the heap.
 * 3. bits 2 & 3 can be used as parity checks for each size to compensate
 *    for the discontinued use of the magic value (at least for used blocks).
 * With this scheme, block freeing will no longer need a lookup.
 * Yet keeping the current size clean will still allow fast forward moves.
 */

#ifdef __GNUC__
inline
#endif /*__GNUC__*/
static int/*bool*/ x_Parity(unsigned int v)
{
#if 0
    v ^=  v >> 1;
    v ^=  v >> 2;
    v  = (v & 0x11111111U) * 0x11111111U;
    return (v >> 28) & 1;
#else
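    /* Fold the parity of all 32 bits into the low nibble, then look it up
     * in the constant 0x6996 (binary 0110 1001 1001 0110), whose bit N
     * holds the parity of the nibble value N */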
    v ^= v >> 16;
    v ^= v >>  8;
    v ^= v >>  4;
    v &= 0xF;
    return (0x6996 >> v) & 1;
#endif
}


#ifdef __GNUC__
inline
#endif /*__GNUC__*/
static unsigned int x_NextBit(TNCBI_Size size)
{
    return x_Parity(size) ? HEAP_NEXT_BIT : 0;
}


#ifdef __GNUC__
inline
#endif /*__GNUC__*/
static unsigned int x_PrevBit(TNCBI_Size size)
{
    return x_Parity(size) ? HEAP_PREV_BIT : 0;
}
#endif /*0*/


extern HEAP HEAP_Create(void* base,       TNCBI_Size size,
                        TNCBI_Size chunk, FHEAP_Resize resize, void* auxarg)
{
    HEAP heap;

    assert(HEAP_EXTENT(1) == sizeof(SHEAP_HeapBlock));
    assert(_HEAP_ALIGN_EX(1, sizeof(SHEAP_Block)) ==
           _HEAP_ALIGN_2 (1, sizeof(SHEAP_Block)));

    if (!base != !size)
        return 0;
    if (size  &&  size < HEAP_EXTENT(1)) {
        CORE_LOGF_X(1, eLOG_Error,
                    ("Heap Create: Storage too small:"
                     " provided %u, required %u+",
                     size, HEAP_EXTENT(1)));
        return 0;
    }
    if (!(heap = (HEAP) malloc(sizeof(*heap))))
        return 0;
    heap->base   = (SHEAP_HeapBlock*) base;
    heap->size   = HEAP_BLOCKS(size);
    heap->used   = 0;
    heap->free   = 0;
    heap->last   = 0;
    heap->chunk  = chunk        ? (TNCBI_Size) HEAP_ALIGN(chunk) : 0;
    heap->resize = heap->chunk  ? resize : 0;
    heap->auxarg = heap->resize ? auxarg : 0;
    heap->refcnt = 0/*original*/;
    heap->serial = 0;
    if (base) {
        SHEAP_HeapBlock* b = heap->base;
        /* Reformat the pre-allocated heap */
        if (_HEAP_ALIGN_2(base, sizeof(SHEAP_Block)) != (unsigned long) base) {
            CORE_LOGF_X(2, eLOG_Warning,
                        ("Heap Create: Unaligned base (0x%08lX)",
                         (long) base));
        }
        b->head.flag = HEAP_LAST;
        b->head.size = (TNCBI_Size) HEAP_SIZE(size);
        b->nextfree  = 0;
        b->prevfree  = 0;
    }
    return heap;
}


extern HEAP HEAP_AttachFast(const void* base, TNCBI_Size size, int serial)
{
    HEAP heap;

    assert(HEAP_EXTENT(1) == sizeof(SHEAP_HeapBlock));
    assert(_HEAP_ALIGN_EX(1, sizeof(SHEAP_Block)) ==
           _HEAP_ALIGN_2 (1, sizeof(SHEAP_Block)));

    if (!base != !size  ||  !(heap = (HEAP) calloc(1, sizeof(*heap))))
        return 0;
    if (_HEAP_ALIGN_2(base, sizeof(SHEAP_Block)) != (unsigned long) base) {
        CORE_LOGF_X(3, eLOG_Warning,
                    ("Heap Attach: Unaligned base (0x%08lX)", (long) base));
    }
    heap->base   = (SHEAP_HeapBlock*) base;
    heap->size   = HEAP_BLOCKS(size);
    heap->used   = heap->size;
    heap->free   = heap->size;
    heap->last   = heap->size;
    heap->serial = serial;
    if (size != HEAP_EXTENT(heap->size)) {
        CORE_LOGF_X(4, eLOG_Warning,
                    ("Heap Attach: Heap size truncation (%u->%u)"
                     " can result in missing data",
                     size, HEAP_EXTENT(heap->size)));
    }
    return heap;
}


extern HEAP HEAP_Attach(const void* base, TNCBI_Size maxsize, int serial)
{
    TNCBI_Size size = 0;

    if (base  &&  (!maxsize  ||  maxsize > sizeof(SHEAP_Block))) {
        const SHEAP_HeapBlock* b = (const SHEAP_HeapBlock*) base, *p = 0;
        for (;;) {
#if 0 /*FIXME*/
            if ((b->head.flag & HEAP_NEXT_BIT) != x_NextBit(b->size)  ||
                (b->head.flag & HEAP_PREV_BIT) != x_PrevBit(HEAP_SIZE(b->flag))
                ||  (p  &&  p != b - HEAP_BLOCKS(b->flag))) {
                CORE_LOGF_X(5, eLOG_Error,
                            ("Heap Attach: Heap corrupt @%u (0x%08X, %u)",
                             HEAP_INDEX(b, (SHEAP_HeapBlock*) base),
                             b->head.flag, b->head.size));
                return 0;
            }
#endif /*0*/
            size += b->head.size;
            if (maxsize  &&
                (maxsize < size  ||
                 (maxsize - size < sizeof(SHEAP_Block)  &&  !HEAP_ISLAST(b)))){
                CORE_LOGF_X(34, eLOG_Error,
                            ("Heap Attach: Runaway heap @%u (0x%08X, %u)"
                             " size=%u vs. maxsize=%u",
                             HEAP_INDEX(b, (SHEAP_HeapBlock*) base),
                             b->head.flag, b->head.size,
                             size, maxsize));
                return 0;
            }
            if (HEAP_ISLAST(b))
                break;
            p = b;
            b = HEAP_NEXT(p);
        }
    }
    return HEAP_AttachFast(base, size, serial);
}


static const char* s_HEAP_Id(char* buf, HEAP h)
{
    if (!h)
        return "";
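    /* NB: the local "abs" macro yields a negated absolute value, so serials
     * print with a '-' sign;  the &"-"[h->serial > 0] idiom appends an extra
     * '-' only for originally negative serials */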
    if (h->serial  &&  h->refcnt)
        sprintf(buf,"[C%d%sR%u]",abs(h->serial),&"-"[h->serial > 0],h->refcnt);
    else if (h->serial)
        sprintf(buf,"[C%d%s]",   abs(h->serial), &"-"[h->serial > 0]);
    else if (h->refcnt)
        sprintf(buf,"[R%u]",     h->refcnt);
    else
        strcpy(buf, "");
    return buf;
}


/* Scan the heap and return a free block whose size is minimal yet sufficient
 * to fit the requested one (i.e. its preceding free block, if any, is
 * smaller).  The returned block is still linked into the list of free
 * blocks.  "hint" (if provided) tells where to start searching, proceeding
 * down to the start of the free-block chain.
 */
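/* NB: free blocks form a circular doubly-linked list kept in ascending size
 * order:  heap->free indexes the largest free block (the list head), whose
 * "nextfree" wraps around to the smallest one. */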
static SHEAP_HeapBlock* s_HEAP_Find(HEAP             heap,
                                    TNCBI_Size       need,
                                    SHEAP_HeapBlock* hint)
{
    SHEAP_HeapBlock *f, *b, *e = heap->base + heap->free;

    assert(!hint  ||  !HEAP_ISUSED(hint));
    assert(heap->free < heap->size  &&  !HEAP_ISUSED(e));
    if (!hint  &&  need < (e->head.size >> 1)) {
        /* begin from the smallest block */
        for (b = heap->base + e->nextfree; ; b = heap->base + b->nextfree) {
            if (unlikely(!s_HEAP_fast)) {
                if (b < heap->base  ||  heap->base + heap->size <= b) {
                    b = 0;
                    goto err;
                }
                if (HEAP_ISUSED(b))
                    goto err;
            }
            if (need <= b->head.size) {
                f = b;
                break;
            }
            assert(b != e);
        }
        assert(f);
    } else {
        b = hint ? hint : e;
        f = b->head.size < need ? 0 : b;  /*a bigger free block*/
        for (b = heap->base + b->prevfree; ; b = heap->base + b->prevfree) {
            if (unlikely(!s_HEAP_fast)) {
                if (b < heap->base  ||  heap->base + heap->size <= b) {
                    b = 0;
                    goto err;
                }
                if (HEAP_ISUSED(b))
                    goto err;
            }
            if (b == e  ||  b->head.size < need)
                break;
            f = b;
        }
    }
    return f;

 err:
    {{
        char _id[32], msg[80];
        if (b)
            sprintf(msg, " (0x%08X, %u)", b->head.flag, b->head.size);
        else
            *msg = '\0';
        CORE_LOGF_X(8, eLOG_Error,
                    ("Heap Find%s: Heap corrupt @%u/%u%s",
                     s_HEAP_Id(_id, heap), HEAP_INDEX(b, heap->base),
                     heap->size, msg));
    }}
    return 0;
}


static void s_HEAP_Link(HEAP heap, SHEAP_HeapBlock* f, SHEAP_HeapBlock* hint)
{
    unsigned int free = HEAP_INDEX(f, heap->base);
    SHEAP_HeapBlock* b;

    assert(!HEAP_ISUSED(f)  &&  (!hint  ||  !HEAP_ISUSED(hint)));
    if (heap->free == heap->size) {
        assert(!hint);
        f->prevfree = free;
        f->nextfree = free;
        heap->free = free;
        return;
    }
    assert(heap->free < heap->size);
    b = heap->base + heap->free;
    assert(!HEAP_ISUSED(b));
    if (b->head.size < f->head.size) {
        assert(!hint);
        /* Link in AFTER b, and also set the new free head */
        f->nextfree = b->nextfree;
        f->prevfree = HEAP_INDEX(b, heap->base);
        heap->base[b->nextfree].prevfree = free;
        b->nextfree = free;
        heap->free = free;
    } else {
        /* Find a block "b" that is just bigger than "f" */
        assert(!hint  ||  f->head.size <= hint->head.size);
        b = s_HEAP_Find(heap, f->head.size, hint);
        assert(b  &&  !HEAP_ISUSED(b));
        /* Link in BEFORE b (so that f <= b) */
        f->nextfree = HEAP_INDEX(b, heap->base);
        f->prevfree = b->prevfree;
        heap->base[b->prevfree].nextfree = free;
        b->prevfree = free;
    }
}


#if defined(_DEBUG)  &&  !defined(NDEBUG)
#  define s_HEAP_Unlink(b)  { b->prevfree = b->nextfree = ~((TNCBI_Size) 0); }
#else
#  define s_HEAP_Unlink(b)  /*void*/
#endif /*_DEBUG && !NDEBUG*/


/* Collect garbage in the heap, moving all used blocks toward the heap base,
 * and merging all free space at the end into a single free block (or until
 * "need" bytes become available).  Return a pointer to that free block
 * (which is unlinked from the free chain), or NULL if there was no free
 * space.  If the free block is returned, its "flag" is set to the byte size
 * of the previous block, with the HEAP_LAST bit set if it is the last block,
 * clear otherwise.
 * FIXME: can be sped up with prev size in place.
 */
static SHEAP_HeapBlock* s_HEAP_Collect(HEAP heap, TNCBI_Size need)
{
    const SHEAP_HeapBlock* e = heap->base + heap->size;
    SHEAP_HeapBlock* f = 0, *u = 0, *p = 0;
    SHEAP_HeapBlock* b = heap->base;
    unsigned int last = 0;
    TNCBI_Size free = 0;

    do {
        SHEAP_HeapBlock* n = b == e ? 0 : HEAP_NEXT(b);
        assert(!n  ||  HEAP_SIZE(b->head.size) == b->head.size);
        if (n)
            last = HEAP_ISLAST(b);
        if (!n  ||  !HEAP_ISUSED(b)) {
            if (n) {
                assert(!need  ||  b->head.size < need);
                assert(n == e  ||  HEAP_ISUSED(n));
                free += b->head.size;
            }
            if (f) {
                assert(f != p  &&  (u  ||  (!n  &&  !need)));
                assert(!u  ||  HEAP_NEXT(f) == u);
                if (n) {
                    heap->base[b->prevfree].nextfree = b->nextfree;
                    heap->base[b->nextfree].prevfree = b->prevfree;
                    if (b == heap->base + heap->free)
                        heap->free = b->prevfree;
                    assert(heap->base + heap->size != b);  /*because of "f"*/
                    s_HEAP_Unlink(b);
                }
                if (f != heap->base + heap->free) {
                    heap->base[f->prevfree].nextfree = f->nextfree;
                    heap->base[f->nextfree].prevfree = f->prevfree;
                } else if (heap->free != f->prevfree) {
                    heap->base[f->prevfree].nextfree = f->nextfree;
                    heap->base[f->nextfree].prevfree = f->prevfree;
                    heap->free = f->prevfree;
                } else {
                    assert(f->prevfree == f->nextfree);
                    heap->free = heap->size;
                }
                if (u) {
                    TNCBI_Size size = HEAP_BLOCKS(f->head.size);
                    TNCBI_Size used = (TNCBI_Size)(b - u);
                    assert(size == (TNCBI_Size)(u - f));
                    memmove(f, u, HEAP_EXTENT(used));
                    assert(p);
                    p -= size;
                    p->head.flag &= (unsigned int)(~HEAP_LAST);
                    f += used;
                    f->head.flag = last;
                    f->head.size = free;
                    s_HEAP_Unlink(f);
                    if (last)
                        heap->last = HEAP_INDEX(f, heap->base);
                    u = 0;
                } else
                    s_HEAP_Unlink(f);
                if (need  &&  need <= free)
                    return f;
                if (n)
                    s_HEAP_Link(heap, f, 0);
            } else if (n)
                f = b;
        } else {
            if (f  &&  !u)
                u = b;
            p = b;
        }
        b = n;
    } while (b);

    if (f) {
        assert(last);
        f->head.flag = (p ? p->head.size : 0) | last;
    }
    return f;
}


/* Reserve ("book") 'size' bytes (aligned, and including the block header)
 * within the given free block 'b' of an adequate size (perhaps causing the
 * block to be split in two, if it's roomy enough, with the remaining part
 * marked as a new free block).  A non-zero 'tail' parameter reverses the
 * placement order of occupied blocks in successive allocations.  Return the
 * block to use.
 */
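/* Example of the split:  taking 32 bytes out of a 96-byte free block leaves
 * a 64-byte free remainder;  with 'tail' clear the used block gets carved
 * from the beginning of the free block, with 'tail' set - from its end. */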
static SHEAP_Block* s_HEAP_Take(HEAP             heap,
                                SHEAP_HeapBlock* b, SHEAP_HeapBlock* n,
                                TNCBI_Size size,   TNCBI_Size need,
                                int/*bool*/ tail)
{
    assert(HEAP_SIZE(size) == size);
    assert(!HEAP_ISUSED(b)  &&  size <= b->head.size);
    if (size + HEAP_EXTENT(1) <= b->head.size) {
        /* the block is to be split */
        unsigned int last = HEAP_ISLAST(b);
        SHEAP_HeapBlock* f;
        if (tail) {
            f = b;
            f->head.flag &= (unsigned int)(~HEAP_LAST);
            f->head.size -= size;
            b = HEAP_NEXT(f);
            b->head.flag = HEAP_USED | last;
            b->head.size = size;
            if (last)
                heap->last = HEAP_INDEX(b, heap->base);
        } else {
            TNCBI_Size save = b->head.size;
            b->head.size = size;
            f = HEAP_NEXT(b);
            f->head.flag = b->head.flag;
            f->head.size = save - size;
            b->head.flag = HEAP_USED;
            if (last)
                heap->last = HEAP_INDEX(f, heap->base);
        }
        s_HEAP_Unlink(f);
        s_HEAP_Link(heap, f, n);
    } else
        b->head.flag |= HEAP_USED;
    heap->used += HEAP_BLOCKS(size);
    if (size -= need)
        memset((char*) b + need, 0, size);  /* block padding (if any) zeroed */
    return &b->head;
}


extern SHEAP_Block* HEAP_Alloc(HEAP        heap,
                               TNCBI_Size  size,
                               int/*bool*/ tail)
{
    SHEAP_HeapBlock *f, *n;
    TNCBI_Size need;
    char _id[32];

    if (unlikely(!heap)) {
        CORE_LOG_X(6, eLOG_Warning, "Heap Alloc: NULL heap");
        return 0;
    }
    HEAP_CHECK(heap);

    if (unlikely(!heap->chunk)) {
        CORE_LOGF_X(7, eLOG_Error,
                    ("Heap Alloc%s: Heap read-only", s_HEAP_Id(_id, heap)));
        return 0;
    }
    if (unlikely(size < 1))
        return 0;

    size += (TNCBI_Size) sizeof(SHEAP_Block);
    need  = (TNCBI_Size) HEAP_ALIGN(size);

    if (need <= HEAP_EXTENT(heap->size - heap->used)) {
        assert(heap->free < heap->size);
        if (likely((f = s_HEAP_Find(heap, need, 0)) != 0)) {
            /*NB: f is still linked -- unlink*/
            n = heap->base + f->nextfree;
            if (n == f) {
                assert(f == heap->base + heap->free);
                assert(f->prevfree == f->nextfree);
                heap->free = heap->size;
                n = 0;
            } else {
                n->prevfree = f->prevfree;
                heap->base[f->prevfree].nextfree = f->nextfree;
                if (f == heap->base + heap->free) {
                    heap->free = f->prevfree;
                    n = 0;
                }
            }
            s_HEAP_Unlink(f);
        } else {
            /*NB: here f is returned unlinked*/
            f = s_HEAP_Collect(heap, need);
            assert(f  &&  !HEAP_ISUSED(f)  &&  need <= f->head.size);
            if (unlikely(f->head.flag & HEAP_LAST))
                f->head.flag = HEAP_LAST;
            n = 0;
        }
    } else
        f = n = 0;
    if (unlikely(!f)) {
        TNCBI_Size dsize = (TNCBI_Size) HEAP_EXTENT(heap->size);
        TNCBI_Size hsize = (TNCBI_Size) _HEAP_ALIGN_EX(dsize + need,
                                                       heap->chunk);
        SHEAP_HeapBlock* base = (SHEAP_HeapBlock*)
            heap->resize(heap->base, hsize, heap->auxarg);
        if (unlikely
            (_HEAP_ALIGN_2(base,sizeof(SHEAP_Block)) != (unsigned long) base)){
            CORE_LOGF_X(9, eLOG_Warning,
                        ("Heap Alloc%s: Unaligned base (0x%08lX)",
                         s_HEAP_Id(_id, heap), (long) base));
        }
        if (unlikely(!base))
            return 0;
        dsize = hsize - dsize;
        memset(base + heap->size, 0, dsize);  /* security */
        f = base + heap->last;
        if (unlikely(!heap->base)) {
            assert(!heap->last  &&  !heap->free);
            f->head.flag = HEAP_LAST;
            f->head.size = hsize;
            hsize = HEAP_BLOCKS(hsize);
            heap->free = hsize;
        } else {
            assert(base <= f  &&  f < base + heap->size  &&  HEAP_ISLAST(f));
            hsize = HEAP_BLOCKS(hsize);
            if (unlikely(HEAP_ISUSED(f))) {
                f->head.flag &= (unsigned int)(~HEAP_LAST);
                /* New block is at the very top of the heap */
                heap->last = heap->size;
                f = base + heap->size;
                f->head.flag = HEAP_LAST;
                f->head.size = dsize;
                if (heap->free == heap->size)
                    heap->free = hsize;
            } else {
                if (f != base + heap->free) {
                    base[f->nextfree].prevfree = f->prevfree;
                    base[f->prevfree].nextfree = f->nextfree;
                } else if (heap->free != f->prevfree) {
                    base[f->nextfree].prevfree = f->prevfree;
                    base[f->prevfree].nextfree = f->nextfree;
                    heap->free = f->prevfree;
                } else {
                    assert(f->prevfree == f->nextfree);
                    heap->free = hsize;
                }
                f->head.size += dsize;
            }
            s_HEAP_Unlink(f);
        }
        heap->base = base;
        heap->size = hsize;
        assert(!n);
    }
    assert(f  &&  !HEAP_ISUSED(f)  &&  need <= f->head.size);
    return s_HEAP_Take(heap, f, n, need, size, tail);
}


static void s_HEAP_Free(HEAP             heap,
                        SHEAP_HeapBlock* p,
                        SHEAP_HeapBlock* b,
                        SHEAP_HeapBlock* n)
{
    /* NB: in order to maintain HEAP_Walk() "b" must have "size" updated
     * so that the next heap block could be located correctly, and also
     * "flag" must keep its HEAP_LAST bit so that it can be verified. */
    unsigned int last = HEAP_ISLAST(b);

    assert(HEAP_ISUSED(b));
    assert(p < b  &&  b < n);
    assert((!p  ||  HEAP_NEXT(p) == b)  &&  b  &&  HEAP_NEXT(b) == n);
    assert((!p  ||  heap->base <= p)  &&  n <= heap->base + heap->size);

    s_HEAP_Unlink(b);
    b->head.flag = last;
    heap->used -= HEAP_BLOCKS(b->head.size);
    if (!last  &&  !HEAP_ISUSED(n)) {
        assert((n->nextfree | n->prevfree) != (TNCBI_Size)(~0));
        b->head.size += n->head.size;
        if (HEAP_ISLAST(n)) {
            last = HEAP_LAST;
            b->head.flag |= HEAP_LAST;
            heap->last = HEAP_INDEX(b, heap->base);
        }
        if (n == heap->base + heap->free) {
            if (heap->free == n->prevfree) {
                assert(!p  ||  HEAP_ISUSED(p));
                assert(n->prevfree == n->nextfree);
                heap->free = HEAP_INDEX(b, heap->base);
                b->prevfree = heap->free;
                b->nextfree = heap->free;
                return;
            }
            heap->free = n->prevfree;
        }
        heap->base[n->nextfree].prevfree = n->prevfree;
        heap->base[n->prevfree].nextfree = n->nextfree;
        s_HEAP_Unlink(n);
    }
    if (p  &&  !HEAP_ISUSED(p)) {
        assert((p->nextfree | p->prevfree) != (TNCBI_Size)(~0));
        p->head.size += b->head.size;
        if (last) {
            assert(!HEAP_ISLAST(p));
            p->head.flag |= HEAP_LAST;
            heap->last = HEAP_INDEX(p, heap->base);
        }
        if (p == heap->base + heap->free) {
            if (heap->free == p->prevfree) {
                assert(p->prevfree == p->nextfree);
                return;
            }
            heap->free = p->prevfree;
        }
        heap->base[p->nextfree].prevfree = p->prevfree;
        heap->base[p->prevfree].nextfree = p->nextfree;
        s_HEAP_Unlink(p);
        b = p;
    }
    s_HEAP_Link(heap, b, 0);
}

extern void HEAP_Free(HEAP heap, SHEAP_Block* ptr)
{
    const SHEAP_HeapBlock* e;
    SHEAP_HeapBlock *b, *p;
    char _id[32];

    if (unlikely(!heap)) {
        CORE_LOG_X(10, eLOG_Warning, "Heap Free: NULL heap");
        return;
    }
    HEAP_CHECK(heap);

    if (unlikely(!heap->chunk)) {
        CORE_LOGF_X(11, eLOG_Error,
                    ("Heap Free%s: Heap read-only", s_HEAP_Id(_id, heap)));
        return;
    }
    if (unlikely(!ptr))
        return;

    p = 0;
    b = heap->base;
    e = b + heap->size;
    while (likely(b < e)) {
        SHEAP_HeapBlock* n = HEAP_NEXT(b);
        if (unlikely(n > e)) {
            CORE_LOGF_X(13, eLOG_Error,
                        ("Heap Free%s: Heap corrupt @%u/%u (0x%08X, %u)",
                         s_HEAP_Id(_id, heap), HEAP_INDEX(b, heap->base),
                         heap->size, b->head.flag, b->head.size));
            return;
        }
        if (unlikely(&b->head == ptr)) {
            if (unlikely(!HEAP_ISUSED(b))) {
                CORE_LOGF_X(12, eLOG_Warning,
                            ("Heap Free%s: Freeing free block @%u",
                             s_HEAP_Id(_id, heap),
                             HEAP_INDEX(b, heap->base)));
            } else
                s_HEAP_Free(heap, p, b, n);
            return;
        }
        p = b;
        b = n;
    }

    CORE_LOGF_X(14, eLOG_Error,
                ("Heap Free%s: Block not found", s_HEAP_Id(_id, heap)));
}

/*FIXME: to remove*/
extern void HEAP_FreeFast(HEAP heap, SHEAP_Block* ptr, const SHEAP_Block* prev)
{
    SHEAP_HeapBlock *b, *p, *n, *t;
    char _id[32];

    if (unlikely(!heap)) {
        CORE_LOG_X(15, eLOG_Warning, "Heap Free: NULL heap");
        return;
    }
    HEAP_CHECK(heap);

    if (unlikely(!heap->chunk)) {
        CORE_LOGF_X(16, eLOG_Error,
                    ("Heap Free%s: Heap read-only", s_HEAP_Id(_id, heap)));
        return;
    }
    if (unlikely(!ptr))
        return;

    p = (SHEAP_HeapBlock*) prev;
    b = (SHEAP_HeapBlock*) ptr;
    n = HEAP_NEXT(b);

    if (likely(p  &&  HEAP_ISUSED(p))  &&  unlikely((t = HEAP_NEXT(p)) != b)
        &&  likely(!HEAP_ISUSED(t)  &&  HEAP_NEXT(t) == b)) {
        p = t;
    }

    if (unlikely(!s_HEAP_fast)) {
        const SHEAP_HeapBlock* e = heap->base + heap->size;
        if (unlikely(b < heap->base)  ||  unlikely(e < n)) {
            CORE_LOGF_X(17, eLOG_Error,
                        ("Heap Free%s: Alien block", s_HEAP_Id(_id, heap)));
            return;
        }
        if (unlikely((!p  &&  b != heap->base))  ||
            unlikely(( p  &&  (p < heap->base  ||  b != HEAP_NEXT(p))))) {
            char h[40];
            if (!p  ||  p < heap->base  ||  e <= p)
                *h = '\0';
            else
                sprintf(h, "(%u)", HEAP_INDEX(p, heap->base));
            CORE_LOGF_X(18, eLOG_Warning,
                        ("Heap Free%s: Lame hint%s for block @%u",
                         s_HEAP_Id(_id, heap), h, HEAP_INDEX(b, heap->base)));
            HEAP_Free(heap, ptr);
            return;
        }
        if (unlikely(!HEAP_ISUSED(b))) {
            CORE_LOGF_X(19, eLOG_Warning,
                        ("Heap Free%s: Freeing free block @%u",
                         s_HEAP_Id(_id, heap), HEAP_INDEX(b, heap->base)));
            return;
        }
    }

    s_HEAP_Free(heap, p, b, n);
}

extern HEAP HEAP_Trim(HEAP heap)
{
    TNCBI_Size hsize, size, prev;
    SHEAP_HeapBlock* b;
    char _id[32];

    if (!heap)
        return 0;
    HEAP_CHECK(heap);

    if (!heap->chunk) {
        CORE_LOGF_X(30, eLOG_Error,
                    ("Heap Trim%s: Heap read-only", s_HEAP_Id(_id, heap)));
        return 0;
    }
    if (likely(s_HEAP_fast)  &&  heap->used == heap->size)
        return heap/*no free space, nothing to do*/;

    b = s_HEAP_Collect(heap, 0);

    if (b) {
        assert(!HEAP_ISUSED(b)  &&  HEAP_ISLAST(b));
        prev = HEAP_BLOCKS(b->head.flag);
        b->head.flag = HEAP_LAST;
    } else
        prev = 0;
    if (!b  ||  b->head.size < heap->chunk) {
        hsize = HEAP_EXTENT(heap->size);
        size  = 0;
    } else if (!(size = b->head.size % heap->chunk)) {
        hsize = HEAP_EXTENT(heap->size) - b->head.size;
        b -= prev;
        assert(!prev  ||  HEAP_ISUSED(b));
    } else {
        assert(HEAP_EXTENT(1) <= size);
        hsize = HEAP_EXTENT(heap->size) - b->head.size + size;
    }

    if (heap->resize) {
        SHEAP_HeapBlock* base = (SHEAP_HeapBlock*)
            heap->resize(heap->base, hsize, heap->auxarg);
        if (!hsize)
            assert(!base);
        else if (!base)
            return 0;
        if (_HEAP_ALIGN_2(base, sizeof(SHEAP_Block)) != (unsigned long) base) {
            CORE_LOGF_X(31, eLOG_Warning,
                        ("Heap Trim%s: Unaligned base (0x%08lX)",
                         s_HEAP_Id(_id, heap), (long) base));
        }
        prev  = HEAP_INDEX(b, heap->base);
        hsize = HEAP_BLOCKS(hsize);
        if (heap->free == heap->size)
            heap->free = hsize;
        heap->base = base;
        heap->size = hsize;
        if (base  &&  b) {
            b = base + prev;
            if (HEAP_ISUSED(b)) {
                assert(heap->free == hsize);
                b->head.flag |= HEAP_LAST;
                heap->last = prev;
            } else {
                assert(HEAP_ISLAST(b));
                if (size)
                    b->head.size = size;
                s_HEAP_Link(heap, b, 0);
            }
        }
        assert(HEAP_EXTENT(hsize) % heap->chunk == 0);
        assert(heap->free <= heap->size);
        assert(heap->last <= heap->size);
    } else if (hsize != HEAP_EXTENT(heap->size)) {
        CORE_LOGF_X(32, eLOG_Error,
                    ("Heap Trim%s: Heap not trimmable", s_HEAP_Id(_id, heap)));
    }

    assert(!heap->base == !heap->size);
    return heap;
}

static SHEAP_HeapBlock* x_HEAP_Walk(const HEAP heap, const SHEAP_Block* ptr)
{
    SHEAP_HeapBlock *n, *b, *p = (SHEAP_HeapBlock*) ptr;
    const SHEAP_HeapBlock* e = heap->base + heap->size;
    char msg[80];
    char _id[32];
    size_t i;

    if (p) {
        if (p < heap->base  ||  e <= p
            ||  p->head.size <= sizeof(SHEAP_Block)
            ||  HEAP_ALIGN(p->head.size) != p->head.size) {
            CORE_LOGF_X(28, eLOG_Error,
                        ("Heap Walk%s: Alien pointer", s_HEAP_Id(_id, heap)));
            return 0;
        }
        b = HEAP_NEXT(p);
    } else
        b = heap->base;

    if (e <= b  ||  b <= p
        ||  b->head.size <= sizeof(SHEAP_Block)
        ||  HEAP_ALIGN(b->head.size) != b->head.size
        ||  e < (n = HEAP_NEXT(b))  ||  n <= b) {
        if (b != e  ||  (b  &&  !p)) {
            CORE_LOGF_X(26, eLOG_Error,
                        ("Heap Walk%s: Heap corrupt", s_HEAP_Id(_id, heap)));
        } else if (b/*== e*/  &&  !HEAP_ISLAST(p)) {
            CORE_LOGF_X(27, eLOG_Error,
                        ("Heap Walk%s: No last block @%u",s_HEAP_Id(_id, heap),
                         HEAP_INDEX(p, heap->base)));
        }
        return 0;
    }

    if (HEAP_ISLAST(b)  &&
        (n < e  ||  (heap->chunk/*RW heap*/  &&  b < heap->base + heap->last))) {
        if (heap->chunk/*RW heap*/) {
            const SHEAP_HeapBlock* l = heap->base + heap->last;
            sprintf(msg, " %s @%u", l < e  &&  HEAP_NEXT(l) == e
                    ? "expected" : "invalid", heap->last);
        } else
            *msg = '\0';
        CORE_LOGF_X(23, eLOG_Error,
                    ("Heap Walk%s: Last block @%u%s", s_HEAP_Id(_id, heap),
                     HEAP_INDEX(b, heap->base), msg));
    }

    if (!HEAP_ISUSED(b)) {
        const SHEAP_HeapBlock* c;
        if (heap->chunk/*RW heap*/) {
            /* Free blocks are tricky!  They can be left-overs from
               merging of freed blocks while walking.  Careful here! */
            int/*bool*/ ok = 0/*false*/;
            c = heap->base + heap->free;
            for (i = 0;  i < heap->size;  ++i) {
                const SHEAP_HeapBlock* s;
                int/*bool*/ self;
                if (e <= c
                    ||  heap->size <= c->prevfree
                    ||  heap->size <= c->nextfree
                    ||  HEAP_ISUSED(heap->base + c->prevfree)
                    ||  HEAP_ISUSED(heap->base + c->nextfree)
                    ||  e < (s = HEAP_NEXT(c))) {
                    c = 0;
                    break;
                }
                self = (c->nextfree == c->prevfree  &&
                        c == heap->base + c->prevfree);
                if (self  ||  (c <= b  &&  b <= s)) {
                    if (ok  ||  s < n) {
                        c = 0;
                        break;
                    }
                    if (self/*the only single free block in heap*/) {
                        ok = c <= b  &&  c == heap->base + heap->free;
                        break;
                    }
                    ok = 1/*true*/;
                }
                s = heap->base + c->prevfree;
                if (s == c  ||  c != heap->base + s->nextfree) {
                    b = 0;
                    break;
                }
                if (s == heap->base + heap->free)
                    break;
                if (c->head.size < s->head.size) {
                    b = 0;
                    break;
                }
                c = s;
            }
            if (!ok)
                b = 0;
            else if (c  &&  b)
                c = b;
        } else {
            /*FIXME: check for monotonic increase in sizes*/
            /* RO heap may not have any free block peculiarities */
            c = b;
            if (heap->size <= b->prevfree  ||
                heap->size <= b->nextfree
                ||  HEAP_ISUSED(heap->base + b->prevfree)
                ||  HEAP_ISUSED(heap->base + b->nextfree)) {
                b = 0;
            } else if (b->prevfree != b->nextfree  ||
                       b != heap->base + b->nextfree) {
                for (i = 0;  i < heap->size;  ++i) {
                    const SHEAP_HeapBlock* s = b;
                    b = heap->base + b->nextfree;
                    if (HEAP_ISUSED(b)  ||  b == s  ||
                        heap->size <= b->nextfree  ||
                        s != heap->base + b->prevfree) {
                        b = 0;
                        break;
                    }
                    if (b == c)
                        break;
                }
            } else
                b = heap->base + b->nextfree;  /* NB: must not move */
        }
        if (!c  ||  !b  ||  b != c) {
            if (c) {
                sprintf(msg, " @%u/%u (%u, <-%u, %u->)",
                        HEAP_INDEX(c, heap->base), heap->size,
                        c->head.size, c->prevfree, c->nextfree);
            } else
                *msg = '\0';
            CORE_LOGF_X(21, eLOG_Error,
                        ("Heap Walk%s: Free list %s%s", s_HEAP_Id(_id, heap),
                         b  &&  c ? "broken" : "corrupt", msg));
            return 0;
        }
    }

    if (HEAP_ISUSED(b)  &&  heap->chunk/*RW heap*/) {
        const SHEAP_HeapBlock* c = heap->base + heap->free;
        /* Check that a used block is not within the chain of free
           blocks but ignore any inconsistencies in the chain */
        for (i = 0;  c < e  &&  i < heap->size;  ++i) {
            if (HEAP_ISUSED(c))
                break;
            if (c <= b  &&  b < HEAP_NEXT(c)) {
                CORE_LOGF_X(20, eLOG_Error,
                            ("Heap Walk%s: Used block @%u within"
                             " a free one @%u", s_HEAP_Id(_id, heap),
                             HEAP_INDEX(b, heap->base),
                             HEAP_INDEX(c, heap->base)));
                return 0;
            }
            if (c == heap->base + c->nextfree)
                break;
            c = heap->base + c->nextfree;
            if (c == heap->base + heap->free)
                break;
        }
    }

    /* Block 'b' seems okay for walking onto, but... */
    if (p) {
        if (HEAP_ISLAST(p)) {
            CORE_LOGF_X(22, eLOG_Error,
                        ("Heap Walk%s: Last block @%u", s_HEAP_Id(_id,heap),
                         HEAP_INDEX(p, heap->base)));
        } else if (!HEAP_ISUSED(p)  &&  !HEAP_ISUSED(b)) {
            const SHEAP_HeapBlock* c = heap->base;
            while (c < p) {
                if (!HEAP_ISUSED(c)  &&  n <= HEAP_NEXT(c))
                    break;
                c = HEAP_NEXT(c);
            }
            if (p <= c) {
                CORE_LOGF_X(24, eLOG_Error,
                            ("Heap Walk%s: Adjacent free blocks"
                             " @%u and @%u", s_HEAP_Id(_id, heap),
                             HEAP_INDEX(p, heap->base),
                             HEAP_INDEX(b, heap->base)));
            }
        }
    }

    return b;
}

#ifdef __GNUC__
inline
#endif /*__GNUC__*/
static SHEAP_HeapBlock* s_HEAP_Walk(const HEAP heap, const SHEAP_Block* ptr)
{
    assert(heap);
    if (likely(s_HEAP_fast)) {
        SHEAP_HeapBlock* b;
        if (unlikely(!ptr))
            return heap->base;
        b = (SHEAP_HeapBlock*) ptr;
        if (HEAP_ISLAST(b))
            return 0;
        b = HEAP_NEXT(b);
        return likely(ptr < &b->head)  &&  likely(b < heap->base + heap->size)
            ? b : 0;
    }
    return x_HEAP_Walk(heap, ptr);
}


extern SHEAP_Block* HEAP_Walk(const HEAP heap, const SHEAP_Block* ptr)
{
    SHEAP_HeapBlock* b;

    if (unlikely(!heap)) {
        CORE_LOG_X(29, eLOG_Warning, "Heap Walk: NULL heap");
        return 0;
    }
    HEAP_CHECK(heap);

    b = s_HEAP_Walk(heap, ptr);
    return b ? &b->head : 0;  /* NB: avoid taking &(...)->head of NULL */
}


extern SHEAP_Block* HEAP_Next(const HEAP heap, const SHEAP_Block* ptr)
{
    SHEAP_HeapBlock* n;
    if (unlikely(!heap)) {
        CORE_LOG_X(34, eLOG_Warning, "Heap Next: NULL heap");
        return 0;
    }
    HEAP_CHECK(heap);

    for (n = s_HEAP_Walk(heap, ptr);  n;  n = s_HEAP_Walk(heap, &n->head)) {
        if (HEAP_ISUSED(n))
            break;
    }
    return n ? &n->head : 0;  /* NB: NULL when no further used blocks exist */
}


extern HEAP HEAP_Copy(const HEAP heap, size_t extra, int serial)
{
    HEAP       copy;
    TNCBI_Size size;

    assert(_HEAP_ALIGN_EX(1, sizeof(SHEAP_Block)) ==
           _HEAP_ALIGN_2 (1, sizeof(SHEAP_Block)));

    if (!heap)
        return 0;
    HEAP_CHECK(heap);

    size = HEAP_EXTENT(heap->size);
    copy = (HEAP) malloc(sizeof(*copy) +
                         (size ? sizeof(SHEAP_Block) - 1 + size : 0) + extra);
    if (!copy)
        return 0;
    if (size) {
        char* base = (char*) copy + sizeof(*copy);
        base += _HEAP_ALIGN_2(base,sizeof(SHEAP_Block)) - (unsigned long) base;
        assert(_HEAP_ALIGN_2(base,sizeof(SHEAP_Block)) ==(unsigned long) base);
        copy->base = (SHEAP_HeapBlock*) base;
    } else
        copy->base = 0;
    copy->size   = heap->size;
    copy->free   = heap->free;
    copy->used   = heap->used;
    copy->last   = heap->last;
    copy->chunk  = 0/*read-only*/;
    copy->resize = 0;
    copy->auxarg = 0;
    copy->refcnt = 1/*copy*/;
    copy->serial = serial;
    if (size) {
        memcpy(copy->base, heap->base, size);
        assert(memset((char*) copy->base + size, 0, extra));
    }
    return copy;
}


extern unsigned int HEAP_AddRef(HEAP heap)
{
    if (!heap)
        return 0;
    HEAP_CHECK(heap);
    if (heap->refcnt) {
        heap->refcnt++;
        assert(heap->refcnt);
    }
    return heap->refcnt;
}


extern unsigned int HEAP_Detach(HEAP heap)
{
    if (!heap)
        return 0;
    HEAP_CHECK(heap);
    if (heap->refcnt  &&  --heap->refcnt)
        return heap->refcnt;
    memset(heap, 0, sizeof(*heap));
    free(heap);
    return 0;
}


extern unsigned int HEAP_Destroy(HEAP heap)
{
    char _id[32];

    if (!heap)
        return 0;
    HEAP_CHECK(heap);
    if (!heap->chunk  &&  !heap->refcnt) {
        CORE_LOGF_X(33, eLOG_Error,
                    ("Heap Destroy%s: Heap read-only", s_HEAP_Id(_id, heap)));
    } else if (heap->resize/*NB: NULL for heap copies*/)
        verify(heap->resize(heap->base, 0, heap->auxarg) == 0);
    return HEAP_Detach(heap);
}


extern void* HEAP_Base(const HEAP heap)
{
    if (!heap)
        return 0;
    HEAP_CHECK(heap);
    return heap->base;
}


extern TNCBI_Size HEAP_Size(const HEAP heap)
{
    if (!heap)
        return 0;
    HEAP_CHECK(heap);
    return HEAP_EXTENT(heap->size);
}


extern TNCBI_Size HEAP_Used(const HEAP heap)
{
    if (!heap)
        return 0;
    HEAP_CHECK(heap);
    return HEAP_EXTENT(heap->used);
}


extern TNCBI_Size HEAP_Idle(const HEAP heap)
{
    TNCBI_Size size = 0;
    if (heap) {
        HEAP_CHECK(heap);
        if (heap->free < heap->size) {
            const SHEAP_HeapBlock* f = heap->base + heap->free;
            const SHEAP_HeapBlock* b = f;
            do {
                size += b->head.size;
                b = heap->base + b->prevfree;
            } while (b != f);
#if defined(_DEBUG)  &&  !defined(NDEBUG)
            {{
                TNCBI_Size alt_size = 0;
                do {
                    alt_size += b->head.size;
                    b = heap->base + b->nextfree;
                } while (b != f);
                assert(alt_size == size);
            }}
#endif /*_DEBUG && !NDEBUG*/
        }
        assert(size == HEAP_SIZE(size));
        assert(size == HEAP_EXTENT(heap->size - heap->used));
    }
    return size;
}


extern int HEAP_Serial(const HEAP heap)
{
    if (!heap)
        return 0;
    HEAP_CHECK(heap);
    return heap->serial;
}


/*ARGSUSED*/
void HEAP_Options(ESwitch fast, ESwitch unused)
{
    switch (fast) {
    case eOff:
        s_HEAP_fast = 0/*false*/;
        break;
    case eOn:
        s_HEAP_fast = 1/*true*/;
        break;
    default:
        break;
    }
}