1 // Copyright 2009 The Archiveopteryx Developers <info@aox.org>
2
3 #include "allocator.h"
4
5 #include "cache.h"
6 #include "estring.h"
7 #include "log.h"
8
9 // fprintf
10 #include <stdio.h>
11
12 // timeval, gettimeofday
13 #include <sys/time.h>
14 #include <time.h>
15
16 // mmap, munmap
17 #include <sys/mman.h>
18
19 // memset
20 #include <string.h>
21
22 // malloc, free
23 #include <stdlib.h>
24
25 #include <errno.h>
26
27
// the heuristics in adminLikelyHappy() will probably need changing
// if BlockShift changes
// each Allocator arena occupies one block of 2^BlockShift (512KB)
// bytes, aligned on a block boundary; pointer-to-Allocator lookup
// works by shifting an address right by BlockShift
static uint BlockShift = 19;
static uint BlockSize = 1 << BlockShift;
32
33
34
35 class AllocatorMapTable // NOT a Garbage class
36 {
37 public:
AllocatorMapTable()38 AllocatorMapTable(): l( 0 ) {
39 uint i = 0;
40 while ( i < Size ) {
41 data[i] = 0;
42 ++i;
43 }
44 }
45
find(const void * address)46 static Allocator * find( const void * address ) {
47 Allocator::ulong v = ((Allocator::ulong)address) >> BlockShift;
48 AllocatorMapTable * t = root;
49 if ( v & ( ((Allocator::ulong)-1) << ( t->l + Slice ) ) )
50 return 0;
51 while ( t && t->l )
52 t = t->children[(v >> t->l) & Mask];
53 if ( !t )
54 return 0;
55 return t->data[v & Mask];
56 }
provide(Allocator::ulong v)57 static AllocatorMapTable * provide( Allocator::ulong v ) {
58 if ( !root ) {
59 root = new AllocatorMapTable;
60 uint rv = v;
61 while ( rv & ~Mask ) {
62 rv = rv >> Slice;
63 root->l += Slice;
64 }
65 }
66 while ( v & ( ((Allocator::ulong)-1) << ( root->l + Slice ) ) ) {
67 AllocatorMapTable * nroot = new AllocatorMapTable;
68 nroot->l = root->l + Slice;
69 nroot->children[0] = root;
70 root = nroot;
71 }
72 AllocatorMapTable * t = root;
73 while ( t->l ) {
74 uint i = ( v >> t->l ) & Mask;
75 if ( !t->children[i] ) {
76 t->children[i] = new AllocatorMapTable;
77 t->children[i]->l = t->l - Slice;
78 }
79 t = t->children[i];
80 }
81 return t;
82 }
83
insert(Allocator::ulong v,Allocator * a)84 static void insert( Allocator::ulong v, Allocator * a ) {
85 provide( v )->data[v & Mask] = a;
86 }
insert(Allocator * a)87 static void insert( Allocator * a ) {
88 Allocator::ulong v = ((Allocator::ulong)a->buffer) >> BlockShift;
89 Allocator::ulong i = 0;
90 while ( i < a->step * a->capacity ) {
91 insert( v + i, a );
92 i += BlockSize;
93 }
94 }
95
remove(Allocator::ulong v,Allocator * a)96 static void remove( Allocator::ulong v, Allocator * a ) {
97 AllocatorMapTable * t = provide( v );
98 if ( t && t->data[v & Mask] == a )
99 t->data[v & Mask] = 0;
100 }
remove(Allocator * a)101 static void remove( Allocator * a ) {
102 Allocator::ulong v = ((Allocator::ulong)a->buffer) >> BlockShift;
103 Allocator::ulong i = 0;
104 while ( i < a->step * a->capacity ) {
105 remove( v + i, a );
106 i += BlockSize;
107 }
108 }
109
110 static const uint Slice = 10;
111 static const uint Size = 1 << Slice;
112 static const uint Mask = Size - 1;
113
114 private:
115 uint l;
116 union {
117 AllocatorMapTable * children[Size]; // if l>0
118 Allocator * data[Size]; // if l==0
119 };
120
121 static AllocatorMapTable * root;
122 };
123
124
125 AllocatorMapTable * AllocatorMapTable::root = 0;
126
127
128
129
// Every chunk an Allocator hands out starts with one management
// word: a magic tag to detect corruption and the number of leading
// pointer slots to scan during GC. The union with y/z pads the
// header to word size, so payload is pointer-aligned.
struct AllocationBlock
{
    union {
        struct {
            uint magic: 15;  // ::magic while the chunk is live, 0 when free
            uint number: 7;  // pointer-bearing prefix; 127 = "maybe all"
        } x;
        uint y;
        void * z;
    };
    void* payload[1]; // actually extends to the end of the chunk
};

// refuse any single allocation larger than this (512MB)
const uint SizeLimit = 512 * 1024 * 1024;
144
145
// collector statistics: bytes surviving the last sweep, bytes
// allocated since then, and the per-collection object/byte counts
// accumulated by mark().
static int total;
static uint allocated;
static uint objects;
static uint marked;
// mark-stack state: top-of-stack index, high-water mark, and the
// malloc()ed stack itself (rebuilt on demand by mark( void * )).
static uint tos;
static uint peak;
static AllocationBlock ** stack;
153
154
// Called by Allocator::alloc() each time the running allocation
// total crosses a megabyte boundary.
static void oneMegabyteAllocated()
{
    // this is a good place to put a breakpoint when we want to
    // find out who allocates memory.
}
160
161
162 /*! Allocates \a s bytes of collectible memory, which may contain up
163 to \a n pointers. If n is too large to be contained within \a s
164 bytes, alloc() uses the largest number that will fit. The default
165 value is UINT_MAX, which in practice means that the entire object
166 may consist of pointers.
167
168 Note that \a s is a uint, not a size_t. In our universe, it isn't
169 possible to allocate more than 4GB at a time. So it is.
170 */
171
172
alloc(uint s,uint n)173 void * Allocator::alloc( uint s, uint n )
174 {
175 if ( s > SizeLimit )
176 die( Memory );
177 if ( n > s / sizeof( void* ) )
178 n = s / sizeof( void* );
179 if ( s > 262144 ) {
180 fprintf( stderr, "%s", "" );
181 }
182 Allocator * a = Allocator::allocator( s );
183 while ( a->taken == a->capacity && a->next )
184 a = a->next;
185 void * p = a->allocate( s, n );
186 if ( ( ( ::total + ::allocated + s ) & 0xfff00000 ) >
187 ( ( ::total + ::allocated ) & 0xfff00000 ) )
188 ::oneMegabyteAllocated();
189 ::allocated += a->chunkSize();
190 return p;
191 }
192
193
194 /*! Deallocates the object at \a p.
195
196 This is never strictly necessary, however, if a very large number
197 of objects are allocated and deallocated, it may be beneficial.
198 This function exists because it was beneficial in
199 EString::reserve().
200 */
201
202
dealloc(void * p)203 void Allocator::dealloc( void * p )
204 {
205 Allocator * a = AllocatorMapTable::find( p );
206 if ( a )
207 a->deallocate( p );
208 }
209
210
// size of the per-chunk management word, number of bits per bitmap
// word, and the tag stored in every live chunk's header.
const uint bytes = sizeof(void*);
const uint bits = 8 * sizeof(void*);
const uint magic = 0x7d34;


// one chain of Allocators per power-of-two chunk size
static Allocator * allocators[32];


static const uint maxRoots = 4096;

// the registered GC roots, with per-root statistics filled in
// during Allocator::free()
static struct {
    void * root;
    const char * name;
    uint objects;
    uint size;
} roots[maxRoots];

static uint numRoots;

// whether free() logs statistics; set via setReporting()
static bool verbose;
231
232
233 /*! Returns a pointer to the Allocator responsible for \a size. \a
234 size need not be rounded.
235 */
236
allocator(uint size)237 Allocator * Allocator::allocator( uint size )
238 {
239 uint i = 0;
240 uint b = 8;
241 if ( bits == 64 )
242 b = 16;
243 while ( size + bytes > b << i )
244 i++;
245 if ( !allocators[i] )
246 allocators[i] = new Allocator( b << i );
247 return allocators[i];
248 }
249
250
251 /*! \class Allocator allocator.h
252
253 The Allocator class does the heavy lifting for Archiveopteryx memory
254 allocation system, a simple garbage collector for event-driven
255 servers.
256
257 Our GC system is based on the notion of eternal objects and safe
258 GC points. Eternal objects must be declared by calling
259 addEternal. Collectible objects are allocated by calling alloc(),
260 or alternatively by inheriting Garbage. Most classes inherit from
261 Garbage.
262
    The free() function marks all objects that can be reached from the
264 eternal ones, and afterwards frees anything which isn't
265 reachable. It can be called whenever there are no pointers into
266 the heap, ie. only during the main event loop.
267
268 Each single instance of the Allocator class allocates memory blocks
    of a given size. There are static functions to do the heavy lifting,
270 such as free() to free all unreachable memory, allocate() to
271 allocate something, allocator() to find an Allocator responsible
272 for a given size and finally rounded(), to find the largest size
273 which will fit comfortably in an allocation block, rounded().
274
275 The EString and UString classes can call rounded() to optimize
276 their memory usage.
277 */
278
279
280
281 /*! This private constructor creates an Allocator to dispense objects
282 of size at most \a s - sizeof(void*) bytes.
283 */
284
Allocator::Allocator( uint s )
    : base( 0 ), step( s ), taken( 0 ), capacity( 0 ),
      used( 0 ), marked( 0 ), buffer( 0 ),
      next( 0 )
{
    // as many chunks as fit in one block; chunks of a block or more
    // get an arena to themselves
    if ( s < ( BlockSize ) )
        capacity = ( BlockSize ) / ( s );
    else
        capacity = 1;
    // round the arena up to whole 4KB pages and recompute capacity
    uint l = capacity * s;
    l = ( ( l-1 ) | 4095 ) + 1;
    capacity = l / s;

    buffer = mmap( 0, l, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0 );
    if ( buffer == MAP_FAILED )
        die( Memory );
    if ( ((ulong)buffer) & (BlockSize-1) ) {
        // the region we got wasn't at a BlockSize boundary. drop it,
        // ask for one that MUST span an entire aligned block, then
        // drop what we don't need from that region.
        munmap( buffer, l );

        uint xl = l + BlockSize;
        buffer = mmap( 0, xl, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
                       -1, 0 );
        if ( buffer == MAP_FAILED )
            die( Memory );
        // desired is the first block boundary at or after start
        ulong start = (ulong)buffer;
        ulong desired = ((start-1)|(BlockSize-1))+1;
        // unmap the slack below the aligned region, then the slack
        // above it
        if ( desired != (ulong)buffer )
            munmap( buffer, desired - start );
        if ( start + xl > desired + l )
            munmap( (void*)(desired+l), (start+xl) - (desired+l) );
        buffer = (void*)desired;
    }

    memset( buffer, 0, l );

    // one bit per chunk in each of the used and marked bitmaps
    uint bl = (capacity + bits - 1)/bits;
    used = (ulong*)::calloc( bl, sizeof( ulong ) );
    if ( !used )
        die( Memory );
    marked = (ulong*)::calloc( bl, sizeof( ulong ) );
    if ( !marked )
        die( Memory );

    // make this arena findable by pointer
    AllocatorMapTable::insert( this );
}
333
334
335
336 /*! Destroys the object and frees its allocated memory. */
337
Allocator::~Allocator()
{
    // unregister first, so no stale lookup can find this arena
    AllocatorMapTable::remove( this );
    // same page-rounding the constructor used for the mmap() length
    uint l = capacity * step;
    l = ( ( l-1 ) | 4095 ) + 1;
    ::munmap( buffer, l );

    ::free( used );
    ::free( marked );

    next = 0;
    used = 0;
    buffer = 0;
}
352
353
354 /*! Allocates a chunk of memory (which may contain up to \a pointers
355 pointers), notes that at most \a size bytes are in use, and returns
356 a pointer to it.
357 */
358
void * Allocator::allocate( uint size, uint pointers )
{
    if ( taken < capacity ) {
        // scan the used bitmap from base (the lowest possibly-free
        // chunk index) looking for a clear bit
        while ( base < capacity ) {
            ulong bm = used[base/bits];
            if ( bm != ~(0UL) ) {
                // this word has at least one free chunk; find it
                uint j = base%bits;
                while ( bm & ( 1UL << j ) )
                    j++;
                base = (base & ~(bits-1)) + j;
                AllocationBlock * b = (AllocationBlock*)block( base );
                if ( b ) {
                    // a free chunk must not carry the live-object
                    // tag; if it does, the bitmap and the heap
                    // disagree and something is badly wrong
                    if ( b->x.magic == ::magic ) {
                        if ( verbose )
                            log( "Internal error in allocate" );
                        die( Memory );
                    }
                    // 127 is the in-band value for "maybe all
                    // pointers"; the bitfield can't hold more
                    if ( pointers >= 128 )
                        b->x.number = 127;
                    else
                        b->x.number = pointers;
                    b->x.magic = ::magic;
                    marked[base/bits] &= ~( 1UL << j );
                    used[base/bits] |= ( 1UL << j );
                    taken++;
                    base++;
                    // only the pointer-bearing prefix needs zeroing
                    memset( b->payload, 0, pointers*sizeof(void*) );
                    return &(b->payload);
                }
            }
            // word exhausted (or index out of range): advance base
            // to the start of the next bitmap word
            base = (base | (bits-1)) + 1;
        }
    }

    // this arena is full: delegate to (creating if necessary) the
    // next arena of the same chunk size
    if ( !next )
        next = new Allocator( step );
    return next->allocate( size, pointers );
}
397
398
399 /*! Deallocates the object at \a p, provided that it's within this
400 Allocator. Calling this function is never necessary, since free()
401 does the same job. However, EString helps out by doing it
402 occasionally.
403 */
404
deallocate(void * p)405 void Allocator::deallocate( void * p )
406 {
407 ulong i = ((ulong)p - (ulong)buffer) / step;
408 if ( i >= capacity )
409 return;
410 if ( ! (used[i/bits] & 1UL << (i%bits)) )
411 return;
412
413 AllocationBlock * m = (AllocationBlock *)block( i );
414 if ( m->x.magic != ::magic )
415 die( Memory );
416 used[i/bits] &= ~(1UL << i);
417 marked[i/bits] &= ~(1UL << i);
418 taken--;
419 m->x.magic = 0;
420
421 if ( base > i )
422 base = i;
423 if ( ::allocated > step )
424 ::allocated -= step;
425 }
426
427
428 /*! Records that \a p contains at most \a n pointers, all located at
429 the start of the object. The rest of the object is not scanned for
430 pointers during garbage collection, which can be helpful if the
431 object contains either very large string/text data or apparently
432 random binary data.
433
434 Scanning long strings is slow. Binary data can give false alarms
    during pointer scanning, which will lead to memory not being
436 freed.
437 */
438
setNumPointers(const void * p,uint n)439 void Allocator::setNumPointers( const void * p, uint n )
440 {
441 if ( n * sizeof( void * ) >= step || n > 127 )
442 n = 127;
443
444 ulong i = ((ulong)p - (ulong)buffer) / step;
445 if ( i >= capacity )
446 return;
447 if ( ! (used[i/bits] & 1UL << (i%bits)) )
448 return;
449
450 AllocationBlock * m = (AllocationBlock *)block( i );
451 if ( m->x.magic != ::magic )
452 die( Memory );
453
454 m->x.number = n;
455 }
456
457
458 /*! This private helper checks that \a p is a valid pointer to
459 unmarked GCable memory, marks it, and puts it on a stack so that
460 mark() can process it and add its children to the stack.
461 */
462
void Allocator::mark( void * p )
{
    Allocator * a = AllocatorMapTable::find( p );
    // a may be the allocator we want. does its area encompass p?
    if ( !a || (ulong)a->buffer > (ulong)p )
        return;
    // perhaps, but let's look closer
    ulong i = ((ulong)p - (ulong)a->buffer) / a->step;
    if ( i >= a->capacity )
        return;
    if ( ! (a->used[i/bits] & 1UL << (i%bits)) )
        return;
    // fine. we have the block of memory.
    AllocationBlock * b = (AllocationBlock*)a->block( i );
    // does it have our magic marker?
    if ( b->x.magic != ::magic )
        die( Memory );
    // is it already marked?
    if ( (a->marked[i/bits] & 1UL << (i%bits)) )
        return;
    // no. mark it, and count it in the collection statistics
    a->marked[i/bits] |= (1UL << (i%bits));
    objects++;
    ::marked += a->step;
    // is there any chance that it contains children?
    if ( !b->x.number )
        return;
    // is there space on the stack for this object?
    if ( tos == 524288 ) {
        log( "Ran out of stack space while collecting garbage",
             Log::Disaster );
        return;
    }
    // yes. put it on the stack so the children, too, can be marked.
    // the stack is malloc()ed lazily and released by mark().
    if ( !stack ) {
        stack = (AllocationBlock**)malloc( 524288 * sizeof(AllocationBlock *) );
        if ( !stack )
            die( Memory );
        tos = 0;
    }
    stack[tos++] = b;
    if ( tos > peak )
        peak = tos;
}
507
508
509 /*! This private helper processes all the stacked pointers, scanning
510 them for valid pointers and marking any that exist.
511 */
512
void Allocator::mark()
{
    // drain the to-do stack that mark( void * ) fills
    while ( tos > 0 ) {
        AllocationBlock * b = stack[--tos];
        // mark its children
        uint number = b->x.number;
        if ( number == 127 ) {
            // 127 means "maybe all pointers": scan the whole chunk.
            // NOTE(review): find() is assumed to succeed since b
            // came off the mark stack; a null return would crash —
            // confirm that stacked blocks are always registered.
            Allocator * a = AllocatorMapTable::find( b );
            number = ( a->step - bytes ) / sizeof( void* );
        }
        uint n = 0;
        while ( n < number ) {
            if ( b->payload[n] )
                mark( b->payload[n] );
            n++;
        }
    }
    // release the stack; it is reallocated on demand next time
    ::free( stack );
    stack = 0;
    tos = 0;
}
534
535
536 /*! Frees all memory that's no longer in use. This can take some time.
537
538 Returns null if entries is null or empty, returns an object in
539 entries else. The returned object is (in some sense) the one
540 that's responsible for the largest share of allocated memory.
541 */
542
Garbage * Allocator::free( List<Garbage> * entries )
{
    // timestamps for the statistics reported at the end
    struct timeval start, afterMark, afterSweep;
    start.tv_sec = 0;
    start.tv_usec = 0;
    afterMark.tv_sec = 0;
    afterMark.tv_usec = 0;
    afterSweep.tv_sec = 0;
    afterSweep.tv_usec = 0;
    gettimeofday( &start, 0 );

    // drop cache contents so they don't keep garbage alive
    Cache::clearAllCaches( false );

    total = 0;
    peak = 0;
    uint freed = 0;
    objects = 0;
    ::marked = 0;

    Garbage * biggest = 0;

    // mark
    if ( entries ) {
        uint size = 0;
        List<Garbage>::Iterator i( entries );
        while ( i ) {
            uint m = ::marked;
            mark( i );
            mark();
            // remember which entry anchored the most newly-marked
            // bytes; that's the one we return
            if ( !biggest || ::marked - m > size ) {
                biggest = i;
                size = ::marked - m;
            }
            ++i;
        }
    }
    // mark everything reachable from the registered roots, filling
    // in the per-root statistics on the way
    uint i = 0;
    while ( i < ::numRoots ) {
        if ( ::roots[i].root ) {
            uint o = objects;
            uint m = ::marked;
            mark( ::roots[i].root );
            mark();
            ::roots[i].objects = objects - o;
            ::roots[i].size = ::marked - m;
        }

        i++;
    }
    gettimeofday( &afterMark, 0 );

    // and sweep
    i = 0;
    uint blocks = 0;
    while ( i < 32 ) {
        Allocator * a = allocators[i];
        while ( a ) {
            uint taken = a->taken;
            if ( a->taken )
                a->sweep();
            freed = freed + ( taken - a->taken ) * a->step;
            total = total + a->taken * a->step;
            a = a->next;
        }
        // rebuild this chain keeping (and counting) only arenas
        // that still hold objects; empty arenas go back to the OS.
        // note that this reverses the chain's order.
        Allocator * s = 0;
        a = allocators[i];
        while ( a ) {
            Allocator * n = a->next;
            if ( a->taken ) {
                a->next = s;
                s = a;
                blocks++;
            }
            else {
                delete a;
            }
            a = n;
        }
        allocators[i] = s;
        i++;
    }
    gettimeofday( &afterSweep, 0 );

    // compute the mark and sweep durations in microseconds (only if
    // the initial gettimeofday() actually succeeded)
    uint timeToMark = 0;
    uint timeToSweep = 0;
    if ( start.tv_sec ) {
        timeToMark = ( afterMark.tv_sec - start.tv_sec ) * 1000000 +
                     ( afterMark.tv_usec - start.tv_usec );
        timeToSweep = ( afterSweep.tv_sec - afterMark.tv_sec ) * 1000000 +
                      ( afterSweep.tv_usec - afterMark.tv_usec );
    }
    // dumpRandomObject();

    if ( !freed )
        return biggest;

    // optional reporting: a summary line if the collection was big
    // or slow...
    if ( verbose && ( ::allocated >= 4*1024*1024 ||
                      timeToMark + timeToSweep >= 10000 ) )
        log( "Allocator: allocated " +
             EString::humanNumber( ::allocated ) +
             " then freed " +
             EString::humanNumber( freed ) +
             " bytes, leaving " +
             fn( objects ) +
             " objects of " +
             EString::humanNumber( total ) +
             " bytes, across " +
             fn( blocks ) +
             " " +
             EString::humanNumber( BlockSize ) +
             " blocks. Recursion depth: " +
             fn( peak ) + ". Time needed to mark: " +
             fn( (timeToMark+500)/1000 ) + "ms. To sweep: " +
             fn( (timeToSweep+500)/1000 ) + "ms.",
             Log::Info );
    // ... a per-size breakdown if much memory survives ...
    if ( verbose && total > 8 * 1024 * 1024 ) {
        EString objects;
        i = 0;
        while ( i < 32 ) {
            uint n = 0;
            uint max = 0;
            Allocator * a = allocators[i];
            while ( a ) {
                n = n + a->taken;
                max = max + a->capacity;
                a = a->next;
            }
            if ( n ) {
                if ( objects.isEmpty() )
                    objects = "Objects:";
                else
                    objects.append( "," );
                uint size = allocators[i]->step;
                objects.append( " size " + fn( size-bytes ) + ": " +
                                fn( n ) + " (" +
                                EString::humanNumber( size * n ) + " used, " +
                                EString::humanNumber( size * max ) +
                                " allocated)" );
            }
            i++;
        }
        log( objects, Log::Debug );
    }
    // ... and a per-root breakdown if many objects survive
    const uint ObjectLimit = 8192;
    if ( verbose && objects > ObjectLimit ) {
        i = 0;
        while ( i < numRoots ) {
            if ( roots[i].root && roots[i].objects > ObjectLimit/2 ) {
                EString objects = "Root ";
                objects.appendNumber( i );
                objects.append( " (" );
                objects.append( roots[i].name );
                objects.append( ") reaches " );
                objects.appendNumber( roots[i].objects );
                objects.append( " objects, total size " );
                objects.append( EString::humanNumber( roots[i].size ) );
                objects.append( "b" );
                log( objects, Log::Debug );
            }
            i++;
        }
    }
    ::allocated = 0;
    return biggest;
}
708
709
710 /*! Sweeps this allocator, freeing all unmarked memory blocks and
711 unmarking all memory blocks.
712 */
713
void Allocator::sweep()
{
    uint b = 0;
    // walk the bitmaps one word at a time; stop early once nothing
    // is left to free
    while ( taken > 0 && b * bits < capacity ) {
        uint i = 0;
        // while this word still has used-but-unmarked chunks, free
        // them bit by bit (clearing a used bit shrinks the mask)
        while ( ( used[b] & ~marked[b] ) ) {
            if ( (used[b] & (1UL<<i)) && !(marked[b] & (1UL<<i)) ) {
                AllocationBlock * m
                    = (AllocationBlock *)block( b * bits + i );
                if ( m ) {
                    if ( m->x.magic != ::magic )
                        die( Memory );
                    used[b] &= ~(1UL << i);
                    taken--;
                    m->x.magic = 0;
                }
            }
            i++;
        }
        // survivors start the next collection cycle unmarked
        marked[b] = 0;
        b++;
    }
    // restart the allocation scan from the bottom of the arena
    base = 0;
}
738
739
740 /*! Returns the amount of memory allocated to hold \a p and any object
741 to which p points.
742
743 As a side effect, this marks \a p and the other objects so they
744 won't be freed during the next collection.
745 */
746
uint Allocator::sizeOf( void * p )
{
    // reset the counters that mark() accumulates into, then mark p
    // and everything reachable from it; the byte counter is the
    // answer. side effect: those objects stay marked until the next
    // sweep, as the doc comment above says.
    ::objects = 0;
    ::marked = 0;
    mark( p );
    mark();
    return ::marked;
}
755
756
757 /*! Returns a pointer to block no. \a i in this Allocator. The pointer
758 is to the management word, not the payload.
759 */
760
block(uint i)761 void * Allocator::block( uint i )
762 {
763 if ( i >= capacity )
764 return 0;
765 return (void *)(i * step + (ulong)buffer);
766 }
767
768
769 /*! Returns the biggest number of bytes which can be allocated at the
770 same effective cost as \a size.
771
772 Suppose allocating 24, 25 or 28 bytes all cause Allocator to use
773 32 bytes, but 29 causes Allocator to use 48. Then rounded(24),
774 rounded(25) and rounded(28) all return 28, while rounded(29) might
775 return something like 44.
776
777 This can be used by EString and UString to optimize their memory
778 usage. Perhaps also by other classes.
779 */
780
rounded(uint size)781 uint Allocator::rounded( uint size )
782 {
783 uint i = 3;
784 if ( bits == 64 )
785 i = 4;
786 while ( 1UL << i < size + bytes )
787 i++;
788 return (1UL << i) - bytes;
789 }
790
791
792 /*! Records that \a *p is an allocation root, i.e. that whatever it
793 points to is a valid object. \a t is a description of this root
794 (e.g. "array of connection objects").
795 */
796
addEternal(const void * p,const char * t)797 void Allocator::addEternal( const void * p, const char * t )
798 {
799 // If there are holes left by removeEternal(), we'll fill them;
800 // otherwise, we'll use a new index and increase numRoots.
801 uint i = 0;
802 while ( i < numRoots && roots[i].root )
803 i++;
804
805 roots[i].root = (void *)p;
806 roots[i].name = t;
807 roots[i].objects = 0;
808 roots[i].size = 0;
809
810 if ( i == numRoots )
811 numRoots++;
812
813 if ( i < maxRoots )
814 return;
815
816 // we have a nasty memory leak. probably someone's allocating new
817 // roots in a loop.
818 log( EString( "Ran out of roots. Last allocated root: " ) + t,
819 Log::Disaster );
820 die( Memory );
821 }
822
823
824 /*! Records that \a *p is no longer an allocation root. The object may
825 have been deleted.
826 */
827
removeEternal(void * p)828 void Allocator::removeEternal( void * p )
829 {
830 uint i = 0;
831 while ( i < ::numRoots && roots[i].root != p )
832 i++;
833 if ( i < maxRoots && roots[i].root == p ) {
834 roots[i].root = 0;
835 roots[i].name = 0;
836 roots[i].size = 0;
837 roots[i].objects = 0;
838 }
839 while ( numRoots && !roots[numRoots-1].root )
840 numRoots--;
841 }
842
843
844 /*! Records that \a *p is no longer an allocation root. The object may
845 have been deleted.
846 */
847
void Allocator::removeEternal( const void * p )
{
    // const overload; forwards to the non-const version
    removeEternal( (void*)p );
}
852
853
854 /*! Instructs the Allocator to log various statistics if \a report is
855 true, and to be entirely silent if \a report is false.
856
857 The initial value is false.
858 */
859
void Allocator::setReporting( bool report )
{
    // controls the statistics logging in free()
    ::verbose = report;
}
864
865
866
867
868 /*! Returns the number of bytes allocated since the last memory sweep. */
869
uint Allocator::allocated()
{
    // maintained by alloc()/deallocate(), reset by free()
    return ::allocated;
}
874
875
876 /*! Returns the number of bytes in use after the last sweep. */
877
uint Allocator::inUse()
{
    // computed during the sweep phase of free()
    return ::total;
}
882
883
884 /*! Returns the amount of memory gobbled up when this Allocator
885 allocates memory. This is a little bigger than the biggest object
886 this Allocator can provide.
887 */
888
uint Allocator::chunkSize() const
{
    // step includes the management word, so this is a little bigger
    // than the largest payload this Allocator can hand out
    return step;
}
893
894
pointers(void * p)895 void pointers( void * p )
896 {
897 if ( !p )
898 return;
899
900 uint bi = 0;
901 while ( bi < 32 ) {
902 Allocator * a = allocators[bi];
903 while ( a ) {
904 uint b = 0;
905 while ( b * bits < a->capacity ) {
906 uint i = 0;
907 while ( i < 32 ) {
908 if ( (a->used[b] & (1UL<<i)) &&
909 !(a->marked[b] & (1UL<<i)) ) {
910 AllocationBlock * m
911 = (AllocationBlock *)a->block( b * bits + i );
912 if ( m ) {
913 uint number = m->x.number;
914 if ( number == 127 )
915 number = ( a->step - bytes ) / sizeof( void* );
916 uint n = 0;
917 while ( n < number ) {
918 if ( m->payload[n] == p ) {
919 fprintf( stderr,
920 "Pointer at 0x%p (in 0x%p, "
921 "size <= %d, %d pointers)\n",
922 &m->payload[n],
923 &m->payload[0],
924 a->step - bytes,
925 number );
926 number = 0;
927 }
928 n++;
929 }
930 }
931 }
932 i++;
933 }
934 b++;
935 }
936 a = a->next;
937 }
938 bi++;
939 }
940 }
941
942
943 /*! Returns a pointer to the Allocator that manages \a p.
944
945 Should go away. As soon as possible.
946 */
947
Allocator * Allocator::owner( const void * p )
{
    // thin wrapper around the block-number trie lookup
    return AllocatorMapTable::find( p );
}
952
953
954 /*! Returns the number of bytes Allocator has allocated from the
955 operating system (and not returned).
956 */
957
allocatedFromOS()958 uint Allocator::allocatedFromOS()
959 {
960 int r = 0;
961 int i = 0;
962 while ( i < 32 ) {
963 Allocator * a = allocators[i];
964 while ( a ) {
965 r += BlockSize;
966 a = a->next;
967 }
968 ++i;
969 }
970
971 return r;
972 }
973