1 /*
2 SuperCollider real time audio synthesis system
3 Copyright (c) 2002 James McCartney. All rights reserved.
4 http://www.audiosynth.com
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <string.h>
22 #include <stdexcept>
23 #include "SC_AllocPool.h"
24 #include "SC_BoundsMacros.h"
25 #include <assert.h>
26 #include <string>
27
28 /*
29 Requests are `small' if both the corresponding and the next bin are small
30 */
31
32 #ifdef ENABLE_MEMORY_CHECKS
33 # define check_pool() DoCheckPool()
34 # define check_free_chunk(P) DoCheckFreeChunk(P)
35 # define check_inuse_chunk(P) DoCheckInUseChunk(P)
36 # define check_chunk(P) DoCheckChunk(P)
37 # define check_malloced_chunk(P, N) DoCheckAllocedChunk(P, N)
38 # define garbage_fill(P) DoGarbageFill(P)
39 #else
40 # define check_pool()
41 # define check_free_chunk(P)
42 # define check_inuse_chunk(P)
43 # define check_chunk(P)
44 # define check_malloced_chunk(P, N)
45 # define garbage_fill(P)
46 #endif
47
48 #define aligned_OK(m) ((((size_t)(m)) & kAlignMask) == 0)
49
50 /*
51 void* allocmem(AllocPool *pool, int32 size);
52 void* allocmem(AllocPool *pool, int32 size)
53 {
54 return pool->Alloc(size);
55 }
56
57 void* reallocmem(AllocPool *pool, void* ptr, int32 size);
58 void* reallocmem(AllocPool *pool, void* ptr, int32 size)
59 {
60 return pool->Realloc(ptr, size);
61 }
62
63 void freemem(AllocPool *pool, void* ptr);
64 void freemem(AllocPool *pool, void* ptr)
65 {
66 pool->Free(ptr);
67 }
68 */
InitAlloc()69 void AllocPool::InitAlloc() {
70 if (mAreaInitSize == 0)
71 return;
72
73 /* alloc initial area */
74 NewArea(mAreaInitSize);
75 /* get chunk */
76 AllocAreaPtr area = mAreas;
77 AllocChunkPtr chunk = &area->mChunk;
78 LinkFree(chunk);
79
80 check_pool();
81 }
82
InitBins()83 void AllocPool::InitBins() {
84 for (int i = 0; i < kNumAllocBins; ++i) {
85 mBins[i].BeEmpty();
86 }
87 for (int i = 0; i < 4; ++i) {
88 mBinBlocks[i] = 0;
89 }
90 }
91
AllocPool(NewAreaFunc inAllocArea,FreeAreaFunc inFreeArea,size_t inAreaInitSize,size_t inAreaMoreSize)92 AllocPool::AllocPool(NewAreaFunc inAllocArea, FreeAreaFunc inFreeArea, size_t inAreaInitSize, size_t inAreaMoreSize) {
93 InitBins();
94 mAreaInitSize = inAreaInitSize;
95 mAreaMoreSize = inAreaMoreSize;
96 mAllocArea = inAllocArea;
97 mFreeArea = inFreeArea;
98 mAreas = 0;
99 check_pool();
100
101 InitAlloc();
102 }
103
// Release every area owned by the pool back to the client-supplied free function.
AllocPool::~AllocPool() { FreeAll(); }
105
FreeAll()106 void AllocPool::FreeAll() {
107 check_pool();
108 AllocAreaPtr area = mAreas;
109 if (area) {
110 AllocAreaPtr firstArea = area;
111 do {
112 AllocAreaPtr nextarea = area->mNext;
113 (mFreeArea)(area->mUnalignedPointerToThis);
114 area = nextarea;
115 } while (area != firstArea);
116 mAreas = NULL;
117 }
118 InitBins();
119 check_pool();
120 }
121
FreeAllInternal()122 void AllocPool::FreeAllInternal() {
123 check_pool();
124 InitBins();
125
126 AllocAreaPtr area = mAreas;
127 if (area) {
128 AllocAreaPtr firstArea = area;
129 do {
130 AllocAreaPtr nextarea = area->mNext;
131 size_t size = area->mSize;
132 AllocChunkPtr chunk = &area->mChunk;
133 chunk->SetSizeFree(size);
134 chunk->SetNeighborsInUse(size);
135 LinkFree(chunk);
136 area = nextarea;
137 } while (area != firstArea);
138 }
139 check_pool();
140 }
141
// Tear the pool down completely, then rebuild the initial area from scratch.
void AllocPool::Reinit() {
    FreeAll();
    InitAlloc();
}
146
/* Return a block to the pool. The chunk is coalesced with any free
   neighbors; if the merged chunk spans a whole area (and the pool is
   growable), the area itself is handed back to the client. */
void AllocPool::Free(void* inPtr) {
#ifdef DISABLE_MEMORY_POOLS
    // Pooling disabled at build time: the pointer came from malloc in Alloc().
    free(inPtr);
    return;
#endif

    check_pool();
    if (inPtr == 0)
        return; /* free(0) has no effect */

    AllocChunkPtr chunk = MemToChunk(inPtr);

    check_inuse_chunk(chunk);
    garbage_fill(chunk);

    size_t size = chunk->Size();

    if (!chunk->PrevInUse()) /* consolidate backward */
    {
        // Step back over the previous (free) chunk and absorb it.
        size_t prevSize = chunk->PrevSize();
        chunk = chunk->ChunkAtOffset(0L - prevSize);
        size += prevSize;
        UnlinkFree(chunk);
    }

    AllocChunkPtr next = chunk->ChunkAtOffset(size);
    if (!next->InUse()) /* consolidate forward */
    {
        size += next->Size();
        UnlinkFree(next);
    }

    chunk->SetSizeFree(size);
    if (mAreaMoreSize && chunk->IsArea()) {
        // whole area is free
        // (only growable pools release areas; a fixed-size pool keeps its one area)
        FreeArea(chunk);
    } else {
        LinkFree(chunk);
    }
    check_pool();
}
188
189
/* Allocate a new area of inAreaSize usable bytes via the client callback,
   align its header, link it into the circular area list, and initialize
   its single chunk to span the whole area.
   Throws std::runtime_error if the callback returns NULL. */
AllocAreaPtr AllocPool::NewArea(size_t inAreaSize) {
    // Over-allocate by kAreaOverhead so the header can be aligned below.
    void* ptr = (AllocAreaPtr)(mAllocArea)(inAreaSize + kAreaOverhead);

    if (ptr == NULL)
        throw std::runtime_error(std::string("Could not allocate new area"));

    // AllocAreaPtr area = (AllocAreaPtr)((unsigned long)ptr & ~kAlignMask);
    // Round the raw pointer UP to the next alignment boundary.
    AllocAreaPtr area = (AllocAreaPtr)(((size_t)ptr + kAlignMask) & ~kAlignMask);
    assert((area >= ptr) && ((void*)((size_t)area & ~kAlignMask) == area));

    // Remember the unaligned pointer so FreeArea can hand it back verbatim.
    area->mUnalignedPointerToThis = ptr;

    /* link in area (circular doubly-linked list headed by mAreas) */
    if (mAreas) {
        area->mNext = mAreas;
        area->mPrev = mAreas->mPrev;
        area->mNext->mPrev = area;
        area->mPrev->mNext = area;
    } else {
        area->mNext = area;
        area->mPrev = area;
    }
    mAreas = area;

    /* set area size and make its chunk one big free block with
       in-use sentinels on both sides */
    area->mSize = inAreaSize;
    area->mChunk.BeEmpty();
    area->mChunk.SetNeighborsInUse(inAreaSize);
    area->mChunk.SetSizeFree(inAreaSize);

    return area;
}
222
FreeArea(AllocChunkPtr chunk)223 void AllocPool::FreeArea(AllocChunkPtr chunk) {
224 AllocAreaPtr area = (AllocAreaPtr)((char*)chunk - sizeof(AllocAreaHdr));
225
226 if (area->mNext == area) {
227 mAreas = NULL;
228 } else {
229 /* unlink area */
230 mAreas = area->mPrev->mNext = area->mNext;
231 area->mNext->mPrev = area->mPrev;
232 }
233
234 (mFreeArea)(area->mUnalignedPointerToThis);
235 }
236
237
TotalFree()238 size_t AllocPool::TotalFree() {
239 size_t total = 0;
240 for (int i = 0; i < kNumAllocBins; ++i) {
241 AllocChunkPtr bin = mBins + i;
242 if (bin->Prev() != bin) {
243 for (AllocChunkPtr candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
244 total += candidate->Size();
245 }
246 }
247 }
248 return total;
249 }
250
LargestFreeChunk()251 size_t AllocPool::LargestFreeChunk() {
252 int word = 0;
253 for (int i = 3; i >= 0; --i) {
254 if (mBinBlocks[i]) {
255 word = i;
256 break;
257 }
258 }
259 int binBits = (int)mBinBlocks[word];
260 int bitPosition = NUMBITS(binBits) - 1;
261 int index = (word << 5) + bitPosition;
262 AllocChunkPtr bin = mBins + index;
263 // postbuf("** %p %p %p %p\n", mBinBlocks[0], mBinBlocks[1], mBinBlocks[2], mBinBlocks[3]);
264 // postbuf("%d %d %d %p %p %p\n", word, bitPosition, index, binBits, bin->Prev(), bin->Next());
265
266 AllocChunkPtr candidate;
267 size_t maxsize = 0;
268 for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
269 size_t candidate_size = candidate->Size();
270 maxsize = sc_max(maxsize, candidate_size);
271 // postbuf(" %d %d\n", maxsize, candidate_size);
272 }
273
274 /*for (int i=0; i<kNumAllocBins; ++i) {
275 bin = mBins + i;
276 if (bin->Prev() != bin) {
277 postbuf("* %d %d\n", i, bin->Prev()->Size());
278 }
279 }*/
280 return maxsize;
281 }
282
/* Allocate a block big enough for inReqSize bytes.
   Strategy: try an exact-fit bin first, then scan progressively larger
   bins (splitting off a free remainder), and finally grow the pool with
   a new area. Throws std::runtime_error when nothing can satisfy the
   request. Returns a pointer to the chunk's payload. */
void* AllocPool::Alloc(size_t inReqSize) {
#ifdef DISABLE_MEMORY_POOLS
    // Pooling disabled at build time: fall straight through to malloc.
    return malloc(inReqSize);
#endif

    // OK it has a lot of gotos, but these remove a whole lot of common code
    // that was obfuscating the original version of this function.
    // So here I am choosing the OnceAndOnlyOnce principle over the caveats on gotos.
    // The gotos only jump forward and only to the exit paths of the function

    // The old bin block scheme has been replaced by 4 x 32 bit words so that each bin has a bit
    // and the next bin is found using a count leading zeroes instruction. Much faster.
    // Also now each bin's flag can be kept accurate. This simplifies the searching code quite a bit.

    // Also fwiw, changed 'victim' in the original code to 'candidate'. 'victim' just bothered me.


    AllocChunkPtr candidate; /* inspected/selected chunk */
    size_t candidate_size; /* its size */
    AllocChunkPtr remainder; /* remainder from a split */
    int32 remainder_size; /* its size */
    AllocAreaPtr area;
    size_t areaSize;

    size_t size = RequestToSize(inReqSize); // request rounded up to a bin-able chunk size
    int index = BinIndex(size);
    assert(index < 128);
    AllocChunkPtr bin = mBins + index;

    check_pool();

    /* Check for exact match in a bin */
    if (index < kMaxSmallBin) { /* Faster version for small requests */
        /* No traversal or size check necessary for small bins. */
        candidate = bin->Prev();

        /* Also scan the next one, since it would have a remainder < kMinAllocSize */
        if (candidate == bin)
            candidate = (++bin)->Prev();
        if (candidate != bin) {
            candidate_size = candidate->Size();
            goto found_exact_fit;
        }

        index += 2; /* Set for bin scan below. We've already scanned 2 bins. */
    } else {
        // Large bin: chunks of several sizes share it, so sizes must be checked.
        for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
            candidate_size = candidate->Size();
            remainder_size = (int)(candidate_size - size);
            if (remainder_size >= (int32)kMinAllocSize) { /* too big */
                --index; /* adjust to rescan below after checking last remainder */
                break;
            } else if (remainder_size >= 0) { /* exact fit */
                goto found_exact_fit;
            }
        }
        ++index;
    }

    // Scan higher bins for the first one with a free chunk (via the bin bitmap).
    for (; (index = NextFullBin(index)) >= 0; ++index) {
        bin = mBins + index;

        /* Find and use first big enough chunk ... */
        for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
            candidate_size = candidate->Size();
            remainder_size = (int)(candidate_size - size);
            if (remainder_size >= (int32)kMinAllocSize) { /* split */
                UnlinkFree(candidate);
                goto found_bigger_fit;
            } else if (remainder_size >= 0)
                goto found_exact_fit;
        }
    }
    check_pool();

    // Nothing on the free lists: grow the pool (or give up).
    if (mAreaMoreSize == 0) { /* pool has a non-growable area */
        if (mAreas != NULL /* fixed size area exhausted */
            || size > mAreaInitSize) /* too big anyway */
            goto found_nothing;
        areaSize = mAreaInitSize;
        goto split_new_area;
    }

    if (size > mAreaMoreSize) {
        // Oversized request: give it a dedicated area, used whole.
        areaSize = size;
        goto whole_new_area;
    } else {
        areaSize = mAreaMoreSize;
        goto split_new_area;
    }

    // exit paths:
found_nothing:
    // ipostbuf("alloc failed. size: %d\n", inReqSize);
    throw std::runtime_error(std::string("alloc failed, increase server's memory allocation (e.g. via ServerOptions)"));

whole_new_area:
    // ipostbuf("whole_new_area\n");
    // The entire new area becomes the returned chunk; nothing is split off.
    area = NewArea(areaSize);
    if (!area)
        return 0;
    candidate = &area->mChunk;
    candidate_size = candidate->Size();
    goto return_chunk;

split_new_area:
    // ipostbuf("split_new_area\n");
    area = NewArea(areaSize);
    if (!area)
        return 0;
    candidate = &area->mChunk;
    candidate_size = candidate->Size();
    remainder_size = (int)(areaSize - size);
    // FALL THROUGH
found_bigger_fit:
    // ipostbuf("found_bigger_fit\n");
    // Split: the tail of the candidate becomes a new free chunk.
    remainder = candidate->ChunkAtOffset(size);
    remainder->SetSizeFree(remainder_size);
    candidate_size -= remainder_size;
    LinkFree(remainder);
    goto return_chunk;

found_exact_fit:
    check_pool();
    UnlinkFree(candidate);
    // FALL THROUGH
return_chunk:

    candidate->SetSizeInUse(candidate_size);
    check_malloced_chunk(candidate, candidate_size);
    check_pool();
    garbage_fill(candidate);
    return candidate->ToPtr();
}
417
418
/* Resize a previously allocated block. Tries, in order: reusing the chunk
   in place (possibly splitting), expanding into a free next chunk, shifting
   back into a free previous chunk (with a data move), and finally a fresh
   Alloc + copy + Free. Throws std::runtime_error if allocation fails. */
void* AllocPool::Realloc(void* inPtr, size_t inReqSize) {
#ifdef DISABLE_MEMORY_POOLS
    // Pooling disabled at build time: the pointer came from malloc in Alloc().
    return realloc(inPtr, inReqSize);
#endif


    void* outPtr;
    AllocChunkPtr prev;
    check_pool();
    bool docopy = false; // set when the payload must be moved to a new start address

    /* realloc of null is supposed to be same as malloc */
    if (inPtr == 0)
        return Alloc(inReqSize);

    AllocChunkPtr oldChunk = MemToChunk(inPtr);
    AllocChunkPtr newChunk = oldChunk;
    size_t oldsize = oldChunk->Size();
    size_t newsize = oldsize;
    size_t size = RequestToSize(inReqSize);
    size_t nextsize, prevsize;
    check_inuse_chunk(oldChunk);

    if (oldsize < size) {
        /* Try expanding forward */
        AllocChunkPtr next = oldChunk->NextChunk();
        if (!next->InUse()) {
            nextsize = next->Size();
            /* Forward into next chunk */
            if (nextsize + newsize >= size) {
                UnlinkFree(next);
                newsize += nextsize;
                goto split;
            }
        } else {
            next = 0; // remember: no free chunk follows
            nextsize = 0;
        }


        /* Try shifting backwards. */
        prev = oldChunk->PrevChunk();
        if (!prev->InUse()) {
            prevsize = prev->Size();

            /* try forward + backward first to save a later consolidation */
            if (next != 0) {
                /* into next chunk */
                if (nextsize + prevsize + newsize >= size) {
                    newsize += nextsize + prevsize;
                    UnlinkFree(next);
                    goto alloc_prev;
                }
            }

            /* backward only */
            if (prev != 0 && prevsize + newsize >= size) {
                newsize += prevsize;
                goto alloc_prev;
            }
        }

        /* Must allocate */

        outPtr = Alloc(inReqSize);

        check_pool();
        if (outPtr == 0) {
            // ipostbuf("realloc failed. size: %d\n", inReqSize);
            throw std::runtime_error(
                std::string("realloc failed, increase server's memory allocation (e.g. via ServerOptions)"));
        }

        /* Otherwise copy, free, and exit */
        // Payload size is the chunk size minus its header.
        memcpy(outPtr, inPtr, oldsize - sizeof(AllocChunk));
        Free(inPtr);
        return outPtr;
    } else
        goto split; // shrinking (or same size): reuse the chunk in place

alloc_prev:
    // Absorb the free previous chunk; the payload will start earlier,
    // so it must be memmove'd below.
    UnlinkFree(prev);
    newChunk = prev;
    docopy = true;
    // FALL THROUGH
split: /* split off extra room in old or expanded chunk */
    // check_pool();
    if (newsize - size >= kMinAllocSize) { /* split off remainder */
        size_t remainder_size = newsize - size;
        AllocChunkPtr remainder = newChunk->ChunkAtOffset(size);
        remainder->SetSizeInUse(remainder_size); // marked in-use so Free() can coalesce it properly
        newChunk->SetSizeInUse(size);
        Free(remainder->ToPtr()); /* let free() deal with it */
    } else {
        newChunk->SetSizeInUse(newsize);
    }
    outPtr = newChunk->ToPtr();
    if (docopy) {
        // Regions may overlap when shifting backwards: memmove, not memcpy.
        memmove(outPtr, inPtr, oldsize - sizeof(AllocChunk));
    }
    check_inuse_chunk(newChunk);
    check_pool();
    garbage_fill(newChunk);
    return outPtr;
}
524
LinkFree(AllocChunkPtr inChunk)525 void AllocPool::LinkFree(AllocChunkPtr inChunk) {
526 size_t size = inChunk->Size();
527 size_t index = BinIndex(size);
528
529 AllocChunkPtr bin = mBins + index;
530
531 if (index < kNumSmallBins || bin->IsEmpty()) {
532 inChunk->InsertAfter(bin);
533 MarkBinBlock(index);
534 } else {
535 AllocChunkPtr link = bin->Next();
536 while (link != bin && size < link->Size())
537 link = link->Next();
538 inChunk->InsertBefore(link);
539 }
540 }
541
DoCheckArea(AllocAreaPtr area)542 void AllocPool::DoCheckArea(AllocAreaPtr area) {
543 assert(area->mChunk.PrevInUse());
544
545 AllocChunkPtr p = &area->mChunk;
546 while (p->mSize != kChunkInUse) {
547 if (p->InUse()) {
548 DoCheckInUseChunk(p);
549 } else {
550 DoCheckFreeChunk(p);
551 }
552 p = p->NextChunk();
553 }
554 }
555
DoCheckBin(AllocChunkPtr bin,long index)556 void AllocPool::DoCheckBin(AllocChunkPtr bin, long index) {
557 AllocChunkPtr p = bin->Next();
558
559 while (p != bin) {
560 assert(BinIndex(p->Size()) == index);
561 DoCheckFreeChunk(p);
562 p = p->Next();
563 }
564 }
565
566
DoCheckPool()567 void AllocPool::DoCheckPool() {
568 AllocAreaPtr area = mAreas;
569 if (area) {
570 do {
571 AllocAreaPtr nextarea = area->mNext;
572 DoCheckArea(area);
573 area = nextarea;
574 } while (area != mAreas);
575 }
576
577 for (int i = 0; i < kNumAllocBins; ++i) {
578 AllocChunkPtr b = mBins + i;
579 DoCheckBin(b, i);
580 }
581 }
582
583
/* Basic invariant shared by free and in-use chunks: the size recorded in
   this chunk's header must match the prev-size recorded by its neighbor.
   The locals live under #ifndef NDEBUG because the assert below compiles
   away entirely when NDEBUG is defined. */
void AllocPool::DoCheckChunk(AllocChunkPtr p) {
#ifndef NDEBUG
    size_t size = p->Size();
    // size_t maxsize = mAreaInitSize > mAreaMoreSize ? mAreaInitSize : mAreaMoreSize;
    // assert(size < maxsize);

    AllocChunkPtr next = p->ChunkAtOffset(size);
#endif
    assert(p->mSize == next->mPrevSize);
}
594
595
/* Validate a chunk that is expected to be on the free lists: correct
   flags, alignment, full consolidation (no free neighbors), and sane
   doubly-linked-list pointers. Zero-size end markers are exempt. */
void AllocPool::DoCheckFreeChunk(AllocChunkPtr p) {
    size_t size = p->Size();
#ifndef NDEBUG
    AllocChunkPtr next = p->ChunkAtOffset(size);
#endif
    DoCheckChunk(p);

    /* Check whether it claims to be free ... */
    assert(!p->InUse());

    /* Unless an end marker, must have OK fields */
    if (size >= kMinAllocSize) {
        assert((size & kAlignMask) == 0);
        assert(aligned_OK(p->ToPtr()));
        /* ... and is fully consolidated */
        assert(p->PrevInUse());
        assert(next->InUse());

        /* ... and has minimally sane links */
        assert(p->Next()->Prev() == p);
        assert(p->Prev()->Next() == p);
    } else /* end markers are always of size 0 */
        assert(size == 0);
}
620
/* Validate a chunk that is expected to be allocated, and opportunistically
   validate any free neighbors (free chunks carry more checkable state). */
void AllocPool::DoCheckInUseChunk(AllocChunkPtr p) {
    size_t size = p->Size();
    AllocChunkPtr next = p->NextChunk();

    DoCheckChunk(p);

    /* Check whether it claims to be in use ... */
    assert(p->InUse());

    /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
    */
    if (!p->PrevInUse()) {
        // Zero prev-size marks an area-start sentinel; skip it.
        size_t prevsize = p->PrevSize();
        if (prevsize > 0) {
            AllocChunkPtr prv = p->PrevChunk();
            assert(prv->NextChunk() == p);
            DoCheckFreeChunk(prv);
        }
    }
    // ChunkAtOffset(size) is the same chunk as `next` (NextChunk).
    if (!p->ChunkAtOffset(size)->InUse()) {
        DoCheckFreeChunk(next);
    }
}
646
/* Validate a chunk just handed out by Alloc for a request of s bytes:
   legal size, alignment, and slack (room) smaller than kMinAllocSize
   (otherwise a remainder should have been split off). The locals live
   under #ifndef NDEBUG because the asserts vanish when NDEBUG is defined. */
void AllocPool::DoCheckAllocedChunk(AllocChunkPtr p, size_t s) {
#ifndef NDEBUG
    size_t size = p->Size();
    long room = size - s;
#endif

    DoCheckInUseChunk(p);

    /* Legal size ... */
    assert(size >= kMinAllocSize);
    assert((size & kAlignMask) == 0);
    assert(room >= 0);
    assert(room < kMinAllocSize);

    /* ... and alignment */
    assert(aligned_OK(p->ToPtr()));


    /* ... and was allocated at front of an available chunk */
    assert(p->PrevInUse()); // huh?? - jmc
}
668
DoGarbageFill(AllocChunkPtr p)669 void AllocPool::DoGarbageFill(AllocChunkPtr p) {
670 long size = (p->Size() - sizeof(AllocChunk));
671 DoGarbageFill(p, size);
672 }
673
DoGarbageFill(AllocChunkPtr p,long size)674 void AllocPool::DoGarbageFill(AllocChunkPtr p, long size) {
675 size /= sizeof(long);
676 long* ptr = (long*)p->ToPtr();
677 for (int i = 0; i < size; ++i) {
678 ptr[i] = 0xA3A56955;
679 }
680 }
681