/* Copyright (C) 2010 Wildfire Games.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * (header-less) pool-based heap allocator
 */
26
27 #include "precompiled.h"
28 #include "lib/allocators/headerless.h"
29
30 #include "lib/bits.h"
31 #include "lib/allocators/pool.h"
32
33
34 static const bool performSanityChecks = true;
35
36 // shared by Impl::Allocate and FreedBlock::Validate
37 static bool IsValidSize(size_t size);
38
39
40 //-----------------------------------------------------------------------------
41
42 // this combines the boundary tags and link fields into one structure,
43 // which is safer than direct pointer arithmetic.
44 //
45 // it is written to freed memory, which is fine because IsValidSize ensures
46 // the allocations are large enough.
47 class FreedBlock
48 {
49 friend class RangeList; // manipulates link fields directly
50
51 public:
52 // (required for RangeList::m_sentinel)
FreedBlock()53 FreedBlock()
54 {
55 }
56
Setup(uintptr_t id,size_t size)57 void Setup(uintptr_t id, size_t size)
58 {
59 m_magic = s_magic;
60 m_size = size;
61 m_id = id;
62 }
63
Reset()64 void Reset()
65 {
66 // clear all fields to prevent accidental reuse
67 prev = next = 0;
68 m_id = 0;
69 m_size = ~size_t(0);
70 m_magic = 0;
71 }
72
Size() const73 size_t Size() const
74 {
75 return m_size;
76 }
77
78 /**
79 * @return whether this appears to be a FreedBlock instance with the
80 * desired ID. for additional safety, also call Validate().
81 **/
IsFreedBlock(uintptr_t id) const82 bool IsFreedBlock(uintptr_t id) const
83 {
84 if(m_id != id)
85 return false;
86 if(m_magic != s_magic)
87 return false;
88 return true;
89 }
90
91 /**
92 * warn if any invariant doesn't hold.
93 **/
Validate(uintptr_t id) const94 void Validate(uintptr_t id) const
95 {
96 if(!performSanityChecks) return;
97
98 // note: RangeList::Validate implicitly checks the prev and next
99 // fields by iterating over the list.
100
101 // note: we can't check for prev != next because we're called for
102 // footers as well, and they don't have valid pointers.
103
104 ENSURE(IsValidSize(m_size));
105 ENSURE(IsFreedBlock(id));
106 }
107
108 private:
109 // note: the magic and ID fields are stored at both ends of this
110 // class to increase the chance of detecting memory corruption.
111 static const u64 s_magic = 0xFF55AA00BBCCDDEEull;
112 u64 m_magic;
113
114 FreedBlock* prev;
115 FreedBlock* next;
116
117 // size [bytes] of the entire memory block, including header and footer
118 size_t m_size;
119
120 // this differentiates between headers and footers.
121 uintptr_t m_id;
122 };
123
124
IsValidSize(size_t size)125 static bool IsValidSize(size_t size)
126 {
127 // note: we disallow the questionable practice of zero-byte allocations
128 // because they may be indicative of bugs.
129 if(size < HeaderlessAllocator::minAllocationSize)
130 return false;
131
132 if(size % HeaderlessAllocator::allocationAlignment)
133 return false;
134
135 return true;
136 }
137
138
//-----------------------------------------------------------------------------
// freelists
//-----------------------------------------------------------------------------

// policy: address-ordered good fit
// mechanism: segregated range lists of power-of-two size classes
145
146 struct AddressOrder
147 {
ShouldInsertBeforeAddressOrder148 static bool ShouldInsertBefore(FreedBlock* current, FreedBlock* successor)
149 {
150 return current < successor;
151 }
152 };
153
// "range list" is a freelist of similarly-sized blocks.
155 class RangeList
156 {
157 public:
RangeList()158 RangeList()
159 {
160 Reset();
161 }
162
Reset()163 void Reset()
164 {
165 m_sentinel.prev = &m_sentinel;
166 m_sentinel.next = &m_sentinel;
167 m_freeBlocks = 0;
168 m_freeBytes = 0;
169 }
170
171 template<class InsertPolicy>
Insert(FreedBlock * freedBlock)172 void Insert(FreedBlock* freedBlock)
173 {
174 // find freedBlock before which to insert
175 FreedBlock* successor;
176 for(successor = m_sentinel.next; successor != &m_sentinel; successor = successor->next)
177 {
178 if(InsertPolicy::ShouldInsertBefore(freedBlock, successor))
179 break;
180 }
181
182 freedBlock->prev = successor->prev;
183 freedBlock->next = successor;
184 successor->prev->next = freedBlock;
185 successor->prev = freedBlock;
186
187 m_freeBlocks++;
188 m_freeBytes += freedBlock->Size();
189 }
190
191 /**
192 * @return the first freed block of size >= minSize or 0 if none exists.
193 **/
Find(size_t minSize)194 FreedBlock* Find(size_t minSize)
195 {
196 for(FreedBlock* freedBlock = m_sentinel.next; freedBlock != &m_sentinel; freedBlock = freedBlock->next)
197 {
198 if(freedBlock->Size() >= minSize)
199 return freedBlock;
200 }
201
202 // none found, so average block size is less than the desired size
203 ENSURE(m_freeBytes/m_freeBlocks < minSize);
204 return 0;
205 }
206
Remove(FreedBlock * freedBlock)207 void Remove(FreedBlock* freedBlock)
208 {
209 freedBlock->next->prev = freedBlock->prev;
210 freedBlock->prev->next = freedBlock->next;
211
212 ENSURE(m_freeBlocks != 0);
213 ENSURE(m_freeBytes >= freedBlock->Size());
214 m_freeBlocks--;
215 m_freeBytes -= freedBlock->Size();
216 }
217
Validate(uintptr_t id) const218 void Validate(uintptr_t id) const
219 {
220 if(!performSanityChecks) return;
221
222 size_t freeBlocks = 0, freeBytes = 0;
223
224 for(FreedBlock* freedBlock = m_sentinel.next; freedBlock != &m_sentinel; freedBlock = freedBlock->next)
225 {
226 freedBlock->Validate(id);
227 freeBlocks++;
228 freeBytes += freedBlock->Size();
229 }
230
231 for(FreedBlock* freedBlock = m_sentinel.prev; freedBlock != &m_sentinel; freedBlock = freedBlock->prev)
232 {
233 freedBlock->Validate(id);
234 freeBlocks++;
235 freeBytes += freedBlock->Size();
236 }
237
238 // our idea of the number and size of free blocks is correct
239 ENSURE(freeBlocks == m_freeBlocks*2 && freeBytes == m_freeBytes*2);
240 // if empty, state must be as established by Reset
241 ENSURE(!IsEmpty() || (m_sentinel.next == &m_sentinel && m_sentinel.prev == &m_sentinel));
242 }
243
IsEmpty() const244 bool IsEmpty() const
245 {
246 return (m_freeBlocks == 0);
247 }
248
FreeBlocks() const249 size_t FreeBlocks() const
250 {
251 return m_freeBlocks;
252 }
253
FreeBytes() const254 size_t FreeBytes() const
255 {
256 return m_freeBytes;
257 }
258
259 private:
260 // a sentinel simplifies Insert and Remove. we store it here instead of
261 // in a separate array to improve locality (it is actually accessed).
262 mutable FreedBlock m_sentinel;
263
264 size_t m_freeBlocks;
265 size_t m_freeBytes;
266 };
267
268
269 //-----------------------------------------------------------------------------
270
271 class SegregatedRangeLists
272 {
273 public:
SegregatedRangeLists()274 SegregatedRangeLists()
275 {
276 Reset();
277 }
278
Reset()279 void Reset()
280 {
281 m_bitmap = 0;
282 for(size_t i = 0; i < numRangeLists; i++)
283 m_rangeLists[i].Reset();
284 }
285
Insert(FreedBlock * freedBlock)286 void Insert(FreedBlock* freedBlock)
287 {
288 const size_t sizeClass = SizeClass(freedBlock->Size());
289 m_rangeLists[sizeClass].Insert<AddressOrder>(freedBlock);
290
291 m_bitmap |= Bit<uintptr_t>(sizeClass);
292 }
293
294 /**
295 * @return the first freed block of size >= minSize or 0 if none exists.
296 **/
Find(size_t minSize)297 FreedBlock* Find(size_t minSize)
298 {
299 // iterate over all large enough, non-empty size classes
300 // (zero overhead for empty size classes)
301 const size_t minSizeClass = SizeClass(minSize);
302 size_t sizeClassBits = m_bitmap & ((~size_t(0)) << minSizeClass);
303 while(sizeClassBits)
304 {
305 const size_t size = ValueOfLeastSignificantOneBit(sizeClassBits);
306 sizeClassBits &= ~size; // remove from sizeClassBits
307 const size_t sizeClass = SizeClass(size);
308
309 FreedBlock* freedBlock = m_rangeLists[sizeClass].Find(minSize);
310 if(freedBlock)
311 return freedBlock;
312 }
313
314 // apparently all classes above minSizeClass are empty,
315 // or the above would have succeeded.
316 ENSURE(m_bitmap < Bit<uintptr_t>(minSizeClass+1));
317 return 0;
318 }
319
Remove(FreedBlock * freedBlock)320 void Remove(FreedBlock* freedBlock)
321 {
322 const size_t sizeClass = SizeClass(freedBlock->Size());
323 m_rangeLists[sizeClass].Remove(freedBlock);
324
325 // (masking with !IsEmpty() << sizeClass would probably be faster)
326 if(m_rangeLists[sizeClass].IsEmpty())
327 m_bitmap &= ~Bit<uintptr_t>(sizeClass);
328 }
329
Validate(uintptr_t id) const330 void Validate(uintptr_t id) const
331 {
332 for(size_t i = 0; i < numRangeLists; i++)
333 {
334 m_rangeLists[i].Validate(id);
335
336 // both bitmap and list must agree on whether they are empty
337 ENSURE(((m_bitmap & Bit<uintptr_t>(i)) == 0) == m_rangeLists[i].IsEmpty());
338 }
339 }
340
FreeBlocks() const341 size_t FreeBlocks() const
342 {
343 size_t freeBlocks = 0;
344 for(size_t i = 0; i < numRangeLists; i++)
345 freeBlocks += m_rangeLists[i].FreeBlocks();
346 return freeBlocks;
347 }
348
FreeBytes() const349 size_t FreeBytes() const
350 {
351 size_t freeBytes = 0;
352 for(size_t i = 0; i < numRangeLists; i++)
353 freeBytes += m_rangeLists[i].FreeBytes();
354 return freeBytes;
355 }
356
357 private:
358 /**
359 * @return "size class" of a given size.
360 * class i > 0 contains blocks of size (2**(i-1), 2**i].
361 **/
SizeClass(size_t size)362 static size_t SizeClass(size_t size)
363 {
364 return ceil_log2(size);
365 }
366
ValueOfLeastSignificantOneBit(uintptr_t x)367 static uintptr_t ValueOfLeastSignificantOneBit(uintptr_t x)
368 {
369 return (x & -(intptr_t)x);
370 }
371
372 // segregated, i.e. one list per size class.
373 static const size_t numRangeLists = sizeof(uintptr_t)*CHAR_BIT;
374 RangeList m_rangeLists[numRangeLists];
375
376 // bit i set <==> size class i's freelist is not empty.
377 // this allows finding a non-empty list in O(1).
378 uintptr_t m_bitmap;
379 };
380
381
//-----------------------------------------------------------------------------
// coalescing
//-----------------------------------------------------------------------------

// policy: immediately coalesce
// mechanism: boundary tags

// note: the id and magic values are all that differentiates tags from
// user data. this isn't 100% reliable, but as with headers, we don't want
// to insert extra boundary tags into the allocated memory.

// note: footers consist of Tag{magic, ID, size}, while headers also
// need prev/next pointers. this could comfortably fit in 64 bytes,
// but we don't want to inherit headers from a base class because its
// prev/next pointers should reside between the magic and ID fields.
// maintaining separate FreedBlock and Footer classes is also undesirable;
// we prefer to use FreedBlock for both, which increases the minimum
// allocation size to 64 + allocationAlignment, e.g. 128.
// that's not a problem because the allocator is designed for
// returning pages or IO buffers (4..256 KB).
402 cassert(HeaderlessAllocator::minAllocationSize >= 2*sizeof(FreedBlock));
403
404
405 class BoundaryTagManager
406 {
407 public:
BoundaryTagManager()408 BoundaryTagManager()
409 : m_freeBlocks(0), m_freeBytes(0)
410 {
411 }
412
WriteTags(u8 * p,size_t size)413 FreedBlock* WriteTags(u8* p, size_t size)
414 {
415 FreedBlock* freedBlock = (FreedBlock*)p;
416 freedBlock->Setup(s_headerId, size);
417 Footer(freedBlock)->Setup(s_footerId, size);
418
419 m_freeBlocks++;
420 m_freeBytes += size;
421
422 Validate(freedBlock);
423 return freedBlock;
424 }
425
RemoveTags(FreedBlock * freedBlock)426 void RemoveTags(FreedBlock* freedBlock)
427 {
428 Validate(freedBlock);
429
430 ENSURE(m_freeBlocks != 0);
431 ENSURE(m_freeBytes >= freedBlock->Size());
432 m_freeBlocks--;
433 m_freeBytes -= freedBlock->Size();
434
435 FreedBlock* footer = Footer(freedBlock);
436 freedBlock->Reset();
437 footer->Reset();
438 }
439
PrecedingBlock(u8 * p,u8 * beginningOfPool) const440 FreedBlock* PrecedingBlock(u8* p, u8* beginningOfPool) const
441 {
442 if(p == beginningOfPool) // avoid accessing invalid memory
443 return 0;
444
445 FreedBlock* precedingBlock;
446 {
447 FreedBlock* const footer = (FreedBlock*)(p - sizeof(FreedBlock));
448 if(!footer->IsFreedBlock(s_footerId))
449 return 0;
450 footer->Validate(s_footerId);
451 precedingBlock = (FreedBlock*)(p - footer->Size());
452 }
453
454 Validate(precedingBlock);
455 return precedingBlock;
456 }
457
FollowingBlock(u8 * p,size_t size,u8 * endOfPool) const458 FreedBlock* FollowingBlock(u8* p, size_t size, u8* endOfPool) const
459 {
460 if(p+size == endOfPool) // avoid accessing invalid memory
461 return 0;
462
463 FreedBlock* const followingBlock = (FreedBlock*)(p + size);
464 if(!followingBlock->IsFreedBlock(s_headerId))
465 return 0;
466
467 Validate(followingBlock);
468 return followingBlock;
469 }
470
FreeBlocks() const471 size_t FreeBlocks() const
472 {
473 return m_freeBlocks;
474 }
475
FreeBytes() const476 size_t FreeBytes() const
477 {
478 return m_freeBytes;
479 }
480
481 // (generated via GUID)
482 static const uintptr_t s_headerId = 0x111E8E6Fu;
483 static const uintptr_t s_footerId = 0x4D745342u;
484
485 private:
Validate(FreedBlock * freedBlock) const486 void Validate(FreedBlock* freedBlock) const
487 {
488 if(!performSanityChecks) return;
489
490 // the existence of freedBlock means our bookkeeping better have
491 // records of at least that much memory.
492 ENSURE(m_freeBlocks != 0);
493 ENSURE(m_freeBytes >= freedBlock->Size());
494
495 freedBlock->Validate(s_headerId);
496 Footer(freedBlock)->Validate(s_footerId);
497 }
498
Footer(FreedBlock * freedBlock)499 static FreedBlock* Footer(FreedBlock* freedBlock)
500 {
501 u8* const p = (u8*)freedBlock;
502 return (FreedBlock*)(p + freedBlock->Size() - sizeof(FreedBlock));
503 }
504
505 size_t m_freeBlocks;
506 size_t m_freeBytes;
507 };
508
509
//-----------------------------------------------------------------------------
// stats
//-----------------------------------------------------------------------------
513
514 class Stats
515 {
516 public:
OnReset()517 void OnReset()
518 {
519 if(!performSanityChecks) return;
520
521 m_totalAllocatedBlocks = m_totalAllocatedBytes = 0;
522 m_totalDeallocatedBlocks = m_totalDeallocatedBytes = 0;
523 m_currentExtantBlocks = m_currentExtantBytes = 0;
524 m_currentFreeBlocks = m_currentFreeBytes = 0;
525 }
526
OnAllocate(size_t size)527 void OnAllocate(size_t size)
528 {
529 if(!performSanityChecks) return;
530
531 m_totalAllocatedBlocks++;
532 m_totalAllocatedBytes += size;
533
534 m_currentExtantBlocks++;
535 m_currentExtantBytes += size;
536 }
537
OnDeallocate(size_t size)538 void OnDeallocate(size_t size)
539 {
540 if(!performSanityChecks) return;
541
542 m_totalDeallocatedBlocks++;
543 m_totalDeallocatedBytes += size;
544 ENSURE(m_totalDeallocatedBlocks <= m_totalAllocatedBlocks);
545 ENSURE(m_totalDeallocatedBytes <= m_totalAllocatedBytes);
546
547 ENSURE(m_currentExtantBlocks != 0);
548 ENSURE(m_currentExtantBytes >= size);
549 m_currentExtantBlocks--;
550 m_currentExtantBytes -= size;
551 }
552
OnAddToFreelist(size_t size)553 void OnAddToFreelist(size_t size)
554 {
555 m_currentFreeBlocks++;
556 m_currentFreeBytes += size;
557 }
558
OnRemoveFromFreelist(size_t size)559 void OnRemoveFromFreelist(size_t size)
560 {
561 if(!performSanityChecks) return;
562
563 ENSURE(m_currentFreeBlocks != 0);
564 ENSURE(m_currentFreeBytes >= size);
565 m_currentFreeBlocks--;
566 m_currentFreeBytes -= size;
567 }
568
Validate() const569 void Validate() const
570 {
571 if(!performSanityChecks) return;
572
573 ENSURE(m_totalDeallocatedBlocks <= m_totalAllocatedBlocks);
574 ENSURE(m_totalDeallocatedBytes <= m_totalAllocatedBytes);
575
576 ENSURE(m_currentExtantBlocks == m_totalAllocatedBlocks-m_totalDeallocatedBlocks);
577 ENSURE(m_currentExtantBytes == m_totalAllocatedBytes-m_totalDeallocatedBytes);
578 }
579
FreeBlocks() const580 size_t FreeBlocks() const
581 {
582 return m_currentFreeBlocks;
583 }
584
FreeBytes() const585 size_t FreeBytes() const
586 {
587 return m_currentFreeBytes;
588 }
589
590 private:
591 u64 m_totalAllocatedBlocks, m_totalAllocatedBytes;
592 u64 m_totalDeallocatedBlocks, m_totalDeallocatedBytes;
593 u64 m_currentExtantBlocks, m_currentExtantBytes;
594 u64 m_currentFreeBlocks, m_currentFreeBytes;
595 };
596
597
//-----------------------------------------------------------------------------
// HeaderlessAllocator::Impl
//-----------------------------------------------------------------------------
601
AssertEqual(size_t x1,size_t x2,size_t x3)602 static void AssertEqual(size_t x1, size_t x2, size_t x3)
603 {
604 ENSURE(x1 == x2 && x2 == x3);
605 }
606
607 class HeaderlessAllocator::Impl
608 {
609 public:
Impl(size_t poolSize)610 Impl(size_t poolSize)
611 {
612 (void)pool_create(&m_pool, poolSize, 0);
613
614 Reset();
615 }
616
~Impl()617 ~Impl()
618 {
619 Validate();
620
621 (void)pool_destroy(&m_pool);
622 }
623
Reset()624 void Reset()
625 {
626 pool_free_all(&m_pool);
627 m_segregatedRangeLists.Reset();
628 m_stats.OnReset();
629
630 Validate();
631 }
632
Allocate(size_t size)633 NOTHROW_DEFINE void* Allocate(size_t size)
634 {
635 ENSURE(IsValidSize(size));
636 Validate();
637
638 void* p = TakeAndSplitFreeBlock(size);
639 if(!p)
640 {
641 p = pool_alloc(&m_pool, size);
642 if(!p) // both failed; don't throw bad_alloc because
643 return 0; // this often happens with the file cache.
644 }
645
646 // (NB: we must not update the statistics if allocation failed)
647 m_stats.OnAllocate(size);
648
649 Validate();
650 ENSURE((uintptr_t)p % allocationAlignment == 0);
651 return p;
652 }
653
Deallocate(u8 * p,size_t size)654 void Deallocate(u8* p, size_t size)
655 {
656 ENSURE((uintptr_t)p % allocationAlignment == 0);
657 ENSURE(IsValidSize(size));
658 ENSURE(pool_contains(&m_pool, p));
659 ENSURE(pool_contains(&m_pool, p+size-1));
660
661 Validate();
662
663 m_stats.OnDeallocate(size);
664 Coalesce(p, size);
665 AddToFreelist(p, size);
666
667 Validate();
668 }
669
Validate() const670 void Validate() const
671 {
672 if(!performSanityChecks) return;
673
674 m_segregatedRangeLists.Validate(BoundaryTagManager::s_headerId);
675 m_stats.Validate();
676
677 AssertEqual(m_stats.FreeBlocks(), m_segregatedRangeLists.FreeBlocks(), m_boundaryTagManager.FreeBlocks());
678 AssertEqual(m_stats.FreeBytes(), m_segregatedRangeLists.FreeBytes(), m_boundaryTagManager.FreeBytes());
679 }
680
681 private:
AddToFreelist(u8 * p,size_t size)682 void AddToFreelist(u8* p, size_t size)
683 {
684 FreedBlock* freedBlock = m_boundaryTagManager.WriteTags(p, size);
685 m_segregatedRangeLists.Insert(freedBlock);
686 m_stats.OnAddToFreelist(size);
687 }
688
RemoveFromFreelist(FreedBlock * freedBlock)689 void RemoveFromFreelist(FreedBlock* freedBlock)
690 {
691 m_stats.OnRemoveFromFreelist(freedBlock->Size());
692 m_segregatedRangeLists.Remove(freedBlock);
693 m_boundaryTagManager.RemoveTags(freedBlock);
694 }
695
696 /**
697 * expand a block by coalescing it with its free neighbor(s).
698 **/
Coalesce(u8 * & p,size_t & size)699 void Coalesce(u8*& p, size_t& size)
700 {
701 {
702 FreedBlock* precedingBlock = m_boundaryTagManager.PrecedingBlock(p, m_pool.da.base);
703 if(precedingBlock)
704 {
705 p -= precedingBlock->Size();
706 size += precedingBlock->Size();
707 RemoveFromFreelist(precedingBlock);
708 }
709 }
710
711 {
712 FreedBlock* followingBlock = m_boundaryTagManager.FollowingBlock(p, size, m_pool.da.base+m_pool.da.pos);
713 if(followingBlock)
714 {
715 size += followingBlock->Size();
716 RemoveFromFreelist(followingBlock);
717 }
718 }
719 }
720
TakeAndSplitFreeBlock(size_t size)721 void* TakeAndSplitFreeBlock(size_t size)
722 {
723 u8* p;
724 size_t leftoverSize = 0;
725 {
726 FreedBlock* freedBlock = m_segregatedRangeLists.Find(size);
727 if(!freedBlock)
728 return 0;
729
730 p = (u8*)freedBlock;
731 leftoverSize = freedBlock->Size() - size;
732 RemoveFromFreelist(freedBlock);
733 }
734
735 if(IsValidSize(leftoverSize))
736 AddToFreelist(p+size, leftoverSize);
737
738 return p;
739 }
740
741 Pool m_pool;
742 SegregatedRangeLists m_segregatedRangeLists;
743 BoundaryTagManager m_boundaryTagManager;
744 Stats m_stats;
745 };
746
747
748 //-----------------------------------------------------------------------------
749
HeaderlessAllocator(size_t poolSize)750 HeaderlessAllocator::HeaderlessAllocator(size_t poolSize)
751 : impl(new Impl(poolSize))
752 {
753 }
754
Reset()755 void HeaderlessAllocator::Reset()
756 {
757 return impl->Reset();
758 }
759
Allocate(size_t size)760 NOTHROW_DEFINE void* HeaderlessAllocator::Allocate(size_t size)
761 {
762 return impl->Allocate(size);
763 }
764
Deallocate(void * p,size_t size)765 void HeaderlessAllocator::Deallocate(void* p, size_t size)
766 {
767 return impl->Deallocate((u8*)p, size);
768 }
769
Validate() const770 void HeaderlessAllocator::Validate() const
771 {
772 return impl->Validate();
773 }
774