//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#ifndef COMPILER_TRANSLATOR_POOLALLOC_H_
#define COMPILER_TRANSLATOR_POOLALLOC_H_

#ifdef _DEBUG
#define GUARD_BLOCKS  // define to enable guard block sanity checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//     typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//
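//
// For example (an illustrative sketch only; TExample is a hypothetical class,
// not part of this header):
//
//   class TExample
//   {
//     public:
//       // Route object allocation to the current global pool.  delete is a
//       // no-op because pool memory is reclaimed en masse by pop()/popAll().
//       void *operator new(size_t size) { return GetGlobalPoolAllocator()->allocate(size); }
//       void operator delete(void *) {}
//   };
//
//   std::vector<int, pool_allocator<int>> indices;  // element storage from the pool
//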

#include <stddef.h>
#include <string.h>
#include <new>
#include <vector>

// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these
// never get instantiated, so they have no impact.
//

class TAllocation
{
  public:
    TAllocation(size_t size, unsigned char *mem, TAllocation *prev = 0)
        : size(size), mem(mem), prevAlloc(prev)
    {
// Allocations are bracketed:
//    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (guardBlockSize)..., but that
// makes the compiler print warnings about 0-length memsets,
// even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(), guardBlockBeginVal, guardBlockSize);
        memset(data(), userDataFill, size);
        memset(postGuard(), guardBlockEndVal, guardBlockSize);
#endif
    }

    void check() const
    {
        checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal, "after");
    }

    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size)
    {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char *offsetAllocation(unsigned char *m)
    {
        return m + guardBlockSize + headerSize();
    }

  private:
    void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char *preGuard() const { return mem + headerSize(); }
    unsigned char *data() const { return preGuard() + guardBlockSize; }
    unsigned char *postGuard() const { return data() + size; }

    size_t size;             // size of the user data area
    unsigned char *mem;      // beginning of our allocation (points to header)
    TAllocation *prevAlloc;  // prior allocation in the chain

    // Support MSVC++ 6.0
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    inline static size_t headerSize() { return 0; }
#endif
};
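
//
// With guard blocks enabled, allocationSize() and offsetAllocation() imply
// this layout (a worked sketch; the concrete guardBlockSize value is defined
// in the accompanying .cpp file, so the number below is an assumption):
//
//   [TAllocation header][guard block][user data, 'size' bytes][guard block]
//   ^mem                ^preGuard()  ^data()                  ^postGuard()
//
//   e.g. with guardBlockSize == 16:
//        allocationSize(64) == 64 + 2 * 16 + sizeof(TAllocation)
//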

//
// There are several stacks.  One tracks the user's push and pop operations,
// and is not yet implemented.  The others are simply repositories of free
// pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS.  Multi-page allocations
// are returned to the OS.  Individual page allocations are kept for future
// re-use.
//
// The "page size" used here is not, and need not match, the underlying OS
// page size; but making it about that size, or a multiple of it, is likely
// to perform best.
//
class TPoolAllocator
{
  public:
    TPoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory; call pop().
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  It does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() was never called, to free all memory since the first
    // allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory is
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void *allocate(size_t numBytes);

    //
    // There is no deallocate.  The point of this class is that deallocation
    // can be skipped by its user: the model of use is to deallocate
    // everything at once by calling pop(), so there are no memory leak
    // problems to solve.
    //

    // Catch unwanted allocations.
    // TODO(jmadill): Remove this when we remove the global allocator.
    void lock();
    void unlock();

  private:
    size_t alignment;  // all returned allocations will be aligned at
                       // this granularity, which will be a power of 2
    size_t alignmentMask;

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    friend struct tHeader;

    struct tHeader
    {
        tHeader(tHeader *nextPage, size_t pageCount)
            : nextPage(nextPage),
              pageCount(pageCount)
#ifdef GUARD_BLOCKS
              ,
              lastAllocation(0)
#endif
        {
        }

        ~tHeader()
        {
#ifdef GUARD_BLOCKS
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

        tHeader *nextPage;
        size_t pageCount;
#ifdef GUARD_BLOCKS
        TAllocation *lastAllocation;
#endif
    };

    struct tAllocState
    {
        size_t offset;
        tHeader *page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
    void *initializeAllocation(tHeader *block, unsigned char *memory, size_t numBytes)
    {
#ifdef GUARD_BLOCKS
        new (memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation *>(memory);
#endif
        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;           // granularity of allocation from the OS
    size_t headerSkip;         // amount of memory to skip to make room for the
                               //      header (basically, size of header, rounded
                               //      up to make it aligned)
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader *freeList;         // list of popped memory
    tHeader *inUseList;        // list of all memory currently being used
    tAllocStack mStack;        // stack of where to allocate from, to partition pool

    int numCalls;       // just an interesting statistic
    size_t totalBytes;  // just an interesting statistic

#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    std::vector<std::vector<void *>> mStack;
#endif

    TPoolAllocator &operator=(const TPoolAllocator &);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator &);             // don't allow default copy constructor
    bool mLocked;
};
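
//
// A minimal usage sketch (illustrative only, not part of this header):
//
//   TPoolAllocator pool;           // defaults: 8KB growth increment, 16-byte alignment
//   pool.push();                   // open a new allocation scope
//   void *p = pool.allocate(64);   // cheap, aligned bump allocation
//   pool.pop();                    // frees everything allocated since push()
//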

//
// There could potentially be many pools with pops happening at
// different times.  But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator *GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(TPoolAllocator *poolAllocator);
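
//
// One plausible pattern (a hedged sketch only; this header does not mandate
// it) is to install a fresh pool, let pool-aware code allocate from it, and
// restore the previous allocator afterwards:
//
//   TPoolAllocator *previous = GetGlobalPoolAllocator();
//   TPoolAllocator compilePool;
//   SetGlobalPoolAllocator(&compilePool);
//   // ... allocations via pool-aware operator new and pool_allocator ...
//   SetGlobalPoolAllocator(previous);
//   compilePool.popAll();
//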

//
// This STL-compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It will use the pools for allocation, and not
// do any deallocation, but will still do destruction.
//
template <class T>
class pool_allocator
{
  public:
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T *pointer;
    typedef const T *const_pointer;
    typedef T &reference;
    typedef const T &const_reference;
    typedef T value_type;

    template <class Other>
    struct rebind
    {
        typedef pool_allocator<Other> other;
    };
    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }

    pool_allocator() {}

    template <class Other>
    pool_allocator(const pool_allocator<Other> &p)
    {
    }

    template <class Other>
    pool_allocator<T> &operator=(const pool_allocator<Other> &p)
    {
        return *this;
    }

#if defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)
    // libCStd on some platforms has a different allocate/deallocate interface.
    // The caller pre-bakes sizeof(T) into 'n', so 'n' is the number of bytes
    // to be allocated, not the number of elements.
    void *allocate(size_type n) { return getAllocator().allocate(n); }
    void *allocate(size_type n, const void *) { return getAllocator().allocate(n); }
    void deallocate(void *, size_type) {}
#else
    pointer allocate(size_type n)
    {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    pointer allocate(size_type n, const void *)
    {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    void deallocate(pointer, size_type) {}
#endif  // defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)

    void construct(pointer p, const T &val) { new ((void *)p) T(val); }
    void destroy(pointer p) { p->T::~T(); }

    bool operator==(const pool_allocator &rhs) const { return true; }
    bool operator!=(const pool_allocator &rhs) const { return false; }

    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }

    TPoolAllocator &getAllocator() const { return *GetGlobalPoolAllocator(); }
};
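
//
// For example (an illustrative sketch; the alias below is hypothetical):
//
//   typedef std::vector<int, pool_allocator<int>> TPoolIntVector;
//
//   TPoolIntVector v;  // element storage comes from the current global pool
//   v.push_back(42);   // elements are constructed and destroyed normally, but
//                      // their memory is reclaimed only when the pool is popped
//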

#endif  // COMPILER_TRANSLATOR_POOLALLOC_H_