//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
6 
7 #include "compiler/translator/PoolAlloc.h"
8 
9 #include <stdint.h>
10 #include <stdio.h>
11 #include <assert.h>
12 
13 #include "common/angleutils.h"
14 #include "common/debug.h"
15 #include "common/platform.h"
16 #include "common/tls.h"
17 #include "compiler/translator/InitializeGlobals.h"
18 
// Process-wide TLS slot holding each thread's active TPoolAllocator.
TLSIndex PoolIndex = TLS_INVALID_INDEX;
20 
InitializePoolIndex()21 bool InitializePoolIndex()
22 {
23     assert(PoolIndex == TLS_INVALID_INDEX);
24 
25     PoolIndex = CreateTLSIndex();
26     return PoolIndex != TLS_INVALID_INDEX;
27 }
28 
FreePoolIndex()29 void FreePoolIndex()
30 {
31     assert(PoolIndex != TLS_INVALID_INDEX);
32 
33     DestroyTLSIndex(PoolIndex);
34     PoolIndex = TLS_INVALID_INDEX;
35 }
36 
GetGlobalPoolAllocator()37 TPoolAllocator *GetGlobalPoolAllocator()
38 {
39     assert(PoolIndex != TLS_INVALID_INDEX);
40     return static_cast<TPoolAllocator *>(GetTLSValue(PoolIndex));
41 }
42 
SetGlobalPoolAllocator(TPoolAllocator * poolAllocator)43 void SetGlobalPoolAllocator(TPoolAllocator *poolAllocator)
44 {
45     assert(PoolIndex != TLS_INVALID_INDEX);
46     SetTLSValue(PoolIndex, poolAllocator);
47 }
48 
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//

// Construct a pool that hands out memory carved from large pages.
// growthIncrement: requested page size in bytes (clamped below to 4KB).
// allocationAlignment: requested alignment; normalized below to a power
// of two that is at least pointer-sized.
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment)
    : alignment(allocationAlignment),
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
      pageSize(growthIncrement),
      freeList(0),
      inUseList(0),
      numCalls(0),
      totalBytes(0),
#endif
      mLocked(false)
{
    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void *);
    // Clear the sub-pointer-size bits, then clamp up to minAlign.
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    // Round alignment up to the next power of two.
    size_t a      = 1;
    while (a < alignment)
        a <<= 1;
    alignment     = a;
    alignmentMask = a - 1;  // valid mask because `a` is a power of two

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4 * 1024)
        pageSize = 4 * 1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader))
    {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: allocations are individual malloc()s
    // tracked in per-scope lists; start with one open (outermost) scope.
    mStack.push_back({});
#endif
}
104 
TPoolAllocator::~TPoolAllocator()
{
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Walk the in-use page list: run each header's destructor first (it
    // tears down the page's allocation list), then release the raw storage.
    while (inUseList)
    {
        tHeader *next = inUseList->nextPage;
        inUseList->~tHeader();
        // Pages were obtained via `new char[]`, so release them the same way.
        delete[] reinterpret_cast<char *>(inUseList);
        inUseList = next;
    }

    // We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    while (freeList)
    {
        tHeader *next = freeList->nextPage;
        delete[] reinterpret_cast<char *>(freeList);
        freeList = next;
    }
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: every allocation was a separate malloc();
    // free each one in every remaining scope.
    for (auto &allocs : mStack)
    {
        for (auto alloc : allocs)
        {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}
137 
// Support MSVC++ 6.0
// Byte patterns written into the guard regions (begin/end) and into fresh
// user memory so that corruption and use of stale data are recognizable.
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

// Guard blocks only occupy space in GUARD_BLOCKS builds; with size 0 the
// checking code compiles away entirely.
#ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
#else
const size_t TAllocation::guardBlockSize = 0;
#endif
148 
//
// Check a single guard block for damage
//
// blockMem: start of the guard region to verify.
// val:      the fill byte the region was initialized with.
// locText:  location text ("before"/"after" style) for the diagnostic.
// Asserts on the first mismatching byte (GUARD_BLOCKS debug builds only;
// a no-op otherwise).
void TAllocation::checkGuardBlock(unsigned char *blockMem,
                                  unsigned char val,
                                  const char *locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++)
    {
        if (blockMem[x] != val)
        {
            char assertMsg[80];

// We don't print the assert message.  It's here just to be helpful.
// (Inspect assertMsg in a debugger when the assert below fires.)
#if defined(_MSC_VER)
            // MSVC historically used %Iu as its size_t format specifier.
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n", locText, size, data());
#else
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, size, data());
#endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}
176 
push()177 void TPoolAllocator::push()
178 {
179 #if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
180     tAllocState state = {currentPageOffset, inUseList};
181 
182     mStack.push_back(state);
183 
184     //
185     // Indicate there is no current page to allocate from.
186     //
187     currentPageOffset = pageSize;
188 #else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
189     mStack.push_back({});
190 #endif
191 }
192 
//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    // No open scope to unwind; extra pops are silently ignored.
    if (mStack.size() < 1)
        return;

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Restore the allocation point recorded by the matching push().
    tHeader *page     = mStack.back().page;
    currentPageOffset = mStack.back().offset;

    // Release every page added after the snapshot.
    while (inUseList != page)
    {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader *nextInUse = inUseList->nextPage;
        // Multi-page allocations go straight back to the OS; single
        // pages are cached on the free list for reuse.
        if (inUseList->pageCount > 1)
            delete[] reinterpret_cast<char *>(inUseList);
        else
        {
            inUseList->nextPage = freeList;
            freeList            = inUseList;
        }
        inUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: free each malloc'd allocation recorded
    // in the scope being closed.
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}
234 
235 //
236 // Do a mass-deallocation of all the individual allocations
237 // that have occurred.
238 //
popAll()239 void TPoolAllocator::popAll()
240 {
241     while (mStack.size() > 0)
242         pop();
243 }
244 
// Hand out `numBytes` of pool storage, aligned per the constructor's
// alignment.  Returns 0 on arithmetic overflow of the request size.
// There is no per-allocation free: memory is reclaimed in bulk by
// pop()/popAll() or the destructor.
void *TPoolAllocator::allocate(size_t numBytes)
{
    // Allocation is forbidden while the pool is locked (see lock()).
    ASSERT(!mLocked);

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock].  numBytes is how
    // much memory the caller asked for.  allocationSize is the total
    // size including guard blocks.  In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset)
    {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char *memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        // Round the offset up so the next allocation stays aligned.
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip)
    {
        //
        // Do a multi-page allocation.  Don't mix these with the others.
        // The OS is efficient and allocating and free-ing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        // NOTE(review): plain operator new[] throws std::bad_alloc rather
        // than returning null, so the 0-checks after `new` in this function
        // are effectively dead; kept as written.
        tHeader *memory = reinterpret_cast<tHeader *>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new (memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from: recycle one from the free
    // list when available, otherwise get a fresh page.
    //
    tHeader *memory;
    if (freeList)
    {
        memory   = freeList;
        freeList = freeList->nextPage;
    }
    else
    {
        memory = reinterpret_cast<tHeader *>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new (memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char *ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
    currentPageOffset  = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: over-allocate with malloc so we can return
    // an aligned pointer inside the block; the raw pointer is recorded in
    // the current scope so pop()/the destructor can free() it.
    void *alloc = malloc(numBytes + alignmentMask);
    mStack.back().push_back(alloc);

    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc          = (intAlloc + alignmentMask) & ~alignmentMask;
    return reinterpret_cast<void *>(intAlloc);
#endif
}
340 
lock()341 void TPoolAllocator::lock()
342 {
343     ASSERT(!mLocked);
344     mLocked = true;
345 }
346 
unlock()347 void TPoolAllocator::unlock()
348 {
349     ASSERT(mLocked);
350     mLocked = false;
351 }
352 
353 //
354 // Check all allocations in a list for damage by calling check on each.
355 //
checkAllocList() const356 void TAllocation::checkAllocList() const
357 {
358     for (const TAllocation *alloc = this; alloc != 0; alloc = alloc->prevAlloc)
359         alloc->check();
360 }
361