//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
6 
#include "compiler/translator/PoolAlloc.h"

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#include "common/angleutils.h"
#include "common/debug.h"
#include "common/platform.h"
#include "common/tls.h"
#include "compiler/translator/InitializeGlobals.h"
18 
// TLS slot holding each thread's current TPoolAllocator*
// (see Set/GetGlobalPoolAllocator below).
TLSIndex PoolIndex = TLS_INVALID_INDEX;
20 
InitializePoolIndex()21 bool InitializePoolIndex()
22 {
23     assert(PoolIndex == TLS_INVALID_INDEX);
24 
25     PoolIndex = CreateTLSIndex();
26     return PoolIndex != TLS_INVALID_INDEX;
27 }
28 
FreePoolIndex()29 void FreePoolIndex()
30 {
31     assert(PoolIndex != TLS_INVALID_INDEX);
32 
33     DestroyTLSIndex(PoolIndex);
34     PoolIndex = TLS_INVALID_INDEX;
35 }
36 
GetGlobalPoolAllocator()37 TPoolAllocator* GetGlobalPoolAllocator()
38 {
39     assert(PoolIndex != TLS_INVALID_INDEX);
40     return static_cast<TPoolAllocator*>(GetTLSValue(PoolIndex));
41 }
42 
SetGlobalPoolAllocator(TPoolAllocator * poolAllocator)43 void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
44 {
45     assert(PoolIndex != TLS_INVALID_INDEX);
46     SetTLSValue(PoolIndex, poolAllocator);
47 }
48 
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
// Construct a pool allocator.
//
// growthIncrement:     requested page size for pool pages; clamped below to
//                      at least 4KB.
// allocationAlignment: requested alignment of returned pointers; rounded up
//                      below to a pointer-sized power of two.
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment)
    : alignment(allocationAlignment),
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
      pageSize(growthIncrement),
      freeList(0),
      inUseList(0),
      numCalls(0),
      totalBytes(0),
#endif
      mLocked(false)
{
    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    // Clear sub-pointer bits, then enforce the pointer-size minimum.
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    // Round up to the next power of two.
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;  // low bits to clear when aligning an address

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4 * 1024)
        pageSize = 4 * 1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: start with one scope tracking raw mallocs.
    mStack.push_back({});
#endif
}
103 
~TPoolAllocator()104 TPoolAllocator::~TPoolAllocator()
105 {
106 #if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
107     while (inUseList) {
108         tHeader* next = inUseList->nextPage;
109         inUseList->~tHeader();
110         delete [] reinterpret_cast<char*>(inUseList);
111         inUseList = next;
112     }
113 
114     // We should not check the guard blocks
115     // here, because we did it already when the block was
116     // placed into the free list.
117     //
118     while (freeList) {
119         tHeader* next = freeList->nextPage;
120         delete [] reinterpret_cast<char*>(freeList);
121         freeList = next;
122     }
123 #else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
124     for (auto &allocs : mStack)
125     {
126         for (auto alloc : allocs)
127         {
128             free(alloc);
129         }
130     }
131     mStack.clear();
132 #endif
133 }
134 
// Support MSVC++ 6.0
// Byte patterns used by the guard-block machinery: guardBlockBeginVal/EndVal
// fill the guard regions checked in checkGuardBlock(); userDataFill is
// presumably the debug fill for fresh user data — confirm in PoolAlloc.h.
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

// Guard blocks occupy real space only when GUARD_BLOCKS is defined;
// otherwise their size is zero and the checks compile away.
#ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
#else
    const size_t TAllocation::guardBlockSize = 0;
#endif
145 
//
// Check a single guard block for damage
//
checkGuardBlock(unsigned char * blockMem,unsigned char val,const char * locText) const149 void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
150 {
151 #ifdef GUARD_BLOCKS
152     for (size_t x = 0; x < guardBlockSize; x++) {
153         if (blockMem[x] != val) {
154             char assertMsg[80];
155 
156             // We don't print the assert message.  It's here just to be helpful.
157 #if defined(_MSC_VER)
158             snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
159                     locText, size, data());
160 #else
161             snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
162                     locText, size, data());
163 #endif
164             assert(0 && "PoolAlloc: Damage in guard block");
165         }
166     }
167 #endif
168 }
169 
170 
push()171 void TPoolAllocator::push()
172 {
173 #if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
174     tAllocState state = { currentPageOffset, inUseList };
175 
176     mStack.push_back(state);
177 
178     //
179     // Indicate there is no current page to allocate from.
180     //
181     currentPageOffset = pageSize;
182 #else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
183     mStack.push_back({});
184 #endif
185 }
186 
//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    // Nothing to do if there is no outstanding push() scope.
    if (mStack.size() < 1)
        return;

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Restore the allocation point recorded by the matching push().
    tHeader *page     = mStack.back().page;
    currentPageOffset = mStack.back().offset;

    // Unwind every page allocated since that push().
    while (inUseList != page) {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        // Multi-page allocations go straight back to the OS; single pages
        // are cached on the free list for reuse by future allocations.
        if (inUseList->pageCount > 1)
            delete [] reinterpret_cast<char*>(inUseList);
        else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: free the raw allocations made in this scope.
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}
226 
//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
popAll()231 void TPoolAllocator::popAll()
232 {
233     while (mStack.size() > 0)
234         pop();
235 }
236 
allocate(size_t numBytes)237 void* TPoolAllocator::allocate(size_t numBytes)
238 {
239     ASSERT(!mLocked);
240 
241 #if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
242     //
243     // Just keep some interesting statistics.
244     //
245     ++numCalls;
246     totalBytes += numBytes;
247 
248     // If we are using guard blocks, all allocations are bracketed by
249     // them: [guardblock][allocation][guardblock].  numBytes is how
250     // much memory the caller asked for.  allocationSize is the total
251     // size including guard blocks.  In release build,
252     // guardBlockSize=0 and this all gets optimized away.
253     size_t allocationSize = TAllocation::allocationSize(numBytes);
254     // Detect integer overflow.
255     if (allocationSize < numBytes)
256         return 0;
257 
258     //
259     // Do the allocation, most likely case first, for efficiency.
260     // This step could be moved to be inline sometime.
261     //
262     if (allocationSize <= pageSize - currentPageOffset) {
263         //
264         // Safe to allocate from currentPageOffset.
265         //
266         unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
267         currentPageOffset += allocationSize;
268         currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
269 
270         return initializeAllocation(inUseList, memory, numBytes);
271     }
272 
273     if (allocationSize > pageSize - headerSkip) {
274         //
275         // Do a multi-page allocation.  Don't mix these with the others.
276         // The OS is efficient and allocating and free-ing multiple pages.
277         //
278         size_t numBytesToAlloc = allocationSize + headerSkip;
279         // Detect integer overflow.
280         if (numBytesToAlloc < allocationSize)
281             return 0;
282 
283         tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
284         if (memory == 0)
285             return 0;
286 
287         // Use placement-new to initialize header
288         new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
289         inUseList = memory;
290 
291         currentPageOffset = pageSize;  // make next allocation come from a new page
292 
293         // No guard blocks for multi-page allocations (yet)
294         return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
295     }
296 
297     //
298     // Need a simple page to allocate from.
299     //
300     tHeader* memory;
301     if (freeList) {
302         memory = freeList;
303         freeList = freeList->nextPage;
304     } else {
305         memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
306         if (memory == 0)
307             return 0;
308     }
309 
310     // Use placement-new to initialize header
311     new(memory) tHeader(inUseList, 1);
312     inUseList = memory;
313 
314     unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
315     currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
316 
317     return initializeAllocation(inUseList, ret, numBytes);
318 #else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
319     void *alloc = malloc(numBytes + alignmentMask);
320     mStack.back().push_back(alloc);
321 
322     intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
323     intAlloc          = (intAlloc + alignmentMask) & ~alignmentMask;
324     return reinterpret_cast<void *>(intAlloc);
325 #endif
326 }
327 
lock()328 void TPoolAllocator::lock()
329 {
330     ASSERT(!mLocked);
331     mLocked = true;
332 }
333 
unlock()334 void TPoolAllocator::unlock()
335 {
336     ASSERT(mLocked);
337     mLocked = false;
338 }
339 
//
// Check all allocations in a list for damage by calling check on each.
//
checkAllocList() const343 void TAllocation::checkAllocList() const
344 {
345     for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
346         alloc->check();
347 }
348