// [AsmJit]
// Machine Code Generation for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.

#define ASMJIT_EXPORTS

#include "../core/support.h"
#include "../core/zone.h"

ASMJIT_BEGIN_NAMESPACE

// ============================================================================
// [asmjit::Zone - Statics]
// ============================================================================

// Zero-size block used by `Zone` when it has no memory allocated. It should
// live in read-only memory and must never be modified.
const Zone::Block Zone::_zeroBlock = { nullptr, nullptr, 0 };

// ============================================================================
// [asmjit::Zone - Init / Reset]
// ============================================================================

void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
  ASMJIT_ASSERT(blockSize >= kMinBlockSize);
  ASMJIT_ASSERT(blockSize <= kMaxBlockSize);
  ASMJIT_ASSERT(blockAlignment <= 64);

  // Just to make the compiler happy...
  constexpr size_t kBlockSizeMask = (Support::allOnes<size_t>() >> 4);
  constexpr size_t kBlockAlignmentShiftMask = 0x7u;

  _assignZeroBlock();
  _blockSize = blockSize & kBlockSizeMask;
  _isTemporary = temporary != nullptr;
  _blockAlignmentShift = Support::ctz(blockAlignment) & kBlockAlignmentShiftMask;

  // Set up the first [temporary] block, if necessary.
  if (temporary) {
    Block* block = temporary->data<Block>();
    block->prev = nullptr;
    block->next = nullptr;

    ASMJIT_ASSERT(temporary->size() >= kBlockSize);
    block->size = temporary->size() - kBlockSize;

    _assignBlock(block);
  }
}
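
// A minimal construction sketch (hedged - `ZoneTmp<N>` and the constructor
// signatures are assumed from the public `zone.h` API, not defined here):
//
//   Zone zone(1024);        // Heap-backed Zone using ~1kB blocks.
//   ZoneTmp<512> tmp(256);  // Zone whose first block is 512 bytes of embedded
//                           // storage; `_init()` above receives it through the
//                           // `temporary` argument and wires it in as the
//                           // first [temporary] block.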

void Zone::reset(uint32_t resetPolicy) noexcept {
  Block* cur = _block;

  // Nothing to do if the Zone is still using the zero block - there is
  // nothing to rewind or free.
  if (cur == &_zeroBlock)
    return;

  if (resetPolicy == Globals::kResetHard) {
    Block* initial = const_cast<Zone::Block*>(&_zeroBlock);
    _ptr = initial->data();
    _end = initial->data();
    _block = initial;

    // Since `cur` can be in the middle of the doubly-linked list, we have to
    // traverse both directions (`prev` and `next`) separately to visit all blocks.
    Block* next = cur->next;
    do {
      Block* prev = cur->prev;

      // If this is the first block and the Zone is temporary (ZoneTmp), the
      // first block is statically allocated, so it cannot be freed. It makes
      // sense to keep it even on a hard reset.
      if (prev == nullptr && _isTemporary) {
        cur->prev = nullptr;
        cur->next = nullptr;
        _assignBlock(cur);
        break;
      }

      ::free(cur);
      cur = prev;
    } while (cur);

    cur = next;
    while (cur) {
      next = cur->next;
      ::free(cur);
      cur = next;
    }
  }
  else {
    while (cur->prev)
      cur = cur->prev;
    _assignBlock(cur);
  }
}
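
// How the two reset policies behave in practice (a sketch; assumes the public
// `reset()` wrapper defaults to `Globals::kResetSoft`):
//
//   Zone zone(4096);
//   zone.alloc(100);
//   zone.reset();                     // Soft reset - keeps all blocks alive
//                                     // for reuse and rewinds to the first one.
//   zone.reset(Globals::kResetHard);  // Frees every heap-allocated block; the
//                                     // embedded block of a ZoneTmp is kept.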

// ============================================================================
// [asmjit::Zone - Alloc]
// ============================================================================

void* Zone::_alloc(size_t size, size_t alignment) noexcept {
  Block* curBlock = _block;
  Block* next = curBlock->next;

  size_t rawBlockAlignment = blockAlignment();
  size_t minimumAlignment = Support::max<size_t>(alignment, rawBlockAlignment);

  // If the `Zone` has been cleared the current block doesn't have to be the
  // last one. Check whether there is a block that can be used instead of
  // allocating a new one. If there is a `next` block it's completely unused,
  // so we don't have to check its remaining bytes.
  if (next) {
    uint8_t* ptr = Support::alignUp(next->data(), minimumAlignment);
    uint8_t* end = Support::alignDown(next->data() + next->size, rawBlockAlignment);

    if (size <= (size_t)(end - ptr)) {
      _block = next;
      _ptr = ptr + size;
      _end = end;
      return static_cast<void*>(ptr);
    }
  }

  size_t blockAlignmentOverhead = alignment - Support::min<size_t>(alignment, Globals::kAllocAlignment);
  size_t newSize = Support::max(blockSize(), size);

  // Prevent arithmetic overflow.
  if (ASMJIT_UNLIKELY(newSize > std::numeric_limits<size_t>::max() - kBlockSize - blockAlignmentOverhead))
    return nullptr;

  // Allocate a new block - we add the alignment overhead to `newSize`, which
  // becomes the new block size, and we also add `kBlockSize` to the allocation
  // request, as it covers the members of the `Zone::Block` structure.
  newSize += blockAlignmentOverhead;
  Block* newBlock = static_cast<Block*>(::malloc(newSize + kBlockSize));

  if (ASMJIT_UNLIKELY(!newBlock))
    return nullptr;

  // Align the pointer to `minimumAlignment` and adjust the size of this block
  // accordingly. It's the same as using `minimumAlignment - Support::alignUpDiff()`,
  // just written differently.
  {
    newBlock->prev = nullptr;
    newBlock->next = nullptr;
    newBlock->size = newSize;

    if (curBlock != &_zeroBlock) {
      newBlock->prev = curBlock;
      curBlock->next = newBlock;

      // This only happens when there is a `next` block, but the requested
      // memory couldn't fit into it. In that case a new block is allocated
      // and inserted between the current block and the next one.
      if (next) {
        newBlock->next = next;
        next->prev = newBlock;
      }
    }

    uint8_t* ptr = Support::alignUp(newBlock->data(), minimumAlignment);
    uint8_t* end = Support::alignDown(newBlock->data() + newSize, rawBlockAlignment);

    _ptr = ptr + size;
    _end = end;
    _block = newBlock;

    ASMJIT_ASSERT(_ptr <= _end);
    return static_cast<void*>(ptr);
  }
}
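
// An allocation sketch showing both paths (assumes the inline `alloc()`
// wrappers in `zone.h` that fall back to `_alloc()` on block overflow):
//
//   Zone zone(4096);
//   void* a = zone.alloc(24);      // Fast path - bumps `_ptr` in the current
//                                  // block.
//   void* b = zone.alloc(64, 32);  // If 64 bytes aligned to 32 don't fit,
//                                  // `_alloc()` reuses an unused `next` block
//                                  // (after a soft reset) or mallocs a new one.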

void* Zone::allocZeroed(size_t size, size_t alignment) noexcept {
  void* p = alloc(size, alignment);
  if (ASMJIT_UNLIKELY(!p))
    return p;
  return memset(p, 0, size);
}

void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
  if (ASMJIT_UNLIKELY(!data || !size))
    return nullptr;

  ASMJIT_ASSERT(size != std::numeric_limits<size_t>::max());
  uint8_t* m = allocT<uint8_t>(size + nullTerminate);
  if (ASMJIT_UNLIKELY(!m)) return nullptr;

  memcpy(m, data, size);
  if (nullTerminate) m[size] = '\0';

  return static_cast<void*>(m);
}
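
// Duplication sketch (hypothetical values):
//
//   const char* s = "hello";
//   char* copy = static_cast<char*>(zone.dup(s, 5, true));
//   // `copy` now points to a zone-owned, null-terminated "hello". It is
//   // released together with the zone, never individually.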

char* Zone::sformat(const char* fmt, ...) noexcept {
  if (ASMJIT_UNLIKELY(!fmt))
    return nullptr;

  char buf[512];
  size_t size;
  va_list ap;

  va_start(ap, fmt);
  size = unsigned(vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap));
  va_end(ap);

  // Clamp `size` - `vsnprintf()` returns the length the output would have had,
  // which can exceed the capacity of `buf` if the output was truncated (and a
  // negative error return would wrap to a huge unsigned value).
  size = Support::min<size_t>(size, ASMJIT_ARRAY_SIZE(buf) - 1);

  buf[size++] = 0;
  return static_cast<char*>(dup(buf, size));
}
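
// Formatting sketch (hypothetical values; output longer than the internal
// 512-byte buffer is truncated):
//
//   char* msg = zone.sformat("slot=%u size=%zu", 3u, size_t(64));
//   // `msg` is a zone-owned copy of the formatted string.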

// ============================================================================
// [asmjit::ZoneAllocator - Helpers]
// ============================================================================

#if defined(ASMJIT_BUILD_DEBUG)
static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept {
  ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks;
  while (cur) {
    if (cur == block)
      return true;
    cur = cur->next;
  }
  return false;
}
#endif

// ============================================================================
// [asmjit::ZoneAllocator - Init / Reset]
// ============================================================================

void ZoneAllocator::reset(Zone* zone) noexcept {
  // Free dynamic blocks.
  DynamicBlock* block = _dynamicBlocks;
  while (block) {
    DynamicBlock* next = block->next;
    ::free(block);
    block = next;
  }

  // Zero the entire class and initialize to the given `zone`.
  memset(this, 0, sizeof(*this));
  _zone = zone;
}
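
// Typical `ZoneAllocator` lifecycle (a sketch; the `alloc()` / `release()`
// wrappers are assumed from the public `zone.h` API):
//
//   Zone zone(4096);
//   ZoneAllocator allocator(&zone);
//
//   size_t allocatedSize;
//   void* p = allocator.alloc(24, allocatedSize); // Small request - served
//                                                 // from a slot, rounded up.
//   allocator.release(p, allocatedSize);          // Returns `p` to its slot
//                                                 // for reuse; the underlying
//                                                 // zone memory isn't freed.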

// ============================================================================
// [asmjit::ZoneAllocator - Alloc / Release]
// ============================================================================

void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
  ASMJIT_ASSERT(isInitialized());

  // Use the memory pool only if the requested block has a reasonable size.
  uint32_t slot;
  if (_getSlotIndex(size, slot, allocatedSize)) {
    // Slot reuse.
    uint8_t* p = reinterpret_cast<uint8_t*>(_slots[slot]);
    size = allocatedSize;

    if (p) {
      _slots[slot] = reinterpret_cast<Slot*>(p)->next;
      return p;
    }

    _zone->align(kBlockAlignment);
    p = _zone->ptr();
    size_t remain = (size_t)(_zone->end() - p);

    if (ASMJIT_LIKELY(remain >= size)) {
      _zone->setPtr(p + size);
      return p;
    }
    else {
      // Distribute the remaining memory to suitable slots, if possible.
      if (remain >= kLoGranularity) {
        do {
          size_t distSize = Support::min<size_t>(remain, kLoMaxSize);
          uint32_t distSlot = uint32_t((distSize - kLoGranularity) / kLoGranularity);
          ASMJIT_ASSERT(distSlot < kLoCount);

          reinterpret_cast<Slot*>(p)->next = _slots[distSlot];
          _slots[distSlot] = reinterpret_cast<Slot*>(p);

          p += distSize;
          remain -= distSize;
        } while (remain >= kLoGranularity);
        _zone->setPtr(p);
      }

      p = static_cast<uint8_t*>(_zone->_alloc(size, kBlockAlignment));
      if (ASMJIT_UNLIKELY(!p)) {
        allocatedSize = 0;
        return nullptr;
      }

      return p;
    }
  }
  else {
    // Allocate a dynamic block.
    size_t kBlockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;

    // Handle a possible overflow.
    if (ASMJIT_UNLIKELY(kBlockOverhead >= std::numeric_limits<size_t>::max() - size))
      return nullptr;

    void* p = ::malloc(size + kBlockOverhead);
    if (ASMJIT_UNLIKELY(!p)) {
      allocatedSize = 0;
      return nullptr;
    }

    // Link as the first block in the `_dynamicBlocks` doubly-linked list.
    DynamicBlock* block = static_cast<DynamicBlock*>(p);
    DynamicBlock* next = _dynamicBlocks;

    if (next)
      next->prev = block;

    block->prev = nullptr;
    block->next = next;
    _dynamicBlocks = block;

    // Align the pointer to the guaranteed alignment and store a pointer to
    // the `DynamicBlock` right before the returned memory, so
    // `_releaseDynamic()` can find it.
    p = Support::alignUp(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment);
    reinterpret_cast<DynamicBlock**>(p)[-1] = block;

    allocatedSize = size;
    return p;
  }
}
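
// Illustrative layout of a dynamic block returned above - the back-pointer
// stored right before `p` is what `_releaseDynamic()` reads at `p[-1]`:
//
//   +--------------+---------+---------------------+---------------------
//   | DynamicBlock | padding | DynamicBlock* block | user data (p) ...
//   +--------------+---------+---------------------+---------------------
//                                                  ^ aligned to kBlockAlignment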

void* ZoneAllocator::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
  ASMJIT_ASSERT(isInitialized());

  void* p = _alloc(size, allocatedSize);
  if (ASMJIT_UNLIKELY(!p)) return p;
  return memset(p, 0, allocatedSize);
}

void ZoneAllocator::_releaseDynamic(void* p, size_t size) noexcept {
  ASMJIT_UNUSED(size);
  ASMJIT_ASSERT(isInitialized());

  // Pointer to `DynamicBlock` is stored at [-1].
  DynamicBlock* block = reinterpret_cast<DynamicBlock**>(p)[-1];
  ASMJIT_ASSERT(ZoneAllocator_hasDynamicBlock(this, block));

  // Unlink and free.
  DynamicBlock* prev = block->prev;
  DynamicBlock* next = block->next;

  if (prev)
    prev->next = next;
  else
    _dynamicBlocks = next;

  if (next)
    next->prev = prev;

  ::free(block);
}

ASMJIT_END_NAMESPACE