// AsmJit - Machine code generation for C++
//
//  * Official AsmJit Home Page: https://asmjit.com
//  * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//    misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.

#include "../core/api-build_p.h"
#include "../core/constpool.h"
#include "../core/support.h"

ASMJIT_BEGIN_NAMESPACE

// ============================================================================
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================

ConstPool(Zone * zone)34 ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
~ConstPool()35 ConstPool::~ConstPool() noexcept {}

// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================

reset(Zone * zone)41 void ConstPool::reset(Zone* zone) noexcept {
42   _zone = zone;
43 
44   size_t dataSize = 1;
45   for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
46     _tree[i].reset();
47     _tree[i].setDataSize(dataSize);
48     _gaps[i] = nullptr;
49     dataSize <<= 1;
50   }
51 
52   _gapPool = nullptr;
53   _size = 0;
54   _alignment = 0;
55 }

// ============================================================================
// [asmjit::ConstPool - Ops]
// ============================================================================

ConstPool_allocGap(ConstPool * self)61 static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
62   ConstPool::Gap* gap = self->_gapPool;
63   if (!gap)
64     return self->_zone->allocT<ConstPool::Gap>();
65 
66   self->_gapPool = gap->_next;
67   return gap;
68 }
// Returns `gap` to the pool's free-list so `ConstPool_allocGap()` can reuse
// it later (the record's memory itself stays owned by the zone).
static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
  gap->_next = self->_gapPool;
  self->_gapPool = gap;
}
ConstPool_addGap(ConstPool * self,size_t offset,size_t size)75 static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
76   ASMJIT_ASSERT(size > 0);
77 
78   while (size > 0) {
79     size_t gapIndex;
80     size_t gapSize;
81 
82     if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
83       gapIndex = ConstPool::kIndex16;
84       gapSize = 16;
85     }
86     else if (size >= 8 && Support::isAligned<size_t>(offset, 8)) {
87       gapIndex = ConstPool::kIndex8;
88       gapSize = 8;
89     }
90     else if (size >= 4 && Support::isAligned<size_t>(offset, 4)) {
91       gapIndex = ConstPool::kIndex4;
92       gapSize = 4;
93     }
94     else if (size >= 2 && Support::isAligned<size_t>(offset, 2)) {
95       gapIndex = ConstPool::kIndex2;
96       gapSize = 2;
97     }
98     else {
99       gapIndex = ConstPool::kIndex1;
100       gapSize = 1;
101     }
102 
103     // We don't have to check for errors here, if this failed nothing really
104     // happened (just the gap won't be visible) and it will fail again at
105     // place where the same check would generate `kErrorOutOfMemory` error.
106     ConstPool::Gap* gap = ConstPool_allocGap(self);
107     if (!gap)
108       return;
109 
110     gap->_next = self->_gaps[gapIndex];
111     self->_gaps[gapIndex] = gap;
112 
113     gap->_offset = offset;
114     gap->_size = gapSize;
115 
116     offset += gapSize;
117     size -= gapSize;
118   }
119 }
add(const void * data,size_t size,size_t & dstOffset)121 Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
122   size_t treeIndex;
123 
124   if (size == 32)
125     treeIndex = kIndex32;
126   else if (size == 16)
127     treeIndex = kIndex16;
128   else if (size == 8)
129     treeIndex = kIndex8;
130   else if (size == 4)
131     treeIndex = kIndex4;
132   else if (size == 2)
133     treeIndex = kIndex2;
134   else if (size == 1)
135     treeIndex = kIndex1;
136   else
137     return DebugUtils::errored(kErrorInvalidArgument);
138 
139   ConstPool::Node* node = _tree[treeIndex].get(data);
140   if (node) {
141     dstOffset = node->_offset;
142     return kErrorOk;
143   }
144 
145   // Before incrementing the current offset try if there is a gap that can
146   // be used for the requested data.
147   size_t offset = ~size_t(0);
148   size_t gapIndex = treeIndex;
149 
150   while (gapIndex != kIndexCount - 1) {
151     ConstPool::Gap* gap = _gaps[treeIndex];
152 
153     // Check if there is a gap.
154     if (gap) {
155       size_t gapOffset = gap->_offset;
156       size_t gapSize = gap->_size;
157 
158       // Destroy the gap for now.
159       _gaps[treeIndex] = gap->_next;
160       ConstPool_freeGap(this, gap);
161 
162       offset = gapOffset;
163       ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
164 
165       gapSize -= size;
166       if (gapSize > 0)
167         ConstPool_addGap(this, gapOffset, gapSize);
168     }
169 
170     gapIndex++;
171   }
172 
173   if (offset == ~size_t(0)) {
174     // Get how many bytes have to be skipped so the address is aligned accordingly
175     // to the 'size'.
176     size_t diff = Support::alignUpDiff<size_t>(_size, size);
177 
178     if (diff != 0) {
179       ConstPool_addGap(this, _size, diff);
180       _size += diff;
181     }
182 
183     offset = _size;
184     _size += size;
185   }
186 
187   // Add the initial node to the right index.
188   node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
189   if (!node) return DebugUtils::errored(kErrorOutOfMemory);
190 
191   _tree[treeIndex].insert(node);
192   _alignment = Support::max<size_t>(_alignment, size);
193 
194   dstOffset = offset;
195 
196   // Now create a bunch of shared constants that are based on the data pattern.
197   // We stop at size 4, it probably doesn't make sense to split constants down
198   // to 1 byte.
199   size_t pCount = 1;
200   while (size > 4) {
201     size >>= 1;
202     pCount <<= 1;
203 
204     ASMJIT_ASSERT(treeIndex != 0);
205     treeIndex--;
206 
207     const uint8_t* pData = static_cast<const uint8_t*>(data);
208     for (size_t i = 0; i < pCount; i++, pData += size) {
209       node = _tree[treeIndex].get(pData);
210       if (node) continue;
211 
212       node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
213       _tree[treeIndex].insert(node);
214     }
215   }
216 
217   return kErrorOk;
218 }

// ============================================================================
// [asmjit::ConstPool - Fill]
// ============================================================================

224 struct ConstPoolFill {
ConstPoolFillConstPoolFill225   inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
226     _dst(dst),
227     _dataSize(dataSize) {}
228 
operator ()ConstPoolFill229   inline void operator()(const ConstPool::Node* node) noexcept {
230     if (!node->_shared)
231       memcpy(_dst + node->_offset, node->data(), _dataSize);
232   }
233 
234   uint8_t* _dst;
235   size_t _dataSize;
236 };
fill(void * dst) const238 void ConstPool::fill(void* dst) const noexcept {
239   // Clears possible gaps, asmjit should never emit garbage to the output.
240   memset(dst, 0, _size);
241 
242   ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
243   for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
244     _tree[i].forEach(filler);
245     filler._dataSize <<= 1;
246   }
247 }

// ============================================================================
// [asmjit::ConstPool - Unit]
// ============================================================================

#if defined(ASMJIT_TEST)
// Unit test covering deduplication, 4-byte pattern sharing, gap reuse,
// alignment tracking, and reset behavior of ConstPool.
UNIT(const_pool) {
  Zone zone(32384 - Zone::kBlockOverhead);
  ConstPool pool(&zone);

  uint32_t i;
  // Run a much smaller iteration count when `--quick` is passed.
  uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;

  INFO("Adding %u constants to the pool", kCount);
  {
    size_t prevOffset;
    size_t curOffset;
    uint64_t c = 0x0101010101010101u;

    EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk);
    EXPECT(prevOffset == 0);

    // Each unique 8-byte constant must be appended right after the previous
    // one, growing the pool by exactly 8 bytes per addition.
    for (i = 1; i < kCount; i++) {
      c++;
      EXPECT(pool.add(&c, 8, curOffset) == kErrorOk);
      EXPECT(prevOffset + 8 == curOffset);
      EXPECT(pool.size() == (i + 1) * 8);
      prevOffset = curOffset;
    }

    EXPECT(pool.alignment() == 8);
  }

  INFO("Retrieving %u constants from the pool", kCount);
  {
    uint64_t c = 0x0101010101010101u;

    // Re-adding identical content must return the original offsets without
    // growing the pool.
    for (i = 0; i < kCount; i++) {
      size_t offset;
      EXPECT(pool.add(&c, 8, offset) == kErrorOk);
      EXPECT(offset == i * 8);
      c++;
    }
  }

  INFO("Checking if the constants were split into 4-byte patterns");
  {
    // The 4-byte halves of each 8-byte constant were registered as shared
    // sub-constants, so a 4-byte lookup must hit the existing data.
    uint32_t c = 0x01010101;
    for (i = 0; i < kCount; i++) {
      size_t offset;
      EXPECT(pool.add(&c, 4, offset) == kErrorOk);
      EXPECT(offset == i * 8);
      c++;
    }
  }

  INFO("Adding 2 byte constant to misalign the current offset");
  {
    uint16_t c = 0xFFFF;
    size_t offset;

    EXPECT(pool.add(&c, 2, offset) == kErrorOk);
    EXPECT(offset == kCount * 8);
    EXPECT(pool.alignment() == 8);
  }

  INFO("Adding 8 byte constant to check if pool gets aligned again");
  {
    uint64_t c = 0xFFFFFFFFFFFFFFFFu;
    size_t offset;

    // The pool must insert 6 bytes of padding (tracked as gaps) before this.
    EXPECT(pool.add(&c, 8, offset) == kErrorOk);
    EXPECT(offset == kCount * 8 + 8);
  }

  INFO("Adding 2 byte constant to verify the gap is filled");
  {
    // This should land in the 2-byte gap created by the previous alignment.
    uint16_t c = 0xFFFE;
    size_t offset;

    EXPECT(pool.add(&c, 2, offset) == kErrorOk);
    EXPECT(offset == kCount * 8 + 2);
    EXPECT(pool.alignment() == 8);
  }

  INFO("Checking reset functionality");
  {
    pool.reset(&zone);
    zone.reset();

    EXPECT(pool.size() == 0);
    EXPECT(pool.alignment() == 0);
  }

  INFO("Checking pool alignment when combined constants are added");
  {
    uint8_t bytes[32] = { 0 };
    size_t offset;

    pool.add(bytes, 1, offset);
    EXPECT(pool.size() == 1);
    EXPECT(pool.alignment() == 1);
    EXPECT(offset == 0);

    // 2-byte constant forces 1 byte of padding (offset 1 becomes a gap).
    pool.add(bytes, 2, offset);
    EXPECT(pool.size() == 4);
    EXPECT(pool.alignment() == 2);
    EXPECT(offset == 2);

    pool.add(bytes, 4, offset);
    EXPECT(pool.size() == 8);
    EXPECT(pool.alignment() == 4);
    EXPECT(offset == 4);

    // Duplicate 4-byte content must dedupe to the same offset.
    pool.add(bytes, 4, offset);
    EXPECT(pool.size() == 8);
    EXPECT(pool.alignment() == 4);
    EXPECT(offset == 4);

    // 32-byte constant forces 24 bytes of padding to reach 32-byte alignment.
    pool.add(bytes, 32, offset);
    EXPECT(pool.size() == 64);
    EXPECT(pool.alignment() == 32);
    EXPECT(offset == 32);
  }
}
#endif

ASMJIT_END_NAMESPACE