// AsmJit - Machine code generation for C++
//
//  * Official AsmJit Home Page: https://asmjit.com
//  * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//    misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.

#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER

#include "../core/assembler.h"
#include "../core/compiler.h"
#include "../core/cpuinfo.h"
#include "../core/logger.h"
#include "../core/rapass_p.h"
#include "../core/rastack_p.h"
#include "../core/support.h"
#include "../core/type.h"

ASMJIT_BEGIN_NAMESPACE

// ============================================================================
// [asmjit::GlobalConstPoolPass]
// ============================================================================

class GlobalConstPoolPass : public Pass {
  typedef Pass Base;
  ASMJIT_NONCOPYABLE(GlobalConstPoolPass)

  GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}

  Error run(Zone* zone, Logger* logger) override {
    DebugUtils::unused(zone, logger);

    // Flush the global constant pool.
    BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
    if (compiler->_globalConstPool) {
      compiler->addAfter(compiler->_globalConstPool, compiler->lastNode());
      compiler->_globalConstPool = nullptr;
    }

    return kErrorOk;
  }
};

// ============================================================================
// [asmjit::InvokeNode - Arg / Ret]
// ============================================================================

bool InvokeNode::_setArg(uint32_t i, const Operand_& op) noexcept {
  if ((i & ~kFuncArgHi) >= _funcDetail.argCount())
    return false;

  _args[i] = op;
  return true;
}
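
// Note: the index `i` may contain the `kFuncArgHi` bit, which is masked out
// for the bounds check above. Based on how this flag is used by the function
// API, it selects the high half of an argument that is split across two
// registers (e.g. a 64-bit value passed in a register pair on a 32-bit
// target); this reading is an interpretation, not something this file states
// explicitly.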

bool InvokeNode::_setRet(uint32_t i, const Operand_& op) noexcept {
  if (i >= 2)
    return false;

  _rets[i] = op;
  return true;
}

// ============================================================================
// [asmjit::BaseCompiler - Construction / Destruction]
// ============================================================================

BaseCompiler::BaseCompiler() noexcept
  : BaseBuilder(),
    _func(nullptr),
    _vRegZone(4096 - Zone::kBlockOverhead),
    _vRegArray(),
    _localConstPool(nullptr),
    _globalConstPool(nullptr) {

  _emitterType = uint8_t(kTypeCompiler);
  _validationFlags = uint8_t(InstAPI::kValidationFlagVirtRegs);
}
BaseCompiler::~BaseCompiler() noexcept {}

// ============================================================================
// [asmjit::BaseCompiler - Function Management]
// ============================================================================

Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature) {
  *out = nullptr;

  // Create FuncNode together with all the required surrounding nodes.
  FuncNode* funcNode;
  ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
  ASMJIT_PROPAGATE(_newLabelNode(&funcNode->_exitNode));
  ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelNode::kSentinelFuncEnd));

  // Initialize the function's detail info.
  Error err = funcNode->detail().init(signature, environment());
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  // If the Target guarantees greater stack alignment than required by the
  // calling convention, then override it, as this prevents us from having to
  // perform dynamic stack alignment.
  uint32_t environmentStackAlignment = _environment.stackAlignment();

  if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
    funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);

  // Initialize the function frame.
  err = funcNode->_frame.init(funcNode->_funcDetail);
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  // Allocate space for function arguments.
  funcNode->_args = nullptr;
  if (funcNode->argCount() != 0) {
    funcNode->_args = _allocator.allocT<VirtReg*>(funcNode->argCount() * sizeof(VirtReg*));
    if (ASMJIT_UNLIKELY(!funcNode->_args))
      return reportError(DebugUtils::errored(kErrorOutOfMemory));
    memset(funcNode->_args, 0, funcNode->argCount() * sizeof(VirtReg*));
  }

  ASMJIT_PROPAGATE(registerLabelNode(funcNode));

  *out = funcNode;
  return kErrorOk;
}

Error BaseCompiler::_addFuncNode(FuncNode** out, const FuncSignature& signature) {
  ASMJIT_PROPAGATE(_newFuncNode(out, signature));
  addFunc(*out);
  return kErrorOk;
}

Error BaseCompiler::_newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
  uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
  FuncRetNode* node;

  ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
  node->setOpCount(opCount);
  node->setOp(0, o0);
  node->setOp(1, o1);
  node->resetOpRange(2, node->opCapacity());

  *out = node;
  return kErrorOk;
}

Error BaseCompiler::_addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
  ASMJIT_PROPAGATE(_newRetNode(out, o0, o1));
  addNode(*out);
  return kErrorOk;
}

FuncNode* BaseCompiler::addFunc(FuncNode* func) {
  ASMJIT_ASSERT(_func == nullptr);
  _func = func;

  addNode(func);                 // Function node.
  BaseNode* prev = cursor();     // {CURSOR}.
  addNode(func->exitNode());     // Function exit label.
  addNode(func->endNode());      // Function end sentinel.

  _setCursor(prev);
  return func;
}
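
// After `addFunc()` the node list looks like this (a sketch of the layout
// created above, not emitted machine code):
//
//   [FuncNode]     <- function entry; the cursor is restored here so that
//                     nodes added next form the function body.
//   [LabelNode]    <- func->exitNode(), the shared function exit label.
//   [SentinelNode] <- func->endNode(), marks the end of the function.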

Error BaseCompiler::endFunc() {
  FuncNode* func = _func;

  if (ASMJIT_UNLIKELY(!func))
    return reportError(DebugUtils::errored(kErrorInvalidState));

  // Add the local constant pool at the end of the function (if it exists).
  if (_localConstPool) {
    setCursor(func->endNode()->prev());
    addNode(_localConstPool);
    _localConstPool = nullptr;
  }

  // Mark as finished.
  _func = nullptr;

  SentinelNode* end = func->endNode();
  setCursor(end);

  return kErrorOk;
}

Error BaseCompiler::setArg(uint32_t argIndex, const BaseReg& r) {
  FuncNode* func = _func;

  if (ASMJIT_UNLIKELY(!func))
    return reportError(DebugUtils::errored(kErrorInvalidState));

  if (ASMJIT_UNLIKELY(!isVirtRegValid(r)))
    return reportError(DebugUtils::errored(kErrorInvalidVirtId));

  VirtReg* vReg = virtRegByReg(r);
  func->setArg(argIndex, vReg);

  return kErrorOk;
}
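
// A minimal usage sketch of argument binding through the public API (x86
// shown; `cc` is an x86::Compiler attached to a CodeHolder):
//
//   cc.addFunc(FuncSignatureT<int, int>(CallConv::kIdHost));
//   x86::Gp x = cc.newInt32("x"); // New virtual register.
//   cc.setArg(0, x);              // Bind `x` to the first argument.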

// ============================================================================
// [asmjit::BaseCompiler - Function Invocation]
// ============================================================================

Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
  InvokeNode* node;
  ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, 0u));

  node->setOpCount(1);
  node->setOp(0, o0);
  node->resetOpRange(1, node->opCapacity());

  Error err = node->detail().init(signature, environment());
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  // Skip the allocation if there are no arguments.
  uint32_t argCount = signature.argCount();
  if (argCount) {
    node->_args = static_cast<Operand*>(_allocator.alloc(argCount * sizeof(Operand)));
    if (!node->_args)
      return reportError(DebugUtils::errored(kErrorOutOfMemory));
    memset(node->_args, 0, argCount * sizeof(Operand));
  }

  *out = node;
  return kErrorOk;
}

Error BaseCompiler::_addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
  ASMJIT_PROPAGATE(_newInvokeNode(out, instId, o0, signature));
  addNode(*out);
  return kErrorOk;
}
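
// A usage sketch of the public invocation API built on top of the helpers
// above (x86 shown; `calledFunc`, `a`, `b`, and `ret` are hypothetical):
//
//   InvokeNode* invokeNode;
//   cc.invoke(&invokeNode, imm((void*)calledFunc),
//             FuncSignatureT<int, int, int>(CallConv::kIdHost));
//   invokeNode->setArg(0, a);   // Pass virtual register `a` as argument 0.
//   invokeNode->setArg(1, b);   // Pass virtual register `b` as argument 1.
//   invokeNode->setRet(0, ret); // Receive the return value in `ret`.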

// ============================================================================
// [asmjit::BaseCompiler - Virtual Registers]
// ============================================================================

static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
  uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));

  char buf[64];
  int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index));

  ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf)));
  vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
}
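
// The "%%%u" format above produces names of the form "%<index>", so unnamed
// virtual registers appear in logs as "%0", "%1", "%2", and so on.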

Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name) {
  *out = nullptr;
  uint32_t index = _vRegArray.size();

  if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
    return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));

  if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk))
    return reportError(DebugUtils::errored(kErrorOutOfMemory));

  VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
  if (ASMJIT_UNLIKELY(!vReg))
    return reportError(DebugUtils::errored(kErrorOutOfMemory));

  uint32_t size = Type::sizeOf(typeId);
  uint32_t alignment = Support::min<uint32_t>(size, 64);

  vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId);

#ifndef ASMJIT_NO_LOGGING
  if (name && name[0] != '\0')
    vReg->_name.setData(&_dataZone, name, SIZE_MAX);
  else
    BaseCompiler_assignGenericName(this, vReg);
#else
  DebugUtils::unused(name);
#endif

  _vRegArray.appendUnsafe(vReg);
  *out = vReg;

  return kErrorOk;
}

Error BaseCompiler::_newReg(BaseReg* out, uint32_t typeId, const char* name) {
  out->reset();

  RegInfo regInfo;
  Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);

  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  VirtReg* vReg;
  ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));

  out->_initReg(regInfo.signature(), vReg->id());
  return kErrorOk;
}

Error BaseCompiler::_newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...) {
  va_list ap;
  StringTmp<256> sb;

  va_start(ap, fmt);
  sb.appendVFormat(fmt, ap);
  va_end(ap);

  return _newReg(out, typeId, sb.data());
}

Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) {
  out->reset();

  RegInfo regInfo;
  uint32_t typeId;

  if (isVirtRegValid(ref)) {
    VirtReg* vRef = virtRegByReg(ref);
    typeId = vRef->typeId();

    // NOTE: It's possible to cast one register type to another if it's the
    // same register group. However, VirtReg always contains the TypeId that
    // was used to create the register. This means that in some cases we may
    // end up with `ref` and `vRef` having different sizes. In that case we
    // adjust the TypeId to match the `ref` register type instead of the
    // original register type, which is the expected behavior.
    uint32_t typeSize = Type::sizeOf(typeId);
    uint32_t refSize = ref.size();

    if (typeSize != refSize) {
      if (Type::isInt(typeId)) {
        // GP register - change TypeId to match `ref`, but keep sign of `vRef`.
        switch (refSize) {
          case  1: typeId = Type::kIdI8  | (typeId & 1); break;
          case  2: typeId = Type::kIdI16 | (typeId & 1); break;
          case  4: typeId = Type::kIdI32 | (typeId & 1); break;
          case  8: typeId = Type::kIdI64 | (typeId & 1); break;
          default: typeId = Type::kIdVoid; break;
        }
      }
      else if (Type::isMmx(typeId)) {
        // MMX register - always use 64-bit.
        typeId = Type::kIdMmx64;
      }
      else if (Type::isMask(typeId)) {
        // Mask register - change TypeId to match `ref` size.
        switch (refSize) {
          case  1: typeId = Type::kIdMask8; break;
          case  2: typeId = Type::kIdMask16; break;
          case  4: typeId = Type::kIdMask32; break;
          case  8: typeId = Type::kIdMask64; break;
          default: typeId = Type::kIdVoid; break;
        }
      }
      else {
        // VEC register - change TypeId to match `ref` size, keep vector metadata.
        uint32_t elementTypeId = Type::baseOf(typeId);

        switch (refSize) {
          case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break;
          case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break;
          case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break;
          default: typeId = Type::kIdVoid; break;
        }
      }

      if (typeId == Type::kIdVoid)
        return reportError(DebugUtils::errored(kErrorInvalidState));
    }
  }
  else {
    typeId = ref.type();
  }

  Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  VirtReg* vReg;
  ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));

  out->_initReg(regInfo.signature(), vReg->id());
  return kErrorOk;
}
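
// A usage sketch of creating a register compatible with an existing one
// (the public `newSimilarReg()` helper is a thin wrapper over this overload):
//
//   x86::Gp a = cc.newInt64("a");
//   x86::Gp b = cc.newSimilarReg(a, "b"); // Same type and size as `a`.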

Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...) {
  va_list ap;
  StringTmp<256> sb;

  va_start(ap, fmt);
  sb.appendVFormat(fmt, ap);
  va_end(ap);

  return _newReg(out, ref, sb.data());
}

Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
  out->reset();

  if (size == 0)
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

  if (alignment == 0)
    alignment = 1;

  if (!Support::isPowerOf2(alignment))
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

  if (alignment > 64)
    alignment = 64;

  VirtReg* vReg;
  ASMJIT_PROPAGATE(newVirtReg(&vReg, 0, 0, name));

  vReg->_virtSize = size;
  vReg->_isStack = true;
  vReg->_alignment = uint8_t(alignment);

  // Set the memory operand's base to GPD/GPQ and its base id to the VirtReg's id.
  *out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
  return kErrorOk;
}
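
// A usage sketch of reserving stack memory through the public API (x86
// shown; `ptr` is a hypothetical GP register):
//
//   x86::Mem slot = cc.newStack(64, 16, "slot"); // 64 bytes, 16-byte aligned.
//   cc.lea(ptr, slot);                           // Take the slot's address.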

Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
  if (!isVirtIdValid(virtId))
    return DebugUtils::errored(kErrorInvalidVirtId);

  if (newAlignment && !Support::isPowerOf2(newAlignment))
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

  if (newAlignment > 64)
    newAlignment = 64;

  VirtReg* vReg = virtRegById(virtId);
  if (newSize)
    vReg->_virtSize = newSize;

  if (newAlignment)
    vReg->_alignment = uint8_t(newAlignment);

  // This is required if the RAPass is already running. There is a chance that
  // a stack slot has already been allocated, in which case it has to be
  // updated as well, otherwise we would allocate the wrong amount of memory.
  RAWorkReg* workReg = vReg->_workReg;
  if (workReg && workReg->_stackSlot) {
    workReg->_stackSlot->_size = vReg->_virtSize;
    workReg->_stackSlot->_alignment = vReg->_alignment;
  }

  return kErrorOk;
}

Error BaseCompiler::_newConst(BaseMem* out, uint32_t scope, const void* data, size_t size) {
  out->reset();
  ConstPoolNode** pPool;

  if (scope == ConstPool::kScopeLocal)
    pPool = &_localConstPool;
  else if (scope == ConstPool::kScopeGlobal)
    pPool = &_globalConstPool;
  else
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

  if (!*pPool)
    ASMJIT_PROPAGATE(_newConstPoolNode(pPool));

  ConstPoolNode* pool = *pPool;
  size_t off;
  Error err = pool->add(data, size, off);

  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  *out = BaseMem(BaseMem::Decomposed {
    Label::kLabelTag,      // Base type.
    pool->labelId(),       // Base id.
    0,                     // Index type.
    0,                     // Index id.
    int32_t(off),          // Offset.
    uint32_t(size),        // Size.
    0                      // Flags.
  });

  return kErrorOk;
}
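
// A usage sketch of embedding a constant through the public API (x86 shown):
//
//   uint32_t value = 0x01020304u;
//   x86::Mem c = cc.newConst(ConstPool::kScopeLocal, &value, sizeof(value));
//   cc.mov(someReg, c); // `someReg` is a hypothetical 32-bit GP register.
//
// Local pools are flushed by endFunc(); the global pool is appended at the
// very end of the code by GlobalConstPoolPass at the top of this file.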

void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
  if (!reg.isVirtReg()) return;

  VirtReg* vReg = virtRegById(reg.id());
  if (!vReg) return;

  if (fmt && fmt[0] != '\0') {
    char buf[128];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
    va_end(ap);

    vReg->_name.setData(&_dataZone, buf, SIZE_MAX);
  }
  else {
    BaseCompiler_assignGenericName(this, vReg);
  }
}

// ============================================================================
// [asmjit::BaseCompiler - Jump Annotations]
// ============================================================================

Error BaseCompiler::newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) {
  JumpNode* node = _allocator.allocT<JumpNode>();
  uint32_t opCount = 1;

  *out = node;
  if (ASMJIT_UNLIKELY(!node))
    return reportError(DebugUtils::errored(kErrorOutOfMemory));

  node = new(node) JumpNode(this, instId, instOptions, opCount, annotation);
  node->setOp(0, o0);
  node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);

  return kErrorOk;
}

Error BaseCompiler::emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation) {
  uint32_t options = instOptions() | forcedInstOptions();
  RegOnly extra = extraReg();
  const char* comment = inlineComment();

  resetInstOptions();
  resetInlineComment();
  resetExtraReg();

  JumpNode* node = nullptr;
  ASMJIT_PROPAGATE(newJumpNode(&node, instId, options, o0, annotation));

  node->setExtraReg(extra);
  if (comment)
    node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));

  addNode(node);
  return kErrorOk;
}

JumpAnnotation* BaseCompiler::newJumpAnnotation() {
  if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
    reportError(DebugUtils::errored(kErrorOutOfMemory));
    return nullptr;
  }

  uint32_t id = _jumpAnnotations.size();
  JumpAnnotation* jumpAnnotation = _allocator.newT<JumpAnnotation>(this, id);

  if (!jumpAnnotation) {
    reportError(DebugUtils::errored(kErrorOutOfMemory));
    return nullptr;
  }

  _jumpAnnotations.appendUnsafe(jumpAnnotation);
  return jumpAnnotation;
}
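
// A usage sketch of annotated jumps, which describe the possible targets of
// an indirect branch such as a jump table (x86 shown; L0..L2 are labels
// created by the Compiler):
//
//   JumpAnnotation* annotation = cc.newJumpAnnotation();
//   annotation->addLabel(L0);
//   annotation->addLabel(L1);
//   annotation->addLabel(L2);
//   cc.jmp(target, annotation); // `target` holds the computed address.
//
// The annotation lets the register allocator know every label the indirect
// jump can land on.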

// ============================================================================
// [asmjit::BaseCompiler - Events]
// ============================================================================

Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
  ASMJIT_PROPAGATE(Base::onAttach(code));

  Error err = addPassT<GlobalConstPoolPass>();
  if (ASMJIT_UNLIKELY(err)) {
    onDetach(code);
    return err;
  }

  return kErrorOk;
}

Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
  _func = nullptr;
  _localConstPool = nullptr;
  _globalConstPool = nullptr;

  _vRegArray.reset();
  _vRegZone.reset();

  return Base::onDetach(code);
}

// ============================================================================
// [asmjit::FuncPass - Construction / Destruction]
// ============================================================================

FuncPass::FuncPass(const char* name) noexcept
  : Pass(name) {}

// ============================================================================
// [asmjit::FuncPass - Run]
// ============================================================================

Error FuncPass::run(Zone* zone, Logger* logger) {
  BaseNode* node = cb()->firstNode();
  if (!node) return kErrorOk;

  do {
    if (node->type() == BaseNode::kNodeFunc) {
      FuncNode* func = node->as<FuncNode>();
      node = func->endNode();
      ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
    }

    // Find a function by skipping all nodes that are not `kNodeFunc`.
    do {
      node = node->next();
    } while (node && node->type() != BaseNode::kNodeFunc);
  } while (node);

  return kErrorOk;
}

ASMJIT_END_NAMESPACE

#endif // !ASMJIT_NO_COMPILER