// AsmJit - Machine code generation for C++
//
//  * Official AsmJit Home Page: https://asmjit.com
//  * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//    misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.

#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER

#include "../core/assembler.h"
#include "../core/compiler.h"
#include "../core/cpuinfo.h"
#include "../core/logger.h"
#include "../core/rapass_p.h"
#include "../core/rastack_p.h"
#include "../core/support.h"
#include "../core/type.h"

ASMJIT_BEGIN_NAMESPACE

// ============================================================================
// [asmjit::GlobalConstPoolPass]
// ============================================================================

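// Pass that flushes the compiler's global constant pool: if a pool node was
// created, it's moved to the end of the whole code so that every function
// can reference the constants stored in it.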
class GlobalConstPoolPass : public Pass {
  typedef Pass Base;
  ASMJIT_NONCOPYABLE(GlobalConstPoolPass)

  GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}

  Error run(Zone* zone, Logger* logger) override {
    DebugUtils::unused(zone, logger);

    // Flush the global constant pool.
    BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
    if (compiler->_globalConstPool) {
      compiler->addAfter(compiler->_globalConstPool, compiler->lastNode());
      compiler->_globalConstPool = nullptr;
    }

    return kErrorOk;
  }
};

// ============================================================================
// [asmjit::BaseCompiler - Construction / Destruction]
// ============================================================================

BaseCompiler::BaseCompiler() noexcept
  : BaseBuilder(),
    _func(nullptr),
    _vRegZone(4096 - Zone::kBlockOverhead),
    _vRegArray(),
    _localConstPool(nullptr),
    _globalConstPool(nullptr) {

  _emitterType = uint8_t(kTypeCompiler);
  _validationFlags = uint8_t(InstAPI::kValidationFlagVirtRegs);
}
BaseCompiler::~BaseCompiler() noexcept {}

// ============================================================================
// [asmjit::BaseCompiler - Function Management]
// ============================================================================

Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature) {
  *out = nullptr;

  // Create FuncNode together with all the required surrounding nodes.
  FuncNode* funcNode;
  ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
  ASMJIT_PROPAGATE(_newLabelNode(&funcNode->_exitNode));
  ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelNode::kSentinelFuncEnd));

  // Initialize the function's detail info.
  Error err = funcNode->detail().init(signature, environment());
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  // If the target environment guarantees a greater stack alignment than
  // required by the calling convention, override it, as this prevents us
  // from having to perform dynamic stack alignment.
  uint32_t environmentStackAlignment = _environment.stackAlignment();

  if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
    funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);

  // Initialize the function frame.
  err = funcNode->_frame.init(funcNode->_funcDetail);
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  // Allocate space for function arguments.
  funcNode->_args = nullptr;
  if (funcNode->argCount() != 0) {
    funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
    if (ASMJIT_UNLIKELY(!funcNode->_args))
      return reportError(DebugUtils::errored(kErrorOutOfMemory));
    memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
  }

  ASMJIT_PROPAGATE(registerLabelNode(funcNode));

  *out = funcNode;
  return kErrorOk;
}

Error BaseCompiler::_addFuncNode(FuncNode** out, const FuncSignature& signature) {
  ASMJIT_PROPAGATE(_newFuncNode(out, signature));
  addFunc(*out);
  return kErrorOk;
}

Error BaseCompiler::_newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
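  // Count the return operands: two if `o1` is present, one if only `o0` is
  // present, and zero if the function returns nothing.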
  uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
  FuncRetNode* node;

  ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
  node->setOpCount(opCount);
  node->setOp(0, o0);
  node->setOp(1, o1);
  node->resetOpRange(2, node->opCapacity());

  *out = node;
  return kErrorOk;
}

Error BaseCompiler::_addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
  ASMJIT_PROPAGATE(_newRetNode(out, o0, o1));
  addNode(*out);
  return kErrorOk;
}

FuncNode* BaseCompiler::addFunc(FuncNode* func) {
  ASMJIT_ASSERT(_func == nullptr);
  _func = func;

  addNode(func);                 // Function node.
  BaseNode* prev = cursor();     // {CURSOR}.
  addNode(func->exitNode());     // Function exit label.
  addNode(func->endNode());      // Function end sentinel.

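  // Restore the cursor so that nodes emitted next land inside the function
  // body, before the exit label and end sentinel appended above.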
  _setCursor(prev);
  return func;
}

Error BaseCompiler::endFunc() {
  FuncNode* func = _func;

  if (ASMJIT_UNLIKELY(!func))
    return reportError(DebugUtils::errored(kErrorInvalidState));

  // Add the local constant pool at the end of the function (if it exists).
  if (_localConstPool) {
    setCursor(func->endNode()->prev());
    addNode(_localConstPool);
    _localConstPool = nullptr;
  }

  // Mark as finished.
  _func = nullptr;

  SentinelNode* end = func->endNode();
  setCursor(end);

  return kErrorOk;
}

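// Binds a virtual register to a function argument slot; the public `setArg()`
// wrappers forward here, and `valueIndex` selects a value within a multi-value
// argument pack. A minimal usage sketch (illustrative only, assuming an
// attached x86::Compiler `cc`):
//
//   cc.addFunc(FuncSignatureT<int, int>(CallConv::kIdHost));
//   x86::Gp x = cc.newInt32("x");
//   cc.setArg(0, x);                  // Bind `x` to the first argument.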
Error BaseCompiler::_setArg(size_t argIndex, size_t valueIndex, const BaseReg& r) {
  FuncNode* func = _func;

  if (ASMJIT_UNLIKELY(!func))
    return reportError(DebugUtils::errored(kErrorInvalidState));

  if (ASMJIT_UNLIKELY(!isVirtRegValid(r)))
    return reportError(DebugUtils::errored(kErrorInvalidVirtId));

  VirtReg* vReg = virtRegByReg(r);
  func->setArg(argIndex, valueIndex, vReg);

  return kErrorOk;
}

// ============================================================================
// [asmjit::BaseCompiler - Function Invocation]
// ============================================================================

Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
  InvokeNode* node;
  ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, 0u));

  node->setOpCount(1);
  node->setOp(0, o0);
  node->resetOpRange(1, node->opCapacity());

  Error err = node->detail().init(signature, environment());
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  // Skip the allocation if there are no arguments.
  uint32_t argCount = signature.argCount();
  if (argCount) {
    node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack)));
    if (ASMJIT_UNLIKELY(!node->_args))
      return reportError(DebugUtils::errored(kErrorOutOfMemory));
    memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
  }

  *out = node;
  return kErrorOk;
}

Error BaseCompiler::_addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
  ASMJIT_PROPAGATE(_newInvokeNode(out, instId, o0, signature));
  addNode(*out);
  return kErrorOk;
}

// ============================================================================
// [asmjit::BaseCompiler - Virtual Registers]
// ============================================================================

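// Assigns a generic name to the virtual register, e.g. "%0", "%1", ...,
// derived from its index. Used when no explicit name was provided.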
static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
  uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));

  char buf[64];
  int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index));

  ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf)));
  vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
}

Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name) {
  *out = nullptr;
  uint32_t index = _vRegArray.size();

  if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
    return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));

  if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk))
    return reportError(DebugUtils::errored(kErrorOutOfMemory));

  VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
  if (ASMJIT_UNLIKELY(!vReg))
    return reportError(DebugUtils::errored(kErrorOutOfMemory));

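  // The default alignment follows the type size, capped at 64 bytes (the
  // largest alignment the register allocator tracks).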
  uint32_t size = Type::sizeOf(typeId);
  uint32_t alignment = Support::min<uint32_t>(size, 64);

  vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId);

#ifndef ASMJIT_NO_LOGGING
  if (name && name[0] != '\0')
    vReg->_name.setData(&_dataZone, name, SIZE_MAX);
  else
    BaseCompiler_assignGenericName(this, vReg);
#else
  DebugUtils::unused(name);
#endif

  _vRegArray.appendUnsafe(vReg);
  *out = vReg;

  return kErrorOk;
}

Error BaseCompiler::_newReg(BaseReg* out, uint32_t typeId, const char* name) {
  RegInfo regInfo;
  out->reset();

  Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  VirtReg* vReg;
  ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));

  out->_initReg(regInfo.signature(), vReg->id());
  return kErrorOk;
}

Error BaseCompiler::_newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...) {
  va_list ap;
  StringTmp<256> sb;

  va_start(ap, fmt);
  sb.appendVFormat(fmt, ap);
  va_end(ap);

  return _newReg(out, typeId, sb.data());
}

Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) {
  out->reset();

  RegInfo regInfo;
  uint32_t typeId;

  if (isVirtRegValid(ref)) {
    VirtReg* vRef = virtRegByReg(ref);
    typeId = vRef->typeId();

    // NOTE: It's possible to cast one register type to another if it's the
    // same register group. However, VirtReg always contains the TypeId that
    // was used to create the register. This means that in some cases `ref`
    // and `vRef` may have different sizes. In that case we adjust the TypeId
    // to match the `ref` register type instead of the original register type,
    // which is the expected behavior.
    uint32_t typeSize = Type::sizeOf(typeId);
    uint32_t refSize = ref.size();

    if (typeSize != refSize) {
      if (Type::isInt(typeId)) {
        // GP register - change TypeId to match `ref`, but keep the sign of `vRef`.
        switch (refSize) {
          case  1: typeId = Type::kIdI8  | (typeId & 1); break;
          case  2: typeId = Type::kIdI16 | (typeId & 1); break;
          case  4: typeId = Type::kIdI32 | (typeId & 1); break;
          case  8: typeId = Type::kIdI64 | (typeId & 1); break;
          default: typeId = Type::kIdVoid; break;
        }
      }
      else if (Type::isMmx(typeId)) {
        // MMX register - always use 64-bit.
        typeId = Type::kIdMmx64;
      }
      else if (Type::isMask(typeId)) {
        // Mask register - change TypeId to match `ref` size.
        switch (refSize) {
          case  1: typeId = Type::kIdMask8; break;
          case  2: typeId = Type::kIdMask16; break;
          case  4: typeId = Type::kIdMask32; break;
          case  8: typeId = Type::kIdMask64; break;
          default: typeId = Type::kIdVoid; break;
        }
      }
      else {
        // VEC register - change TypeId to match `ref` size, keep vector metadata.
        uint32_t elementTypeId = Type::baseOf(typeId);

        switch (refSize) {
          case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break;
          case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break;
          case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break;
          default: typeId = Type::kIdVoid; break;
        }
      }

      if (typeId == Type::kIdVoid)
        return reportError(DebugUtils::errored(kErrorInvalidState));
    }
  }
  else {
    typeId = ref.type();
  }

  Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  VirtReg* vReg;
  ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));

  out->_initReg(regInfo.signature(), vReg->id());
  return kErrorOk;
}

Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt, ...) {
  va_list ap;
  StringTmp<256> sb;

  va_start(ap, fmt);
  sb.appendVFormat(fmt, ap);
  va_end(ap);

  return _newReg(out, ref, sb.data());
}

Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
  out->reset();

  if (size == 0)
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

  if (alignment == 0)
    alignment = 1;

  if (!Support::isPowerOf2(alignment))
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

  if (alignment > 64)
    alignment = 64;

  VirtReg* vReg;
  ASMJIT_PROPAGATE(newVirtReg(&vReg, 0, 0, name));

  vReg->_virtSize = size;
  vReg->_isStack = true;
  vReg->_alignment = uint8_t(alignment);

  // Set the memory operand to GPD/GPQ and its id to VirtReg.
  *out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
  return kErrorOk;
}

Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
  if (!isVirtIdValid(virtId))
    return DebugUtils::errored(kErrorInvalidVirtId);

  if (newAlignment && !Support::isPowerOf2(newAlignment))
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

  if (newAlignment > 64)
    newAlignment = 64;

  VirtReg* vReg = virtRegById(virtId);
  if (newSize)
    vReg->_virtSize = newSize;

  if (newAlignment)
    vReg->_alignment = uint8_t(newAlignment);

  // This is required if the RAPass is already running. There is a chance
  // that a stack slot has already been allocated, and in that case it has
  // to be updated as well, otherwise we would allocate the wrong amount of
  // memory.
  RAWorkReg* workReg = vReg->_workReg;
  if (workReg && workReg->_stackSlot) {
    workReg->_stackSlot->_size = vReg->_virtSize;
    workReg->_stackSlot->_alignment = vReg->_alignment;
  }

  return kErrorOk;
}

Error BaseCompiler::_newConst(BaseMem* out, uint32_t scope, const void* data, size_t size) {
  out->reset();
  ConstPoolNode** pPool;

  if (scope == ConstPool::kScopeLocal)
    pPool = &_localConstPool;
  else if (scope == ConstPool::kScopeGlobal)
    pPool = &_globalConstPool;
  else
    return reportError(DebugUtils::errored(kErrorInvalidArgument));

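  // Create the constant pool node lazily, on first use within this scope.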
  if (!*pPool)
    ASMJIT_PROPAGATE(_newConstPoolNode(pPool));

  ConstPoolNode* pool = *pPool;
  size_t off;
  Error err = pool->add(data, size, off);

  if (ASMJIT_UNLIKELY(err))
    return reportError(err);

  *out = BaseMem(BaseMem::Decomposed {
    Label::kLabelTag,      // Base type.
    pool->labelId(),       // Base id.
    0,                     // Index type.
    0,                     // Index id.
    int32_t(off),          // Offset.
    uint32_t(size),        // Size.
    0                      // Flags.
  });

  return kErrorOk;
}

void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
  if (!reg.isVirtReg()) return;

  VirtReg* vReg = virtRegById(reg.id());
  if (!vReg) return;

  if (fmt && fmt[0] != '\0') {
    char buf[128];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
    va_end(ap);

    vReg->_name.setData(&_dataZone, buf, SIZE_MAX);
  }
  else {
    BaseCompiler_assignGenericName(this, vReg);
  }
}

// ============================================================================
// [asmjit::BaseCompiler - Jump Annotations]
// ============================================================================

Error BaseCompiler::newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) {
  JumpNode* node = _allocator.allocT<JumpNode>();
  uint32_t opCount = 1;

  *out = node;
  if (ASMJIT_UNLIKELY(!node))
    return reportError(DebugUtils::errored(kErrorOutOfMemory));

  node = new(node) JumpNode(this, instId, instOptions, opCount, annotation);
  node->setOp(0, o0);
  node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);

  return kErrorOk;
}

Error BaseCompiler::emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation) {
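  // Capture the pending instruction state (options, extra register, inline
  // comment) and reset it so it applies only to this jump instruction.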
  uint32_t options = instOptions() | forcedInstOptions();
  RegOnly extra = extraReg();
  const char* comment = inlineComment();

  resetInstOptions();
  resetInlineComment();
  resetExtraReg();

  JumpNode* node;
  ASMJIT_PROPAGATE(newJumpNode(&node, instId, options, o0, annotation));

  node->setExtraReg(extra);
  if (comment)
    node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));

  addNode(node);
  return kErrorOk;
}

JumpAnnotation* BaseCompiler::newJumpAnnotation() {
  if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
    reportError(DebugUtils::errored(kErrorOutOfMemory));
    return nullptr;
  }

  uint32_t id = _jumpAnnotations.size();
  JumpAnnotation* jumpAnnotation = _allocator.newT<JumpAnnotation>(this, id);

  if (!jumpAnnotation) {
    reportError(DebugUtils::errored(kErrorOutOfMemory));
    return nullptr;
  }

  _jumpAnnotations.appendUnsafe(jumpAnnotation);
  return jumpAnnotation;
}

// ============================================================================
// [asmjit::BaseCompiler - Events]
// ============================================================================

Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
  ASMJIT_PROPAGATE(Base::onAttach(code));

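  // Initialize the native GP register information from the target
  // architecture (32-bit vs 64-bit general purpose registers).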
  const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
  uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
  _gpRegInfo.setSignature(archTraits.regTypeToSignature(nativeRegType));

  Error err = addPassT<GlobalConstPoolPass>();
  if (ASMJIT_UNLIKELY(err)) {
    onDetach(code);
    return err;
  }

  return kErrorOk;
}

Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
  _func = nullptr;
  _localConstPool = nullptr;
  _globalConstPool = nullptr;

  _vRegArray.reset();
  _vRegZone.reset();

  return Base::onDetach(code);
}

// ============================================================================
// [asmjit::FuncPass - Construction / Destruction]
// ============================================================================

FuncPass::FuncPass(const char* name) noexcept
  : Pass(name) {}

// ============================================================================
// [asmjit::FuncPass - Run]
// ============================================================================

Error FuncPass::run(Zone* zone, Logger* logger) {
  BaseNode* node = cb()->firstNode();
  if (!node) return kErrorOk;

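  // Visit every function in the node list and let the derived pass process
  // it via `runOnFunction()`; nodes between functions are skipped.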
  do {
    if (node->type() == BaseNode::kNodeFunc) {
      FuncNode* func = node->as<FuncNode>();
      node = func->endNode();
      ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
    }

    // Find the next function by skipping all nodes that are not `kNodeFunc`.
    do {
      node = node->next();
    } while (node && node->type() != BaseNode::kNodeFunc);
  } while (node);

  return kErrorOk;
}

ASMJIT_END_NAMESPACE

#endif // !ASMJIT_NO_COMPILER