/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmIonCompile.h"

#include "mozilla/MathAlgorithms.h"

#include <algorithm>

#include "jit/CodeGenerator.h"
#include "jit/CompileInfo.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmOpIter.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmValidate.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using mozilla::IsPowerOfTwo;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;

namespace {

using BlockVector = Vector<MBasicBlock*, 8, SystemAllocPolicy>;
using DefVector = Vector<MDefinition*, 8, SystemAllocPolicy>;

struct IonCompilePolicy {
  // We store SSA definitions in the value stack.
  using Value = MDefinition*;
  using ValueVector = DefVector;

  // We store loop headers and then/else blocks in the control flow stack.
  using ControlItem = MBasicBlock*;
};

using IonOpIter = OpIter<IonCompilePolicy>;

class FunctionCompiler;

// CallCompileState describes a call that is being compiled.

class CallCompileState {
  // A generator object that is passed each argument as it is compiled.
  WasmABIArgGenerator abi_;

  // Accumulates the register arguments while compiling arguments.
  MWasmCall::Args regArgs_;

  // Reserved argument for passing Instance* to builtin instance method calls.
  ABIArg instanceArg_;

  // The stack area in which the callee will write stack return values, or
  // nullptr if no stack results.
  MWasmStackResultArea* stackResultArea_ = nullptr;

  // Only FunctionCompiler should be directly manipulating CallCompileState.
  friend class FunctionCompiler;
};

// Encapsulates the compilation of a single function in a wasm (or asm.js)
// module. The function compiler handles the creation and final backend
// compilation of the MIR graph.
class FunctionCompiler {
  struct ControlFlowPatch {
    MControlInstruction* ins;
    uint32_t index;
    ControlFlowPatch(MControlInstruction* ins, uint32_t index)
        : ins(ins), index(index) {}
  };

  using ControlFlowPatchVector = Vector<ControlFlowPatch, 0, SystemAllocPolicy>;
  using ControlFlowPatchVectorVector =
      Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>;

  const ModuleEnvironment& moduleEnv_;
  IonOpIter iter_;
  const FuncCompileInput& func_;
  const ValTypeVector& locals_;
  size_t lastReadCallSite_;

  TempAllocator& alloc_;
  MIRGraph& graph_;
  const CompileInfo& info_;
  MIRGenerator& mirGen_;

  MBasicBlock* curBlock_;
  uint32_t maxStackArgBytes_;

  uint32_t loopDepth_;
  uint32_t blockDepth_;
  ControlFlowPatchVectorVector blockPatches_;

  // TLS pointer argument to the current function.
  MWasmParameter* tlsPointer_;
  MWasmParameter* stackResultPointer_;

 public:
  FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
                   const FuncCompileInput& func, const ValTypeVector& locals,
                   MIRGenerator& mirGen)
      : moduleEnv_(moduleEnv),
        iter_(moduleEnv, decoder),
        func_(func),
        locals_(locals),
        lastReadCallSite_(0),
        alloc_(mirGen.alloc()),
        graph_(mirGen.graph()),
        info_(mirGen.outerInfo()),
        mirGen_(mirGen),
        curBlock_(nullptr),
        maxStackArgBytes_(0),
        loopDepth_(0),
        blockDepth_(0),
        tlsPointer_(nullptr),
        stackResultPointer_(nullptr) {}

  const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }

  IonOpIter& iter() { return iter_; }
  TempAllocator& alloc() const { return alloc_; }
  // FIXME(1401675): Replace with BlockType.
  uint32_t funcIndex() const { return func_.index; }
  const FuncType& funcType() const {
    return *moduleEnv_.funcs[func_.index].type;
  }

  BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
  BytecodeOffset bytecodeIfNotAsmJS() const {
    return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
  }

  bool init() {
    // Prepare the entry block for MIR generation:

    const ArgTypeVector args(funcType());

    if (!mirGen_.ensureBallast()) {
      return false;
    }
    if (!newBlock(/* prev */ nullptr, &curBlock_)) {
      return false;
    }

    for (WasmABIArgIter i(args); !i.done(); i++) {
      MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
      curBlock_->add(ins);
      if (args.isSyntheticStackResultPointerArg(i.index())) {
        MOZ_ASSERT(stackResultPointer_ == nullptr);
        stackResultPointer_ = ins;
      } else {
        curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
                            ins);
      }
      if (!mirGen_.ensureBallast()) {
        return false;
      }
    }

    // Set up a parameter that receives the hidden TLS pointer argument.
    tlsPointer_ =
        MWasmParameter::New(alloc(), ABIArg(WasmTlsReg), MIRType::Pointer);
    curBlock_->add(tlsPointer_);
    if (!mirGen_.ensureBallast()) {
      return false;
    }

    for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
         i++) {
      MInstruction* ins = nullptr;
      switch (locals_[i].kind()) {
        case ValType::I32:
          ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
          break;
        case ValType::I64:
          ins = MConstant::NewInt64(alloc(), 0);
          break;
        case ValType::V128:
#ifdef ENABLE_WASM_SIMD
          ins =
              MWasmFloatConstant::NewSimd128(alloc(), SimdConstant::SplatX4(0));
          break;
#else
          return iter().fail("Ion has no SIMD support yet");
#endif
        case ValType::F32:
          ins = MConstant::New(alloc(), Float32Value(0.f), MIRType::Float32);
          break;
        case ValType::F64:
          ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
          break;
        case ValType::Rtt:
        case ValType::Ref:
          ins = MWasmNullConstant::New(alloc());
          break;
      }

      curBlock_->add(ins);
      curBlock_->initSlot(info().localSlot(i), ins);
      if (!mirGen_.ensureBallast()) {
        return false;
      }
    }

    return true;
  }

  void finish() {
    mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);

    MOZ_ASSERT(loopDepth_ == 0);
    MOZ_ASSERT(blockDepth_ == 0);
#ifdef DEBUG
    for (ControlFlowPatchVector& patches : blockPatches_) {
      MOZ_ASSERT(patches.empty());
    }
#endif
    MOZ_ASSERT(inDeadCode());
    MOZ_ASSERT(done(), "all bytes must be consumed");
    MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
  }

  /************************* Read-only interface (after local scope setup) */

  MIRGenerator& mirGen() const { return mirGen_; }
  MIRGraph& mirGraph() const { return graph_; }
  const CompileInfo& info() const { return info_; }

  MDefinition* getLocalDef(unsigned slot) {
    if (inDeadCode()) {
      return nullptr;
    }
    return curBlock_->getSlot(info().localSlot(slot));
  }

  const ValTypeVector& locals() const { return locals_; }

  /***************************** Code generation (after local scope setup) */

  MDefinition* constant(const Value& v, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    MConstant* constant = MConstant::New(alloc(), v, type);
    curBlock_->add(constant);
    return constant;
  }

  MDefinition* constant(float f) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
    curBlock_->add(cst);
    return cst;
  }

  MDefinition* constant(double d) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
    curBlock_->add(cst);
    return cst;
  }

  MDefinition* constant(int64_t i) {
    if (inDeadCode()) {
      return nullptr;
    }
    MConstant* constant = MConstant::NewInt64(alloc(), i);
    curBlock_->add(constant);
    return constant;
  }

#ifdef ENABLE_WASM_SIMD
  MDefinition* constant(V128 v) {
    if (inDeadCode()) {
      return nullptr;
    }
    MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
        alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
    curBlock_->add(constant);
    return constant;
  }
#endif

  MDefinition* nullRefConstant() {
    if (inDeadCode()) {
      return nullptr;
    }
    // MConstant has a lot of baggage so we don't use that here.
    MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
    curBlock_->add(constant);
    return constant;
  }

  void fence() {
    if (inDeadCode()) {
      return;
    }
    MWasmFence* ins = MWasmFence::New(alloc());
    curBlock_->add(ins);
  }

  template <class T>
  MDefinition* unary(MDefinition* op) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), op);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* unary(MDefinition* op, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), op, type);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), lhs, rhs);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), lhs, rhs, type);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
    curBlock_->add(ins);
    return ins;
  }

  bool mustPreserveNaN(MIRType type) {
    return IsFloatingPointType(type) && !moduleEnv().isAsmJS();
  }

  MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }

    // wasm can't fold x - 0.0 because of NaN with custom payloads.
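    // For example (illustrative): if lhs is a signaling NaN, lhs - 0.0
    // quiets it (sets the quiet bit), so folding the subtraction away would
    // be observable through a reinterpret of the result bits.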
    MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
    if (inDeadCode()) {
      return nullptr;
    }

    auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
                      bool isMax) {
    if (inDeadCode()) {
      return nullptr;
    }

    if (mustPreserveNaN(type)) {
      // Convert signaling NaNs to quiet NaNs.
      MDefinition* zero = constant(DoubleValue(0.0), type);
      lhs = sub(lhs, zero, type);
      rhs = sub(rhs, zero, type);
    }
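    // (Subtracting 0.0 is a cheap way to quiet NaNs: a signaling NaN minus
    // 0.0 yields a quiet NaN, while every non-NaN input is unchanged, so
    // MMinMax below may assume quiet-NaN operands.)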

    MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   MMul::Mode mode) {
    if (inDeadCode()) {
      return nullptr;
    }

    // wasm can't fold x * 1.0 because of NaN with custom payloads.
    auto* ins =
        MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   bool unsignd) {
    if (inDeadCode()) {
      return nullptr;
    }
    bool trapOnError = !moduleEnv().isAsmJS();
    if (!unsignd && type == MIRType::Int32) {
      // Enforce the signedness of the operation by coercing the operands
      // to signed.  Otherwise, operands that "look" unsigned to Ion but
      // are not unsigned to Baldr (e.g., unsigned right shifts) may lead to
      // the operation being executed unsigned.  Applies to mod() as well.
      //
      // Do this for Int32 only since Int64 is not subject to the same
      // issues.
      //
      // Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
      // but it doesn't matter: they're not codegen'd to calls since inputs
      // already are int32.
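      // Illustrative case: in (i32.div_s (i32.shr_u x 1) y) the shift
      // result "looks" unsigned to Ion, so without this coercion the
      // signed division could be emitted as an unsigned one.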
      auto* lhs2 = createTruncateToInt32(lhs);
      curBlock_->add(lhs2);
      lhs = lhs2;
      auto* rhs2 = createTruncateToInt32(rhs);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }

    // On x86 and ARM we implement i64 div via a C++ builtin; a call to a
    // C++ builtin requires the TLS pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
    if (type == MIRType::Int64) {
      auto* ins =
          MWasmBuiltinDivI64::New(alloc(), lhs, rhs, tlsPointer_, unsignd,
                                  trapOnError, bytecodeOffset());
      curBlock_->add(ins);
      return ins;
    }
#endif

    auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
                          bytecodeOffset(), mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }

  MInstruction* createTruncateToInt32(MDefinition* op) {
    if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
      return MWasmBuiltinTruncateToInt32::New(alloc(), op, tlsPointer_);
    }

    return MTruncateToInt32::New(alloc(), op);
  }

  MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   bool unsignd) {
    if (inDeadCode()) {
      return nullptr;
    }
    bool trapOnError = !moduleEnv().isAsmJS();
    if (!unsignd && type == MIRType::Int32) {
      // See block comment in div().
      auto* lhs2 = createTruncateToInt32(lhs);
      curBlock_->add(lhs2);
      lhs = lhs2;
      auto* rhs2 = createTruncateToInt32(rhs);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }

    // On x86 and ARM we implement i64 mod via a C++ builtin; a call to a
    // C++ builtin requires the TLS pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
    if (type == MIRType::Int64) {
      auto* ins =
          MWasmBuiltinModI64::New(alloc(), lhs, rhs, tlsPointer_, unsignd,
                                  trapOnError, bytecodeOffset());
      curBlock_->add(ins);
      return ins;
    }
#endif

    // Double mod is handled separately because we call BuiltinThunk for this
    // case and so need to add the dependency on tlsPointer.
    if (type == MIRType::Double) {
      auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, tlsPointer_, type,
                                        bytecodeOffset());
      curBlock_->add(ins);
      return ins;
    }

    auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
                          bytecodeOffset());
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* bitnot(MDefinition* op) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MBitNot::New(alloc(), op);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
                      MDefinition* condExpr) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
                          uint32_t targetSize) {
    if (inDeadCode()) {
      return nullptr;
    }
    MInstruction* ins;
    switch (targetSize) {
      case 4: {
        MSignExtendInt32::Mode mode;
        switch (srcSize) {
          case 1:
            mode = MSignExtendInt32::Byte;
            break;
          case 2:
            mode = MSignExtendInt32::Half;
            break;
          default:
            MOZ_CRASH("Bad sign extension");
        }
        ins = MSignExtendInt32::New(alloc(), op, mode);
        break;
      }
      case 8: {
        MSignExtendInt64::Mode mode;
        switch (srcSize) {
          case 1:
            mode = MSignExtendInt64::Byte;
            break;
          case 2:
            mode = MSignExtendInt64::Half;
            break;
          case 4:
            mode = MSignExtendInt64::Word;
            break;
          default:
            MOZ_CRASH("Bad sign extension");
        }
        ins = MSignExtendInt64::New(alloc(), op, mode);
        break;
      }
      default: {
        MOZ_CRASH("Bad sign extension");
      }
    }
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
                                         bool isUnsigned) {
    if (inDeadCode()) {
      return nullptr;
    }
#if defined(JS_CODEGEN_ARM)
    auto* ins = MBuiltinInt64ToFloatingPoint::New(
        alloc(), op, tlsPointer_, type, bytecodeOffset(), isUnsigned);
#else
    auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
                                           isUnsigned);
#endif
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
                      bool left) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MRotate::New(alloc(), input, count, type, left);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* truncate(MDefinition* op, TruncFlags flags) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
    curBlock_->add(ins);
    return ins;
  }

#if defined(JS_CODEGEN_ARM)
  MDefinition* truncateWithTls(MDefinition* op, TruncFlags flags) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, tlsPointer_,
                                                 flags, bytecodeOffset());
    curBlock_->add(ins);
    return ins;
  }
#endif

  MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
                       MCompare::CompareType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
    curBlock_->add(ins);
    return ins;
  }

  void assign(unsigned slot, MDefinition* def) {
    if (inDeadCode()) {
      return;
    }
    curBlock_->setSlot(info().localSlot(slot), def);
  }

#ifdef ENABLE_WASM_SIMD
  // About Wasm SIMD as supported by Ion:
  //
  // The expectation is that Ion will only ever support SIMD on x86 and x64,
  // since Cranelift will be the optimizing compiler for Arm64, ARMv7 will cease
  // to be a tier-1 platform soon, and MIPS32 and MIPS64 will never implement
  // SIMD.
  //
  // The division of the operations into MIR nodes reflects that expectation,
  // and is a good fit for x86/x64.  Should the expectation change we'll
  // possibly want to re-architect the SIMD support to be a little more general.
  //
  // Most SIMD operations map directly to a single MIR node that ultimately ends
  // up being expanded in the macroassembler.
  //
  // Some SIMD operations that do have a complete macroassembler expansion are
  // open-coded into multiple MIR nodes here; in some cases that's just
  // convenience, in other cases it may also allow them to benefit from Ion
  // optimizations.  The reason for the expansions will be documented by a
  // comment.
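  //
  // For instance, v128.bitselect maps to the single MWasmBitselectSimd128
  // node created in bitselectSimd128() below, while loadSplatSimd128() is
  // an example of open-coding: for most view types it emits a scalar load
  // followed by a scalar-to-SIMD splat.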

  // (v128,v128) -> v128 effect-free binary operations
  MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
                             bool commutative, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
               rhs->type() == MIRType::Simd128);

    auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128,i32) -> v128 effect-free shift operations
  MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
               rhs->type() == MIRType::Int32);

    auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128,scalar,imm) -> v128
  MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
                                  uint32_t laneIndex, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(lhs->type() == MIRType::Simd128);

    auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
    curBlock_->add(ins);
    return ins;
  }

  // (scalar) -> v128 effect-free unary operations
  MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128) -> v128 effect-free unary operations
  MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(src->type() == MIRType::Simd128);
    auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
    curBlock_->add(ins);
    return ins;
  }

  // (v128, imm) -> scalar effect-free unary operations
  MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
                             uint32_t imm = 0) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(src->type() == MIRType::Simd128);
    auto* ins =
        MWasmReduceSimd128::New(alloc(), src, op, ToMIRType(outType), imm);
    curBlock_->add(ins);
    return ins;
  }

  // (v128, v128, v128) -> v128 effect-free operations
  MDefinition* bitselectSimd128(MDefinition* v1, MDefinition* v2,
                                MDefinition* control) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(v1->type() == MIRType::Simd128);
    MOZ_ASSERT(v2->type() == MIRType::Simd128);
    MOZ_ASSERT(control->type() == MIRType::Simd128);
    auto* ins = MWasmBitselectSimd128::New(alloc(), v1, v2, control);
    curBlock_->add(ins);
    return ins;
  }

  // (v128, v128, imm_v128) -> v128 effect-free operations
  MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
    if (inDeadCode()) {
      return nullptr;
    }

    MOZ_ASSERT(v1->type() == MIRType::Simd128);
    MOZ_ASSERT(v2->type() == MIRType::Simd128);
    auto* ins = MWasmShuffleSimd128::New(
        alloc(), v1, v2,
        SimdConstant::CreateX16(reinterpret_cast<int8_t*>(control.bytes)));
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* loadSplatSimd128(Scalar::Type viewType,
                                const LinearMemoryAddress<MDefinition*>& addr,
                                wasm::SimdOp splatOp) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(viewType, addr.align, addr.offset,
                            bytecodeIfNotAsmJS());

    // Generate better code (on x86)
    if (viewType == Scalar::Float64) {
      access.setSplatSimd128Load();
      return load(addr.base, &access, ValType::V128);
    }

    ValType resultType = ValType::I32;
    if (viewType == Scalar::Float32) {
      resultType = ValType::F32;
      splatOp = wasm::SimdOp::F32x4Splat;
    }
    auto* scalar = load(addr.base, &access, resultType);
    if (!inDeadCode() && !scalar) {
      return nullptr;
    }
    return scalarToSimd128(scalar, splatOp);
  }

  MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
                                 wasm::SimdOp op) {
    if (inDeadCode()) {
      return nullptr;
    }

    // Generate better code (on x86) by loading as a double with an
    // operation that sign extends directly.
    MemoryAccessDesc access(Scalar::Float64, addr.align, addr.offset,
                            bytecodeIfNotAsmJS());
    access.setWidenSimd128Load(op);
    return load(addr.base, &access, ValType::V128);
  }

  MDefinition* loadZeroSimd128(Scalar::Type viewType, size_t numBytes,
                               const LinearMemoryAddress<MDefinition*>& addr) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(viewType, addr.align, addr.offset,
                            bytecodeIfNotAsmJS());
    access.setZeroExtendSimd128Load();
    return load(addr.base, &access, ValType::V128);
  }

  MDefinition* loadLaneSimd128(uint32_t laneSize,
                               const LinearMemoryAddress<MDefinition*>& addr,
                               uint32_t laneIndex, MDefinition* src) {
    if (inDeadCode()) {
      return nullptr;
    }

    MemoryAccessDesc access(Scalar::Simd128, addr.align, addr.offset,
                            bytecodeIfNotAsmJS());
    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MDefinition* base = addr.base;
    MOZ_ASSERT(!moduleEnv_.isAsmJS());
    checkOffsetAndAlignmentAndBounds(&access, &base);
    MInstruction* load = MWasmLoadLaneSimd128::New(
        alloc(), memoryBase, base, access, laneSize, laneIndex, src);
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }

  void storeLaneSimd128(uint32_t laneSize,
                        const LinearMemoryAddress<MDefinition*>& addr,
                        uint32_t laneIndex, MDefinition* src) {
    if (inDeadCode()) {
      return;
    }
    MemoryAccessDesc access(Scalar::Simd128, addr.align, addr.offset,
                            bytecodeIfNotAsmJS());
    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MDefinition* base = addr.base;
    MOZ_ASSERT(!moduleEnv_.isAsmJS());
    checkOffsetAndAlignmentAndBounds(&access, &base);
    MInstruction* store = MWasmStoreLaneSimd128::New(
        alloc(), memoryBase, base, access, laneSize, laneIndex, src);
    if (!store) {
      return;
    }
    curBlock_->add(store);
  }
#endif  // ENABLE_WASM_SIMD

 private:
  MWasmLoadTls* maybeLoadMemoryBase() {
    MWasmLoadTls* load = nullptr;
#ifdef JS_CODEGEN_X86
    AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
    load = MWasmLoadTls::New(alloc(), tlsPointer_,
                             offsetof(wasm::TlsData, memoryBase),
                             MIRType::Pointer, aliases);
    curBlock_->add(load);
#endif
    return load;
  }

  MWasmLoadTls* maybeLoadBoundsCheckLimit(MIRType type) {
#ifdef JS_64BIT
    MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
#else
    MOZ_ASSERT(type == MIRType::Int32);
#endif
    if (moduleEnv_.hugeMemoryEnabled()) {
      return nullptr;
    }
    AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
    auto* load = MWasmLoadTls::New(alloc(), tlsPointer_,
                                   offsetof(wasm::TlsData, boundsCheckLimit),
                                   type, aliases);
    curBlock_->add(load);
    return load;
  }

 public:
  MWasmHeapBase* memoryBase() {
    MWasmHeapBase* base = nullptr;
    AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
    base = MWasmHeapBase::New(alloc(), tlsPointer_, aliases);
    curBlock_->add(base);
    return base;
  }

 private:
  // Only sets *mustAdd if it also returns true.
  bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
                          bool* mustAdd) {
    MOZ_ASSERT(!*mustAdd);

    // asm.js accesses are always aligned and need no checks.
    if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
      return false;
    }

    if (base->isConstant()) {
      int32_t ptr = base->toConstant()->toInt32();
      // OK to wrap around the address computation here.
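      // (E.g., ptr = 0xFFFFFFFC with offset = 8 wraps to 4; the low bits,
      // and hence the alignment, are unaffected by the wraparound.)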
      if (((ptr + access->offset()) & (access->byteSize() - 1)) == 0) {
        return false;
      }
    }

    *mustAdd = (access->offset() & (access->byteSize() - 1)) != 0;
    return true;
  }

  void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
                                        MDefinition** base) {
    MOZ_ASSERT(!inDeadCode());
    MOZ_ASSERT(!moduleEnv_.isAsmJS());

    uint32_t offsetGuardLimit =
        GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());

    // Fold a constant base into the offset and make the base 0, provided the
    // offset stays below the guard limit.  The reason for folding the base into
    // the offset rather than vice versa is that a small offset can be ignored
    // by both explicit bounds checking and bounds check elimination.
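    // Worked example (illustrative): base = constant 0x1000 with
    // offset = 0x20 becomes base = 0 with offset = 0x1020, provided
    // 0x1020 is still below offsetGuardLimit.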
    if ((*base)->isConstant()) {
      uint32_t basePtr = (*base)->toConstant()->toInt32();
      uint32_t offset = access->offset();

      if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
        auto* ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
        curBlock_->add(ins);
        *base = ins;
        access->setOffset(access->offset() + basePtr);
      }
    }

    bool mustAdd = false;
    bool alignmentCheck = needAlignmentCheck(access, *base, &mustAdd);

    // If the offset is bigger than the guard region, a separate instruction is
    // necessary to add the offset to the base and check for overflow.
    //
    // Also add the offset if we have a Wasm atomic access that needs alignment
    // checking and the offset affects alignment.
    if (access->offset() >= offsetGuardLimit || mustAdd ||
        !JitOptions.wasmFoldOffsets) {
      *base = computeEffectiveAddress(*base, access);
    }

    if (alignmentCheck) {
      curBlock_->add(MWasmAlignmentCheck::New(
          alloc(), *base, access->byteSize(), bytecodeOffset()));
    }

#ifdef JS_64BIT
    // If the bounds check uses the full 64 bits of the bounds check limit, then
    // *base must be zero-extended to 64 bits before checking and wrapped back
    // to 32-bits after Spectre masking.  (And it's important that the value we
    // end up with has flowed through the Spectre mask.)
    //
    // If the memory's max size is known to be smaller than 64K pages exactly,
    // we can use a 32-bit check and avoid extension and wrapping.
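    // (Illustrative: a memory whose declared maximum keeps it under 4GiB can
    // take the 32-bit path, whereas a memory that may reach 65536 pages
    // needs the 64-bit check; the precise predicate is
    // boundsCheckLimitIs32Bits() below.)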
    bool check64 = !moduleEnv_.memory->boundsCheckLimitIs32Bits() &&
                   ArrayBufferObject::maxBufferByteLength() >= 0x100000000;
#else
    bool check64 = false;
#endif
    MWasmLoadTls* boundsCheckLimit =
        maybeLoadBoundsCheckLimit(check64 ? MIRType::Int64 : MIRType::Int32);
    if (boundsCheckLimit) {
      // At the outset, actualBase could be the result of pretty much any i32
      // operation, or it could be the load of an i32 constant.  We may assume
      // the value has a canonical representation for the platform, see doc
      // block in MacroAssembler.h.
      MDefinition* actualBase = *base;

      // Extend the index value to perform a 64-bit bounds check if the memory
      // can be 4GB.

      if (check64) {
        auto* extended = MWasmExtendU32Index::New(alloc(), actualBase);
        curBlock_->add(extended);
        actualBase = extended;
      }
      auto* ins = MWasmBoundsCheck::New(alloc(), actualBase, boundsCheckLimit,
                                        bytecodeOffset());
      curBlock_->add(ins);
      actualBase = ins;

      // If we're masking, then we update *base to create a dependency chain
      // through the masked index.  But we will first need to wrap the index
      // value if it was extended above.

      if (JitOptions.spectreIndexMasking) {
        if (check64) {
          auto* wrapped = MWasmWrapU32Index::New(alloc(), actualBase);
          curBlock_->add(wrapped);
          actualBase = wrapped;
        }
        *base = actualBase;
      }
    }
  }

  bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
    if (result == ValType::I64 && access->byteSize() <= 4) {
      // These smaller accesses should all be zero-extending.
      MOZ_ASSERT(!isSignedIntType(access->type()));
      return true;
    }
    return false;
  }

 public:
  MDefinition* computeEffectiveAddress(MDefinition* base,
                                       MemoryAccessDesc* access) {
    if (inDeadCode()) {
      return nullptr;
    }
    if (!access->offset()) {
      return base;
    }
    auto* ins =
        MWasmAddOffset::New(alloc(), base, access->offset(), bytecodeOffset());
    curBlock_->add(ins);
    access->clearOffset();
    return ins;
  }

  MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
                    ValType result) {
    if (inDeadCode()) {
      return nullptr;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* load = nullptr;
    if (moduleEnv_.isAsmJS()) {
      MOZ_ASSERT(access->offset() == 0);
      MWasmLoadTls* boundsCheckLimit =
          maybeLoadBoundsCheckLimit(MIRType::Int32);
      load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
                                 access->type());
    } else {
      checkOffsetAndAlignmentAndBounds(access, &base);
      load =
          MWasmLoad::New(alloc(), memoryBase, base, *access, ToMIRType(result));
    }
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }

  void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
    if (inDeadCode()) {
      return;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* store = nullptr;
    if (moduleEnv_.isAsmJS()) {
      MOZ_ASSERT(access->offset() == 0);
      MWasmLoadTls* boundsCheckLimit =
          maybeLoadBoundsCheckLimit(MIRType::Int32);
      store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
                                   access->type(), v);
    } else {
      checkOffsetAndAlignmentAndBounds(access, &base);
      store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
    }
    if (!store) {
      return;
    }
    curBlock_->add(store);
  }

  MDefinition* atomicCompareExchangeHeap(MDefinition* base,
                                         MemoryAccessDesc* access,
                                         ValType result, MDefinition* oldv,
                                         MDefinition* newv) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtOldv =
          MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
      curBlock_->add(cvtOldv);
      oldv = cvtOldv;

      auto* cvtNewv =
          MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
      curBlock_->add(cvtNewv);
      newv = cvtNewv;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* cas =
        MWasmCompareExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
                                      base, *access, oldv, newv, tlsPointer_);
    if (!cas) {
      return nullptr;
    }
    curBlock_->add(cas);

    if (isSmallerAccessForI64(result, access)) {
      cas = MExtendInt32ToInt64::New(alloc(), cas, true);
      curBlock_->add(cas);
    }

    return cas;
  }

  MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
                                  ValType result, MDefinition* value) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtValue =
          MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
      curBlock_->add(cvtValue);
      value = cvtValue;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* xchg =
        MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
                                     base, *access, value, tlsPointer_);
    if (!xchg) {
      return nullptr;
    }
    curBlock_->add(xchg);

    if (isSmallerAccessForI64(result, access)) {
      xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
      curBlock_->add(xchg);
    }

    return xchg;
  }

  MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
                               MemoryAccessDesc* access, ValType result,
                               MDefinition* value) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtValue =
          MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
      curBlock_->add(cvtValue);
      value = cvtValue;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* binop =
        MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
                                  base, *access, value, tlsPointer_);
    if (!binop) {
      return nullptr;
    }
    curBlock_->add(binop);

    if (isSmallerAccessForI64(result, access)) {
      binop = MExtendInt32ToInt64::New(alloc(), binop, true);
      curBlock_->add(binop);
    }

    return binop;
  }

  MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst,
                             bool isIndirect, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }

    MInstruction* load;
    if (isIndirect) {
      // Pull a pointer to the value out of TlsData::globalArea, then
      // load from that pointer.  Note that the pointer is immutable
      // even though the value it points at may change, hence the use of
      // |true| for the first node's |isConst| value, irrespective of
      // the |isConst| formal parameter to this method.  The latter
      // applies to the denoted value as a whole.
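      // (Illustrative note: the indirect case typically arises for mutable
      // globals that are imported or exported, whose actual cell lives
      // outside this instance's TlsData::globalArea.)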
      auto* cellPtr =
          MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
                                  /*isConst=*/true, tlsPointer_);
      curBlock_->add(cellPtr);
      load = MWasmLoadGlobalCell::New(alloc(), type, cellPtr);
    } else {
      // Pull the value directly out of TlsData::globalArea.
      load = MWasmLoadGlobalVar::New(alloc(), type, globalDataOffset, isConst,
                                     tlsPointer_);
    }
    curBlock_->add(load);
    return load;
  }

  MInstruction* storeGlobalVar(uint32_t globalDataOffset, bool isIndirect,
                               MDefinition* v) {
    if (inDeadCode()) {
      return nullptr;
    }

    MInstruction* store;
    MInstruction* valueAddr = nullptr;
    if (isIndirect) {
      // Pull a pointer to the value out of TlsData::globalArea, then
      // store through that pointer.
      auto* cellPtr =
          MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
                                  /*isConst=*/true, tlsPointer_);
      curBlock_->add(cellPtr);
      if (v->type() == MIRType::RefOrNull) {
        valueAddr = cellPtr;
        store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
                                   AliasSet::WasmGlobalCell);
      } else {
        store = MWasmStoreGlobalCell::New(alloc(), v, cellPtr);
      }
    } else {
      // Store the value directly in TlsData::globalArea.
      if (v->type() == MIRType::RefOrNull) {
        valueAddr = MWasmDerivedPointer::New(
            alloc(), tlsPointer_,
            offsetof(wasm::TlsData, globalArea) + globalDataOffset);
        curBlock_->add(valueAddr);
        store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
                                   AliasSet::WasmGlobalVar);
      } else {
        store =
            MWasmStoreGlobalVar::New(alloc(), globalDataOffset, v, tlsPointer_);
      }
    }
    curBlock_->add(store);

    return valueAddr;
  }

  void addInterruptCheck() {
    if (inDeadCode()) {
      return;
    }
    curBlock_->add(
        MWasmInterruptCheck::New(alloc(), tlsPointer_, bytecodeOffset()));
  }

  /***************************************************************** Calls */

  // The IonMonkey backend maintains a single stack offset (from the stack
  // pointer to the base of the frame) by adding the total amount of spill
  // space required plus the maximum stack required for argument passing.
  // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
  // manually accumulate, for the entire function, the maximum required stack
  // space for argument passing. (This is passed to the CodeGenerator via
  // MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
  // stack space required for each individual call (as determined by the call
  // ABI).
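  //
  // For example (illustrative): if one call in the function consumes 16
  // bytes of outgoing stack-argument space and another consumes 32, then
  // maxStackArgBytes_ ends up as 32; see finishCall() below.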
1335 
1336   // Operations that modify a CallCompileState.
1337 
passInstance(MIRType instanceType,CallCompileState * args)1338   bool passInstance(MIRType instanceType, CallCompileState* args) {
1339     if (inDeadCode()) {
1340       return true;
1341     }
1342 
1343     // Should only pass an instance once.  And it must be a non-GC pointer.
1344     MOZ_ASSERT(args->instanceArg_ == ABIArg());
1345     MOZ_ASSERT(instanceType == MIRType::Pointer);
1346     args->instanceArg_ = args->abi_.next(MIRType::Pointer);
1347     return true;
1348   }
1349 
1350   // Do not call this directly.  Call one of the passArg() variants instead.
passArgWorker(MDefinition * argDef,MIRType type,CallCompileState * call)1351   bool passArgWorker(MDefinition* argDef, MIRType type,
1352                      CallCompileState* call) {
1353     ABIArg arg = call->abi_.next(type);
1354     switch (arg.kind()) {
1355 #ifdef JS_CODEGEN_REGISTER_PAIR
1356       case ABIArg::GPR_PAIR: {
1357         auto mirLow =
1358             MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
1359         curBlock_->add(mirLow);
1360         auto mirHigh =
1361             MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
1362         curBlock_->add(mirHigh);
1363         return call->regArgs_.append(
1364                    MWasmCall::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
1365                call->regArgs_.append(
1366                    MWasmCall::Arg(AnyRegister(arg.gpr64().high), mirHigh));
1367       }
1368 #endif
1369       case ABIArg::GPR:
1370       case ABIArg::FPU:
1371         return call->regArgs_.append(MWasmCall::Arg(arg.reg(), argDef));
1372       case ABIArg::Stack: {
1373         auto* mir =
1374             MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
1375         curBlock_->add(mir);
1376         return true;
1377       }
1378       case ABIArg::Uninitialized:
1379         MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
1380     }
1381     MOZ_CRASH("Unknown ABIArg kind.");
1382   }
1383 
passArg(MDefinition * argDef,MIRType type,CallCompileState * call)1384   bool passArg(MDefinition* argDef, MIRType type, CallCompileState* call) {
1385     if (inDeadCode()) {
1386       return true;
1387     }
1388     return passArgWorker(argDef, type, call);
1389   }
1390 
passArg(MDefinition * argDef,ValType type,CallCompileState * call)1391   bool passArg(MDefinition* argDef, ValType type, CallCompileState* call) {
1392     if (inDeadCode()) {
1393       return true;
1394     }
1395     return passArgWorker(argDef, ToMIRType(type), call);
1396   }
1397 
1398   // If the call returns results on the stack, prepare a stack area to receive
1399   // them, and pass the address of the stack area to the callee as an additional
1400   // argument.
passStackResultAreaCallArg(const ResultType & resultType,CallCompileState * call)1401   bool passStackResultAreaCallArg(const ResultType& resultType,
1402                                   CallCompileState* call) {
1403     if (inDeadCode()) {
1404       return true;
1405     }
1406     ABIResultIter iter(resultType);
1407     while (!iter.done() && iter.cur().inRegister()) {
1408       iter.next();
1409     }
1410     if (iter.done()) {
1411       // No stack results.
1412       return true;
1413     }
1414 
1415     auto* stackResultArea = MWasmStackResultArea::New(alloc());
1416     if (!stackResultArea) {
1417       return false;
1418     }
1419     if (!stackResultArea->init(alloc(), iter.remaining())) {
1420       return false;
1421     }
1422     for (uint32_t base = iter.index(); !iter.done(); iter.next()) {
1423       MWasmStackResultArea::StackResult loc(iter.cur().stackOffset(),
1424                                             ToMIRType(iter.cur().type()));
1425       stackResultArea->initResult(iter.index() - base, loc);
1426     }
1427     curBlock_->add(stackResultArea);
1428     if (!passArg(stackResultArea, MIRType::Pointer, call)) {
1429       return false;
1430     }
1431     call->stackResultArea_ = stackResultArea;
1432     return true;
1433   }
1434 
finishCall(CallCompileState * call)1435   bool finishCall(CallCompileState* call) {
1436     if (inDeadCode()) {
1437       return true;
1438     }
1439 
1440     if (!call->regArgs_.append(
1441             MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_))) {
1442       return false;
1443     }
1444 
1445     uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
1446 
1447     maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes);
1448     return true;
1449   }
1450 
1451   // Wrappers for creating various kinds of calls.
1452 
collectUnaryCallResult(MIRType type,MDefinition ** result)1453   bool collectUnaryCallResult(MIRType type, MDefinition** result) {
1454     MInstruction* def;
1455     switch (type) {
1456       case MIRType::Int32:
1457         def = MWasmRegisterResult::New(alloc(), MIRType::Int32, ReturnReg);
1458         break;
1459       case MIRType::Int64:
1460         def = MWasmRegister64Result::New(alloc(), ReturnReg64);
1461         break;
1462       case MIRType::Float32:
1463         def = MWasmFloatRegisterResult::New(alloc(), type, ReturnFloat32Reg);
1464         break;
1465       case MIRType::Double:
1466         def = MWasmFloatRegisterResult::New(alloc(), type, ReturnDoubleReg);
1467         break;
1468 #ifdef ENABLE_WASM_SIMD
1469       case MIRType::Simd128:
1470         def = MWasmFloatRegisterResult::New(alloc(), type, ReturnSimd128Reg);
1471         break;
1472 #endif
1473       case MIRType::RefOrNull:
1474         def = MWasmRegisterResult::New(alloc(), MIRType::RefOrNull, ReturnReg);
1475         break;
1476       default:
1477         MOZ_CRASH("unexpected MIRType result for builtin call");
1478     }
1479 
1480     if (!def) {
1481       return false;
1482     }
1483 
1484     curBlock_->add(def);
1485     *result = def;
1486 
1487     return true;
1488   }
1489 
1490   bool collectCallResults(const ResultType& type,
1491                           MWasmStackResultArea* stackResultArea,
1492                           DefVector* results) {
1493     if (!results->reserve(type.length())) {
1494       return false;
1495     }
1496 
1497     // The result iterator goes in the order in which results would be popped
1498     // off; we want the order in which they would be pushed.
1499     ABIResultIter iter(type);
1500     uint32_t stackResultCount = 0;
1501     while (!iter.done()) {
1502       if (iter.cur().onStack()) {
1503         stackResultCount++;
1504       }
1505       iter.next();
1506     }
1507 
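         // Walking backwards restores push order. E.g. for (result i32 f64)
         // the forward pass counted one stack result (the i32; the trailing
         // f64 comes back in a register), so the loop below appends the
         // i32's MWasmStackResult first and the f64's register result last.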
1508     for (iter.switchToPrev(); !iter.done(); iter.prev()) {
1509       if (!mirGen().ensureBallast()) {
1510         return false;
1511       }
1512       const ABIResult& result = iter.cur();
1513       MInstruction* def;
1514       if (result.inRegister()) {
1515         switch (result.type().kind()) {
1516           case wasm::ValType::I32:
1517             def =
1518                 MWasmRegisterResult::New(alloc(), MIRType::Int32, result.gpr());
1519             break;
1520           case wasm::ValType::I64:
1521             def = MWasmRegister64Result::New(alloc(), result.gpr64());
1522             break;
1523           case wasm::ValType::F32:
1524             def = MWasmFloatRegisterResult::New(alloc(), MIRType::Float32,
1525                                                 result.fpr());
1526             break;
1527           case wasm::ValType::F64:
1528             def = MWasmFloatRegisterResult::New(alloc(), MIRType::Double,
1529                                                 result.fpr());
1530             break;
1531           case wasm::ValType::Rtt:
1532           case wasm::ValType::Ref:
1533             def = MWasmRegisterResult::New(alloc(), MIRType::RefOrNull,
1534                                            result.gpr());
1535             break;
1536           case wasm::ValType::V128:
1537 #ifdef ENABLE_WASM_SIMD
1538             def = MWasmFloatRegisterResult::New(alloc(), MIRType::Simd128,
1539                                                 result.fpr());
1540 #else
1541             return this->iter().fail("Ion has no SIMD support yet");
1542 #endif
1543         }
1544       } else {
1545         MOZ_ASSERT(stackResultArea);
1546         MOZ_ASSERT(stackResultCount);
1547         uint32_t idx = --stackResultCount;
1548         def = MWasmStackResult::New(alloc(), stackResultArea, idx);
1549       }
1550 
1551       if (!def) {
1552         return false;
1553       }
1554       curBlock_->add(def);
1555       results->infallibleAppend(def);
1556     }
1557 
1558     MOZ_ASSERT(results->length() == type.length());
1559 
1560     return true;
1561   }
1562 
1563   bool callDirect(const FuncType& funcType, uint32_t funcIndex,
1564                   uint32_t lineOrBytecode, const CallCompileState& call,
1565                   DefVector* results) {
1566     if (inDeadCode()) {
1567       return true;
1568     }
1569 
1570     CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Func);
1571     ResultType resultType = ResultType::Vector(funcType.results());
1572     auto callee = CalleeDesc::function(funcIndex);
1573     ArgTypeVector args(funcType);
1574     auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
1575                                StackArgAreaSizeUnaligned(args));
1576     if (!ins) {
1577       return false;
1578     }
1579 
1580     curBlock_->add(ins);
1581 
1582     return collectCallResults(resultType, call.stackResultArea_, results);
1583   }
1584 
1585   bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
1586                     MDefinition* index, uint32_t lineOrBytecode,
1587                     const CallCompileState& call, DefVector* results) {
1588     if (inDeadCode()) {
1589       return true;
1590     }
1591 
1592     const FuncType& funcType = moduleEnv_.types[funcTypeIndex].funcType();
1593     const TypeIdDesc& funcTypeId = moduleEnv_.typeIds[funcTypeIndex];
1594 
1595     CalleeDesc callee;
1596     if (moduleEnv_.isAsmJS()) {
1597       MOZ_ASSERT(tableIndex == 0);
1598       MOZ_ASSERT(funcTypeId.kind() == TypeIdDescKind::None);
1599       const TableDesc& table =
1600           moduleEnv_.tables[moduleEnv_.asmJSSigToTableIndex[funcTypeIndex]];
1601       MOZ_ASSERT(IsPowerOfTwo(table.initialLength));
1602 
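           // Illustration: initialLength == 8 gives mask 7, so an index of
           // 11 calls slot 11 & 7 == 3 instead of trapping; the power-of-two
           // table size asserted above is what makes a single AND correct.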
1603       MConstant* mask =
1604           MConstant::New(alloc(), Int32Value(table.initialLength - 1));
1605       curBlock_->add(mask);
1606       MBitAnd* maskedIndex = MBitAnd::New(alloc(), index, mask, MIRType::Int32);
1607       curBlock_->add(maskedIndex);
1608 
1609       index = maskedIndex;
1610       callee = CalleeDesc::asmJSTable(table);
1611     } else {
1612       MOZ_ASSERT(funcTypeId.kind() != TypeIdDescKind::None);
1613       const TableDesc& table = moduleEnv_.tables[tableIndex];
1614       callee = CalleeDesc::wasmTable(table, funcTypeId);
1615     }
1616 
1617     CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Dynamic);
1618     ArgTypeVector args(funcType);
1619     ResultType resultType = ResultType::Vector(funcType.results());
1620     auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
1621                                StackArgAreaSizeUnaligned(args), index);
1622     if (!ins) {
1623       return false;
1624     }
1625 
1626     curBlock_->add(ins);
1627 
1628     return collectCallResults(resultType, call.stackResultArea_, results);
1629   }
1630 
1631   bool callImport(unsigned globalDataOffset, uint32_t lineOrBytecode,
1632                   const CallCompileState& call, const FuncType& funcType,
1633                   DefVector* results) {
1634     if (inDeadCode()) {
1635       return true;
1636     }
1637 
1638     CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Dynamic);
1639     auto callee = CalleeDesc::import(globalDataOffset);
1640     ArgTypeVector args(funcType);
1641     ResultType resultType = ResultType::Vector(funcType.results());
1642     auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
1643                                StackArgAreaSizeUnaligned(args));
1644     if (!ins) {
1645       return false;
1646     }
1647 
1648     curBlock_->add(ins);
1649 
1650     return collectCallResults(resultType, call.stackResultArea_, results);
1651   }
1652 
1653   bool builtinCall(const SymbolicAddressSignature& builtin,
1654                    uint32_t lineOrBytecode, const CallCompileState& call,
1655                    MDefinition** def) {
1656     if (inDeadCode()) {
1657       *def = nullptr;
1658       return true;
1659     }
1660 
1661     MOZ_ASSERT(builtin.failureMode == FailureMode::Infallible);
1662 
1663     CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
1664     auto callee = CalleeDesc::builtin(builtin.identity);
1665     auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
1666                                StackArgAreaSizeUnaligned(builtin));
1667     if (!ins) {
1668       return false;
1669     }
1670 
1671     curBlock_->add(ins);
1672 
1673     return collectUnaryCallResult(builtin.retType, def);
1674   }
1675 
1676   bool builtinInstanceMethodCall(const SymbolicAddressSignature& builtin,
1677                                  uint32_t lineOrBytecode,
1678                                  const CallCompileState& call,
1679                                  MDefinition** def = nullptr) {
1680     MOZ_ASSERT_IF(!def, builtin.retType == MIRType::None);
1681     if (inDeadCode()) {
1682       if (def) {
1683         *def = nullptr;
1684       }
1685       return true;
1686     }
1687 
1688     CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
1689     auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(
1690         alloc(), desc, builtin.identity, builtin.failureMode, call.instanceArg_,
1691         call.regArgs_, StackArgAreaSizeUnaligned(builtin));
1692     if (!ins) {
1693       return false;
1694     }
1695 
1696     curBlock_->add(ins);
1697 
1698     return def ? collectUnaryCallResult(builtin.retType, def) : true;
1699   }
1700 
1701   /*********************************************** Control flow generation */
1702 
1703   inline bool inDeadCode() const { return curBlock_ == nullptr; }
1704 
1705   bool returnValues(const DefVector& values) {
1706     if (inDeadCode()) {
1707       return true;
1708     }
1709 
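         // E.g. a function of type (result f64 i32) stores the f64 through
         // stackResultPointer_ below (it is not the last result) and then
         // ends the block with an MWasmReturn carrying the i32 in the
         // return register.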
1710     if (values.empty()) {
1711       curBlock_->end(MWasmReturnVoid::New(alloc(), tlsPointer_));
1712     } else {
1713       ResultType resultType = ResultType::Vector(funcType().results());
1714       ABIResultIter iter(resultType);
1715       // Advance to the end, then walk backwards: FIFO order, not LIFO.
1716       while (!iter.done()) {
1717         iter.next();
1718       }
1719       iter.switchToPrev();
1720       for (uint32_t i = 0; !iter.done(); iter.prev(), i++) {
1721         if (!mirGen().ensureBallast()) {
1722           return false;
1723         }
1724         const ABIResult& result = iter.cur();
1725         if (result.onStack()) {
1726           MOZ_ASSERT(iter.remaining() > 1);
1727           if (result.type().isReference()) {
1728             auto* loc = MWasmDerivedPointer::New(alloc(), stackResultPointer_,
1729                                                  result.stackOffset());
1730             curBlock_->add(loc);
1731             auto* store =
1732                 MWasmStoreRef::New(alloc(), tlsPointer_, loc, values[i],
1733                                    AliasSet::WasmStackResult);
1734             curBlock_->add(store);
1735           } else {
1736             auto* store = MWasmStoreStackResult::New(
1737                 alloc(), stackResultPointer_, result.stackOffset(), values[i]);
1738             curBlock_->add(store);
1739           }
1740         } else {
1741           MOZ_ASSERT(iter.remaining() == 1);
1742           MOZ_ASSERT(i + 1 == values.length());
1743           curBlock_->end(MWasmReturn::New(alloc(), values[i], tlsPointer_));
1744         }
1745       }
1746     }
1747     curBlock_ = nullptr;
1748     return true;
1749   }
1750 
1751   void unreachableTrap() {
1752     if (inDeadCode()) {
1753       return;
1754     }
1755 
1756     auto* ins =
1757         MWasmTrap::New(alloc(), wasm::Trap::Unreachable, bytecodeOffset());
1758     curBlock_->end(ins);
1759     curBlock_ = nullptr;
1760   }
1761 
1762  private:
1763   static uint32_t numPushed(MBasicBlock* block) {
1764     return block->stackDepth() - block->info().firstStackSlot();
1765   }
1766 
1767  public:
1768   [[nodiscard]] bool pushDefs(const DefVector& defs) {
1769     if (inDeadCode()) {
1770       return true;
1771     }
1772     MOZ_ASSERT(numPushed(curBlock_) == 0);
1773     if (!curBlock_->ensureHasSlots(defs.length())) {
1774       return false;
1775     }
1776     for (MDefinition* def : defs) {
1777       MOZ_ASSERT(def->type() != MIRType::None);
1778       curBlock_->push(def);
1779     }
1780     return true;
1781   }
1782 
1783   bool popPushedDefs(DefVector* defs) {
1784     size_t n = numPushed(curBlock_);
1785     if (!defs->resizeUninitialized(n)) {
1786       return false;
1787     }
1788     for (; n > 0; n--) {
1789       MDefinition* def = curBlock_->pop();
1790       MOZ_ASSERT(def->type() != MIRType::Value);
1791       (*defs)[n - 1] = def;
1792     }
1793     return true;
1794   }
1795 
1796  private:
1797   bool addJoinPredecessor(const DefVector& defs, MBasicBlock** joinPred) {
1798     *joinPred = curBlock_;
1799     if (inDeadCode()) {
1800       return true;
1801     }
1802     return pushDefs(defs);
1803   }
1804 
1805  public:
1806   bool branchAndStartThen(MDefinition* cond, MBasicBlock** elseBlock) {
1807     if (inDeadCode()) {
1808       *elseBlock = nullptr;
1809     } else {
1810       MBasicBlock* thenBlock;
1811       if (!newBlock(curBlock_, &thenBlock)) {
1812         return false;
1813       }
1814       if (!newBlock(curBlock_, elseBlock)) {
1815         return false;
1816       }
1817 
1818       curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));
1819 
1820       curBlock_ = thenBlock;
1821       mirGraph().moveBlockToEnd(curBlock_);
1822     }
1823 
1824     return startBlock();
1825   }
1826 
1827   bool switchToElse(MBasicBlock* elseBlock, MBasicBlock** thenJoinPred) {
1828     DefVector values;
1829     if (!finishBlock(&values)) {
1830       return false;
1831     }
1832 
1833     if (!elseBlock) {
1834       *thenJoinPred = nullptr;
1835     } else {
1836       if (!addJoinPredecessor(values, thenJoinPred)) {
1837         return false;
1838       }
1839 
1840       curBlock_ = elseBlock;
1841       mirGraph().moveBlockToEnd(curBlock_);
1842     }
1843 
1844     return startBlock();
1845   }
1846 
1847   bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
1848     DefVector values;
1849     if (!finishBlock(&values)) {
1850       return false;
1851     }
1852 
1853     if (!thenJoinPred && inDeadCode()) {
1854       return true;
1855     }
1856 
1857     MBasicBlock* elseJoinPred;
1858     if (!addJoinPredecessor(values, &elseJoinPred)) {
1859       return false;
1860     }
1861 
1862     mozilla::Array<MBasicBlock*, 2> blocks;
1863     size_t numJoinPreds = 0;
1864     if (thenJoinPred) {
1865       blocks[numJoinPreds++] = thenJoinPred;
1866     }
1867     if (elseJoinPred) {
1868       blocks[numJoinPreds++] = elseJoinPred;
1869     }
1870 
1871     if (numJoinPreds == 0) {
1872       return true;
1873     }
1874 
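         // Join the live arms, restoring the usual diamond shape:
         //
         //        branch
         //        /    \
         //     then    else
         //        \    /
         //        join
         //
         // Either arm may be absent if it ended in dead code.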
1875     MBasicBlock* join;
1876     if (!goToNewBlock(blocks[0], &join)) {
1877       return false;
1878     }
1879     for (size_t i = 1; i < numJoinPreds; ++i) {
1880       if (!goToExistingBlock(blocks[i], join)) {
1881         return false;
1882       }
1883     }
1884 
1885     curBlock_ = join;
1886     return popPushedDefs(defs);
1887   }
1888 
1889   bool startBlock() {
1890     MOZ_ASSERT_IF(blockDepth_ < blockPatches_.length(),
1891                   blockPatches_[blockDepth_].empty());
1892     blockDepth_++;
1893     return true;
1894   }
1895 
1896   bool finishBlock(DefVector* defs) {
1897     MOZ_ASSERT(blockDepth_);
1898     uint32_t topLabel = --blockDepth_;
1899     return bindBranches(topLabel, defs);
1900   }
1901 
1902   bool startLoop(MBasicBlock** loopHeader, size_t paramCount) {
1903     *loopHeader = nullptr;
1904 
1905     blockDepth_++;
1906     loopDepth_++;
1907 
1908     if (inDeadCode()) {
1909       return true;
1910     }
1911 
1912     // Create the loop header.
1913     MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_ - 1);
1914     *loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
1915                                    MBasicBlock::PENDING_LOOP_HEADER);
1916     if (!*loopHeader) {
1917       return false;
1918     }
1919 
1920     (*loopHeader)->setLoopDepth(loopDepth_);
1921     mirGraph().addBlock(*loopHeader);
1922     curBlock_->end(MGoto::New(alloc(), *loopHeader));
1923 
1924     DefVector loopParams;
1925     if (!iter().getResults(paramCount, &loopParams)) {
1926       return false;
1927     }
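         // Loop parameters become phis. E.g. for one i32 parameter x the
         // header gets x' = phi(x, <backedge value>), where the second
         // operand is added by setBackedgeWasm when the loop is closed; the
         // body then reads x' rather than the pre-loop definition.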
1928     for (size_t i = 0; i < paramCount; i++) {
1929       MPhi* phi = MPhi::New(alloc(), loopParams[i]->type());
1930       if (!phi) {
1931         return false;
1932       }
1933       if (!phi->reserveLength(2)) {
1934         return false;
1935       }
1936       (*loopHeader)->addPhi(phi);
1937       phi->addInput(loopParams[i]);
1938       loopParams[i] = phi;
1939     }
1940     iter().setResults(paramCount, loopParams);
1941 
1942     MBasicBlock* body;
1943     if (!goToNewBlock(*loopHeader, &body)) {
1944       return false;
1945     }
1946     curBlock_ = body;
1947     return true;
1948   }
1949 
1950  private:
1951   void fixupRedundantPhis(MBasicBlock* b) {
1952     for (size_t i = 0, depth = b->stackDepth(); i < depth; i++) {
1953       MDefinition* def = b->getSlot(i);
1954       if (def->isUnused()) {
1955         b->setSlot(i, def->toPhi()->getOperand(0));
1956       }
1957     }
1958   }
1959 
1960   bool setLoopBackedge(MBasicBlock* loopEntry, MBasicBlock* loopBody,
1961                        MBasicBlock* backedge, size_t paramCount) {
1962     if (!loopEntry->setBackedgeWasm(backedge, paramCount)) {
1963       return false;
1964     }
1965 
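         // E.g. a loop that never rebinds x now has phi(x, x) in its header;
         // such phis are flagged unused, their uses rewritten to x, and the
         // phis discarded below.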
1966     // Flag all redundant phis as unused.
1967     for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd();
1968          phi++) {
1969       MOZ_ASSERT(phi->numOperands() == 2);
1970       if (phi->getOperand(0) == phi->getOperand(1)) {
1971         phi->setUnused();
1972       }
1973     }
1974 
1975     // Fix up phis stored in the slots Vector of pending blocks.
1976     for (ControlFlowPatchVector& patches : blockPatches_) {
1977       for (ControlFlowPatch& p : patches) {
1978         MBasicBlock* block = p.ins->block();
1979         if (block->loopDepth() >= loopEntry->loopDepth()) {
1980           fixupRedundantPhis(block);
1981         }
1982       }
1983     }
1984 
1985     // The loop body, if any, might be referencing recycled phis too.
1986     if (loopBody) {
1987       fixupRedundantPhis(loopBody);
1988     }
1989 
1990     // Discard redundant phis and add to the free list.
1991     for (MPhiIterator phi = loopEntry->phisBegin();
1992          phi != loopEntry->phisEnd();) {
1993       MPhi* entryDef = *phi++;
1994       if (!entryDef->isUnused()) {
1995         continue;
1996       }
1997 
1998       entryDef->justReplaceAllUsesWith(entryDef->getOperand(0));
1999       loopEntry->discardPhi(entryDef);
2000       mirGraph().addPhiToFreeList(entryDef);
2001     }
2002 
2003     return true;
2004   }
2005 
2006  public:
2007   bool closeLoop(MBasicBlock* loopHeader, DefVector* loopResults) {
2008     MOZ_ASSERT(blockDepth_ >= 1);
2009     MOZ_ASSERT(loopDepth_);
2010 
2011     uint32_t headerLabel = blockDepth_ - 1;
2012 
2013     if (!loopHeader) {
2014       MOZ_ASSERT(inDeadCode());
2015       MOZ_ASSERT(headerLabel >= blockPatches_.length() ||
2016                  blockPatches_[headerLabel].empty());
2017       blockDepth_--;
2018       loopDepth_--;
2019       return true;
2020     }
2021 
2022     // Op::Loop doesn't have an implicit backedge so temporarily set
2023     // aside the end of the loop body to bind backedges.
2024     MBasicBlock* loopBody = curBlock_;
2025     curBlock_ = nullptr;
2026 
2027     // As explained in bug 1253544, Ion apparently has an invariant that
2028     // there is only one backedge to loop headers. To handle wasm's ability
2029     // to have multiple backedges to the same loop header, we bind all those
2030     // branches as forward jumps to a single backward jump. This is
2031     // unfortunate but the optimizer is able to fold these into single jumps
2032     // to backedges.
2033     DefVector backedgeValues;
2034     if (!bindBranches(headerLabel, &backedgeValues)) {
2035       return false;
2036     }
2037 
2038     MOZ_ASSERT(loopHeader->loopDepth() == loopDepth_);
2039 
2040     if (curBlock_) {
2041       // We're on the loop backedge block, created by bindBranches.
2042       for (size_t i = 0, n = numPushed(curBlock_); i != n; i++) {
2043         curBlock_->pop();
2044       }
2045 
2046       if (!pushDefs(backedgeValues)) {
2047         return false;
2048       }
2049 
2050       MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_);
2051       curBlock_->end(MGoto::New(alloc(), loopHeader));
2052       if (!setLoopBackedge(loopHeader, loopBody, curBlock_,
2053                            backedgeValues.length())) {
2054         return false;
2055       }
2056     }
2057 
2058     curBlock_ = loopBody;
2059 
2060     loopDepth_--;
2061 
2062     // If curBlock_ is still at the inner loop body's depth, correct it.
2063     if (curBlock_ && curBlock_->loopDepth() != loopDepth_) {
2064       MBasicBlock* out;
2065       if (!goToNewBlock(curBlock_, &out)) {
2066         return false;
2067       }
2068       curBlock_ = out;
2069     }
2070 
2071     blockDepth_ -= 1;
2072     return inDeadCode() || popPushedDefs(loopResults);
2073   }
2074 
2075   bool addControlFlowPatch(MControlInstruction* ins, uint32_t relative,
2076                            uint32_t index) {
2077     MOZ_ASSERT(relative < blockDepth_);
2078     uint32_t absolute = blockDepth_ - 1 - relative;
2079 
2080     if (absolute >= blockPatches_.length() &&
2081         !blockPatches_.resize(absolute + 1)) {
2082       return false;
2083     }
2084 
2085     return blockPatches_[absolute].append(ControlFlowPatch(ins, index));
2086   }
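
       // A branch's target block does not exist when the branch itself is
       // compiled; bindBranches creates the join block when the enclosing
       // construct ends. Until then each branch is recorded as a (control
       // instruction, successor index) patch filed under the absolute depth
       // it targets.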
2087 
2088   bool br(uint32_t relativeDepth, const DefVector& values) {
2089     if (inDeadCode()) {
2090       return true;
2091     }
2092 
2093     MGoto* jump = MGoto::New(alloc());
2094     if (!addControlFlowPatch(jump, relativeDepth, MGoto::TargetIndex)) {
2095       return false;
2096     }
2097 
2098     if (!pushDefs(values)) {
2099       return false;
2100     }
2101 
2102     curBlock_->end(jump);
2103     curBlock_ = nullptr;
2104     return true;
2105   }
2106 
2107   bool brIf(uint32_t relativeDepth, const DefVector& values,
2108             MDefinition* condition) {
2109     if (inDeadCode()) {
2110       return true;
2111     }
2112 
2113     MBasicBlock* joinBlock = nullptr;
2114     if (!newBlock(curBlock_, &joinBlock)) {
2115       return false;
2116     }
2117 
2118     MTest* test = MTest::New(alloc(), condition, joinBlock);
2119     if (!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
2120       return false;
2121     }
2122 
2123     if (!pushDefs(values)) {
2124       return false;
2125     }
2126 
2127     curBlock_->end(test);
2128     curBlock_ = joinBlock;
2129     return true;
2130   }
2131 
2132   bool brTable(MDefinition* operand, uint32_t defaultDepth,
2133                const Uint32Vector& depths, const DefVector& values) {
2134     if (inDeadCode()) {
2135       return true;
2136     }
2137 
2138     size_t numCases = depths.length();
2139     MOZ_ASSERT(numCases <= INT32_MAX);
2140     MOZ_ASSERT(numCases);
2141 
2142     MTableSwitch* table =
2143         MTableSwitch::New(alloc(), operand, 0, int32_t(numCases - 1));
2144 
2145     size_t defaultIndex;
2146     if (!table->addDefault(nullptr, &defaultIndex)) {
2147       return false;
2148     }
2149     if (!addControlFlowPatch(table, defaultDepth, defaultIndex)) {
2150       return false;
2151     }
2152 
2153     using IndexToCaseMap =
2154         HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
2155 
2156     IndexToCaseMap indexToCase;
2157     if (!indexToCase.put(defaultDepth, defaultIndex)) {
2158       return false;
2159     }
2160 
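         // Deduplicate successors. E.g. depths [1, 0, 1] with defaultDepth 0
         // yield only two successors: depth 0 shares the default's slot, and
         // both depth-1 cases reuse a single added successor via addCase.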
2161     for (size_t i = 0; i < numCases; i++) {
2162       uint32_t depth = depths[i];
2163 
2164       size_t caseIndex;
2165       IndexToCaseMap::AddPtr p = indexToCase.lookupForAdd(depth);
2166       if (!p) {
2167         if (!table->addSuccessor(nullptr, &caseIndex)) {
2168           return false;
2169         }
2170         if (!addControlFlowPatch(table, depth, caseIndex)) {
2171           return false;
2172         }
2173         if (!indexToCase.add(p, depth, caseIndex)) {
2174           return false;
2175         }
2176       } else {
2177         caseIndex = p->value();
2178       }
2179 
2180       if (!table->addCase(caseIndex)) {
2181         return false;
2182       }
2183     }
2184 
2185     if (!pushDefs(values)) {
2186       return false;
2187     }
2188 
2189     curBlock_->end(table);
2190     curBlock_ = nullptr;
2191 
2192     return true;
2193   }
2194 
2195   /************************************************************ DECODING ***/
2196 
2197   uint32_t readCallSiteLineOrBytecode() {
2198     if (!func_.callSiteLineNums.empty()) {
2199       return func_.callSiteLineNums[lastReadCallSite_++];
2200     }
2201     return iter_.lastOpcodeOffset();
2202   }
2203 
2204 #if DEBUG
2205   bool done() const { return iter_.done(); }
2206 #endif
2207 
2208   /*************************************************************************/
2209  private:
2210   bool newBlock(MBasicBlock* pred, MBasicBlock** block) {
2211     *block = MBasicBlock::New(mirGraph(), info(), pred, MBasicBlock::NORMAL);
2212     if (!*block) {
2213       return false;
2214     }
2215     mirGraph().addBlock(*block);
2216     (*block)->setLoopDepth(loopDepth_);
2217     return true;
2218   }
2219 
2220   bool goToNewBlock(MBasicBlock* pred, MBasicBlock** block) {
2221     if (!newBlock(pred, block)) {
2222       return false;
2223     }
2224     pred->end(MGoto::New(alloc(), *block));
2225     return true;
2226   }
2227 
2228   bool goToExistingBlock(MBasicBlock* prev, MBasicBlock* next) {
2229     MOZ_ASSERT(prev);
2230     MOZ_ASSERT(next);
2231     prev->end(MGoto::New(alloc(), next));
2232     return next->addPredecessor(alloc(), prev);
2233   }
2234 
2235   bool bindBranches(uint32_t absolute, DefVector* defs) {
2236     if (absolute >= blockPatches_.length() || blockPatches_[absolute].empty()) {
2237       return inDeadCode() || popPushedDefs(defs);
2238     }
2239 
2240     ControlFlowPatchVector& patches = blockPatches_[absolute];
2241     MControlInstruction* ins = patches[0].ins;
2242     MBasicBlock* pred = ins->block();
2243 
2244     MBasicBlock* join = nullptr;
2245     if (!newBlock(pred, &join)) {
2246       return false;
2247     }
2248 
2249     pred->mark();
2250     ins->replaceSuccessor(patches[0].index, join);
2251 
2252     for (size_t i = 1; i < patches.length(); i++) {
2253       ins = patches[i].ins;
2254 
2255       pred = ins->block();
2256       if (!pred->isMarked()) {
2257         if (!join->addPredecessor(alloc(), pred)) {
2258           return false;
2259         }
2260         pred->mark();
2261       }
2262 
2263       ins->replaceSuccessor(patches[i].index, join);
2264     }
2265 
2266     MOZ_ASSERT_IF(curBlock_, !curBlock_->isMarked());
2267     for (uint32_t i = 0; i < join->numPredecessors(); i++) {
2268       join->getPredecessor(i)->unmark();
2269     }
2270 
2271     if (curBlock_ && !goToExistingBlock(curBlock_, join)) {
2272       return false;
2273     }
2274 
2275     curBlock_ = join;
2276 
2277     if (!popPushedDefs(defs)) {
2278       return false;
2279     }
2280 
2281     patches.clear();
2282     return true;
2283   }
2284 };
2285 
2286 template <>
2287 MDefinition* FunctionCompiler::unary<MToFloat32>(MDefinition* op) {
2288   if (inDeadCode()) {
2289     return nullptr;
2290   }
2291   auto* ins = MToFloat32::New(alloc(), op, mustPreserveNaN(op->type()));
2292   curBlock_->add(ins);
2293   return ins;
2294 }
2295 
2296 template <>
2297 MDefinition* FunctionCompiler::unary<MWasmBuiltinTruncateToInt32>(
2298     MDefinition* op) {
2299   if (inDeadCode()) {
2300     return nullptr;
2301   }
2302   auto* ins = MWasmBuiltinTruncateToInt32::New(alloc(), op, tlsPointer_,
2303                                                bytecodeOffset());
2304   curBlock_->add(ins);
2305   return ins;
2306 }
2307 
2308 template <>
2309 MDefinition* FunctionCompiler::unary<MNot>(MDefinition* op) {
2310   if (inDeadCode()) {
2311     return nullptr;
2312   }
2313   auto* ins = MNot::NewInt32(alloc(), op);
2314   curBlock_->add(ins);
2315   return ins;
2316 }
2317 
2318 template <>
2319 MDefinition* FunctionCompiler::unary<MAbs>(MDefinition* op, MIRType type) {
2320   if (inDeadCode()) {
2321     return nullptr;
2322   }
2323   auto* ins = MAbs::NewWasm(alloc(), op, type);
2324   curBlock_->add(ins);
2325   return ins;
2326 }
2327 
2328 }  // end anonymous namespace
2329 
2330 static bool EmitI32Const(FunctionCompiler& f) {
2331   int32_t i32;
2332   if (!f.iter().readI32Const(&i32)) {
2333     return false;
2334   }
2335 
2336   f.iter().setResult(f.constant(Int32Value(i32), MIRType::Int32));
2337   return true;
2338 }
2339 
2340 static bool EmitI64Const(FunctionCompiler& f) {
2341   int64_t i64;
2342   if (!f.iter().readI64Const(&i64)) {
2343     return false;
2344   }
2345 
2346   f.iter().setResult(f.constant(i64));
2347   return true;
2348 }
2349 
2350 static bool EmitF32Const(FunctionCompiler& f) {
2351   float f32;
2352   if (!f.iter().readF32Const(&f32)) {
2353     return false;
2354   }
2355 
2356   f.iter().setResult(f.constant(f32));
2357   return true;
2358 }
2359 
2360 static bool EmitF64Const(FunctionCompiler& f) {
2361   double f64;
2362   if (!f.iter().readF64Const(&f64)) {
2363     return false;
2364   }
2365 
2366   f.iter().setResult(f.constant(f64));
2367   return true;
2368 }
2369 
2370 static bool EmitBlock(FunctionCompiler& f) {
2371   ResultType params;
2372   return f.iter().readBlock(&params) && f.startBlock();
2373 }
2374 
2375 static bool EmitLoop(FunctionCompiler& f) {
2376   ResultType params;
2377   if (!f.iter().readLoop(&params)) {
2378     return false;
2379   }
2380 
2381   MBasicBlock* loopHeader;
2382   if (!f.startLoop(&loopHeader, params.length())) {
2383     return false;
2384   }
2385 
2386   f.addInterruptCheck();
2387 
2388   f.iter().controlItem() = loopHeader;
2389   return true;
2390 }
2391 
2392 static bool EmitIf(FunctionCompiler& f) {
2393   ResultType params;
2394   MDefinition* condition = nullptr;
2395   if (!f.iter().readIf(&params, &condition)) {
2396     return false;
2397   }
2398 
2399   MBasicBlock* elseBlock;
2400   if (!f.branchAndStartThen(condition, &elseBlock)) {
2401     return false;
2402   }
2403 
2404   f.iter().controlItem() = elseBlock;
2405   return true;
2406 }
2407 
2408 static bool EmitElse(FunctionCompiler& f) {
2409   ResultType paramType;
2410   ResultType resultType;
2411   DefVector thenValues;
2412   if (!f.iter().readElse(&paramType, &resultType, &thenValues)) {
2413     return false;
2414   }
2415 
2416   if (!f.pushDefs(thenValues)) {
2417     return false;
2418   }
2419 
2420   if (!f.switchToElse(f.iter().controlItem(), &f.iter().controlItem())) {
2421     return false;
2422   }
2423 
2424   return true;
2425 }
2426 
2427 static bool EmitEnd(FunctionCompiler& f) {
2428   LabelKind kind;
2429   ResultType type;
2430   DefVector preJoinDefs;
2431   DefVector resultsForEmptyElse;
2432   if (!f.iter().readEnd(&kind, &type, &preJoinDefs, &resultsForEmptyElse)) {
2433     return false;
2434   }
2435 
2436   MBasicBlock* block = f.iter().controlItem();
2437   f.iter().popEnd();
2438 
2439   if (!f.pushDefs(preJoinDefs)) {
2440     return false;
2441   }
2442 
2443   DefVector postJoinDefs;
2444   switch (kind) {
2445     case LabelKind::Body:
2446       MOZ_ASSERT(f.iter().controlStackEmpty());
2447       if (!f.finishBlock(&postJoinDefs)) {
2448         return false;
2449       }
2450       if (!f.returnValues(postJoinDefs)) {
2451         return false;
2452       }
2453       return f.iter().endFunction(f.iter().end());
2454     case LabelKind::Block:
2455       if (!f.finishBlock(&postJoinDefs)) {
2456         return false;
2457       }
2458       break;
2459     case LabelKind::Loop:
2460       if (!f.closeLoop(block, &postJoinDefs)) {
2461         return false;
2462       }
2463       break;
2464     case LabelKind::Then: {
2465       // If we didn't see an Else, create a trivial else block so that we
2466       // still form a diamond, which preserves Ion invariants.
2467       if (!f.switchToElse(block, &block)) {
2468         return false;
2469       }
2470 
2471       if (!f.pushDefs(resultsForEmptyElse)) {
2472         return false;
2473       }
2474 
2475       if (!f.joinIfElse(block, &postJoinDefs)) {
2476         return false;
2477       }
2478       break;
2479     }
2480     case LabelKind::Else:
2481       if (!f.joinIfElse(block, &postJoinDefs)) {
2482         return false;
2483       }
2484       break;
2485 #ifdef ENABLE_WASM_EXCEPTIONS
2486     case LabelKind::Try:
2487       MOZ_CRASH("NYI");
2488       break;
2489     case LabelKind::Catch:
2490       MOZ_CRASH("NYI");
2491       break;
2492     case LabelKind::CatchAll:
2493       MOZ_CRASH("NYI");
2494       break;
2495 #endif
2496   }
2497 
2498   MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == type.length());
2499   f.iter().setResults(postJoinDefs.length(), postJoinDefs);
2500 
2501   return true;
2502 }
2503 
2504 static bool EmitBr(FunctionCompiler& f) {
2505   uint32_t relativeDepth;
2506   ResultType type;
2507   DefVector values;
2508   if (!f.iter().readBr(&relativeDepth, &type, &values)) {
2509     return false;
2510   }
2511 
2512   return f.br(relativeDepth, values);
2513 }
2514 
2515 static bool EmitBrIf(FunctionCompiler& f) {
2516   uint32_t relativeDepth;
2517   ResultType type;
2518   DefVector values;
2519   MDefinition* condition;
2520   if (!f.iter().readBrIf(&relativeDepth, &type, &values, &condition)) {
2521     return false;
2522   }
2523 
2524   return f.brIf(relativeDepth, values, condition);
2525 }
2526 
2527 static bool EmitBrTable(FunctionCompiler& f) {
2528   Uint32Vector depths;
2529   uint32_t defaultDepth;
2530   ResultType branchValueType;
2531   DefVector branchValues;
2532   MDefinition* index;
2533   if (!f.iter().readBrTable(&depths, &defaultDepth, &branchValueType,
2534                             &branchValues, &index)) {
2535     return false;
2536   }
2537 
2538   // If all the targets are the same, or there are no targets, we can just
2539   // use a goto. This is not just an optimization: MaybeFoldConditionBlock
2540   // assumes that tables have more than one successor.
2541   bool allSameDepth = true;
2542   for (uint32_t depth : depths) {
2543     if (depth != defaultDepth) {
2544       allSameDepth = false;
2545       break;
2546     }
2547   }
2548 
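       // E.g. (br_table 2 2 2 (local.get $i)) has every target equal to the
       // default, so it becomes a plain branch to depth 2 rather than a
       // single-successor MTableSwitch.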
2549   if (allSameDepth) {
2550     return f.br(defaultDepth, branchValues);
2551   }
2552 
2553   return f.brTable(index, defaultDepth, depths, branchValues);
2554 }
2555 
2556 static bool EmitReturn(FunctionCompiler& f) {
2557   DefVector values;
2558   if (!f.iter().readReturn(&values)) {
2559     return false;
2560   }
2561 
2562   return f.returnValues(values);
2563 }
2564 
2565 static bool EmitUnreachable(FunctionCompiler& f) {
2566   if (!f.iter().readUnreachable()) {
2567     return false;
2568   }
2569 
2570   f.unreachableTrap();
2571   return true;
2572 }
2573 
2574 #ifdef ENABLE_WASM_EXCEPTIONS
2575 static bool EmitTry(FunctionCompiler& f) {
2576   ResultType params;
2577   if (!f.iter().readTry(&params)) {
2578     return false;
2579   }
2580 
2581   MOZ_CRASH("NYI");
2582 }
2583 
2584 static bool EmitCatch(FunctionCompiler& f) {
2585   LabelKind kind;
2586   uint32_t eventIndex;
2587   ResultType paramType, resultType;
2588   DefVector tryValues;
2589   if (!f.iter().readCatch(&kind, &eventIndex, &paramType, &resultType,
2590                           &tryValues)) {
2591     return false;
2592   }
2593 
2594   MOZ_CRASH("NYI");
2595 }
2596 
2597 static bool EmitCatchAll(FunctionCompiler& f) {
2598   LabelKind kind;
2599   ResultType paramType, resultType;
2600   DefVector tryValues;
2601   if (!f.iter().readCatchAll(&kind, &paramType, &resultType, &tryValues)) {
2602     return false;
2603   }
2604 
2605   MOZ_CRASH("NYI");
2606 }
2607 
2608 static bool EmitDelegate(FunctionCompiler& f) {
2609   uint32_t relativeDepth;
2610   ResultType resultType;
2611   DefVector tryValues;
2612   if (!f.iter().readDelegate(&relativeDepth, &resultType, &tryValues)) {
2613     return false;
2614   }
2615   f.iter().popDelegate();
2616 
2617   MOZ_CRASH("NYI");
2618 }
2619 
2620 static bool EmitThrow(FunctionCompiler& f) {
2621   uint32_t exnIndex;
2622   DefVector argValues;
2623   if (!f.iter().readThrow(&exnIndex, &argValues)) {
2624     return false;
2625   }
2626 
2627   MOZ_CRASH("NYI");
2628 }
2629 
2630 static bool EmitRethrow(FunctionCompiler& f) {
2631   uint32_t relativeDepth;
2632   if (!f.iter().readRethrow(&relativeDepth)) {
2633     return false;
2634   }
2635 
2636   MOZ_CRASH("NYI");
2637 }
2638 #endif
2639 
2640 static bool EmitCallArgs(FunctionCompiler& f, const FuncType& funcType,
2641                          const DefVector& args, CallCompileState* call) {
2642   for (size_t i = 0, n = funcType.args().length(); i < n; ++i) {
2643     if (!f.mirGen().ensureBallast()) {
2644       return false;
2645     }
2646     if (!f.passArg(args[i], funcType.args()[i], call)) {
2647       return false;
2648     }
2649   }
2650 
2651   ResultType resultType = ResultType::Vector(funcType.results());
2652   if (!f.passStackResultAreaCallArg(resultType, call)) {
2653     return false;
2654   }
2655 
2656   return f.finishCall(call);
2657 }
2658 
2659 static bool EmitCall(FunctionCompiler& f, bool asmJSFuncDef) {
2660   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
2661 
2662   uint32_t funcIndex;
2663   DefVector args;
2664   if (asmJSFuncDef) {
2665     if (!f.iter().readOldCallDirect(f.moduleEnv().numFuncImports(), &funcIndex,
2666                                     &args)) {
2667       return false;
2668     }
2669   } else {
2670     if (!f.iter().readCall(&funcIndex, &args)) {
2671       return false;
2672     }
2673   }
2674 
2675   if (f.inDeadCode()) {
2676     return true;
2677   }
2678 
2679   const FuncType& funcType = *f.moduleEnv().funcs[funcIndex].type;
2680 
2681   CallCompileState call;
2682   if (!EmitCallArgs(f, funcType, args, &call)) {
2683     return false;
2684   }
2685 
2686   DefVector results;
2687   if (f.moduleEnv().funcIsImport(funcIndex)) {
2688     uint32_t globalDataOffset =
2689         f.moduleEnv().funcImportGlobalDataOffsets[funcIndex];
2690     if (!f.callImport(globalDataOffset, lineOrBytecode, call, funcType,
2691                       &results)) {
2692       return false;
2693     }
2694   } else {
2695     if (!f.callDirect(funcType, funcIndex, lineOrBytecode, call, &results)) {
2696       return false;
2697     }
2698   }
2699 
2700   f.iter().setResults(results.length(), results);
2701   return true;
2702 }
2703 
2704 static bool EmitCallIndirect(FunctionCompiler& f, bool oldStyle) {
2705   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
2706 
2707   uint32_t funcTypeIndex;
2708   uint32_t tableIndex;
2709   MDefinition* callee;
2710   DefVector args;
2711   if (oldStyle) {
2712     tableIndex = 0;
2713     if (!f.iter().readOldCallIndirect(&funcTypeIndex, &callee, &args)) {
2714       return false;
2715     }
2716   } else {
2717     if (!f.iter().readCallIndirect(&funcTypeIndex, &tableIndex, &callee,
2718                                    &args)) {
2719       return false;
2720     }
2721   }
2722 
2723   if (f.inDeadCode()) {
2724     return true;
2725   }
2726 
2727   const FuncType& funcType = f.moduleEnv().types[funcTypeIndex].funcType();
2728 
2729   CallCompileState call;
2730   if (!EmitCallArgs(f, funcType, args, &call)) {
2731     return false;
2732   }
2733 
2734   DefVector results;
2735   if (!f.callIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode, call,
2736                       &results)) {
2737     return false;
2738   }
2739 
2740   f.iter().setResults(results.length(), results);
2741   return true;
2742 }
2743 
2744 static bool EmitGetLocal(FunctionCompiler& f) {
2745   uint32_t id;
2746   if (!f.iter().readGetLocal(f.locals(), &id)) {
2747     return false;
2748   }
2749 
2750   f.iter().setResult(f.getLocalDef(id));
2751   return true;
2752 }
2753 
2754 static bool EmitSetLocal(FunctionCompiler& f) {
2755   uint32_t id;
2756   MDefinition* value;
2757   if (!f.iter().readSetLocal(f.locals(), &id, &value)) {
2758     return false;
2759   }
2760 
2761   f.assign(id, value);
2762   return true;
2763 }
2764 
2765 static bool EmitTeeLocal(FunctionCompiler& f) {
2766   uint32_t id;
2767   MDefinition* value;
2768   if (!f.iter().readTeeLocal(f.locals(), &id, &value)) {
2769     return false;
2770   }
2771 
2772   f.assign(id, value);
2773   return true;
2774 }
2775 
2776 static bool EmitGetGlobal(FunctionCompiler& f) {
2777   uint32_t id;
2778   if (!f.iter().readGetGlobal(&id)) {
2779     return false;
2780   }
2781 
2782   const GlobalDesc& global = f.moduleEnv().globals[id];
2783   if (!global.isConstant()) {
2784     f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
2785                                        global.isIndirect(),
2786                                        ToMIRType(global.type())));
2787     return true;
2788   }
2789 
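       // The remaining cases fold the constant into the graph. E.g. an
       // immutable global such as (global $g i32 (i32.const 7)) is
       // isConstant(), so every global.get $g becomes MConstant(7) with no
       // load emitted.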
2790   LitVal value = global.constantValue();
2791   MIRType mirType = ToMIRType(value.type());
2792 
2793   MDefinition* result;
2794   switch (value.type().kind()) {
2795     case ValType::I32:
2796       result = f.constant(Int32Value(value.i32()), mirType);
2797       break;
2798     case ValType::I64:
2799       result = f.constant(int64_t(value.i64()));
2800       break;
2801     case ValType::F32:
2802       result = f.constant(value.f32());
2803       break;
2804     case ValType::F64:
2805       result = f.constant(value.f64());
2806       break;
2807     case ValType::V128:
2808 #ifdef ENABLE_WASM_SIMD
2809       result = f.constant(value.v128());
2810       break;
2811 #else
2812       return f.iter().fail("Ion has no SIMD support yet");
2813 #endif
2814     case ValType::Ref:
2815       switch (value.type().refTypeKind()) {
2816         case RefType::Func:
2817         case RefType::Extern:
2818         case RefType::Eq:
2819           MOZ_ASSERT(value.ref().isNull());
2820           result = f.nullRefConstant();
2821           break;
2822         case RefType::TypeIndex:
2823           MOZ_CRASH("unexpected reference type in EmitGetGlobal");
2824       }
2825       break;
2826     default:
2827       MOZ_CRASH("unexpected type in EmitGetGlobal");
2828   }
2829 
2830   f.iter().setResult(result);
2831   return true;
2832 }
2833 
2834 static bool EmitSetGlobal(FunctionCompiler& f) {
2835   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
2836 
2837   uint32_t id;
2838   MDefinition* value;
2839   if (!f.iter().readSetGlobal(&id, &value)) {
2840     return false;
2841   }
2842 
2843   const GlobalDesc& global = f.moduleEnv().globals[id];
2844   MOZ_ASSERT(global.isMutable());
2845   MInstruction* barrierAddr =
2846       f.storeGlobalVar(global.offset(), global.isIndirect(), value);
2847 
2848   // We always call the C++ postbarrier because the location will never be in
2849   // the nursery, and the value stored will very frequently be in the nursery.
2850   // The C++ postbarrier performs any necessary filtering.
2851 
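       // barrierAddr is expected to be non-null only for reference-typed
       // globals (storeGlobalVar returning the cell's address in that case),
       // so e.g. an i32 global.set is a plain store, while an externref
       // global.set also makes the builtin call below.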
2852   if (barrierAddr) {
2853     const SymbolicAddressSignature& callee = SASigPostBarrierFiltering;
2854     CallCompileState args;
2855     if (!f.passInstance(callee.argTypes[0], &args)) {
2856       return false;
2857     }
2858     if (!f.passArg(barrierAddr, callee.argTypes[1], &args)) {
2859       return false;
2860     }
2861     if (!f.finishCall(&args)) { return false; }
2862     if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args)) {
2863       return false;
2864     }
2865   }
2866 
2867   return true;
2868 }
2869 
2870 static bool EmitTeeGlobal(FunctionCompiler& f) {
2871   uint32_t id;
2872   MDefinition* value;
2873   if (!f.iter().readTeeGlobal(&id, &value)) {
2874     return false;
2875   }
2876 
2877   const GlobalDesc& global = f.moduleEnv().globals[id];
2878   MOZ_ASSERT(global.isMutable());
2879 
2880   f.storeGlobalVar(global.offset(), global.isIndirect(), value);
2881   return true;
2882 }
2883 
2884 template <typename MIRClass>
2885 static bool EmitUnary(FunctionCompiler& f, ValType operandType) {
2886   MDefinition* input;
2887   if (!f.iter().readUnary(operandType, &input)) {
2888     return false;
2889   }
2890 
2891   f.iter().setResult(f.unary<MIRClass>(input));
2892   return true;
2893 }
2894 
2895 template <typename MIRClass>
2896 static bool EmitConversion(FunctionCompiler& f, ValType operandType,
2897                            ValType resultType) {
2898   MDefinition* input;
2899   if (!f.iter().readConversion(operandType, resultType, &input)) {
2900     return false;
2901   }
2902 
2903   f.iter().setResult(f.unary<MIRClass>(input));
2904   return true;
2905 }
2906 
2907 template <typename MIRClass>
2908 static bool EmitUnaryWithType(FunctionCompiler& f, ValType operandType,
2909                               MIRType mirType) {
2910   MDefinition* input;
2911   if (!f.iter().readUnary(operandType, &input)) {
2912     return false;
2913   }
2914 
2915   f.iter().setResult(f.unary<MIRClass>(input, mirType));
2916   return true;
2917 }
2918 
2919 template <typename MIRClass>
2920 static bool EmitConversionWithType(FunctionCompiler& f, ValType operandType,
2921                                    ValType resultType, MIRType mirType) {
2922   MDefinition* input;
2923   if (!f.iter().readConversion(operandType, resultType, &input)) {
2924     return false;
2925   }
2926 
2927   f.iter().setResult(f.unary<MIRClass>(input, mirType));
2928   return true;
2929 }
2930 
2931 static bool EmitTruncate(FunctionCompiler& f, ValType operandType,
2932                          ValType resultType, bool isUnsigned,
2933                          bool isSaturating) {
2934   MDefinition* input = nullptr;
2935   if (!f.iter().readConversion(operandType, resultType, &input)) {
2936     return false;
2937   }
2938 
2939   TruncFlags flags = 0;
2940   if (isUnsigned) {
2941     flags |= TRUNC_UNSIGNED;
2942   }
2943   if (isSaturating) {
2944     flags |= TRUNC_SATURATING;
2945   }
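       // E.g. i32.trunc_sat_f64_u arrives here with TRUNC_UNSIGNED and
       // TRUNC_SATURATING both set: out-of-range inputs clamp and NaN
       // becomes zero, whereas the non-saturating forms trap at runtime.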
2946   if (resultType == ValType::I32) {
2947     if (f.moduleEnv().isAsmJS()) {
2948       if (input && (input->type() == MIRType::Double ||
2949                     input->type() == MIRType::Float32)) {
2950         f.iter().setResult(f.unary<MWasmBuiltinTruncateToInt32>(input));
2951       } else {
2952         f.iter().setResult(f.unary<MTruncateToInt32>(input));
2953       }
2954     } else {
2955       f.iter().setResult(f.truncate<MWasmTruncateToInt32>(input, flags));
2956     }
2957   } else {
2958     MOZ_ASSERT(resultType == ValType::I64);
2959     MOZ_ASSERT(!f.moduleEnv().isAsmJS());
2960 #if defined(JS_CODEGEN_ARM)
2961     f.iter().setResult(f.truncateWithTls(input, flags));
2962 #else
2963     f.iter().setResult(f.truncate<MWasmTruncateToInt64>(input, flags));
2964 #endif
2965   }
2966   return true;
2967 }
2968 
2969 static bool EmitSignExtend(FunctionCompiler& f, uint32_t srcSize,
2970                            uint32_t targetSize) {
2971   MDefinition* input;
2972   ValType type = targetSize == 4 ? ValType::I32 : ValType::I64;
2973   if (!f.iter().readConversion(type, type, &input)) {
2974     return false;
2975   }
2976 
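       // E.g. i32.extend8_s has srcSize == 1 and targetSize == 4 (byte
       // widths), sign-extending from the low 8 bits; the i64 variants use
       // targetSize == 8.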
2977   f.iter().setResult(f.signExtend(input, srcSize, targetSize));
2978   return true;
2979 }
2980 
2981 static bool EmitExtendI32(FunctionCompiler& f, bool isUnsigned) {
2982   MDefinition* input;
2983   if (!f.iter().readConversion(ValType::I32, ValType::I64, &input)) {
2984     return false;
2985   }
2986 
2987   f.iter().setResult(f.extendI32(input, isUnsigned));
2988   return true;
2989 }
2990 
2991 static bool EmitConvertI64ToFloatingPoint(FunctionCompiler& f,
2992                                           ValType resultType, MIRType mirType,
2993                                           bool isUnsigned) {
2994   MDefinition* input;
2995   if (!f.iter().readConversion(ValType::I64, resultType, &input)) {
2996     return false;
2997   }
2998 
2999   f.iter().setResult(f.convertI64ToFloatingPoint(input, mirType, isUnsigned));
3000   return true;
3001 }
3002 
3003 static bool EmitReinterpret(FunctionCompiler& f, ValType resultType,
3004                             ValType operandType, MIRType mirType) {
3005   MDefinition* input;
3006   if (!f.iter().readConversion(operandType, resultType, &input)) {
3007     return false;
3008   }
3009 
3010   f.iter().setResult(f.unary<MWasmReinterpret>(input, mirType));
3011   return true;
3012 }
3013 
3014 static bool EmitAdd(FunctionCompiler& f, ValType type, MIRType mirType) {
3015   MDefinition* lhs;
3016   MDefinition* rhs;
3017   if (!f.iter().readBinary(type, &lhs, &rhs)) {
3018     return false;
3019   }
3020 
3021   f.iter().setResult(f.add(lhs, rhs, mirType));
3022   return true;
3023 }
3024 
3025 static bool EmitSub(FunctionCompiler& f, ValType type, MIRType mirType) {
3026   MDefinition* lhs;
3027   MDefinition* rhs;
3028   if (!f.iter().readBinary(type, &lhs, &rhs)) {
3029     return false;
3030   }
3031 
3032   f.iter().setResult(f.sub(lhs, rhs, mirType));
3033   return true;
3034 }
3035 
3036 static bool EmitRotate(FunctionCompiler& f, ValType type, bool isLeftRotation) {
3037   MDefinition* lhs;
3038   MDefinition* rhs;
3039   if (!f.iter().readBinary(type, &lhs, &rhs)) {
3040     return false;
3041   }
3042 
3043   MDefinition* result = f.rotate(lhs, rhs, ToMIRType(type), isLeftRotation);
3044   f.iter().setResult(result);
3045   return true;
3046 }
3047 
3048 static bool EmitBitNot(FunctionCompiler& f, ValType operandType) {
3049   MDefinition* input;
3050   if (!f.iter().readUnary(operandType, &input)) {
3051     return false;
3052   }
3053 
3054   f.iter().setResult(f.bitnot(input));
3055   return true;
3056 }
3057 
3058 template <typename MIRClass>
3059 static bool EmitBitwise(FunctionCompiler& f, ValType operandType,
3060                         MIRType mirType) {
3061   MDefinition* lhs;
3062   MDefinition* rhs;
3063   if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
3064     return false;
3065   }
3066 
3067   f.iter().setResult(f.binary<MIRClass>(lhs, rhs, mirType));
3068   return true;
3069 }
3070 
3071 static bool EmitUrsh(FunctionCompiler& f, ValType operandType,
3072                      MIRType mirType) {
3073   MDefinition* lhs;
3074   MDefinition* rhs;
3075   if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
3076     return false;
3077   }
3078 
3079   f.iter().setResult(f.ursh(lhs, rhs, mirType));
3080   return true;
3081 }
3082 
3083 static bool EmitMul(FunctionCompiler& f, ValType operandType, MIRType mirType) {
3084   MDefinition* lhs;
3085   MDefinition* rhs;
3086   if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
3087     return false;
3088   }
3089 
3090   f.iter().setResult(
3091       f.mul(lhs, rhs, mirType,
3092             mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal));
3093   return true;
3094 }
3095 
3096 static bool EmitDiv(FunctionCompiler& f, ValType operandType, MIRType mirType,
3097                     bool isUnsigned) {
3098   MDefinition* lhs;
3099   MDefinition* rhs;
3100   if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
3101     return false;
3102   }
3103 
3104   f.iter().setResult(f.div(lhs, rhs, mirType, isUnsigned));
3105   return true;
3106 }
3107 
3108 static bool EmitRem(FunctionCompiler& f, ValType operandType, MIRType mirType,
3109                     bool isUnsigned) {
3110   MDefinition* lhs;
3111   MDefinition* rhs;
3112   if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
3113     return false;
3114   }
3115 
3116   f.iter().setResult(f.mod(lhs, rhs, mirType, isUnsigned));
3117   return true;
3118 }
3119 
3120 static bool EmitMinMax(FunctionCompiler& f, ValType operandType,
3121                        MIRType mirType, bool isMax) {
3122   MDefinition* lhs;
3123   MDefinition* rhs;
3124   if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
3125     return false;
3126   }
3127 
3128   f.iter().setResult(f.minMax(lhs, rhs, mirType, isMax));
3129   return true;
3130 }
3131 
3132 static bool EmitCopySign(FunctionCompiler& f, ValType operandType) {
3133   MDefinition* lhs;
3134   MDefinition* rhs;
3135   if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
3136     return false;
3137   }
3138 
3139   f.iter().setResult(f.binary<MCopySign>(lhs, rhs, ToMIRType(operandType)));
3140   return true;
3141 }
3142 
3143 static bool EmitComparison(FunctionCompiler& f, ValType operandType,
3144                            JSOp compareOp, MCompare::CompareType compareType) {
3145   MDefinition* lhs;
3146   MDefinition* rhs;
3147   if (!f.iter().readComparison(operandType, &lhs, &rhs)) {
3148     return false;
3149   }
3150 
3151   f.iter().setResult(f.compare(lhs, rhs, compareOp, compareType));
3152   return true;
3153 }
3154 
3155 static bool EmitSelect(FunctionCompiler& f, bool typed) {
3156   StackType type;
3157   MDefinition* trueValue;
3158   MDefinition* falseValue;
3159   MDefinition* condition;
3160   if (!f.iter().readSelect(typed, &type, &trueValue, &falseValue, &condition)) {
3161     return false;
3162   }
3163 
3164   f.iter().setResult(f.select(trueValue, falseValue, condition));
3165   return true;
3166 }
3167 
3168 static bool EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType) {
3169   LinearMemoryAddress<MDefinition*> addr;
3170   if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr)) {
3171     return false;
3172   }
3173 
3174   MemoryAccessDesc access(viewType, addr.align, addr.offset,
3175                           f.bytecodeIfNotAsmJS());
3176   auto* ins = f.load(addr.base, &access, type);
3177   if (!f.inDeadCode() && !ins) {
3178     return false;
3179   }
3180 
3181   f.iter().setResult(ins);
3182   return true;
3183 }
3184 
3185 static bool EmitStore(FunctionCompiler& f, ValType resultType,
3186                       Scalar::Type viewType) {
3187   LinearMemoryAddress<MDefinition*> addr;
3188   MDefinition* value;
3189   if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr,
3190                           &value)) {
3191     return false;
3192   }
3193 
3194   MemoryAccessDesc access(viewType, addr.align, addr.offset,
3195                           f.bytecodeIfNotAsmJS());
3196 
3197   f.store(addr.base, &access, value);
3198   return true;
3199 }
3200 
3201 static bool EmitTeeStore(FunctionCompiler& f, ValType resultType,
3202                          Scalar::Type viewType) {
3203   LinearMemoryAddress<MDefinition*> addr;
3204   MDefinition* value;
3205   if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
3206                              &value)) {
3207     return false;
3208   }
3209 
3210   MemoryAccessDesc access(viewType, addr.align, addr.offset,
3211                           f.bytecodeIfNotAsmJS());
3212 
3213   f.store(addr.base, &access, value);
3214   return true;
3215 }
3216 
3217 static bool EmitTeeStoreWithCoercion(FunctionCompiler& f, ValType resultType,
3218                                      Scalar::Type viewType) {
3219   LinearMemoryAddress<MDefinition*> addr;
3220   MDefinition* value;
3221   if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
3222                              &value)) {
3223     return false;
3224   }
3225 
3226   if (resultType == ValType::F32 && viewType == Scalar::Float64) {
3227     value = f.unary<MToDouble>(value);
3228   } else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
3229     value = f.unary<MToFloat32>(value);
3230   } else {
3231     MOZ_CRASH("unexpected coerced store");
3232   }
3233 
3234   MemoryAccessDesc access(viewType, addr.align, addr.offset,
3235                           f.bytecodeIfNotAsmJS());
3236 
3237   f.store(addr.base, &access, value);
3238   return true;
3239 }
3240 
3241 static bool TryInlineUnaryBuiltin(FunctionCompiler& f, SymbolicAddress callee,
3242                                   MDefinition* input) {
3243   if (!input) {
3244     return false;
3245   }
3246 
3247   MOZ_ASSERT(IsFloatingPointType(input->type()));
3248 
3249   RoundingMode mode;
3250   if (!IsRoundingFunction(callee, &mode)) {
3251     return false;
3252   }
3253 
3254   if (!MNearbyInt::HasAssemblerSupport(mode)) {
3255     return false;
3256   }
3257 
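  // At this point the whole builtin call collapses to a single MNearbyInt
  // node; e.g. a floor-style rounding builtin maps to RoundingMode::Down
  // (illustrative; the actual mapping comes from IsRoundingFunction above).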
3258   f.iter().setResult(f.nearbyInt(input, mode));
3259   return true;
3260 }
3261 
3262 static bool EmitUnaryMathBuiltinCall(FunctionCompiler& f,
3263                                      const SymbolicAddressSignature& callee) {
3264   MOZ_ASSERT(callee.numArgs == 1);
3265 
3266   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3267 
3268   MDefinition* input;
3269   if (!f.iter().readUnary(ValType(callee.argTypes[0]), &input)) {
3270     return false;
3271   }
3272 
3273   if (TryInlineUnaryBuiltin(f, callee.identity, input)) {
3274     return true;
3275   }
3276 
3277   CallCompileState call;
3278   if (!f.passArg(input, callee.argTypes[0], &call)) {
3279     return false;
3280   }
3281 
3282   if (!f.finishCall(&call)) {
3283     return false;
3284   }
3285 
3286   MDefinition* def;
3287   if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
3288     return false;
3289   }
3290 
3291   f.iter().setResult(def);
3292   return true;
3293 }
3294 
3295 static bool EmitBinaryMathBuiltinCall(FunctionCompiler& f,
3296                                       const SymbolicAddressSignature& callee) {
3297   MOZ_ASSERT(callee.numArgs == 2);
3298   MOZ_ASSERT(callee.argTypes[0] == callee.argTypes[1]);
3299 
3300   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3301 
3302   CallCompileState call;
3303   MDefinition* lhs;
3304   MDefinition* rhs;
3305   // This call to readBinary assumes both operands have the same type.
3306   if (!f.iter().readBinary(ValType(callee.argTypes[0]), &lhs, &rhs)) {
3307     return false;
3308   }
3309 
3310   if (!f.passArg(lhs, callee.argTypes[0], &call)) {
3311     return false;
3312   }
3313 
3314   if (!f.passArg(rhs, callee.argTypes[1], &call)) {
3315     return false;
3316   }
3317 
3318   if (!f.finishCall(&call)) {
3319     return false;
3320   }
3321 
3322   MDefinition* def;
3323   if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
3324     return false;
3325   }
3326 
3327   f.iter().setResult(def);
3328   return true;
3329 }
3330 
3331 static bool EmitMemoryGrow(FunctionCompiler& f) {
3332   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3333 
3334   const SymbolicAddressSignature& callee = SASigMemoryGrow;
3335   CallCompileState args;
3336   if (!f.passInstance(callee.argTypes[0], &args)) {
3337     return false;
3338   }
3339 
3340   MDefinition* delta;
3341   if (!f.iter().readMemoryGrow(&delta)) {
3342     return false;
3343   }
3344 
3345   if (!f.passArg(delta, callee.argTypes[1], &args)) {
3346     return false;
3347   }
3348 
3349   if (!f.finishCall(&args)) { return false; }
3350 
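  // Per the wasm spec, memory.grow evaluates to the previous memory size in
  // pages, or -1 on failure; the builtin's return value is used directly as
  // that result.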
3351   MDefinition* ret;
3352   if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
3353     return false;
3354   }
3355 
3356   f.iter().setResult(ret);
3357   return true;
3358 }
3359 
3360 static bool EmitMemorySize(FunctionCompiler& f) {
3361   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3362 
3363   const SymbolicAddressSignature& callee = SASigMemorySize;
3364   CallCompileState args;
3365 
3366   if (!f.iter().readMemorySize()) {
3367     return false;
3368   }
3369 
3370   if (!f.passInstance(callee.argTypes[0], &args)) {
3371     return false;
3372   }
3373 
3374   if (!f.finishCall(&args)) { return false; }
3375 
3376   MDefinition* ret;
3377   if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
3378     return false;
3379   }
3380 
3381   f.iter().setResult(ret);
3382   return true;
3383 }
3384 
3385 static bool EmitAtomicCmpXchg(FunctionCompiler& f, ValType type,
3386                               Scalar::Type viewType) {
3387   LinearMemoryAddress<MDefinition*> addr;
3388   MDefinition* oldValue;
3389   MDefinition* newValue;
3390   if (!f.iter().readAtomicCmpXchg(&addr, type, byteSize(viewType), &oldValue,
3391                                   &newValue)) {
3392     return false;
3393   }
3394 
3395   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
3396                           Synchronization::Full());
3397   auto* ins =
3398       f.atomicCompareExchangeHeap(addr.base, &access, type, oldValue, newValue);
3399   if (!f.inDeadCode() && !ins) {
3400     return false;
3401   }
3402 
3403   f.iter().setResult(ins);
3404   return true;
3405 }
3406 
3407 static bool EmitAtomicLoad(FunctionCompiler& f, ValType type,
3408                            Scalar::Type viewType) {
3409   LinearMemoryAddress<MDefinition*> addr;
3410   if (!f.iter().readAtomicLoad(&addr, type, byteSize(viewType))) {
3411     return false;
3412   }
3413 
3414   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
3415                           Synchronization::Load());
3416   auto* ins = f.load(addr.base, &access, type);
3417   if (!f.inDeadCode() && !ins) {
3418     return false;
3419   }
3420 
3421   f.iter().setResult(ins);
3422   return true;
3423 }
3424 
3425 static bool EmitAtomicRMW(FunctionCompiler& f, ValType type,
3426                           Scalar::Type viewType, jit::AtomicOp op) {
3427   LinearMemoryAddress<MDefinition*> addr;
3428   MDefinition* value;
3429   if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
3430     return false;
3431   }
3432 
3433   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
3434                           Synchronization::Full());
3435   auto* ins = f.atomicBinopHeap(op, addr.base, &access, type, value);
3436   if (!f.inDeadCode() && !ins) {
3437     return false;
3438   }
3439 
3440   f.iter().setResult(ins);
3441   return true;
3442 }
3443 
3444 static bool EmitAtomicStore(FunctionCompiler& f, ValType type,
3445                             Scalar::Type viewType) {
3446   LinearMemoryAddress<MDefinition*> addr;
3447   MDefinition* value;
3448   if (!f.iter().readAtomicStore(&addr, type, byteSize(viewType), &value)) {
3449     return false;
3450   }
3451 
3452   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
3453                           Synchronization::Store());
3454   f.store(addr.base, &access, value);
3455   return true;
3456 }
3457 
3458 static bool EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize) {
3459   MOZ_ASSERT(type == ValType::I32 || type == ValType::I64);
3460   MOZ_ASSERT(SizeOf(type) == byteSize);
3461 
3462   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3463 
3464   const SymbolicAddressSignature& callee =
3465       type == ValType::I32 ? SASigWaitI32 : SASigWaitI64;
3466   CallCompileState args;
3467   if (!f.passInstance(callee.argTypes[0], &args)) {
3468     return false;
3469   }
3470 
3471   LinearMemoryAddress<MDefinition*> addr;
3472   MDefinition* expected;
3473   MDefinition* timeout;
3474   if (!f.iter().readWait(&addr, type, byteSize, &expected, &timeout)) {
3475     return false;
3476   }
3477 
3478   MemoryAccessDesc access(type == ValType::I32 ? Scalar::Int32 : Scalar::Int64,
3479                           addr.align, addr.offset, f.bytecodeOffset());
3480   MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
3481   if (!f.inDeadCode() && !ptr) {
3482     return false;
3483   }
3484 
3485   if (!f.passArg(ptr, callee.argTypes[1], &args)) {
3486     return false;
3487   }
3488 
3489   MOZ_ASSERT(ToMIRType(type) == callee.argTypes[2]);
3490   if (!f.passArg(expected, callee.argTypes[2], &args)) {
3491     return false;
3492   }
3493 
3494   if (!f.passArg(timeout, callee.argTypes[3], &args)) {
3495     return false;
3496   }
3497 
3498   if (!f.finishCall(&args)) {
3499     return false;
3500   }
3501 
3502   MDefinition* ret;
3503   if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
3504     return false;
3505   }
3506 
3507   f.iter().setResult(ret);
3508   return true;
3509 }
3510 
3511 static bool EmitFence(FunctionCompiler& f) {
3512   if (!f.iter().readFence()) {
3513     return false;
3514   }
3515 
3516   f.fence();
3517   return true;
3518 }
3519 
3520 static bool EmitWake(FunctionCompiler& f) {
3521   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3522 
3523   const SymbolicAddressSignature& callee = SASigWake;
3524   CallCompileState args;
3525   if (!f.passInstance(callee.argTypes[0], &args)) {
3526     return false;
3527   }
3528 
3529   LinearMemoryAddress<MDefinition*> addr;
3530   MDefinition* count;
3531   if (!f.iter().readWake(&addr, &count)) {
3532     return false;
3533   }
3534 
3535   MemoryAccessDesc access(Scalar::Int32, addr.align, addr.offset,
3536                           f.bytecodeOffset());
3537   MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
3538   if (!f.inDeadCode() && !ptr) {
3539     return false;
3540   }
3541 
3542   if (!f.passArg(ptr, callee.argTypes[1], &args)) {
3543     return false;
3544   }
3545 
3546   if (!f.passArg(count, callee.argTypes[2], &args)) {
3547     return false;
3548   }
3549 
3550   if (!f.finishCall(&args)) {
3551     return false;
3552   }
3553 
3554   MDefinition* ret;
3555   if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
3556     return false;
3557   }
3558 
3559   f.iter().setResult(ret);
3560   return true;
3561 }
3562 
3563 static bool EmitAtomicXchg(FunctionCompiler& f, ValType type,
3564                            Scalar::Type viewType) {
3565   LinearMemoryAddress<MDefinition*> addr;
3566   MDefinition* value;
3567   if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
3568     return false;
3569   }
3570 
3571   MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
3572                           Synchronization::Full());
3573   MDefinition* ins = f.atomicExchangeHeap(addr.base, &access, type, value);
3574   if (!f.inDeadCode() && !ins) {
3575     return false;
3576   }
3577 
3578   f.iter().setResult(ins);
3579   return true;
3580 }
3581 
3582 static bool EmitMemCopyCall(FunctionCompiler& f, MDefinition* dst,
3583                             MDefinition* src, MDefinition* len) {
3584   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3585 
3586   const SymbolicAddressSignature& callee =
3587       (f.moduleEnv().usesSharedMemory() ? SASigMemCopyShared32
3588                                         : SASigMemCopy32);
3589   CallCompileState args;
3590   if (!f.passInstance(callee.argTypes[0], &args)) {
3591     return false;
3592   }
3593 
3594   if (!f.passArg(dst, callee.argTypes[1], &args)) {
3595     return false;
3596   }
3597   if (!f.passArg(src, callee.argTypes[2], &args)) {
3598     return false;
3599   }
3600   if (!f.passArg(len, callee.argTypes[3], &args)) {
3601     return false;
3602   }
3603   MDefinition* memoryBase = f.memoryBase();
3604   if (!f.passArg(memoryBase, callee.argTypes[4], &args)) {
3605     return false;
3606   }
3607   if (!f.finishCall(&args)) {
3608     return false;
3609   }
3610 
3611   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
3612 }
3613 
3614 static bool EmitMemCopyInline(FunctionCompiler& f, MDefinition* dst,
3615                               MDefinition* src, MDefinition* len) {
3616   MOZ_ASSERT(MaxInlineMemoryCopyLength != 0);
3617 
3618   MOZ_ASSERT(len->isConstant() && len->type() == MIRType::Int32);
3619   uint32_t length = len->toConstant()->toInt32();
3620   MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryCopyLength);
3621 
3622   // Compute the number of copies of each width we will need to do.
3623   size_t remainder = length;
3624 #ifdef JS_64BIT
3625   size_t numCopies8 = remainder / sizeof(uint64_t);
3626   remainder %= sizeof(uint64_t);
3627 #endif
3628   size_t numCopies4 = remainder / sizeof(uint32_t);
3629   remainder %= sizeof(uint32_t);
3630   size_t numCopies2 = remainder / sizeof(uint16_t);
3631   remainder %= sizeof(uint16_t);
3632   size_t numCopies1 = remainder;
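  // For example, length == 15 on a 64-bit target decomposes as 8 + 4 + 2 + 1:
  // one copy at each width (numCopies8 == numCopies4 == numCopies2 ==
  // numCopies1 == 1).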
3633 
3634   // Load all source bytes from low to high using the widest transfer width we
3635   // can for the system. We will trap without writing anything if any source
3636   // byte is out-of-bounds.
3637   size_t offset = 0;
3638   DefVector loadedValues;
3639 
3640 #ifdef JS_64BIT
3641   for (uint32_t i = 0; i < numCopies8; i++) {
3642     MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
3643     auto* load = f.load(src, &access, ValType::I64);
3644     if (!load || !loadedValues.append(load)) {
3645       return false;
3646     }
3647 
3648     offset += sizeof(uint64_t);
3649   }
3650 #endif
3651 
3652   for (uint32_t i = 0; i < numCopies4; i++) {
3653     MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
3654     auto* load = f.load(src, &access, ValType::I32);
3655     if (!load || !loadedValues.append(load)) {
3656       return false;
3657     }
3658 
3659     offset += sizeof(uint32_t);
3660   }
3661 
3662   if (numCopies2) {
3663     MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
3664     auto* load = f.load(src, &access, ValType::I32);
3665     if (!load || !loadedValues.append(load)) {
3666       return false;
3667     }
3668 
3669     offset += sizeof(uint16_t);
3670   }
3671 
3672   if (numCopies1) {
3673     MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
3674     auto* load = f.load(src, &access, ValType::I32);
3675     if (!load || !loadedValues.append(load)) {
3676       return false;
3677     }
3678   }
3679 
3680   // Store all source bytes to the destination from high to low. We will trap
3681   // without writing anything on the first store if any dest byte is
3682   // out-of-bounds.
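  // (The highest store covers the end of the destination region, so if
  // dst + length overruns memory it traps before any lower store runs.)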
3683   offset = length;
3684 
3685   if (numCopies1) {
3686     offset -= sizeof(uint8_t);
3687 
3688     MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
3689     auto* value = loadedValues.popCopy();
3690     f.store(dst, &access, value);
3691   }
3692 
3693   if (numCopies2) {
3694     offset -= sizeof(uint16_t);
3695 
3696     MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
3697     auto* value = loadedValues.popCopy();
3698     f.store(dst, &access, value);
3699   }
3700 
3701   for (uint32_t i = 0; i < numCopies4; i++) {
3702     offset -= sizeof(uint32_t);
3703 
3704     MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
3705     auto* value = loadedValues.popCopy();
3706     f.store(dst, &access, value);
3707   }
3708 
3709 #ifdef JS_64BIT
3710   for (uint32_t i = 0; i < numCopies8; i++) {
3711     offset -= sizeof(uint64_t);
3712 
3713     MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
3714     auto* value = loadedValues.popCopy();
3715     f.store(dst, &access, value);
3716   }
3717 #endif
3718 
3719   return true;
3720 }
3721 
3722 static bool EmitMemCopy(FunctionCompiler& f) {
3723   MDefinition *dst, *src, *len;
3724   uint32_t dstMemIndex;
3725   uint32_t srcMemIndex;
3726   if (!f.iter().readMemOrTableCopy(true, &dstMemIndex, &dst, &srcMemIndex, &src,
3727                                    &len)) {
3728     return false;
3729   }
3730 
3731   if (f.inDeadCode()) {
3732     return true;
3733   }
3734 
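  // Short constant-length copies are expanded inline below; non-constant or
  // longer lengths go through the out-of-line memcpy builtin instead.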
3735   if (MacroAssembler::SupportsFastUnalignedAccesses() && len->isConstant() &&
3736       len->type() == MIRType::Int32 && len->toConstant()->toInt32() != 0 &&
3737       uint32_t(len->toConstant()->toInt32()) <= MaxInlineMemoryCopyLength) {
3738     return EmitMemCopyInline(f, dst, src, len);
3739   }
3740   return EmitMemCopyCall(f, dst, src, len);
3741 }
3742 
3743 static bool EmitTableCopy(FunctionCompiler& f) {
3744   MDefinition *dst, *src, *len;
3745   uint32_t dstTableIndex;
3746   uint32_t srcTableIndex;
3747   if (!f.iter().readMemOrTableCopy(false, &dstTableIndex, &dst, &srcTableIndex,
3748                                    &src, &len)) {
3749     return false;
3750   }
3751 
3752   if (f.inDeadCode()) {
3753     return true;
3754   }
3755 
3756   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3757 
3758   const SymbolicAddressSignature& callee = SASigTableCopy;
3759   CallCompileState args;
3760   if (!f.passInstance(callee.argTypes[0], &args)) {
3761     return false;
3762   }
3763 
3764   if (!f.passArg(dst, callee.argTypes[1], &args)) {
3765     return false;
3766   }
3767   if (!f.passArg(src, callee.argTypes[2], &args)) {
3768     return false;
3769   }
3770   if (!f.passArg(len, callee.argTypes[3], &args)) {
3771     return false;
3772   }
3773   MDefinition* dti = f.constant(Int32Value(dstTableIndex), MIRType::Int32);
3774   if (!dti) {
3775     return false;
3776   }
3777   if (!f.passArg(dti, callee.argTypes[4], &args)) {
3778     return false;
3779   }
3780   MDefinition* sti = f.constant(Int32Value(srcTableIndex), MIRType::Int32);
3781   if (!sti) {
3782     return false;
3783   }
3784   if (!f.passArg(sti, callee.argTypes[5], &args)) {
3785     return false;
3786   }
3787   if (!f.finishCall(&args)) {
3788     return false;
3789   }
3790 
3791   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
3792 }
3793 
3794 static bool EmitDataOrElemDrop(FunctionCompiler& f, bool isData) {
3795   uint32_t segIndexVal = 0;
3796   if (!f.iter().readDataOrElemDrop(isData, &segIndexVal)) {
3797     return false;
3798   }
3799 
3800   if (f.inDeadCode()) {
3801     return true;
3802   }
3803 
3804   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3805 
3806   const SymbolicAddressSignature& callee =
3807       isData ? SASigDataDrop : SASigElemDrop;
3808   CallCompileState args;
3809   if (!f.passInstance(callee.argTypes[0], &args)) {
3810     return false;
3811   }
3812 
3813   MDefinition* segIndex =
3814       f.constant(Int32Value(int32_t(segIndexVal)), MIRType::Int32);
3815   if (!f.passArg(segIndex, callee.argTypes[1], &args)) {
3816     return false;
3817   }
3818 
3819   if (!f.finishCall(&args)) {
3820     return false;
3821   }
3822 
3823   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
3824 }
3825 
3826 static bool EmitMemFillCall(FunctionCompiler& f, MDefinition* start,
3827                             MDefinition* val, MDefinition* len) {
3828   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3829 
3830   const SymbolicAddressSignature& callee =
3831       f.moduleEnv().usesSharedMemory() ? SASigMemFillShared32 : SASigMemFill32;
3832   CallCompileState args;
3833   if (!f.passInstance(callee.argTypes[0], &args)) {
3834     return false;
3835   }
3836 
3837   if (!f.passArg(start, callee.argTypes[1], &args)) {
3838     return false;
3839   }
3840   if (!f.passArg(val, callee.argTypes[2], &args)) {
3841     return false;
3842   }
3843   if (!f.passArg(len, callee.argTypes[3], &args)) {
3844     return false;
3845   }
3846   MDefinition* memoryBase = f.memoryBase();
3847   if (!f.passArg(memoryBase, callee.argTypes[4], &args)) {
3848     return false;
3849   }
3850 
3851   if (!f.finishCall(&args)) {
3852     return false;
3853   }
3854 
3855   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
3856 }
3857 
3858 static bool EmitMemFillInline(FunctionCompiler& f, MDefinition* start,
3859                               MDefinition* val, MDefinition* len) {
3860   MOZ_ASSERT(MaxInlineMemoryFillLength != 0);
3861 
3862   MOZ_ASSERT(len->isConstant() && len->type() == MIRType::Int32 &&
3863              val->isConstant() && val->type() == MIRType::Int32);
3864 
3865   uint32_t length = len->toConstant()->toInt32();
3866   uint32_t value = val->toConstant()->toInt32();
3867   MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryFillLength);
3868 
3869   // Compute the number of copies of each width we will need to do.
3870   size_t remainder = length;
3871 #ifdef JS_64BIT
3872   size_t numCopies8 = remainder / sizeof(uint64_t);
3873   remainder %= sizeof(uint64_t);
3874 #endif
3875   size_t numCopies4 = remainder / sizeof(uint32_t);
3876   remainder %= sizeof(uint32_t);
3877   size_t numCopies2 = remainder / sizeof(uint16_t);
3878   remainder %= sizeof(uint16_t);
3879   size_t numCopies1 = remainder;
3880 
3881   // Generate splatted definitions for wider fills as needed.
3882 #ifdef JS_64BIT
3883   MDefinition* val8 =
3884       numCopies8 ? f.constant(int64_t(SplatByteToUInt<uint64_t>(value, 8)))
3885                  : nullptr;
3886 #endif
3887   MDefinition* val4 =
3888       numCopies4 ? f.constant(Int32Value(SplatByteToUInt<uint32_t>(value, 4)),
3889                               MIRType::Int32)
3890                  : nullptr;
3891   MDefinition* val2 =
3892       numCopies2 ? f.constant(Int32Value(SplatByteToUInt<uint32_t>(value, 2)),
3893                               MIRType::Int32)
3894                  : nullptr;
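  // E.g. splatting a fill byte of 0xAB is expected to yield the patterns
  // 0xABABABAB (4-byte) and 0xABAB (2-byte), so every wide store writes the
  // same repeated byte (assuming SplatByteToUInt repeats the low byte, as
  // its uses here imply).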
3895 
3896   // Store the fill value to the destination from high to low. We will trap
3897   // without writing anything on the first store if any dest byte is
3898   // out-of-bounds.
3899   size_t offset = length;
3900 
3901   if (numCopies1) {
3902     offset -= sizeof(uint8_t);
3903 
3904     MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
3905     f.store(start, &access, val);
3906   }
3907 
3908   if (numCopies2) {
3909     offset -= sizeof(uint16_t);
3910 
3911     MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
3912     f.store(start, &access, val2);
3913   }
3914 
3915   for (uint32_t i = 0; i < numCopies4; i++) {
3916     offset -= sizeof(uint32_t);
3917 
3918     MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
3919     f.store(start, &access, val4);
3920   }
3921 
3922 #ifdef JS_64BIT
3923   for (uint32_t i = 0; i < numCopies8; i++) {
3924     offset -= sizeof(uint64_t);
3925 
3926     MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
3927     f.store(start, &access, val8);
3928   }
3929 #endif
3930 
3931   return true;
3932 }
3933 
3934 static bool EmitMemFill(FunctionCompiler& f) {
3935   MDefinition *start, *val, *len;
3936   if (!f.iter().readMemFill(&start, &val, &len)) {
3937     return false;
3938   }
3939 
3940   if (f.inDeadCode()) {
3941     return true;
3942   }
3943 
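  // As with memory.copy, only short constant-length fills with a constant
  // value are expanded inline; everything else calls the memfill builtin.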
3944   if (MacroAssembler::SupportsFastUnalignedAccesses() && len->isConstant() &&
3945       len->type() == MIRType::Int32 && len->toConstant()->toInt32() != 0 &&
3946       uint32_t(len->toConstant()->toInt32()) <= MaxInlineMemoryFillLength &&
3947       val->isConstant() && val->type() == MIRType::Int32) {
3948     return EmitMemFillInline(f, start, val, len);
3949   }
3950   return EmitMemFillCall(f, start, val, len);
3951 }
3952 
3953 static bool EmitMemOrTableInit(FunctionCompiler& f, bool isMem) {
3954   uint32_t segIndexVal = 0, dstTableIndex = 0;
3955   MDefinition *dstOff, *srcOff, *len;
3956   if (!f.iter().readMemOrTableInit(isMem, &segIndexVal, &dstTableIndex, &dstOff,
3957                                    &srcOff, &len)) {
3958     return false;
3959   }
3960 
3961   if (f.inDeadCode()) {
3962     return true;
3963   }
3964 
3965   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
3966 
3967   const SymbolicAddressSignature& callee =
3968       isMem ? SASigMemInit32 : SASigTableInit;
3969   CallCompileState args;
3970   if (!f.passInstance(callee.argTypes[0], &args)) {
3971     return false;
3972   }
3973 
3974   if (!f.passArg(dstOff, callee.argTypes[1], &args)) {
3975     return false;
3976   }
3977   if (!f.passArg(srcOff, callee.argTypes[2], &args)) {
3978     return false;
3979   }
3980   if (!f.passArg(len, callee.argTypes[3], &args)) {
3981     return false;
3982   }
3983 
3984   MDefinition* segIndex =
3985       f.constant(Int32Value(int32_t(segIndexVal)), MIRType::Int32);
3986   if (!f.passArg(segIndex, callee.argTypes[4], &args)) {
3987     return false;
3988   }
3989   if (!isMem) {
3990     MDefinition* dti = f.constant(Int32Value(dstTableIndex), MIRType::Int32);
3991     if (!dti) {
3992       return false;
3993     }
3994     if (!f.passArg(dti, callee.argTypes[5], &args)) {
3995       return false;
3996     }
3997   }
3998   if (!f.finishCall(&args)) {
3999     return false;
4000   }
4001 
4002   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
4003 }
4004 
4005 // Note, table.{get,grow,set} on table(funcref) are currently rejected by the
4006 // verifier.
4007 
4008 static bool EmitTableFill(FunctionCompiler& f) {
4009   uint32_t tableIndex;
4010   MDefinition *start, *val, *len;
4011   if (!f.iter().readTableFill(&tableIndex, &start, &val, &len)) {
4012     return false;
4013   }
4014 
4015   if (f.inDeadCode()) {
4016     return true;
4017   }
4018 
4019   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
4020 
4021   const SymbolicAddressSignature& callee = SASigTableFill;
4022   CallCompileState args;
4023   if (!f.passInstance(callee.argTypes[0], &args)) {
4024     return false;
4025   }
4026 
4027   if (!f.passArg(start, callee.argTypes[1], &args)) {
4028     return false;
4029   }
4030   if (!f.passArg(val, callee.argTypes[2], &args)) {
4031     return false;
4032   }
4033   if (!f.passArg(len, callee.argTypes[3], &args)) {
4034     return false;
4035   }
4036 
4037   MDefinition* tableIndexArg =
4038       f.constant(Int32Value(tableIndex), MIRType::Int32);
4039   if (!tableIndexArg) {
4040     return false;
4041   }
4042   if (!f.passArg(tableIndexArg, callee.argTypes[4], &args)) {
4043     return false;
4044   }
4045 
4046   if (!f.finishCall(&args)) {
4047     return false;
4048   }
4049 
4050   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
4051 }
4052 
4053 static bool EmitTableGet(FunctionCompiler& f) {
4054   uint32_t tableIndex;
4055   MDefinition* index;
4056   if (!f.iter().readTableGet(&tableIndex, &index)) {
4057     return false;
4058   }
4059 
4060   if (f.inDeadCode()) {
4061     return true;
4062   }
4063 
4064   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
4065 
4066   const SymbolicAddressSignature& callee = SASigTableGet;
4067   CallCompileState args;
4068   if (!f.passInstance(callee.argTypes[0], &args)) {
4069     return false;
4070   }
4071 
4072   if (!f.passArg(index, callee.argTypes[1], &args)) {
4073     return false;
4074   }
4075 
4076   MDefinition* tableIndexArg =
4077       f.constant(Int32Value(tableIndex), MIRType::Int32);
4078   if (!tableIndexArg) {
4079     return false;
4080   }
4081   if (!f.passArg(tableIndexArg, callee.argTypes[2], &args)) {
4082     return false;
4083   }
4084 
4085   if (!f.finishCall(&args)) {
4086     return false;
4087   }
4088 
4089   // The return value here is either null, denoting an error, or a short-lived
4090   // pointer to a location containing a possibly-null ref.
4091   MDefinition* ret;
4092   if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
4093     return false;
4094   }
4095 
4096   f.iter().setResult(ret);
4097   return true;
4098 }
4099 
4100 static bool EmitTableGrow(FunctionCompiler& f) {
4101   uint32_t tableIndex;
4102   MDefinition* initValue;
4103   MDefinition* delta;
4104   if (!f.iter().readTableGrow(&tableIndex, &initValue, &delta)) {
4105     return false;
4106   }
4107 
4108   if (f.inDeadCode()) {
4109     return true;
4110   }
4111 
4112   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
4113 
4114   const SymbolicAddressSignature& callee = SASigTableGrow;
4115   CallCompileState args;
4116   if (!f.passInstance(callee.argTypes[0], &args)) {
4117     return false;
4118   }
4119 
4120   if (!f.passArg(initValue, callee.argTypes[1], &args)) {
4121     return false;
4122   }
4123 
4124   if (!f.passArg(delta, callee.argTypes[2], &args)) {
4125     return false;
4126   }
4127 
4128   MDefinition* tableIndexArg =
4129       f.constant(Int32Value(tableIndex), MIRType::Int32);
4130   if (!tableIndexArg) {
4131     return false;
4132   }
4133   if (!f.passArg(tableIndexArg, callee.argTypes[3], &args)) {
4134     return false;
4135   }
4136 
4137   if (!f.finishCall(&args)) {
4138     return false;
4139   }
4140 
4141   MDefinition* ret;
4142   if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
4143     return false;
4144   }
4145 
4146   f.iter().setResult(ret);
4147   return true;
4148 }
4149 
4150 static bool EmitTableSet(FunctionCompiler& f) {
4151   uint32_t tableIndex;
4152   MDefinition* index;
4153   MDefinition* value;
4154   if (!f.iter().readTableSet(&tableIndex, &index, &value)) {
4155     return false;
4156   }
4157 
4158   if (f.inDeadCode()) {
4159     return true;
4160   }
4161 
4162   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
4163 
4164   const SymbolicAddressSignature& callee = SASigTableSet;
4165   CallCompileState args;
4166   if (!f.passInstance(callee.argTypes[0], &args)) {
4167     return false;
4168   }
4169 
4170   if (!f.passArg(index, callee.argTypes[1], &args)) {
4171     return false;
4172   }
4173 
4174   if (!f.passArg(value, callee.argTypes[2], &args)) {
4175     return false;
4176   }
4177 
4178   MDefinition* tableIndexArg =
4179       f.constant(Int32Value(tableIndex), MIRType::Int32);
4180   if (!tableIndexArg) {
4181     return false;
4182   }
4183   if (!f.passArg(tableIndexArg, callee.argTypes[3], &args)) {
4184     return false;
4185   }
4186 
4187   if (!f.finishCall(&args)) {
4188     return false;
4189   }
4190 
4191   return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
4192 }
4193 
4194 static bool EmitTableSize(FunctionCompiler& f) {
4195   uint32_t tableIndex;
4196   if (!f.iter().readTableSize(&tableIndex)) {
4197     return false;
4198   }
4199 
4200   if (f.inDeadCode()) {
4201     return true;
4202   }
4203 
4204   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
4205 
4206   const SymbolicAddressSignature& callee = SASigTableSize;
4207   CallCompileState args;
4208   if (!f.passInstance(callee.argTypes[0], &args)) {
4209     return false;
4210   }
4211 
4212   MDefinition* tableIndexArg =
4213       f.constant(Int32Value(tableIndex), MIRType::Int32);
4214   if (!tableIndexArg) {
4215     return false;
4216   }
4217   if (!f.passArg(tableIndexArg, callee.argTypes[1], &args)) {
4218     return false;
4219   }
4220 
4221   if (!f.finishCall(&args)) {
4222     return false;
4223   }
4224 
4225   MDefinition* ret;
4226   if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
4227     return false;
4228   }
4229 
4230   f.iter().setResult(ret);
4231   return true;
4232 }
4233 
4234 static bool EmitRefFunc(FunctionCompiler& f) {
4235   uint32_t funcIndex;
4236   if (!f.iter().readRefFunc(&funcIndex)) {
4237     return false;
4238   }
4239 
4240   if (f.inDeadCode()) {
4241     return true;
4242   }
4243 
4244   uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
4245 
4246   const SymbolicAddressSignature& callee = SASigRefFunc;
4247   CallCompileState args;
4248   if (!f.passInstance(callee.argTypes[0], &args)) {
4249     return false;
4250   }
4251 
4252   MDefinition* funcIndexArg = f.constant(Int32Value(funcIndex), MIRType::Int32);
4253   if (!funcIndexArg) {
4254     return false;
4255   }
4256   if (!f.passArg(funcIndexArg, callee.argTypes[1], &args)) {
4257     return false;
4258   }
4259 
4260   if (!f.finishCall(&args)) {
4261     return false;
4262   }
4263 
4264   // The return value here is either null, denoting an error, or a short-lived
4265   // pointer to a location containing a possibly-null ref.
4266   MDefinition* ret;
4267   if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
4268     return false;
4269   }
4270 
4271   f.iter().setResult(ret);
4272   return true;
4273 }
4274 
4275 static bool EmitRefNull(FunctionCompiler& f) {
4276   RefType type;
4277   if (!f.iter().readRefNull(&type)) {
4278     return false;
4279   }
4280 
4281   if (f.inDeadCode()) {
4282     return true;
4283   }
4284 
4285   MDefinition* nullVal = f.nullRefConstant();
4286   if (!nullVal) {
4287     return false;
4288   }
4289   f.iter().setResult(nullVal);
4290   return true;
4291 }
4292 
4293 static bool EmitRefIsNull(FunctionCompiler& f) {
4294   MDefinition* input;
4295   if (!f.iter().readRefIsNull(&input)) {
4296     return false;
4297   }
4298 
4299   if (f.inDeadCode()) {
4300     return true;
4301   }
4302 
4303   MDefinition* nullVal = f.nullRefConstant();
4304   if (!nullVal) {
4305     return false;
4306   }
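  // ref.is_null lowers to a simple pointer-equality compare against the
  // canonical null reference value.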
4307   f.iter().setResult(
4308       f.compare(input, nullVal, JSOp::Eq, MCompare::Compare_RefOrNull));
4309   return true;
4310 }
4311 
4312 #ifdef ENABLE_WASM_SIMD
4313 static bool EmitConstSimd128(FunctionCompiler& f) {
4314   V128 v128;
4315   if (!f.iter().readV128Const(&v128)) {
4316     return false;
4317   }
4318 
4319   f.iter().setResult(f.constant(v128));
4320   return true;
4321 }
4322 
4323 static bool EmitBinarySimd128(FunctionCompiler& f, bool commutative,
4324                               SimdOp op) {
4325   MDefinition* lhs;
4326   MDefinition* rhs;
4327   if (!f.iter().readBinary(ValType::V128, &lhs, &rhs)) {
4328     return false;
4329   }
4330 
4331   f.iter().setResult(f.binarySimd128(lhs, rhs, commutative, op));
4332   return true;
4333 }
4334 
4335 static bool EmitShiftSimd128(FunctionCompiler& f, SimdOp op) {
4336   MDefinition* lhs;
4337   MDefinition* rhs;
4338   if (!f.iter().readVectorShift(&lhs, &rhs)) {
4339     return false;
4340   }
4341 
4342   f.iter().setResult(f.shiftSimd128(lhs, rhs, op));
4343   return true;
4344 }
4345 
4346 static bool EmitSplatSimd128(FunctionCompiler& f, ValType inType, SimdOp op) {
4347   MDefinition* src;
4348   if (!f.iter().readConversion(inType, ValType::V128, &src)) {
4349     return false;
4350   }
4351 
4352   f.iter().setResult(f.scalarToSimd128(src, op));
4353   return true;
4354 }
4355 
4356 static bool EmitUnarySimd128(FunctionCompiler& f, SimdOp op) {
4357   MDefinition* src;
4358   if (!f.iter().readUnary(ValType::V128, &src)) {
4359     return false;
4360   }
4361 
4362   f.iter().setResult(f.unarySimd128(src, op));
4363   return true;
4364 }
4365 
4366 static bool EmitReduceSimd128(FunctionCompiler& f, SimdOp op) {
4367   MDefinition* src;
4368   if (!f.iter().readConversion(ValType::V128, ValType::I32, &src)) {
4369     return false;
4370   }
4371 
4372   f.iter().setResult(f.reduceSimd128(src, op, ValType::I32));
4373   return true;
4374 }
4375 
4376 static bool EmitExtractLaneSimd128(FunctionCompiler& f, ValType outType,
4377                                    uint32_t laneLimit, SimdOp op) {
4378   uint32_t laneIndex;
4379   MDefinition* src;
4380   if (!f.iter().readExtractLane(outType, laneLimit, &laneIndex, &src)) {
4381     return false;
4382   }
4383 
4384   f.iter().setResult(f.reduceSimd128(src, op, outType, laneIndex));
4385   return true;
4386 }
4387 
4388 static bool EmitReplaceLaneSimd128(FunctionCompiler& f, ValType laneType,
4389                                    uint32_t laneLimit, SimdOp op) {
4390   uint32_t laneIndex;
4391   MDefinition* lhs;
4392   MDefinition* rhs;
4393   if (!f.iter().readReplaceLane(laneType, laneLimit, &laneIndex, &lhs, &rhs)) {
4394     return false;
4395   }
4396 
4397   f.iter().setResult(f.replaceLaneSimd128(lhs, rhs, laneIndex, op));
4398   return true;
4399 }
4400 
4401 static bool EmitBitselectSimd128(FunctionCompiler& f) {
4402   MDefinition* v1;
4403   MDefinition* v2;
4404   MDefinition* control;
4405   if (!f.iter().readVectorSelect(&v1, &v2, &control)) {
4406     return false;
4407   }
4408 
4409   f.iter().setResult(f.bitselectSimd128(v1, v2, control));
4410   return true;
4411 }
4412 
4413 static bool EmitShuffleSimd128(FunctionCompiler& f) {
4414   MDefinition* v1;
4415   MDefinition* v2;
4416   V128 control;
4417   if (!f.iter().readVectorShuffle(&v1, &v2, &control)) {
4418     return false;
4419   }
4420 
4421 #  ifdef ENABLE_WASM_SIMD_WORMHOLE
4422   if (f.moduleEnv().simdWormholeEnabled() && IsWormholeTrigger(control)) {
4423     switch (control.bytes[15]) {
4424       case 0:
4425         f.iter().setResult(
4426             f.binarySimd128(v1, v2, false, wasm::SimdOp::MozWHSELFTEST));
4427         return true;
4428 #    if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
4429       case 1:
4430         f.iter().setResult(
4431             f.binarySimd128(v1, v2, false, wasm::SimdOp::MozWHPMADDUBSW));
4432         return true;
4433       case 2:
4434         f.iter().setResult(
4435             f.binarySimd128(v1, v2, false, wasm::SimdOp::MozWHPMADDWD));
4436         return true;
4437 #    endif
4438       default:
4439         return f.iter().fail("Unrecognized wormhole opcode");
4440     }
4441   }
4442 #  endif
4443 
4444   f.iter().setResult(f.shuffleSimd128(v1, v2, control));
4445   return true;
4446 }
4447 
4448 static bool EmitLoadSplatSimd128(FunctionCompiler& f, Scalar::Type viewType,
4449                                  wasm::SimdOp splatOp) {
4450   LinearMemoryAddress<MDefinition*> addr;
4451   if (!f.iter().readLoadSplat(Scalar::byteSize(viewType), &addr)) {
4452     return false;
4453   }
4454 
4455   f.iter().setResult(f.loadSplatSimd128(viewType, addr, splatOp));
4456   return true;
4457 }
4458 
4459 static bool EmitLoadExtendSimd128(FunctionCompiler& f, wasm::SimdOp op) {
4460   LinearMemoryAddress<MDefinition*> addr;
4461   if (!f.iter().readLoadExtend(&addr)) {
4462     return false;
4463   }
4464 
4465   f.iter().setResult(f.loadExtendSimd128(addr, op));
4466   return true;
4467 }
4468 
4469 static bool EmitLoadZeroSimd128(FunctionCompiler& f, Scalar::Type viewType,
4470                                 size_t numBytes) {
4471   LinearMemoryAddress<MDefinition*> addr;
4472   if (!f.iter().readLoadSplat(numBytes, &addr)) {
4473     return false;
4474   }
4475 
4476   f.iter().setResult(f.loadZeroSimd128(viewType, numBytes, addr));
4477   return true;
4478 }
4479 
4480 static bool EmitLoadLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
4481   uint32_t laneIndex;
4482   MDefinition* src;
4483   LinearMemoryAddress<MDefinition*> addr;
4484   if (!f.iter().readLoadLane(laneSize, &addr, &laneIndex, &src)) {
4485     return false;
4486   }
4487 
4488   f.iter().setResult(f.loadLaneSimd128(laneSize, addr, laneIndex, src));
4489   return true;
4490 }
4491 
4492 static bool EmitStoreLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
4493   uint32_t laneIndex;
4494   MDefinition* src;
4495   LinearMemoryAddress<MDefinition*> addr;
4496   if (!f.iter().readStoreLane(laneSize, &addr, &laneIndex, &src)) {
4497     return false;
4498   }
4499 
4500   f.storeLaneSimd128(laneSize, addr, laneIndex, src);
4501   return true;
4502 }
4503 #endif
4504 
4505 static bool EmitBodyExprs(FunctionCompiler& f) {
4506   if (!f.iter().startFunction(f.funcIndex())) {
4507     return false;
4508   }
4509 
4510 #define CHECK(c)          \
4511   if (!(c)) return false; \
4512   break
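// For example, `CHECK(EmitBlock(f));` expands to
// `if (!EmitBlock(f)) return false; break;`: failure propagates out of the
// decode loop, success breaks to fetch the next opcode.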
4513 
4514   while (true) {
4515     if (!f.mirGen().ensureBallast()) {
4516       return false;
4517     }
4518 
4519     OpBytes op;
4520     if (!f.iter().readOp(&op)) {
4521       return false;
4522     }
4523 
4524     switch (op.b0) {
4525       case uint16_t(Op::End):
4526         if (!EmitEnd(f)) {
4527           return false;
4528         }
4529         if (f.iter().controlStackEmpty()) {
4530           return true;
4531         }
4532         break;
4533 
4534       // Control opcodes
4535       case uint16_t(Op::Unreachable):
4536         CHECK(EmitUnreachable(f));
4537       case uint16_t(Op::Nop):
4538         CHECK(f.iter().readNop());
4539       case uint16_t(Op::Block):
4540         CHECK(EmitBlock(f));
4541       case uint16_t(Op::Loop):
4542         CHECK(EmitLoop(f));
4543       case uint16_t(Op::If):
4544         CHECK(EmitIf(f));
4545       case uint16_t(Op::Else):
4546         CHECK(EmitElse(f));
4547 #ifdef ENABLE_WASM_EXCEPTIONS
4548       case uint16_t(Op::Try):
4549         if (!f.moduleEnv().exceptionsEnabled()) {
4550           return f.iter().unrecognizedOpcode(&op);
4551         }
4552         CHECK(EmitTry(f));
4553       case uint16_t(Op::Catch):
4554         if (!f.moduleEnv().exceptionsEnabled()) {
4555           return f.iter().unrecognizedOpcode(&op);
4556         }
4557         CHECK(EmitCatch(f));
4558       case uint16_t(Op::CatchAll):
4559         if (!f.moduleEnv().exceptionsEnabled()) {
4560           return f.iter().unrecognizedOpcode(&op);
4561         }
4562         CHECK(EmitCatchAll(f));
4563       case uint16_t(Op::Delegate):
4564         if (!f.moduleEnv().exceptionsEnabled()) {
4565           return f.iter().unrecognizedOpcode(&op);
4566         }
4567         if (!EmitDelegate(f)) {
4568           return false;
4569         }
4570         break;
4571       case uint16_t(Op::Throw):
4572         if (!f.moduleEnv().exceptionsEnabled()) {
4573           return f.iter().unrecognizedOpcode(&op);
4574         }
4575         CHECK(EmitThrow(f));
4576       case uint16_t(Op::Rethrow):
4577         if (!f.moduleEnv().exceptionsEnabled()) {
4578           return f.iter().unrecognizedOpcode(&op);
4579         }
4580         CHECK(EmitRethrow(f));
4581 #endif
4582       case uint16_t(Op::Br):
4583         CHECK(EmitBr(f));
4584       case uint16_t(Op::BrIf):
4585         CHECK(EmitBrIf(f));
4586       case uint16_t(Op::BrTable):
4587         CHECK(EmitBrTable(f));
4588       case uint16_t(Op::Return):
4589         CHECK(EmitReturn(f));
4590 
4591       // Calls
4592       case uint16_t(Op::Call):
4593         CHECK(EmitCall(f, /* asmJSFuncDef = */ false));
4594       case uint16_t(Op::CallIndirect):
4595         CHECK(EmitCallIndirect(f, /* oldStyle = */ false));
4596 
4597       // Parametric operators
4598       case uint16_t(Op::Drop):
4599         CHECK(f.iter().readDrop());
4600       case uint16_t(Op::SelectNumeric):
4601         CHECK(EmitSelect(f, /*typed*/ false));
4602       case uint16_t(Op::SelectTyped):
4603         CHECK(EmitSelect(f, /*typed*/ true));
4604 
4605       // Locals and globals
4606       case uint16_t(Op::GetLocal):
4607         CHECK(EmitGetLocal(f));
4608       case uint16_t(Op::SetLocal):
4609         CHECK(EmitSetLocal(f));
4610       case uint16_t(Op::TeeLocal):
4611         CHECK(EmitTeeLocal(f));
4612       case uint16_t(Op::GetGlobal):
4613         CHECK(EmitGetGlobal(f));
4614       case uint16_t(Op::SetGlobal):
4615         CHECK(EmitSetGlobal(f));
4616       case uint16_t(Op::TableGet):
4617         CHECK(EmitTableGet(f));
4618       case uint16_t(Op::TableSet):
4619         CHECK(EmitTableSet(f));
4620 
4621       // Memory-related operators
4622       case uint16_t(Op::I32Load):
4623         CHECK(EmitLoad(f, ValType::I32, Scalar::Int32));
4624       case uint16_t(Op::I64Load):
4625         CHECK(EmitLoad(f, ValType::I64, Scalar::Int64));
4626       case uint16_t(Op::F32Load):
4627         CHECK(EmitLoad(f, ValType::F32, Scalar::Float32));
4628       case uint16_t(Op::F64Load):
4629         CHECK(EmitLoad(f, ValType::F64, Scalar::Float64));
4630       case uint16_t(Op::I32Load8S):
4631         CHECK(EmitLoad(f, ValType::I32, Scalar::Int8));
4632       case uint16_t(Op::I32Load8U):
4633         CHECK(EmitLoad(f, ValType::I32, Scalar::Uint8));
4634       case uint16_t(Op::I32Load16S):
4635         CHECK(EmitLoad(f, ValType::I32, Scalar::Int16));
4636       case uint16_t(Op::I32Load16U):
4637         CHECK(EmitLoad(f, ValType::I32, Scalar::Uint16));
4638       case uint16_t(Op::I64Load8S):
4639         CHECK(EmitLoad(f, ValType::I64, Scalar::Int8));
4640       case uint16_t(Op::I64Load8U):
4641         CHECK(EmitLoad(f, ValType::I64, Scalar::Uint8));
4642       case uint16_t(Op::I64Load16S):
4643         CHECK(EmitLoad(f, ValType::I64, Scalar::Int16));
4644       case uint16_t(Op::I64Load16U):
4645         CHECK(EmitLoad(f, ValType::I64, Scalar::Uint16));
4646       case uint16_t(Op::I64Load32S):
4647         CHECK(EmitLoad(f, ValType::I64, Scalar::Int32));
4648       case uint16_t(Op::I64Load32U):
4649         CHECK(EmitLoad(f, ValType::I64, Scalar::Uint32));
4650       case uint16_t(Op::I32Store):
4651         CHECK(EmitStore(f, ValType::I32, Scalar::Int32));
4652       case uint16_t(Op::I64Store):
4653         CHECK(EmitStore(f, ValType::I64, Scalar::Int64));
4654       case uint16_t(Op::F32Store):
4655         CHECK(EmitStore(f, ValType::F32, Scalar::Float32));
4656       case uint16_t(Op::F64Store):
4657         CHECK(EmitStore(f, ValType::F64, Scalar::Float64));
4658       case uint16_t(Op::I32Store8):
4659         CHECK(EmitStore(f, ValType::I32, Scalar::Int8));
4660       case uint16_t(Op::I32Store16):
4661         CHECK(EmitStore(f, ValType::I32, Scalar::Int16));
4662       case uint16_t(Op::I64Store8):
4663         CHECK(EmitStore(f, ValType::I64, Scalar::Int8));
4664       case uint16_t(Op::I64Store16):
4665         CHECK(EmitStore(f, ValType::I64, Scalar::Int16));
4666       case uint16_t(Op::I64Store32):
4667         CHECK(EmitStore(f, ValType::I64, Scalar::Int32));
4668       case uint16_t(Op::MemorySize):
4669         CHECK(EmitMemorySize(f));
4670       case uint16_t(Op::MemoryGrow):
4671         CHECK(EmitMemoryGrow(f));
4672 
4673       // Constants
4674       case uint16_t(Op::I32Const):
4675         CHECK(EmitI32Const(f));
4676       case uint16_t(Op::I64Const):
4677         CHECK(EmitI64Const(f));
4678       case uint16_t(Op::F32Const):
4679         CHECK(EmitF32Const(f));
4680       case uint16_t(Op::F64Const):
4681         CHECK(EmitF64Const(f));
4682 
4683       // Comparison operators
4684       case uint16_t(Op::I32Eqz):
4685         CHECK(EmitConversion<MNot>(f, ValType::I32, ValType::I32));
4686       case uint16_t(Op::I32Eq):
4687         CHECK(
4688             EmitComparison(f, ValType::I32, JSOp::Eq, MCompare::Compare_Int32));
4689       case uint16_t(Op::I32Ne):
4690         CHECK(
4691             EmitComparison(f, ValType::I32, JSOp::Ne, MCompare::Compare_Int32));
4692       case uint16_t(Op::I32LtS):
4693         CHECK(
4694             EmitComparison(f, ValType::I32, JSOp::Lt, MCompare::Compare_Int32));
4695       case uint16_t(Op::I32LtU):
4696         CHECK(EmitComparison(f, ValType::I32, JSOp::Lt,
4697                              MCompare::Compare_UInt32));
4698       case uint16_t(Op::I32GtS):
4699         CHECK(
4700             EmitComparison(f, ValType::I32, JSOp::Gt, MCompare::Compare_Int32));
4701       case uint16_t(Op::I32GtU):
4702         CHECK(EmitComparison(f, ValType::I32, JSOp::Gt,
4703                              MCompare::Compare_UInt32));
4704       case uint16_t(Op::I32LeS):
4705         CHECK(
4706             EmitComparison(f, ValType::I32, JSOp::Le, MCompare::Compare_Int32));
4707       case uint16_t(Op::I32LeU):
4708         CHECK(EmitComparison(f, ValType::I32, JSOp::Le,
4709                              MCompare::Compare_UInt32));
4710       case uint16_t(Op::I32GeS):
4711         CHECK(
4712             EmitComparison(f, ValType::I32, JSOp::Ge, MCompare::Compare_Int32));
4713       case uint16_t(Op::I32GeU):
4714         CHECK(EmitComparison(f, ValType::I32, JSOp::Ge,
4715                              MCompare::Compare_UInt32));
4716       case uint16_t(Op::I64Eqz):
4717         CHECK(EmitConversion<MNot>(f, ValType::I64, ValType::I32));
4718       case uint16_t(Op::I64Eq):
4719         CHECK(
4720             EmitComparison(f, ValType::I64, JSOp::Eq, MCompare::Compare_Int64));
4721       case uint16_t(Op::I64Ne):
4722         CHECK(
4723             EmitComparison(f, ValType::I64, JSOp::Ne, MCompare::Compare_Int64));
4724       case uint16_t(Op::I64LtS):
4725         CHECK(
4726             EmitComparison(f, ValType::I64, JSOp::Lt, MCompare::Compare_Int64));
4727       case uint16_t(Op::I64LtU):
4728         CHECK(EmitComparison(f, ValType::I64, JSOp::Lt,
4729                              MCompare::Compare_UInt64));
4730       case uint16_t(Op::I64GtS):
4731         CHECK(
4732             EmitComparison(f, ValType::I64, JSOp::Gt, MCompare::Compare_Int64));
4733       case uint16_t(Op::I64GtU):
4734         CHECK(EmitComparison(f, ValType::I64, JSOp::Gt,
4735                              MCompare::Compare_UInt64));
4736       case uint16_t(Op::I64LeS):
4737         CHECK(
4738             EmitComparison(f, ValType::I64, JSOp::Le, MCompare::Compare_Int64));
4739       case uint16_t(Op::I64LeU):
4740         CHECK(EmitComparison(f, ValType::I64, JSOp::Le,
4741                              MCompare::Compare_UInt64));
4742       case uint16_t(Op::I64GeS):
4743         CHECK(
4744             EmitComparison(f, ValType::I64, JSOp::Ge, MCompare::Compare_Int64));
4745       case uint16_t(Op::I64GeU):
4746         CHECK(EmitComparison(f, ValType::I64, JSOp::Ge,
4747                              MCompare::Compare_UInt64));
4748       case uint16_t(Op::F32Eq):
4749         CHECK(EmitComparison(f, ValType::F32, JSOp::Eq,
4750                              MCompare::Compare_Float32));
4751       case uint16_t(Op::F32Ne):
4752         CHECK(EmitComparison(f, ValType::F32, JSOp::Ne,
4753                              MCompare::Compare_Float32));
4754       case uint16_t(Op::F32Lt):
4755         CHECK(EmitComparison(f, ValType::F32, JSOp::Lt,
4756                              MCompare::Compare_Float32));
4757       case uint16_t(Op::F32Gt):
4758         CHECK(EmitComparison(f, ValType::F32, JSOp::Gt,
4759                              MCompare::Compare_Float32));
4760       case uint16_t(Op::F32Le):
4761         CHECK(EmitComparison(f, ValType::F32, JSOp::Le,
4762                              MCompare::Compare_Float32));
4763       case uint16_t(Op::F32Ge):
4764         CHECK(EmitComparison(f, ValType::F32, JSOp::Ge,
4765                              MCompare::Compare_Float32));
4766       case uint16_t(Op::F64Eq):
4767         CHECK(EmitComparison(f, ValType::F64, JSOp::Eq,
4768                              MCompare::Compare_Double));
4769       case uint16_t(Op::F64Ne):
4770         CHECK(EmitComparison(f, ValType::F64, JSOp::Ne,
4771                              MCompare::Compare_Double));
4772       case uint16_t(Op::F64Lt):
4773         CHECK(EmitComparison(f, ValType::F64, JSOp::Lt,
4774                              MCompare::Compare_Double));
4775       case uint16_t(Op::F64Gt):
4776         CHECK(EmitComparison(f, ValType::F64, JSOp::Gt,
4777                              MCompare::Compare_Double));
4778       case uint16_t(Op::F64Le):
4779         CHECK(EmitComparison(f, ValType::F64, JSOp::Le,
4780                              MCompare::Compare_Double));
4781       case uint16_t(Op::F64Ge):
4782         CHECK(EmitComparison(f, ValType::F64, JSOp::Ge,
4783                              MCompare::Compare_Double));
4784 
4785       // Numeric operators
4786       case uint16_t(Op::I32Clz):
4787         CHECK(EmitUnaryWithType<MClz>(f, ValType::I32, MIRType::Int32));
4788       case uint16_t(Op::I32Ctz):
4789         CHECK(EmitUnaryWithType<MCtz>(f, ValType::I32, MIRType::Int32));
4790       case uint16_t(Op::I32Popcnt):
4791         CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I32, MIRType::Int32));
4792       case uint16_t(Op::I32Add):
4793         CHECK(EmitAdd(f, ValType::I32, MIRType::Int32));
4794       case uint16_t(Op::I32Sub):
4795         CHECK(EmitSub(f, ValType::I32, MIRType::Int32));
4796       case uint16_t(Op::I32Mul):
4797         CHECK(EmitMul(f, ValType::I32, MIRType::Int32));
4798       case uint16_t(Op::I32DivS):
4799       case uint16_t(Op::I32DivU):
4800         CHECK(
4801             EmitDiv(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32DivU));
4802       case uint16_t(Op::I32RemS):
4803       case uint16_t(Op::I32RemU):
4804         CHECK(
4805             EmitRem(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32RemU));
4806       case uint16_t(Op::I32And):
4807         CHECK(EmitBitwise<MBitAnd>(f, ValType::I32, MIRType::Int32));
4808       case uint16_t(Op::I32Or):
4809         CHECK(EmitBitwise<MBitOr>(f, ValType::I32, MIRType::Int32));
4810       case uint16_t(Op::I32Xor):
4811         CHECK(EmitBitwise<MBitXor>(f, ValType::I32, MIRType::Int32));
4812       case uint16_t(Op::I32Shl):
4813         CHECK(EmitBitwise<MLsh>(f, ValType::I32, MIRType::Int32));
4814       case uint16_t(Op::I32ShrS):
4815         CHECK(EmitBitwise<MRsh>(f, ValType::I32, MIRType::Int32));
4816       case uint16_t(Op::I32ShrU):
4817         CHECK(EmitUrsh(f, ValType::I32, MIRType::Int32));
4818       case uint16_t(Op::I32Rotl):
4819       case uint16_t(Op::I32Rotr):
4820         CHECK(EmitRotate(f, ValType::I32, Op(op.b0) == Op::I32Rotl));
4821       case uint16_t(Op::I64Clz):
4822         CHECK(EmitUnaryWithType<MClz>(f, ValType::I64, MIRType::Int64));
4823       case uint16_t(Op::I64Ctz):
4824         CHECK(EmitUnaryWithType<MCtz>(f, ValType::I64, MIRType::Int64));
4825       case uint16_t(Op::I64Popcnt):
4826         CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I64, MIRType::Int64));
4827       case uint16_t(Op::I64Add):
4828         CHECK(EmitAdd(f, ValType::I64, MIRType::Int64));
4829       case uint16_t(Op::I64Sub):
4830         CHECK(EmitSub(f, ValType::I64, MIRType::Int64));
4831       case uint16_t(Op::I64Mul):
4832         CHECK(EmitMul(f, ValType::I64, MIRType::Int64));
4833       case uint16_t(Op::I64DivS):
4834       case uint16_t(Op::I64DivU):
4835         CHECK(
4836             EmitDiv(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64DivU));
4837       case uint16_t(Op::I64RemS):
4838       case uint16_t(Op::I64RemU):
4839         CHECK(
4840             EmitRem(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64RemU));
4841       case uint16_t(Op::I64And):
4842         CHECK(EmitBitwise<MBitAnd>(f, ValType::I64, MIRType::Int64));
4843       case uint16_t(Op::I64Or):
4844         CHECK(EmitBitwise<MBitOr>(f, ValType::I64, MIRType::Int64));
4845       case uint16_t(Op::I64Xor):
4846         CHECK(EmitBitwise<MBitXor>(f, ValType::I64, MIRType::Int64));
4847       case uint16_t(Op::I64Shl):
4848         CHECK(EmitBitwise<MLsh>(f, ValType::I64, MIRType::Int64));
4849       case uint16_t(Op::I64ShrS):
4850         CHECK(EmitBitwise<MRsh>(f, ValType::I64, MIRType::Int64));
4851       case uint16_t(Op::I64ShrU):
4852         CHECK(EmitUrsh(f, ValType::I64, MIRType::Int64));
4853       case uint16_t(Op::I64Rotl):
4854       case uint16_t(Op::I64Rotr):
4855         CHECK(EmitRotate(f, ValType::I64, Op(op.b0) == Op::I64Rotl));
4856       case uint16_t(Op::F32Abs):
4857         CHECK(EmitUnaryWithType<MAbs>(f, ValType::F32, MIRType::Float32));
4858       case uint16_t(Op::F32Neg):
4859         CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F32, MIRType::Float32));
4860       case uint16_t(Op::F32Ceil):
4861         CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilF));
4862       case uint16_t(Op::F32Floor):
4863         CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorF));
4864       case uint16_t(Op::F32Trunc):
4865         CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncF));
4866       case uint16_t(Op::F32Nearest):
4867         CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntF));
4868       case uint16_t(Op::F32Sqrt):
4869         CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F32, MIRType::Float32));
4870       case uint16_t(Op::F32Add):
4871         CHECK(EmitAdd(f, ValType::F32, MIRType::Float32));
4872       case uint16_t(Op::F32Sub):
4873         CHECK(EmitSub(f, ValType::F32, MIRType::Float32));
4874       case uint16_t(Op::F32Mul):
4875         CHECK(EmitMul(f, ValType::F32, MIRType::Float32));
4876       case uint16_t(Op::F32Div):
4877         CHECK(EmitDiv(f, ValType::F32, MIRType::Float32,
4878                       /* isUnsigned = */ false));
4879       case uint16_t(Op::F32Min):
4880       case uint16_t(Op::F32Max):
4881         CHECK(EmitMinMax(f, ValType::F32, MIRType::Float32,
4882                          Op(op.b0) == Op::F32Max));
4883       case uint16_t(Op::F32CopySign):
4884         CHECK(EmitCopySign(f, ValType::F32));
4885       case uint16_t(Op::F64Abs):
4886         CHECK(EmitUnaryWithType<MAbs>(f, ValType::F64, MIRType::Double));
4887       case uint16_t(Op::F64Neg):
4888         CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F64, MIRType::Double));
4889       case uint16_t(Op::F64Ceil):
4890         CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilD));
4891       case uint16_t(Op::F64Floor):
4892         CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorD));
4893       case uint16_t(Op::F64Trunc):
4894         CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncD));
4895       case uint16_t(Op::F64Nearest):
4896         CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntD));
4897       case uint16_t(Op::F64Sqrt):
4898         CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F64, MIRType::Double));
4899       case uint16_t(Op::F64Add):
4900         CHECK(EmitAdd(f, ValType::F64, MIRType::Double));
4901       case uint16_t(Op::F64Sub):
4902         CHECK(EmitSub(f, ValType::F64, MIRType::Double));
4903       case uint16_t(Op::F64Mul):
4904         CHECK(EmitMul(f, ValType::F64, MIRType::Double));
4905       case uint16_t(Op::F64Div):
4906         CHECK(EmitDiv(f, ValType::F64, MIRType::Double,
4907                       /* isUnsigned = */ false));
4908       case uint16_t(Op::F64Min):
4909       case uint16_t(Op::F64Max):
4910         CHECK(EmitMinMax(f, ValType::F64, MIRType::Double,
4911                          Op(op.b0) == Op::F64Max));
4912       case uint16_t(Op::F64CopySign):
4913         CHECK(EmitCopySign(f, ValType::F64));
4914 
4915       // Conversions
4916       case uint16_t(Op::I32WrapI64):
4917         CHECK(EmitConversion<MWrapInt64ToInt32>(f, ValType::I64, ValType::I32));
4918       case uint16_t(Op::I32TruncSF32):
4919       case uint16_t(Op::I32TruncUF32):
4920         CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
4921                            Op(op.b0) == Op::I32TruncUF32, false));
4922       case uint16_t(Op::I32TruncSF64):
4923       case uint16_t(Op::I32TruncUF64):
4924         CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
4925                            Op(op.b0) == Op::I32TruncUF64, false));
4926       case uint16_t(Op::I64ExtendSI32):
4927       case uint16_t(Op::I64ExtendUI32):
4928         CHECK(EmitExtendI32(f, Op(op.b0) == Op::I64ExtendUI32));
4929       case uint16_t(Op::I64TruncSF32):
4930       case uint16_t(Op::I64TruncUF32):
4931         CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
4932                            Op(op.b0) == Op::I64TruncUF32, false));
4933       case uint16_t(Op::I64TruncSF64):
4934       case uint16_t(Op::I64TruncUF64):
4935         CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
4936                            Op(op.b0) == Op::I64TruncUF64, false));
4937       case uint16_t(Op::F32ConvertSI32):
4938         CHECK(EmitConversion<MToFloat32>(f, ValType::I32, ValType::F32));
4939       case uint16_t(Op::F32ConvertUI32):
4940         CHECK(EmitConversion<MWasmUnsignedToFloat32>(f, ValType::I32,
4941                                                      ValType::F32));
4942       case uint16_t(Op::F32ConvertSI64):
4943       case uint16_t(Op::F32ConvertUI64):
4944         CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32,
4945                                             Op(op.b0) == Op::F32ConvertUI64));
4946       case uint16_t(Op::F32DemoteF64):
4947         CHECK(EmitConversion<MToFloat32>(f, ValType::F64, ValType::F32));
4948       case uint16_t(Op::F64ConvertSI32):
4949         CHECK(EmitConversion<MToDouble>(f, ValType::I32, ValType::F64));
4950       case uint16_t(Op::F64ConvertUI32):
4951         CHECK(EmitConversion<MWasmUnsignedToDouble>(f, ValType::I32,
4952                                                     ValType::F64));
4953       case uint16_t(Op::F64ConvertSI64):
4954       case uint16_t(Op::F64ConvertUI64):
4955         CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double,
4956                                             Op(op.b0) == Op::F64ConvertUI64));
4957       case uint16_t(Op::F64PromoteF32):
4958         CHECK(EmitConversion<MToDouble>(f, ValType::F32, ValType::F64));
4959 
4960       // Reinterpretations
4961       case uint16_t(Op::I32ReinterpretF32):
4962         CHECK(EmitReinterpret(f, ValType::I32, ValType::F32, MIRType::Int32));
4963       case uint16_t(Op::I64ReinterpretF64):
4964         CHECK(EmitReinterpret(f, ValType::I64, ValType::F64, MIRType::Int64));
4965       case uint16_t(Op::F32ReinterpretI32):
4966         CHECK(EmitReinterpret(f, ValType::F32, ValType::I32, MIRType::Float32));
4967       case uint16_t(Op::F64ReinterpretI64):
4968         CHECK(EmitReinterpret(f, ValType::F64, ValType::I64, MIRType::Double));
4969 
4970 #ifdef ENABLE_WASM_GC
4971       case uint16_t(Op::RefEq):
4972         if (!f.moduleEnv().gcEnabled()) {
4973           return f.iter().unrecognizedOpcode(&op);
4974         }
4975         CHECK(EmitComparison(f, RefType::extern_(), JSOp::Eq,
4976                              MCompare::Compare_RefOrNull));
4977 #endif
4978       case uint16_t(Op::RefFunc):
4979         CHECK(EmitRefFunc(f));
4980       case uint16_t(Op::RefNull):
4981         CHECK(EmitRefNull(f));
4982       case uint16_t(Op::RefIsNull):
4983         CHECK(EmitRefIsNull(f));
4984 
4985       // Sign extensions
4986       case uint16_t(Op::I32Extend8S):
4987         CHECK(EmitSignExtend(f, 1, 4));
4988       case uint16_t(Op::I32Extend16S):
4989         CHECK(EmitSignExtend(f, 2, 4));
4990       case uint16_t(Op::I64Extend8S):
4991         CHECK(EmitSignExtend(f, 1, 8));
4992       case uint16_t(Op::I64Extend16S):
4993         CHECK(EmitSignExtend(f, 2, 8));
4994       case uint16_t(Op::I64Extend32S):
4995         CHECK(EmitSignExtend(f, 4, 8));
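      // EmitSignExtend takes the source and destination widths in bytes, so
      // (1, 4) is i32.extend8_s and (4, 8) is i64.extend32_s.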
4996 
4997       // GC operations
4998 #ifdef ENABLE_WASM_GC
4999       case uint16_t(Op::GcPrefix): {
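        // The GC opcodes have no Ion lowering yet, so they are rejected here
        // even when the feature is compiled in.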
5000         return f.iter().unrecognizedOpcode(&op);
5001       }
5002 #endif
5003 
5004       // SIMD operations
5005 #ifdef ENABLE_WASM_SIMD
5006       case uint16_t(Op::SimdPrefix): {
5007         if (!f.moduleEnv().v128Enabled()) {
5008           return f.iter().unrecognizedOpcode(&op);
5009         }
5010         switch (op.b1) {
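          // The cases below are grouped by the shape of their lowering rather
          // than by opcode value: commutative binary ops (the commutative=
          // flag lets the lowering swap operands when that is profitable),
          // non-commutative binary ops, splats, unary ops, cross-lane
          // reductions, shifts, lane accesses, and the load/store variants.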
5011           case uint32_t(SimdOp::V128Const):
5012             CHECK(EmitConstSimd128(f));
5013           case uint32_t(SimdOp::V128Load):
5014             CHECK(EmitLoad(f, ValType::V128, Scalar::Simd128));
5015           case uint32_t(SimdOp::V128Store):
5016             CHECK(EmitStore(f, ValType::V128, Scalar::Simd128));
5017           case uint32_t(SimdOp::V128And):
5018           case uint32_t(SimdOp::V128Or):
5019           case uint32_t(SimdOp::V128Xor):
5020           case uint32_t(SimdOp::I8x16AvgrU):
5021           case uint32_t(SimdOp::I16x8AvgrU):
5022           case uint32_t(SimdOp::I8x16Add):
5023           case uint32_t(SimdOp::I8x16AddSaturateS):
5024           case uint32_t(SimdOp::I8x16AddSaturateU):
5025           case uint32_t(SimdOp::I8x16MinS):
5026           case uint32_t(SimdOp::I8x16MinU):
5027           case uint32_t(SimdOp::I8x16MaxS):
5028           case uint32_t(SimdOp::I8x16MaxU):
5029           case uint32_t(SimdOp::I16x8Add):
5030           case uint32_t(SimdOp::I16x8AddSaturateS):
5031           case uint32_t(SimdOp::I16x8AddSaturateU):
5032           case uint32_t(SimdOp::I16x8Mul):
5033           case uint32_t(SimdOp::I16x8MinS):
5034           case uint32_t(SimdOp::I16x8MinU):
5035           case uint32_t(SimdOp::I16x8MaxS):
5036           case uint32_t(SimdOp::I16x8MaxU):
5037           case uint32_t(SimdOp::I32x4Add):
5038           case uint32_t(SimdOp::I32x4Mul):
5039           case uint32_t(SimdOp::I32x4MinS):
5040           case uint32_t(SimdOp::I32x4MinU):
5041           case uint32_t(SimdOp::I32x4MaxS):
5042           case uint32_t(SimdOp::I32x4MaxU):
5043           case uint32_t(SimdOp::I64x2Add):
5044           case uint32_t(SimdOp::I64x2Mul):
5045           case uint32_t(SimdOp::F32x4Add):
5046           case uint32_t(SimdOp::F32x4Mul):
5047           case uint32_t(SimdOp::F32x4Min):
5048           case uint32_t(SimdOp::F32x4Max):
5049           case uint32_t(SimdOp::F64x2Add):
5050           case uint32_t(SimdOp::F64x2Mul):
5051           case uint32_t(SimdOp::F64x2Min):
5052           case uint32_t(SimdOp::F64x2Max):
5053           case uint32_t(SimdOp::I8x16Eq):
5054           case uint32_t(SimdOp::I8x16Ne):
5055           case uint32_t(SimdOp::I16x8Eq):
5056           case uint32_t(SimdOp::I16x8Ne):
5057           case uint32_t(SimdOp::I32x4Eq):
5058           case uint32_t(SimdOp::I32x4Ne):
5059           case uint32_t(SimdOp::I64x2Eq):
5060           case uint32_t(SimdOp::I64x2Ne):
5061           case uint32_t(SimdOp::F32x4Eq):
5062           case uint32_t(SimdOp::F32x4Ne):
5063           case uint32_t(SimdOp::F64x2Eq):
5064           case uint32_t(SimdOp::F64x2Ne):
5065           case uint32_t(SimdOp::I32x4DotSI16x8):
5066           case uint32_t(SimdOp::I16x8ExtMulLowSI8x16):
5067           case uint32_t(SimdOp::I16x8ExtMulHighSI8x16):
5068           case uint32_t(SimdOp::I16x8ExtMulLowUI8x16):
5069           case uint32_t(SimdOp::I16x8ExtMulHighUI8x16):
5070           case uint32_t(SimdOp::I32x4ExtMulLowSI16x8):
5071           case uint32_t(SimdOp::I32x4ExtMulHighSI16x8):
5072           case uint32_t(SimdOp::I32x4ExtMulLowUI16x8):
5073           case uint32_t(SimdOp::I32x4ExtMulHighUI16x8):
5074           case uint32_t(SimdOp::I64x2ExtMulLowSI32x4):
5075           case uint32_t(SimdOp::I64x2ExtMulHighSI32x4):
5076           case uint32_t(SimdOp::I64x2ExtMulLowUI32x4):
5077           case uint32_t(SimdOp::I64x2ExtMulHighUI32x4):
5078           case uint32_t(SimdOp::I16x8Q15MulrSatS):
5079             CHECK(EmitBinarySimd128(f, /* commutative= */ true, SimdOp(op.b1)));
5080           case uint32_t(SimdOp::V128AndNot):
5081           case uint32_t(SimdOp::I8x16Sub):
5082           case uint32_t(SimdOp::I8x16SubSaturateS):
5083           case uint32_t(SimdOp::I8x16SubSaturateU):
5084           case uint32_t(SimdOp::I16x8Sub):
5085           case uint32_t(SimdOp::I16x8SubSaturateS):
5086           case uint32_t(SimdOp::I16x8SubSaturateU):
5087           case uint32_t(SimdOp::I32x4Sub):
5088           case uint32_t(SimdOp::I64x2Sub):
5089           case uint32_t(SimdOp::F32x4Sub):
5090           case uint32_t(SimdOp::F32x4Div):
5091           case uint32_t(SimdOp::F64x2Sub):
5092           case uint32_t(SimdOp::F64x2Div):
5093           case uint32_t(SimdOp::I8x16NarrowSI16x8):
5094           case uint32_t(SimdOp::I8x16NarrowUI16x8):
5095           case uint32_t(SimdOp::I16x8NarrowSI32x4):
5096           case uint32_t(SimdOp::I16x8NarrowUI32x4):
5097           case uint32_t(SimdOp::I8x16LtS):
5098           case uint32_t(SimdOp::I8x16LtU):
5099           case uint32_t(SimdOp::I8x16GtS):
5100           case uint32_t(SimdOp::I8x16GtU):
5101           case uint32_t(SimdOp::I8x16LeS):
5102           case uint32_t(SimdOp::I8x16LeU):
5103           case uint32_t(SimdOp::I8x16GeS):
5104           case uint32_t(SimdOp::I8x16GeU):
5105           case uint32_t(SimdOp::I16x8LtS):
5106           case uint32_t(SimdOp::I16x8LtU):
5107           case uint32_t(SimdOp::I16x8GtS):
5108           case uint32_t(SimdOp::I16x8GtU):
5109           case uint32_t(SimdOp::I16x8LeS):
5110           case uint32_t(SimdOp::I16x8LeU):
5111           case uint32_t(SimdOp::I16x8GeS):
5112           case uint32_t(SimdOp::I16x8GeU):
5113           case uint32_t(SimdOp::I32x4LtS):
5114           case uint32_t(SimdOp::I32x4LtU):
5115           case uint32_t(SimdOp::I32x4GtS):
5116           case uint32_t(SimdOp::I32x4GtU):
5117           case uint32_t(SimdOp::I32x4LeS):
5118           case uint32_t(SimdOp::I32x4LeU):
5119           case uint32_t(SimdOp::I32x4GeS):
5120           case uint32_t(SimdOp::I32x4GeU):
5121           case uint32_t(SimdOp::I64x2LtS):
5122           case uint32_t(SimdOp::I64x2GtS):
5123           case uint32_t(SimdOp::I64x2LeS):
5124           case uint32_t(SimdOp::I64x2GeS):
5125           case uint32_t(SimdOp::F32x4Lt):
5126           case uint32_t(SimdOp::F32x4Gt):
5127           case uint32_t(SimdOp::F32x4Le):
5128           case uint32_t(SimdOp::F32x4Ge):
5129           case uint32_t(SimdOp::F64x2Lt):
5130           case uint32_t(SimdOp::F64x2Gt):
5131           case uint32_t(SimdOp::F64x2Le):
5132           case uint32_t(SimdOp::F64x2Ge):
5133           case uint32_t(SimdOp::V8x16Swizzle):
5134           case uint32_t(SimdOp::F32x4PMax):
5135           case uint32_t(SimdOp::F32x4PMin):
5136           case uint32_t(SimdOp::F64x2PMax):
5137           case uint32_t(SimdOp::F64x2PMin):
5138             CHECK(
5139                 EmitBinarySimd128(f, /* commutative= */ false, SimdOp(op.b1)));
5140           case uint32_t(SimdOp::I8x16Splat):
5141           case uint32_t(SimdOp::I16x8Splat):
5142           case uint32_t(SimdOp::I32x4Splat):
5143             CHECK(EmitSplatSimd128(f, ValType::I32, SimdOp(op.b1)));
5144           case uint32_t(SimdOp::I64x2Splat):
5145             CHECK(EmitSplatSimd128(f, ValType::I64, SimdOp(op.b1)));
5146           case uint32_t(SimdOp::F32x4Splat):
5147             CHECK(EmitSplatSimd128(f, ValType::F32, SimdOp(op.b1)));
5148           case uint32_t(SimdOp::F64x2Splat):
5149             CHECK(EmitSplatSimd128(f, ValType::F64, SimdOp(op.b1)));
5150           case uint32_t(SimdOp::I8x16Neg):
5151           case uint32_t(SimdOp::I16x8Neg):
5152           case uint32_t(SimdOp::I16x8WidenLowSI8x16):
5153           case uint32_t(SimdOp::I16x8WidenHighSI8x16):
5154           case uint32_t(SimdOp::I16x8WidenLowUI8x16):
5155           case uint32_t(SimdOp::I16x8WidenHighUI8x16):
5156           case uint32_t(SimdOp::I32x4Neg):
5157           case uint32_t(SimdOp::I32x4WidenLowSI16x8):
5158           case uint32_t(SimdOp::I32x4WidenHighSI16x8):
5159           case uint32_t(SimdOp::I32x4WidenLowUI16x8):
5160           case uint32_t(SimdOp::I32x4WidenHighUI16x8):
5161           case uint32_t(SimdOp::I32x4TruncSSatF32x4):
5162           case uint32_t(SimdOp::I32x4TruncUSatF32x4):
5163           case uint32_t(SimdOp::I64x2Neg):
5164           case uint32_t(SimdOp::I64x2WidenLowSI32x4):
5165           case uint32_t(SimdOp::I64x2WidenHighSI32x4):
5166           case uint32_t(SimdOp::I64x2WidenLowUI32x4):
5167           case uint32_t(SimdOp::I64x2WidenHighUI32x4):
5168           case uint32_t(SimdOp::F32x4Abs):
5169           case uint32_t(SimdOp::F32x4Neg):
5170           case uint32_t(SimdOp::F32x4Sqrt):
5171           case uint32_t(SimdOp::F32x4ConvertSI32x4):
5172           case uint32_t(SimdOp::F32x4ConvertUI32x4):
5173           case uint32_t(SimdOp::F64x2Abs):
5174           case uint32_t(SimdOp::F64x2Neg):
5175           case uint32_t(SimdOp::F64x2Sqrt):
5176           case uint32_t(SimdOp::V128Not):
5177           case uint32_t(SimdOp::I8x16Popcnt):
5178           case uint32_t(SimdOp::I8x16Abs):
5179           case uint32_t(SimdOp::I16x8Abs):
5180           case uint32_t(SimdOp::I32x4Abs):
5181           case uint32_t(SimdOp::I64x2Abs):
5182           case uint32_t(SimdOp::F32x4Ceil):
5183           case uint32_t(SimdOp::F32x4Floor):
5184           case uint32_t(SimdOp::F32x4Trunc):
5185           case uint32_t(SimdOp::F32x4Nearest):
5186           case uint32_t(SimdOp::F64x2Ceil):
5187           case uint32_t(SimdOp::F64x2Floor):
5188           case uint32_t(SimdOp::F64x2Trunc):
5189           case uint32_t(SimdOp::F64x2Nearest):
5190           case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
5191           case uint32_t(SimdOp::F64x2PromoteLowF32x4):
5192           case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
5193           case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
5194           case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
5195           case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
5196           case uint32_t(SimdOp::I16x8ExtAddPairwiseI8x16S):
5197           case uint32_t(SimdOp::I16x8ExtAddPairwiseI8x16U):
5198           case uint32_t(SimdOp::I32x4ExtAddPairwiseI16x8S):
5199           case uint32_t(SimdOp::I32x4ExtAddPairwiseI16x8U):
5200             CHECK(EmitUnarySimd128(f, SimdOp(op.b1)));
5201           case uint32_t(SimdOp::V128AnyTrue):
5202           case uint32_t(SimdOp::I8x16AllTrue):
5203           case uint32_t(SimdOp::I16x8AllTrue):
5204           case uint32_t(SimdOp::I32x4AllTrue):
5205           case uint32_t(SimdOp::I64x2AllTrue):
5206           case uint32_t(SimdOp::I8x16Bitmask):
5207           case uint32_t(SimdOp::I16x8Bitmask):
5208           case uint32_t(SimdOp::I32x4Bitmask):
5209           case uint32_t(SimdOp::I64x2Bitmask):
5210             CHECK(EmitReduceSimd128(f, SimdOp(op.b1)));
5211           case uint32_t(SimdOp::I8x16Shl):
5212           case uint32_t(SimdOp::I8x16ShrS):
5213           case uint32_t(SimdOp::I8x16ShrU):
5214           case uint32_t(SimdOp::I16x8Shl):
5215           case uint32_t(SimdOp::I16x8ShrS):
5216           case uint32_t(SimdOp::I16x8ShrU):
5217           case uint32_t(SimdOp::I32x4Shl):
5218           case uint32_t(SimdOp::I32x4ShrS):
5219           case uint32_t(SimdOp::I32x4ShrU):
5220           case uint32_t(SimdOp::I64x2Shl):
5221           case uint32_t(SimdOp::I64x2ShrS):
5222           case uint32_t(SimdOp::I64x2ShrU):
5223             CHECK(EmitShiftSimd128(f, SimdOp(op.b1)));
5224           case uint32_t(SimdOp::I8x16ExtractLaneS):
5225           case uint32_t(SimdOp::I8x16ExtractLaneU):
5226             CHECK(EmitExtractLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
5227           case uint32_t(SimdOp::I16x8ExtractLaneS):
5228           case uint32_t(SimdOp::I16x8ExtractLaneU):
5229             CHECK(EmitExtractLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
5230           case uint32_t(SimdOp::I32x4ExtractLane):
5231             CHECK(EmitExtractLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
5232           case uint32_t(SimdOp::I64x2ExtractLane):
5233             CHECK(EmitExtractLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
5234           case uint32_t(SimdOp::F32x4ExtractLane):
5235             CHECK(EmitExtractLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
5236           case uint32_t(SimdOp::F64x2ExtractLane):
5237             CHECK(EmitExtractLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
5238           case uint32_t(SimdOp::I8x16ReplaceLane):
5239             CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
5240           case uint32_t(SimdOp::I16x8ReplaceLane):
5241             CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
5242           case uint32_t(SimdOp::I32x4ReplaceLane):
5243             CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
5244           case uint32_t(SimdOp::I64x2ReplaceLane):
5245             CHECK(EmitReplaceLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
5246           case uint32_t(SimdOp::F32x4ReplaceLane):
5247             CHECK(EmitReplaceLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
5248           case uint32_t(SimdOp::F64x2ReplaceLane):
5249             CHECK(EmitReplaceLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
5250           case uint32_t(SimdOp::V128Bitselect):
5251             CHECK(EmitBitselectSimd128(f));
5252           case uint32_t(SimdOp::V8x16Shuffle):
5253             CHECK(EmitShuffleSimd128(f));
5254           case uint32_t(SimdOp::V8x16LoadSplat):
5255             CHECK(EmitLoadSplatSimd128(f, Scalar::Uint8, SimdOp::I8x16Splat));
5256           case uint32_t(SimdOp::V16x8LoadSplat):
5257             CHECK(EmitLoadSplatSimd128(f, Scalar::Uint16, SimdOp::I16x8Splat));
5258           case uint32_t(SimdOp::V32x4LoadSplat):
5259             CHECK(EmitLoadSplatSimd128(f, Scalar::Float32, SimdOp::I32x4Splat));
5260           case uint32_t(SimdOp::V64x2LoadSplat):
5261             CHECK(EmitLoadSplatSimd128(f, Scalar::Float64, SimdOp::I64x2Splat));
5262           case uint32_t(SimdOp::I16x8LoadS8x8):
5263           case uint32_t(SimdOp::I16x8LoadU8x8):
5264           case uint32_t(SimdOp::I32x4LoadS16x4):
5265           case uint32_t(SimdOp::I32x4LoadU16x4):
5266           case uint32_t(SimdOp::I64x2LoadS32x2):
5267           case uint32_t(SimdOp::I64x2LoadU32x2):
5268             CHECK(EmitLoadExtendSimd128(f, SimdOp(op.b1)));
5269           case uint32_t(SimdOp::V128Load32Zero):
5270             CHECK(EmitLoadZeroSimd128(f, Scalar::Float32, 4));
5271           case uint32_t(SimdOp::V128Load64Zero):
5272             CHECK(EmitLoadZeroSimd128(f, Scalar::Float64, 8));
5273           case uint32_t(SimdOp::V128Load8Lane):
5274             CHECK(EmitLoadLaneSimd128(f, 1));
5275           case uint32_t(SimdOp::V128Load16Lane):
5276             CHECK(EmitLoadLaneSimd128(f, 2));
5277           case uint32_t(SimdOp::V128Load32Lane):
5278             CHECK(EmitLoadLaneSimd128(f, 4));
5279           case uint32_t(SimdOp::V128Load64Lane):
5280             CHECK(EmitLoadLaneSimd128(f, 8));
5281           case uint32_t(SimdOp::V128Store8Lane):
5282             CHECK(EmitStoreLaneSimd128(f, 1));
5283           case uint32_t(SimdOp::V128Store16Lane):
5284             CHECK(EmitStoreLaneSimd128(f, 2));
5285           case uint32_t(SimdOp::V128Store32Lane):
5286             CHECK(EmitStoreLaneSimd128(f, 4));
5287           case uint32_t(SimdOp::V128Store64Lane):
5288             CHECK(EmitStoreLaneSimd128(f, 8));
5289           default:
5290             return f.iter().unrecognizedOpcode(&op);
5291         }  // switch (op.b1)
5292         break;
5293       }
5294 #endif
5295 
5296       // Miscellaneous operations
5297       case uint16_t(Op::MiscPrefix): {
5298         switch (op.b1) {
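          // For the truncations below, the trailing 'true' passed to
          // EmitTruncate selects saturating semantics: out-of-range inputs
          // clamp to the integer range and NaN converts to zero, instead of
          // trapping as the plain Op::I32/I64Trunc* forms above do.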
5299           case uint32_t(MiscOp::I32TruncSSatF32):
5300           case uint32_t(MiscOp::I32TruncUSatF32):
5301             CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
5302                                MiscOp(op.b1) == MiscOp::I32TruncUSatF32, true));
5303           case uint32_t(MiscOp::I32TruncSSatF64):
5304           case uint32_t(MiscOp::I32TruncUSatF64):
5305             CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
5306                                MiscOp(op.b1) == MiscOp::I32TruncUSatF64, true));
5307           case uint32_t(MiscOp::I64TruncSSatF32):
5308           case uint32_t(MiscOp::I64TruncUSatF32):
5309             CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
5310                                MiscOp(op.b1) == MiscOp::I64TruncUSatF32, true));
5311           case uint32_t(MiscOp::I64TruncSSatF64):
5312           case uint32_t(MiscOp::I64TruncUSatF64):
5313             CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
5314                                MiscOp(op.b1) == MiscOp::I64TruncUSatF64, true));
5315           case uint32_t(MiscOp::MemCopy):
5316             CHECK(EmitMemCopy(f));
5317           case uint32_t(MiscOp::DataDrop):
5318             CHECK(EmitDataOrElemDrop(f, /*isData=*/true));
5319           case uint32_t(MiscOp::MemFill):
5320             CHECK(EmitMemFill(f));
5321           case uint32_t(MiscOp::MemInit):
5322             CHECK(EmitMemOrTableInit(f, /*isMem=*/true));
5323           case uint32_t(MiscOp::TableCopy):
5324             CHECK(EmitTableCopy(f));
5325           case uint32_t(MiscOp::ElemDrop):
5326             CHECK(EmitDataOrElemDrop(f, /*isData=*/false));
5327           case uint32_t(MiscOp::TableInit):
5328             CHECK(EmitMemOrTableInit(f, /*isMem=*/false));
5329           case uint32_t(MiscOp::TableFill):
5330             CHECK(EmitTableFill(f));
5331           case uint32_t(MiscOp::TableGrow):
5332             CHECK(EmitTableGrow(f));
5333           case uint32_t(MiscOp::TableSize):
5334             CHECK(EmitTableSize(f));
5335           default:
5336             return f.iter().unrecognizedOpcode(&op);
5337         }
5338         break;
5339       }
5340 
5341       // Thread operations
5342       case uint16_t(Op::ThreadPrefix): {
5343         // Though thread ops can be used on non-shared memories, we make
5344         // them unavailable if shared memory has been disabled in the
5345         // prefs, for maximum predictability, safety, and consistency with JS.
5346         if (f.moduleEnv().sharedMemoryEnabled() == Shareable::False) {
5347           return f.iter().unrecognizedOpcode(&op);
5348         }
5349         switch (op.b1) {
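          // The 8U/16U/32U forms below access a narrow memory cell and
          // zero-extend the loaded or returned value to a full i32/i64, as
          // the wasm threads proposal requires.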
5350           case uint32_t(ThreadOp::Wake):
5351             CHECK(EmitWake(f));
5352 
5353           case uint32_t(ThreadOp::I32Wait):
5354             CHECK(EmitWait(f, ValType::I32, 4));
5355           case uint32_t(ThreadOp::I64Wait):
5356             CHECK(EmitWait(f, ValType::I64, 8));
5357           case uint32_t(ThreadOp::Fence):
5358             CHECK(EmitFence(f));
5359 
5360           case uint32_t(ThreadOp::I32AtomicLoad):
5361             CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Int32));
5362           case uint32_t(ThreadOp::I64AtomicLoad):
5363             CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Int64));
5364           case uint32_t(ThreadOp::I32AtomicLoad8U):
5365             CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint8));
5366           case uint32_t(ThreadOp::I32AtomicLoad16U):
5367             CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint16));
5368           case uint32_t(ThreadOp::I64AtomicLoad8U):
5369             CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint8));
5370           case uint32_t(ThreadOp::I64AtomicLoad16U):
5371             CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint16));
5372           case uint32_t(ThreadOp::I64AtomicLoad32U):
5373             CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint32));
5374 
5375           case uint32_t(ThreadOp::I32AtomicStore):
5376             CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Int32));
5377           case uint32_t(ThreadOp::I64AtomicStore):
5378             CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Int64));
5379           case uint32_t(ThreadOp::I32AtomicStore8U):
5380             CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint8));
5381           case uint32_t(ThreadOp::I32AtomicStore16U):
5382             CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint16));
5383           case uint32_t(ThreadOp::I64AtomicStore8U):
5384             CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint8));
5385           case uint32_t(ThreadOp::I64AtomicStore16U):
5386             CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint16));
5387           case uint32_t(ThreadOp::I64AtomicStore32U):
5388             CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint32));
5389 
5390           case uint32_t(ThreadOp::I32AtomicAdd):
5391             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
5392                                 AtomicFetchAddOp));
5393           case uint32_t(ThreadOp::I64AtomicAdd):
5394             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
5395                                 AtomicFetchAddOp));
5396           case uint32_t(ThreadOp::I32AtomicAdd8U):
5397             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
5398                                 AtomicFetchAddOp));
5399           case uint32_t(ThreadOp::I32AtomicAdd16U):
5400             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
5401                                 AtomicFetchAddOp));
5402           case uint32_t(ThreadOp::I64AtomicAdd8U):
5403             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
5404                                 AtomicFetchAddOp));
5405           case uint32_t(ThreadOp::I64AtomicAdd16U):
5406             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
5407                                 AtomicFetchAddOp));
5408           case uint32_t(ThreadOp::I64AtomicAdd32U):
5409             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
5410                                 AtomicFetchAddOp));
5411 
5412           case uint32_t(ThreadOp::I32AtomicSub):
5413             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
5414                                 AtomicFetchSubOp));
5415           case uint32_t(ThreadOp::I64AtomicSub):
5416             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
5417                                 AtomicFetchSubOp));
5418           case uint32_t(ThreadOp::I32AtomicSub8U):
5419             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
5420                                 AtomicFetchSubOp));
5421           case uint32_t(ThreadOp::I32AtomicSub16U):
5422             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
5423                                 AtomicFetchSubOp));
5424           case uint32_t(ThreadOp::I64AtomicSub8U):
5425             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
5426                                 AtomicFetchSubOp));
5427           case uint32_t(ThreadOp::I64AtomicSub16U):
5428             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
5429                                 AtomicFetchSubOp));
5430           case uint32_t(ThreadOp::I64AtomicSub32U):
5431             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
5432                                 AtomicFetchSubOp));
5433 
5434           case uint32_t(ThreadOp::I32AtomicAnd):
5435             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
5436                                 AtomicFetchAndOp));
5437           case uint32_t(ThreadOp::I64AtomicAnd):
5438             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
5439                                 AtomicFetchAndOp));
5440           case uint32_t(ThreadOp::I32AtomicAnd8U):
5441             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
5442                                 AtomicFetchAndOp));
5443           case uint32_t(ThreadOp::I32AtomicAnd16U):
5444             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
5445                                 AtomicFetchAndOp));
5446           case uint32_t(ThreadOp::I64AtomicAnd8U):
5447             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
5448                                 AtomicFetchAndOp));
5449           case uint32_t(ThreadOp::I64AtomicAnd16U):
5450             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
5451                                 AtomicFetchAndOp));
5452           case uint32_t(ThreadOp::I64AtomicAnd32U):
5453             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
5454                                 AtomicFetchAndOp));
5455 
5456           case uint32_t(ThreadOp::I32AtomicOr):
5457             CHECK(
5458                 EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicFetchOrOp));
5459           case uint32_t(ThreadOp::I64AtomicOr):
5460             CHECK(
5461                 EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicFetchOrOp));
5462           case uint32_t(ThreadOp::I32AtomicOr8U):
5463             CHECK(
5464                 EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
5465           case uint32_t(ThreadOp::I32AtomicOr16U):
5466             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
5467                                 AtomicFetchOrOp));
5468           case uint32_t(ThreadOp::I64AtomicOr8U):
5469             CHECK(
5470                 EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
5471           case uint32_t(ThreadOp::I64AtomicOr16U):
5472             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
5473                                 AtomicFetchOrOp));
5474           case uint32_t(ThreadOp::I64AtomicOr32U):
5475             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
5476                                 AtomicFetchOrOp));
5477 
5478           case uint32_t(ThreadOp::I32AtomicXor):
5479             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
5480                                 AtomicFetchXorOp));
5481           case uint32_t(ThreadOp::I64AtomicXor):
5482             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
5483                                 AtomicFetchXorOp));
5484           case uint32_t(ThreadOp::I32AtomicXor8U):
5485             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
5486                                 AtomicFetchXorOp));
5487           case uint32_t(ThreadOp::I32AtomicXor16U):
5488             CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
5489                                 AtomicFetchXorOp));
5490           case uint32_t(ThreadOp::I64AtomicXor8U):
5491             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
5492                                 AtomicFetchXorOp));
5493           case uint32_t(ThreadOp::I64AtomicXor16U):
5494             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
5495                                 AtomicFetchXorOp));
5496           case uint32_t(ThreadOp::I64AtomicXor32U):
5497             CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
5498                                 AtomicFetchXorOp));
5499 
5500           case uint32_t(ThreadOp::I32AtomicXchg):
5501             CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Int32));
5502           case uint32_t(ThreadOp::I64AtomicXchg):
5503             CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Int64));
5504           case uint32_t(ThreadOp::I32AtomicXchg8U):
5505             CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint8));
5506           case uint32_t(ThreadOp::I32AtomicXchg16U):
5507             CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint16));
5508           case uint32_t(ThreadOp::I64AtomicXchg8U):
5509             CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint8));
5510           case uint32_t(ThreadOp::I64AtomicXchg16U):
5511             CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint16));
5512           case uint32_t(ThreadOp::I64AtomicXchg32U):
5513             CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint32));
5514 
5515           case uint32_t(ThreadOp::I32AtomicCmpXchg):
5516             CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Int32));
5517           case uint32_t(ThreadOp::I64AtomicCmpXchg):
5518             CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Int64));
5519           case uint32_t(ThreadOp::I32AtomicCmpXchg8U):
5520             CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint8));
5521           case uint32_t(ThreadOp::I32AtomicCmpXchg16U):
5522             CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint16));
5523           case uint32_t(ThreadOp::I64AtomicCmpXchg8U):
5524             CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint8));
5525           case uint32_t(ThreadOp::I64AtomicCmpXchg16U):
5526             CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint16));
5527           case uint32_t(ThreadOp::I64AtomicCmpXchg32U):
5528             CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint32));
5529 
5530           default:
5531             return f.iter().unrecognizedOpcode(&op);
5532         }
5533         break;
5534       }
5535 
5536       // asm.js-specific operators
5537       case uint16_t(Op::MozPrefix): {
5538         if (!f.moduleEnv().isAsmJS()) {
5539           return f.iter().unrecognizedOpcode(&op);
5540         }
5541         switch (op.b1) {
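          // These MozOp cases encode asm.js-only constructs: fused tee+store
          // forms, the JS Math builtins as instance calls, and the old-style
          // asm.js call encodings. The isAsmJS() guard above keeps them
          // unreachable from plain wasm modules.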
5542           case uint32_t(MozOp::TeeGlobal):
5543             CHECK(EmitTeeGlobal(f));
5544           case uint32_t(MozOp::I32Min):
5545           case uint32_t(MozOp::I32Max):
5546             CHECK(EmitMinMax(f, ValType::I32, MIRType::Int32,
5547                              MozOp(op.b1) == MozOp::I32Max));
5548           case uint32_t(MozOp::I32Neg):
5549             CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::I32, MIRType::Int32));
5550           case uint32_t(MozOp::I32BitNot):
5551             CHECK(EmitBitNot(f, ValType::I32));
5552           case uint32_t(MozOp::I32Abs):
5553             CHECK(EmitUnaryWithType<MAbs>(f, ValType::I32, MIRType::Int32));
5554           case uint32_t(MozOp::F32TeeStoreF64):
5555             CHECK(EmitTeeStoreWithCoercion(f, ValType::F32, Scalar::Float64));
5556           case uint32_t(MozOp::F64TeeStoreF32):
5557             CHECK(EmitTeeStoreWithCoercion(f, ValType::F64, Scalar::Float32));
5558           case uint32_t(MozOp::I32TeeStore8):
5559             CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int8));
5560           case uint32_t(MozOp::I32TeeStore16):
5561             CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int16));
5562           case uint32_t(MozOp::I64TeeStore8):
5563             CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int8));
5564           case uint32_t(MozOp::I64TeeStore16):
5565             CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int16));
5566           case uint32_t(MozOp::I64TeeStore32):
5567             CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int32));
5568           case uint32_t(MozOp::I32TeeStore):
5569             CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int32));
5570           case uint32_t(MozOp::I64TeeStore):
5571             CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int64));
5572           case uint32_t(MozOp::F32TeeStore):
5573             CHECK(EmitTeeStore(f, ValType::F32, Scalar::Float32));
5574           case uint32_t(MozOp::F64TeeStore):
5575             CHECK(EmitTeeStore(f, ValType::F64, Scalar::Float64));
5576           case uint32_t(MozOp::F64Mod):
5577             CHECK(EmitRem(f, ValType::F64, MIRType::Double,
5578                           /* isUnsigned = */ false));
5579           case uint32_t(MozOp::F64Sin):
5580             CHECK(EmitUnaryMathBuiltinCall(f, SASigSinD));
5581           case uint32_t(MozOp::F64Cos):
5582             CHECK(EmitUnaryMathBuiltinCall(f, SASigCosD));
5583           case uint32_t(MozOp::F64Tan):
5584             CHECK(EmitUnaryMathBuiltinCall(f, SASigTanD));
5585           case uint32_t(MozOp::F64Asin):
5586             CHECK(EmitUnaryMathBuiltinCall(f, SASigASinD));
5587           case uint32_t(MozOp::F64Acos):
5588             CHECK(EmitUnaryMathBuiltinCall(f, SASigACosD));
5589           case uint32_t(MozOp::F64Atan):
5590             CHECK(EmitUnaryMathBuiltinCall(f, SASigATanD));
5591           case uint32_t(MozOp::F64Exp):
5592             CHECK(EmitUnaryMathBuiltinCall(f, SASigExpD));
5593           case uint32_t(MozOp::F64Log):
5594             CHECK(EmitUnaryMathBuiltinCall(f, SASigLogD));
5595           case uint32_t(MozOp::F64Pow):
5596             CHECK(EmitBinaryMathBuiltinCall(f, SASigPowD));
5597           case uint32_t(MozOp::F64Atan2):
5598             CHECK(EmitBinaryMathBuiltinCall(f, SASigATan2D));
5599           case uint32_t(MozOp::OldCallDirect):
5600             CHECK(EmitCall(f, /* asmJSFuncDef = */ true));
5601           case uint32_t(MozOp::OldCallIndirect):
5602             CHECK(EmitCallIndirect(f, /* oldStyle = */ true));
5603 
5604           default:
5605             return f.iter().unrecognizedOpcode(&op);
5606         }
5607         break;
5608       }
5609 
5610       default:
5611         return f.iter().unrecognizedOpcode(&op);
5612     }
5613   }
5614 
5615   MOZ_CRASH("unreachable");
5616 
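// A note on the dispatch above: every case ends in CHECK(c), which returns
// false if the emitter fails and otherwise breaks out of the opcode switch
// so the decode loop can continue. Its definition, near the top of
// EmitBodyExprs, is roughly:
//
//   #define CHECK(c)          \
//     if (!(c)) return false; \
//     break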
5617 #undef CHECK
5618 }
5619 
5620 bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
5621                                const CompilerEnvironment& compilerEnv,
5622                                LifoAlloc& lifo,
5623                                const FuncCompileInputVector& inputs,
5624                                CompiledCode* code, UniqueChars* error) {
5625   MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
5626   MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
5627   MOZ_ASSERT(compilerEnv.optimizedBackend() == OptimizedBackend::Ion);
5628 
5629   TempAllocator alloc(&lifo);
5630   JitContext jitContext(&alloc);
5631   MOZ_ASSERT(IsCompilingWasm());
5632   WasmMacroAssembler masm(alloc, moduleEnv);
5633 #if defined(JS_CODEGEN_ARM64)
5634   masm.SetStackPointer64(PseudoStackPointer64);
5635 #endif
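  // (On ARM64, wasm code addresses the stack through a pseudo stack pointer
  // register rather than through sp itself, so the macro assembler must be
  // told which register to use before any code is emitted.)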
5636 
5637   // Swap in already-allocated empty vectors to avoid malloc/free.
5638   MOZ_ASSERT(code->empty());
5639   if (!code->swap(masm)) {
5640     return false;
5641   }
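  // (The matching swap at the end of this function moves the finished
  // buffers back into 'code'.)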
5642 
5643   // Create a description of the stack layout created by GenerateTrapExit().
5644   MachineState trapExitLayout;
5645   size_t trapExitLayoutNumWords;
5646   GenerateTrapExitMachineState(&trapExitLayout, &trapExitLayoutNumWords);
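  // (This layout is consumed by codegen.generateWasm() below when building
  // stackmaps that must describe the registers the trap exit spills.)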
5647 
5648   for (const FuncCompileInput& func : inputs) {
5649     JitSpewCont(JitSpew_Codegen, "\n");
5650     JitSpew(JitSpew_Codegen,
5651             "# ================================"
5652             "==================================");
5653     JitSpew(JitSpew_Codegen, "# ==");
5654     JitSpew(JitSpew_Codegen,
5655             "# wasm::IonCompileFunctions: starting on function index %d",
5656             (int)func.index);
5657 
5658     Decoder d(func.begin, func.end, func.lineOrBytecode, error);
5659 
5660     // Build the local types vector.
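    // (Parameters come first, then the locals declared in the body, in
    // declaration order.)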
5661 
5662     const FuncType& funcType = *moduleEnv.funcs[func.index].type;
5663     const TypeIdDesc& funcTypeId = *moduleEnv.funcs[func.index].typeId;
5664     ValTypeVector locals;
5665     if (!locals.appendAll(funcType.args())) {
5666       return false;
5667     }
5668     if (!DecodeLocalEntries(d, moduleEnv.types, moduleEnv.features, &locals)) {
5669       return false;
5670     }
5671 
5672     // Set up for Ion compilation.
5673 
5674     const JitCompileOptions options;
5675     MIRGraph graph(&alloc);
5676     CompileInfo compileInfo(locals.length());
5677     MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
5678                      IonOptimizations.get(OptimizationLevel::Wasm));
5679     if (moduleEnv.usesMemory()) {
5680       mir.initMinWasmHeapLength(moduleEnv.memory->initialLength32());
5681     }
5682 
5683     // Build MIR graph
5684     {
5685       FunctionCompiler f(moduleEnv, d, func, locals, mir);
5686       if (!f.init()) {
5687         return false;
5688       }
5689 
5690       if (!f.startBlock()) {
5691         return false;
5692       }
5693 
5694       if (!EmitBodyExprs(f)) {
5695         return false;
5696       }
5697 
5698       f.finish();
5699     }
5700 
5701     // Compile MIR graph
5702     {
5703       jit::SpewBeginWasmFunction(&mir, func.index);
5704       jit::AutoSpewEndFunction spewEndFunction(&mir);
5705 
5706       if (!OptimizeMIR(&mir)) {
5707         return false;
5708       }
5709 
5710       LIRGraph* lir = GenerateLIR(&mir);
5711       if (!lir) {
5712         return false;
5713       }
5714 
5715       CodeGenerator codegen(&mir, lir, &masm);
5716 
5717       BytecodeOffset prologueTrapOffset(func.lineOrBytecode);
5718       FuncOffsets offsets;
5719       ArgTypeVector args(funcType);
5720       if (!codegen.generateWasm(funcTypeId, prologueTrapOffset, args,
5721                                 trapExitLayout, trapExitLayoutNumWords,
5722                                 &offsets, &code->stackMaps)) {
5723         return false;
5724       }
5725 
5726       if (!code->codeRanges.emplaceBack(func.index, func.lineOrBytecode,
5727                                         offsets)) {
5728         return false;
5729       }
5730     }
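    // At this point the function's machine code, code ranges, and stackmaps
    // have been accumulated into 'masm' and 'code'; the bytes for all
    // functions are extracted at once by masm.finish() below.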
5731 
5732     JitSpew(JitSpew_Codegen,
5733             "# wasm::IonCompileFunctions: completed function index %d",
5734             (int)func.index);
5735     JitSpew(JitSpew_Codegen, "# ==");
5736     JitSpew(JitSpew_Codegen,
5737             "# ================================"
5738             "==================================");
5739     JitSpewCont(JitSpew_Codegen, "\n");
5740   }
5741 
5742   masm.finish();
5743   if (masm.oom()) {
5744     return false;
5745   }
5746 
5747   return code->swap(masm);
5748 }
5749 
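// Whether this platform has an Ion code generator for wasm; when this
// returns false, callers must use a different compiler (in practice the
// baseline compiler, see WasmBaselineCompile.h).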
5750 bool js::wasm::IonPlatformSupport() {
5751 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) ||    \
5752     defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
5753     defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_ARM64)
5754   return true;
5755 #else
5756   return false;
5757 #endif
5758 }
5759