/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_MacroAssembler_h
#define jit_MacroAssembler_h

#include "mozilla/EndianUtils.h"
#include "mozilla/MacroForEach.h"
#include "mozilla/MathAlgorithms.h"

#include "vm/Realm.h"

#if defined(JS_CODEGEN_X86)
#  include "jit/x86/MacroAssembler-x86.h"
#elif defined(JS_CODEGEN_X64)
#  include "jit/x64/MacroAssembler-x64.h"
#elif defined(JS_CODEGEN_ARM)
#  include "jit/arm/MacroAssembler-arm.h"
#elif defined(JS_CODEGEN_ARM64)
#  include "jit/arm64/MacroAssembler-arm64.h"
#elif defined(JS_CODEGEN_MIPS32)
#  include "jit/mips32/MacroAssembler-mips32.h"
#elif defined(JS_CODEGEN_MIPS64)
#  include "jit/mips64/MacroAssembler-mips64.h"
#elif defined(JS_CODEGEN_NONE)
#  include "jit/none/MacroAssembler-none.h"
#else
#  error "Unknown architecture!"
#endif
#include "jit/AtomicOp.h"
#include "jit/IonInstrumentation.h"
#include "jit/IonTypes.h"
#include "jit/JitRealm.h"
#include "jit/TemplateObject.h"
#include "jit/VMFunctions.h"
#include "util/Memory.h"
#include "vm/ProxyObject.h"
#include "vm/Shape.h"
#include "vm/TypedArrayObject.h"

// [SMDOC] MacroAssembler multi-platform overview
//
// * How to read/write MacroAssembler method declarations:
//
// The following macros are used to avoid #ifdefs around each method
// declaration of the MacroAssembler, and they also serve as a hint about the
// location of the implementations of each method.  For example, the following
// declaration
//
//   void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
//
// suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
// x86-shared/MacroAssembler-x86-shared.h, and also in arm/MacroAssembler-arm.h.
//
// - If there is no annotation, then there is only one generic definition in
//   MacroAssembler.cpp.
//
// - If the declaration is "inline", then the method definition(s) would be in
//   the "-inl.h" variant of the same file(s).
//
// The script check_macroassembler_style.py (which runs on every build) is
// used to verify that method definitions match the annotation on the method
// declarations.  If there is any difference, then you either forgot to define
// the method in one of the macro assemblers, or you forgot to update the
// annotation of the macro assembler declaration.
//
// Some convenient short-cuts are used to avoid repeating the same list of
// architectures on each method declaration, such as PER_ARCH and
// PER_SHARED_ARCH.
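//
// For example, given the ALL_SHARED_ARCH list defined below, the annotation
//
//   void Push(Register reg) PER_SHARED_ARCH;
//
// is shorthand for
//
//   void Push(Register reg) DEFINED_ON(arm, arm64, x86_shared, mips_shared);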
//
// Functions that are architecture-agnostic and the same for all
// architectures, but that must be defined inline *in this header* to avoid
// used-before-defined warnings/errors that would occur if the definitions
// were in MacroAssembler-inl.h, should use the OOL_IN_HEADER marker at the
// end of the declaration:
//
//   inline uint32_t framePushed() const OOL_IN_HEADER;
//
// Such functions should then be defined immediately after MacroAssembler's
// definition, for example:
//
//   //{{{ check_macroassembler_style
//   inline uint32_t
//   MacroAssembler::framePushed() const
//   {
//       return framePushed_;
//   }
//   //}}} check_macroassembler_style

#define ALL_ARCH mips32, mips64, arm, arm64, x86, x64
#define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared

// * How this macro works:
//
// DEFINED_ON is a macro which checks whether, for the current architecture,
// the method is defined on the macro assembler or not.
//
// For each architecture, we have a macro named DEFINED_ON_arch.  This macro is
// empty if this is not the current architecture.  Otherwise it must be either
// set to "define" or "crash" (only used for the none target so far).
//
// The DEFINED_ON macro maps the list of architecture names given as arguments
// to a list of macro names.  For example,
//
//   DEFINED_ON(arm, x86_shared)
//
// is expanded to
//
//   DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
//
// which is later expanded on ARM, x86 and x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
// to
//
//   define
//
// or, if the JIT is disabled or set to no architecture, to
//
//   crash
//
// or to nothing, if the current architecture is not listed in the list of
// arguments of DEFINED_ON.  Note that only one of the DEFINED_ON_arch macros
// contributes to the non-empty result: the macro of the current architecture,
// if it is listed in the arguments of DEFINED_ON.
//
// This result is appended to DEFINED_ON_RESULT_ before expanding the macro,
// which results in either no annotation, a MOZ_CRASH(), or a "= delete"
// annotation on the method declaration.
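//
// Concretely, with the definitions below, on an x86 build the declaration
//
//   void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
//
// expands to a plain declaration (DEFINED_ON_x86_shared maps to "define"),
// whereas on an arm64 build, where no listed architecture matches, it
// expands to
//
//   void Pop(FloatRegister t) = delete;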

#define DEFINED_ON_x86
#define DEFINED_ON_x64
#define DEFINED_ON_x86_shared
#define DEFINED_ON_arm
#define DEFINED_ON_arm64
#define DEFINED_ON_mips32
#define DEFINED_ON_mips64
#define DEFINED_ON_mips_shared
#define DEFINED_ON_none

// Specialize for each architecture.
#if defined(JS_CODEGEN_X86)
#  undef DEFINED_ON_x86
#  define DEFINED_ON_x86 define
#  undef DEFINED_ON_x86_shared
#  define DEFINED_ON_x86_shared define
#elif defined(JS_CODEGEN_X64)
#  undef DEFINED_ON_x64
#  define DEFINED_ON_x64 define
#  undef DEFINED_ON_x86_shared
#  define DEFINED_ON_x86_shared define
#elif defined(JS_CODEGEN_ARM)
#  undef DEFINED_ON_arm
#  define DEFINED_ON_arm define
#elif defined(JS_CODEGEN_ARM64)
#  undef DEFINED_ON_arm64
#  define DEFINED_ON_arm64 define
#elif defined(JS_CODEGEN_MIPS32)
#  undef DEFINED_ON_mips32
#  define DEFINED_ON_mips32 define
#  undef DEFINED_ON_mips_shared
#  define DEFINED_ON_mips_shared define
#elif defined(JS_CODEGEN_MIPS64)
#  undef DEFINED_ON_mips64
#  define DEFINED_ON_mips64 define
#  undef DEFINED_ON_mips_shared
#  define DEFINED_ON_mips_shared define
#elif defined(JS_CODEGEN_NONE)
#  undef DEFINED_ON_none
#  define DEFINED_ON_none crash
#else
#  error "Unknown architecture!"
#endif

#define DEFINED_ON_RESULT_crash \
  { MOZ_CRASH(); }
#define DEFINED_ON_RESULT_define
#define DEFINED_ON_RESULT_ = delete

#define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) Macro##Result
#define DEFINED_ON_DISPATCH_RESULT(...) \
  DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)

// We need to let the evaluation of MOZ_FOR_EACH terminate.
#define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult) \
  DEFINED_ON_DISPATCH_RESULT ParenResult
#define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult) \
  DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult)
#define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult) \
  DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult)

#define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_##Arch
#define DEFINED_ON_MAP_ON_ARCHS(ArchList) \
  DEFINED_ON_EXPAND_ARCH_RESULTS(         \
      (MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))

#define DEFINED_ON(...) DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))

#define PER_ARCH DEFINED_ON(ALL_ARCH)
#define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
#define OOL_IN_HEADER

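// IMM32_16ADJ positions a 16-bit immediate within a 32-bit word. Judging from
// the definition below, the shift on little-endian targets makes the 16
// interesting bits land on the same bytes in memory as on big-endian targets.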
#if MOZ_LITTLE_ENDIAN()
#  define IMM32_16ADJ(X) ((X) << 16)
#else
#  define IMM32_16ADJ(X) (X)
#endif

namespace js {
namespace jit {

// Defined in JitFrames.h
enum class ExitFrameType : uint8_t;

class AutoSaveLiveRegisters;

enum class CheckUnsafeCallWithABI {
  // Require the callee to use AutoUnsafeCallWithABI.
  Check,

  // We pushed an exit frame so this callWithABI can safely GC and walk the
  // stack.
  DontCheckHasExitFrame,

  // Don't check that this callWithABI uses AutoUnsafeCallWithABI, for
  // instance because we're calling a simple helper function (like malloc or
  // js_free) that we can't change and/or that we know won't GC.
  DontCheckOther,
};
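
// For example, a call to a helper that is known not to GC can opt out of the
// check via the callWithABI overload declared further below (illustrative):
//
//   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js_free), MoveOp::GENERAL,
//                    CheckUnsafeCallWithABI::DontCheckOther);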

enum class CharEncoding { Latin1, TwoByte };

// The public entrypoint for emitting assembly. Note that a MacroAssembler can
// use cx->lifoAlloc, so take care not to interleave masm use with other
// lifoAlloc use if one will be destroyed before the other.
class MacroAssembler : public MacroAssemblerSpecific {
  MacroAssembler* thisFromCtor() { return this; }

 public:
  /*
   * Base class for creating a branch.
   */
  class Branch {
    bool init_;
    Condition cond_;
    Label* jump_;
    Register reg_;

   public:
    Branch()
        : init_(false),
          cond_(Equal),
          jump_(nullptr),
          reg_(Register::FromCode(0))  // Quell compiler warnings.
    {}

    Branch(Condition cond, Register reg, Label* jump)
        : init_(true), cond_(cond), jump_(jump), reg_(reg) {}

    bool isInitialized() const { return init_; }

    Condition cond() const { return cond_; }

    Label* jump() const { return jump_; }

    Register reg() const { return reg_; }

    void invertCondition() { cond_ = InvertCondition(cond_); }

    void relink(Label* jump) { jump_ = jump; }
  };

  /*
   * Creates a branch based on a GCPtr.
   */
  class BranchGCPtr : public Branch {
    ImmGCPtr ptr_;

   public:
    BranchGCPtr() : Branch(), ptr_(ImmGCPtr(nullptr)) {}

    BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label* jump)
        : Branch(cond, reg, jump), ptr_(ptr) {}

    void emit(MacroAssembler& masm);
  };

  mozilla::Maybe<JitContext> jitContext_;
  mozilla::Maybe<AutoJitContextAlloc> alloc_;

 private:
  // Labels for handling exceptions and failures.
  NonAssertingLabel failureLabel_;

 protected:
  // Constructors are protected. Use one of the derived classes!
  MacroAssembler();

  // This constructor should only be used when there is no JitContext active
  // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
  explicit MacroAssembler(JSContext* cx);

  // wasm compilation handles its own JitContext-pushing
  struct WasmToken {};
  explicit MacroAssembler(WasmToken, TempAllocator& alloc);

 public:
  MoveResolver& moveResolver() {
    // As an optimization, the MoveResolver is a persistent data structure
    // shared between visitors in the CodeGenerator. This assertion
    // checks that state is not leaking from visitor to visitor
    // via an unresolved addMove().
    MOZ_ASSERT(moveResolver_.hasNoPendingMoves());
    return moveResolver_;
  }

  size_t instructionsSize() const { return size(); }

#ifdef JS_HAS_HIDDEN_SP
  void Push(RegisterOrSP reg);
#endif

  //{{{ check_macroassembler_decl_style
 public:
  // ===============================================================
  // MacroAssembler high-level usage.

  // Flushes the assembly buffer, on platforms that need it.
  void flush() PER_SHARED_ARCH;

  // Add a comment that is visible in the pretty printed assembly code.
  void comment(const char* msg) PER_SHARED_ARCH;

  // ===============================================================
  // Frame manipulation functions.

  inline uint32_t framePushed() const OOL_IN_HEADER;
  inline void setFramePushed(uint32_t framePushed) OOL_IN_HEADER;
  inline void adjustFrame(int32_t value) OOL_IN_HEADER;

  // Adjust the frame, to account for implicit modification of the stack
  // pointer, such that the callee can remove arguments on behalf of the
  // caller.
  inline void implicitPop(uint32_t bytes) OOL_IN_HEADER;

 private:
  // This field is used to statically (at compilation time) emulate a frame
  // pointer by keeping track of stack manipulations.
  //
  // It is maintained by all stack manipulation functions below.
  uint32_t framePushed_;

 public:
  // ===============================================================
  // Stack manipulation functions.

  void PushRegsInMask(LiveRegisterSet set)
      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
  void PushRegsInMask(LiveGeneralRegisterSet set);

  // Like PushRegsInMask, but instead of pushing the registers, store them to
  // |dest|. |dest| should point to the end of the reserved space, so the
  // first register will be stored at |dest.offset - sizeof(register)|.
  void storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

  void PopRegsInMask(LiveRegisterSet set);
  void PopRegsInMask(LiveGeneralRegisterSet set);
  void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

  void Push(const Operand op) DEFINED_ON(x86_shared);
  void Push(Register reg) PER_SHARED_ARCH;
  void Push(Register reg1, Register reg2, Register reg3, Register reg4)
      DEFINED_ON(arm64);
  void Push(const Imm32 imm) PER_SHARED_ARCH;
  void Push(const ImmWord imm) PER_SHARED_ARCH;
  void Push(const ImmPtr imm) PER_SHARED_ARCH;
  void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
  void Push(FloatRegister reg) PER_SHARED_ARCH;
  void PushBoxed(FloatRegister reg) PER_ARCH;
  void PushFlags() DEFINED_ON(x86_shared);
  void Push(jsid id, Register scratchReg);
  void Push(const Address& addr);
  void Push(TypedOrValueRegister v);
  void Push(const ConstantOrRegister& v);
  void Push(const ValueOperand& val);
  void Push(const Value& val);
  void Push(JSValueType type, Register reg);
  void Push(const Register64 reg);
  void PushValue(const Address& addr);
  void PushEmptyRooted(VMFunctionData::RootType rootType);
  inline CodeOffset PushWithPatch(ImmWord word);
  inline CodeOffset PushWithPatch(ImmPtr imm);

  void Pop(const Operand op) DEFINED_ON(x86_shared);
  void Pop(Register reg) PER_SHARED_ARCH;
  void Pop(FloatRegister t) PER_SHARED_ARCH;
  void Pop(const ValueOperand& val) PER_SHARED_ARCH;
  void PopFlags() DEFINED_ON(x86_shared);
  void PopStackPtr() DEFINED_ON(arm, mips_shared, x86_shared);
  void popRooted(VMFunctionData::RootType rootType, Register cellReg,
                 const ValueOperand& valueReg);

  // Move the stack pointer based on the requested amount.
  void adjustStack(int amount);
  void freeStack(uint32_t amount);

  // Warning: This method does not update the framePushed() counter.
  void freeStack(Register amount);

 private:
  // ===============================================================
  // Register allocation fields.
#ifdef DEBUG
  friend AutoRegisterScope;
  friend AutoFloatRegisterScope;
  // Used to track register scopes for debug builds.
  // Manipulated by the AutoGenericRegisterScope class.
  AllocatableRegisterSet debugTrackedRegisters_;
#endif  // DEBUG

 public:
  // ===============================================================
  // Simple call functions.

  // The returned CodeOffset is the assembler offset for the instruction
  // immediately following the call; that is, for the return point.
  CodeOffset call(Register reg) PER_SHARED_ARCH;
  CodeOffset call(Label* label) PER_SHARED_ARCH;

  void call(const Address& addr) PER_SHARED_ARCH;
  void call(ImmWord imm) PER_SHARED_ARCH;
  // Call a target native function, which is neither traceable nor movable.
  void call(ImmPtr imm) PER_SHARED_ARCH;
  CodeOffset call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
  inline CodeOffset call(const wasm::CallSiteDesc& desc,
                         wasm::SymbolicAddress imm);

  // Call a target JitCode, which must be traceable, and may be movable.
  void call(JitCode* c) PER_SHARED_ARCH;

  inline void call(TrampolinePtr code);

  inline CodeOffset call(const wasm::CallSiteDesc& desc, const Register reg);
  inline CodeOffset call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
  inline void call(const wasm::CallSiteDesc& desc, wasm::Trap trap);

  CodeOffset callWithPatch() PER_SHARED_ARCH;
  void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;

  // Push the return address and make a call. On platforms where this function
  // is not defined, push the link register (pushReturnAddress) at the entry
  // point of the callee.
  void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
  void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);

  // These do not adjust framePushed().
  void pushReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
  void popReturnAddress() DEFINED_ON(mips_shared, arm, arm64);

  // Useful for dealing with two-valued returns.
  void moveRegPair(Register src0, Register src1, Register dst0, Register dst1,
                   MoveOp::Type type = MoveOp::GENERAL);

 public:
  // ===============================================================
  // Patchable near/far jumps.

  // "Far jumps" provide the ability to jump to any uint32_t offset from any
  // other uint32_t offset without using a constant pool (thus returning a
  // simple CodeOffset instead of a CodeOffsetJump).
  CodeOffset farJumpWithPatch() PER_SHARED_ARCH;
  void patchFarJump(CodeOffset farJump, uint32_t targetOffset) PER_SHARED_ARCH;
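  //
  // A sketch of the intended patching pattern (variable names are
  // illustrative):
  //
  //   CodeOffset jump = masm.farJumpWithPatch();
  //   ... later, once the target offset within the code is known ...
  //   masm.patchFarJump(jump, targetOffset);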

  // Emit a nop that can be patched to and from a nop and a call with int32
  // relative displacement.
  CodeOffset nopPatchableToCall() PER_SHARED_ARCH;
  void nopPatchableToCall(const wasm::CallSiteDesc& desc);
  static void patchNopToCall(uint8_t* callsite,
                             uint8_t* target) PER_SHARED_ARCH;
  static void patchCallToNop(uint8_t* callsite) PER_SHARED_ARCH;

  // These methods are like movWithPatch/PatchDataWithValueCheck but allow
  // using pc-relative addressing on certain platforms (RIP-relative LEA on x64,
  // ADR instruction on arm64).
  //
  // Note: "Near" applies to ARM64 where the target must be within 1 MB (this is
  // release-asserted).
  CodeOffset moveNearAddressWithPatch(Register dest)
      DEFINED_ON(x86, x64, arm, arm64, mips_shared);
  static void patchNearAddressMove(CodeLocationLabel loc,
                                   CodeLocationLabel target)
      DEFINED_ON(x86, x64, arm, arm64, mips_shared);

 public:
  // ===============================================================
  // [SMDOC] JIT-to-C++ Function Calls (callWithABI)
  //
  // callWithABI is used to make a call using the standard C/C++ system ABI.
  //
  // callWithABI is a low-level interface for making calls; as such, every
  // call made with callWithABI should be organized in six steps: spilling
  // live registers, aligning the stack, listing the arguments of the called
  // function, calling a function pointer, extracting the returned value and
  // restoring live registers.
  //
  // A more detailed example of the six stages:
  //
  // 1) Saving of registers that are live. This will vary depending on which
  //    SpiderMonkey compiler you are working on. Registers that shouldn't be
  //    restored can be excluded.
  //
  //      LiveRegisterSet volatileRegs(...);
  //      volatileRegs.take(scratch);
  //      masm.PushRegsInMask(volatileRegs);
  //
  // 2) Align the stack to perform the call with the correct stack alignment.
  //
  //    When the stack pointer alignment is unknown and cannot be corrected
  //    when generating the code, setupUnalignedABICall must be used to
  //    dynamically align the stack pointer to the expectation of the ABI.
  //    When the stack pointer is known at JIT compilation time, the stack can
  //    be fixed manually and setupAlignedABICall and setupWasmABICall can be
  //    used.
  //
  //    setupWasmABICall is a special case of setupAlignedABICall as
  //    SpiderMonkey's WebAssembly implementation mostly follows the system
  //    ABI, except for float/double arguments, which always use floating
  //    point registers, even if this is not supported by the system ABI.
  //
  //      masm.setupUnalignedABICall(scratch);
  //
  // 3) Passing arguments. Arguments are passed left-to-right.
  //
  //      masm.passABIArg(scratch);
  //      masm.passABIArg(FloatOp0, MoveOp::DOUBLE);
  //
  //    Note how float register arguments are annotated with MoveOp::DOUBLE.
  //
  //    Concerning stack-relative addresses, see the note on passABIArg.
  //
  // 4) Make the call:
  //
  //      masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Callee));
  //
  //    In the case where the call returns a double, that needs to be
  //    indicated to the callWithABI like this:
  //
  //      masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ...), MoveOp::DOUBLE);
  //
  //    There are overloads to allow calls to registers and addresses.
  //
  // 5) Take care of the ReturnReg or ReturnDoubleReg
  //
  //      masm.mov(ReturnReg, scratch1);
  //
  // 6) Restore the potentially clobbered volatile registers
  //
  //      masm.PopRegsInMask(volatileRegs);
  //
  //    If expecting a returned value, this call should use
  //    PopRegsInMaskIgnore to filter out the registers which contain the
  //    returned value.
  //
  // Unless an exit frame is pushed prior to the setupABICall, the callee
  // should not GC. To ensure this is the case, callWithABI is instrumented so
  // that, in the default case, callees are checked for an
  // AutoUnsafeCallWithABI annotation on the stack.
  //
  // A callWithABI can opt out of checking, if for example it is known there
  // is an exit frame, or the callee is known not to GC.
  //
  // If your callee needs to be able to GC, consider using a VMFunction, or
  // create a fake exit frame, and instrument the TraceJitExitFrame
  // accordingly.
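  //
  // Putting the six steps together, a minimal sketch (register and callee
  // names are placeholders):
  //
  //   LiveRegisterSet volatileRegs(...);      // (1) spill live registers
  //   volatileRegs.take(scratch);
  //   masm.PushRegsInMask(volatileRegs);
  //   masm.setupUnalignedABICall(scratch);    // (2) align the stack
  //   masm.passABIArg(argReg);                // (3) list the arguments
  //   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Callee));  // (4) call
  //   masm.mov(ReturnReg, resultReg);         // (5) fetch the return value
  //   masm.PopRegsInMask(volatileRegs);       // (6) restore registers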

  // Set up a call to C/C++ code, given the assumption that the framePushed
  // accurately defines the state of the stack, and that the top of the stack
  // was properly aligned. Note that this only supports cdecl.
  void setupAlignedABICall();  // CRASH_ON(arm64)

  // As setupAlignedABICall, but for WebAssembly native ABI calls, which pass
  // through a builtin thunk that uses the wasm ABI. All the wasm ABI calls
  // can be native, since we always know the stack alignment a priori.
  void setupWasmABICall();  // CRASH_ON(arm64)

  // Set up an ABI call for when the alignment is not known. This may need a
  // scratch register.
  void setupUnalignedABICall(Register scratch) PER_ARCH;

  // Arguments must be assigned to a C/C++ call in order. They are moved
  // in parallel immediately before performing the call. This process may
  // temporarily use more stack, in which case esp-relative addresses will be
  // automatically adjusted. It is extremely important that esp-relative
  // addresses are computed *after* setupABICall(). Furthermore, no
  // operations should be emitted while setting arguments.
  void passABIArg(const MoveOperand& from, MoveOp::Type type);
  inline void passABIArg(Register reg);
  inline void passABIArg(FloatRegister reg, MoveOp::Type type);

  inline void callWithABI(
      void* fun, MoveOp::Type result = MoveOp::GENERAL,
      CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
  inline void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
  inline void callWithABI(const Address& fun,
                          MoveOp::Type result = MoveOp::GENERAL);

  CodeOffset callWithABI(wasm::BytecodeOffset offset, wasm::SymbolicAddress fun,
                         MoveOp::Type result = MoveOp::GENERAL);
  void callDebugWithABI(wasm::SymbolicAddress fun,
                        MoveOp::Type result = MoveOp::GENERAL);

 private:
  // Reinitialize the variables which have to be cleared before making a call
  // with callWithABI.
  void setupABICall();

  // Reserve the stack and resolve the argument moves.
  void callWithABIPre(uint32_t* stackAdjust,
                      bool callFromWasm = false) PER_ARCH;

  // Emits a call to a C/C++ function, resolving all argument moves.
  void callWithABINoProfiler(void* fun, MoveOp::Type result,
                             CheckUnsafeCallWithABI check);
  void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
  void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;

  // Restore the stack to its state before the setup function call.
  void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
                       bool callFromWasm = false) PER_ARCH;

  // Create the signature needed to decode the arguments of a native
  // function, when calling a function within the simulator.
  inline void appendSignatureType(MoveOp::Type type);
  inline ABIFunctionType signature() const;

  // Private variables used to handle moves between registers given as
  // arguments to passABIArg and the list of ABI registers expected for the
  // signature of the function.
  MoveResolver moveResolver_;

  // Architecture-specific implementation which specifies how registers and
  // stack offsets are used for calling a function.
  ABIArgGenerator abiArgs_;

#ifdef DEBUG
  // Flag used to assert that we use ABI functions in the right context.
  bool inCall_;
#endif

  // If set by setupUnalignedABICall, then callWithABI will pop the saved
  // stack pointer which was pushed onto the stack.
  bool dynamicAlignment_;

#ifdef JS_SIMULATOR
  // The signature is used to accumulate all types of arguments which are used
  // by the caller. This is used by the simulators to decode the arguments
  // properly, and cast the function pointer to the right type.
  uint32_t signature_;
#endif

 public:
  // ===============================================================
  // Jit Frames.
  //
  // These functions are used to build the content of the Jit frames.  See
  // CommonFrameLayout class, and all its derivatives. The content should be
  // pushed in the reverse order of the fields of the structures, such that
  // the structures can be used to interpret the content of the stack.

  // Call the Jit function, and push the return address (or let the callee
  // push the return address).
  //
  // These functions return the offset of the return address, in order to use
  // the return address to index the safepoints, which are used to list all
  // live registers.
  inline uint32_t callJitNoProfiler(Register callee);
  inline uint32_t callJit(Register callee);
  inline uint32_t callJit(JitCode* code);
  inline uint32_t callJit(TrampolinePtr code);
  inline uint32_t callJit(ImmPtr callee);

  // The frame descriptor is the second field of all Jit frames, pushed before
  // calling the Jit function.  It is a composite value defined in JitFrames.h
  inline void makeFrameDescriptor(Register frameSizeReg, FrameType type,
                                  uint32_t headerSize);

  // Push the frame descriptor, based on the statically known framePushed.
  inline void pushStaticFrameDescriptor(FrameType type, uint32_t headerSize);

  // Push the callee token of a JSFunction whose pointer is stored in the
  // |callee| register. The callee token is packed with a |constructing| flag
  // which indicates whether the JS function is called with "new" or not.
  inline void PushCalleeToken(Register callee, bool constructing);

  // Unpack a callee token located at the |token| address, and return the
  // JSFunction pointer in the |dest| register.
  inline void loadFunctionFromCalleeToken(Address token, Register dest);

  // This function emulates a call by pushing an exit frame on the stack,
  // except that the fake-function is inlined within the body of the caller.
  //
  // This function assumes that the current frame is an IonJS frame.
  //
  // This function returns the offset of the /fake/ return address, in order to
  // use the return address to index the safepoints, which are used to list all
  // live registers.
  //
  // This function should be balanced with a call to adjustStack, to pop the
  // exit frame and emulate the return statement of the inlined function.
  inline uint32_t buildFakeExitFrame(Register scratch);

 private:
  // This function is used by buildFakeExitFrame to push a fake return address
  // on the stack. This fake return address should never be used for resuming
  // any execution, and can even be an invalid pointer into the instruction
  // stream, as long as it does not alias any other.
  uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;

 public:
  // ===============================================================
  // Exit frame footer.
  //
  // When calling outside the Jit we push an exit frame. To mark the stack
  // correctly, we have to push additional information, called the Exit frame
  // footer, which is used to identify how the stack is marked.
  //
  // See JitFrames.h, and MarkJitExitFrame in JitFrames.cpp.

  // Push stub code and the VMFunctionData pointer.
  inline void enterExitFrame(Register cxreg, Register scratch,
                             const VMFunctionData* f);

  // Push an exit frame token to identify which fake exit frame this footer
  // corresponds to.
  inline void enterFakeExitFrame(Register cxreg, Register scratch,
                                 ExitFrameType type);

  // Push an exit frame token for a native call.
  inline void enterFakeExitFrameForNative(Register cxreg, Register scratch,
                                          bool isConstructing);

  // Pop ExitFrame footer in addition to the extra frame.
  inline void leaveExitFrame(size_t extraFrame = 0);

 private:
  // Save the top of the stack into JitActivation::packedExitFP of the
  // current thread, which should be the location of the latest exit frame.
  void linkExitFrame(Register cxreg, Register scratch);

 public:
  // ===============================================================
  // Move instructions

  inline void move64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void move64(Register64 src, Register64 dest) PER_ARCH;

  inline void moveFloat32ToGPR(FloatRegister src,
                               Register dest) PER_SHARED_ARCH;
  inline void moveGPRToFloat32(Register src,
                               FloatRegister dest) PER_SHARED_ARCH;

  inline void moveDoubleToGPR64(FloatRegister src, Register64 dest) PER_ARCH;
  inline void moveGPR64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;

  inline void move8SignExtend(Register src, Register dest) PER_SHARED_ARCH;
  inline void move16SignExtend(Register src, Register dest) PER_SHARED_ARCH;

  // move64To32 will clear the high bits of `dest` on 64-bit systems.
  inline void move64To32(Register64 src, Register dest) PER_ARCH;

  inline void move32To64ZeroExtend(Register src, Register64 dest) PER_ARCH;

  // On x86, `dest` must be edx:eax for the sign extend operations.
  inline void move8To64SignExtend(Register src, Register64 dest) PER_ARCH;
  inline void move16To64SignExtend(Register src, Register64 dest) PER_ARCH;
  inline void move32To64SignExtend(Register src, Register64 dest) PER_ARCH;

  inline void move32ZeroExtendToPtr(Register src, Register dest) PER_ARCH;

  // Copy a constant, typed-register, or a ValueOperand into a ValueOperand
  // destination.
  inline void moveValue(const ConstantOrRegister& src,
                        const ValueOperand& dest);
  void moveValue(const TypedOrValueRegister& src,
                 const ValueOperand& dest) PER_ARCH;
  void moveValue(const ValueOperand& src, const ValueOperand& dest) PER_ARCH;
  void moveValue(const Value& src, const ValueOperand& dest) PER_ARCH;

  // ===============================================================
  // Load instructions

  inline void load32SignExtendToPtr(const Address& src, Register dest) PER_ARCH;

  inline void loadAbiReturnAddress(Register dest) PER_SHARED_ARCH;

 public:
  // ===============================================================
  // Logical instructions

  inline void not32(Register reg) PER_SHARED_ARCH;

  inline void and32(Register src, Register dest) PER_SHARED_ARCH;
  inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void and32(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
  inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
  inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;

  inline void andPtr(Register src, Register dest) PER_ARCH;
  inline void andPtr(Imm32 imm, Register dest) PER_ARCH;

  inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void or64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void xor64(Imm64 imm, Register64 dest) PER_ARCH;

  inline void or32(Register src, Register dest) PER_SHARED_ARCH;
  inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;

  inline void orPtr(Register src, Register dest) PER_ARCH;
  inline void orPtr(Imm32 imm, Register dest) PER_ARCH;

  inline void and64(Register64 src, Register64 dest) PER_ARCH;
  inline void or64(Register64 src, Register64 dest) PER_ARCH;
  inline void xor64(Register64 src, Register64 dest) PER_ARCH;

  inline void xor32(Register src, Register dest) PER_SHARED_ARCH;
  inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;

  inline void xorPtr(Register src, Register dest) PER_ARCH;
  inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;

  inline void and64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64);
  inline void or64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
  inline void xor64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64);

  // ===============================================================
  // Swap instructions

  // Swap the two lower bytes and sign extend the result to 32-bit.
  inline void byteSwap16SignExtend(Register reg) PER_SHARED_ARCH;

  // Swap the two lower bytes and zero extend the result to 32-bit.
  inline void byteSwap16ZeroExtend(Register reg) PER_SHARED_ARCH;

  // Swap all four bytes in a 32-bit integer.
  inline void byteSwap32(Register reg) PER_SHARED_ARCH;

  // Swap all eight bytes in a 64-bit integer.
  inline void byteSwap64(Register64 reg) PER_ARCH;

  // ===============================================================
  // Arithmetic functions

  inline void add32(Register src, Register dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, const AbsoluteAddress& dest)
      DEFINED_ON(x86_shared);

  inline void addPtr(Register src, Register dest) PER_ARCH;
  inline void addPtr(Register src1, Register src2, Register dest)
      DEFINED_ON(arm64);
  inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
  inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
  inline void addPtr(ImmPtr imm, Register dest);
  inline void addPtr(Imm32 imm, const Address& dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64);
  inline void addPtr(Imm32 imm, const AbsoluteAddress& dest)
      DEFINED_ON(x86, x64);
  inline void addPtr(const Address& src, Register dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64);

  inline void add64(Register64 src, Register64 dest) PER_ARCH;
  inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
  inline void add64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void add64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64);

  inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  // Compute dest=SP-imm where dest is a pointer register and not SP.  The
  // offset returned from sub32FromStackPtrWithPatch() must be passed to
  // patchSub32FromStackPtr().
  inline CodeOffset sub32FromStackPtrWithPatch(Register dest) PER_ARCH;
  inline void patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) PER_ARCH;
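  //
  // For example (illustrative; the frame size is a placeholder):
  //
  //   CodeOffset offset = masm.sub32FromStackPtrWithPatch(dest);
  //   ... later, once the final frame size is known ...
  //   masm.patchSub32FromStackPtr(offset, Imm32(frameSize));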

  inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);

  inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
  inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
  inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;

  inline void subPtr(Register src, Register dest) PER_ARCH;
  inline void subPtr(Register src, const Address& dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64);
  inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
  inline void subPtr(const Address& addr, Register dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64);

  inline void sub64(Register64 src, Register64 dest) PER_ARCH;
  inline void sub64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void sub64(const Operand& src, Register64 dest)
      DEFINED_ON(x64, mips64);

  inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  // On x86-shared, srcDest must be eax and edx will be clobbered.
  inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;

  inline void mul32(Register src1, Register src2, Register dest, Label* onOver)
      DEFINED_ON(arm64);

  inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
  inline void mul64(const Operand& src, const Register64& dest,
                    const Register temp) DEFINED_ON(x64, mips64);
  inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
  inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
      DEFINED_ON(x86, x64, arm, mips32, mips64);
  inline void mul64(const Register64& src, const Register64& dest,
                    const Register temp) PER_ARCH;

  inline void mulBy3(Register src, Register dest) PER_ARCH;

  inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
      DEFINED_ON(mips_shared, arm, arm64, x86, x64);

  // Perform an integer division, returning the integer part rounded toward
  // zero. rhs must not be zero, and the division must not overflow.
  //
  // On x86_shared, srcDest must be eax and edx will be clobbered.
  // On ARM, the chip must have hardware division instructions.
  inline void quotient32(Register rhs, Register srcDest,
                         bool isUnsigned) PER_SHARED_ARCH;

  // Perform an integer division, returning the remainder part.
  // rhs must not be zero, and the division must not overflow.
  //
  // On x86_shared, srcDest must be eax and edx will be clobbered.
  // On ARM, the chip must have hardware division instructions.
  inline void remainder32(Register rhs, Register srcDest,
                          bool isUnsigned) PER_SHARED_ARCH;

  // Perform an integer division, returning the remainder part.
  // rhs must not be zero, and the division must not overflow.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  //
  // rhs is preserved, srcDest is clobbered.
  void flexibleRemainder32(Register rhs, Register srcDest, bool isUnsigned,
                           const LiveRegisterSet& volatileLiveRegs)
      DEFINED_ON(mips_shared, arm, arm64, x86_shared);

  // Perform an integer division, returning the integer part rounded toward
  // zero. rhs must not be zero, and the division must not overflow.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  //
  // rhs is preserved, srcDest is clobbered.
  void flexibleQuotient32(Register rhs, Register srcDest, bool isUnsigned,
                          const LiveRegisterSet& volatileLiveRegs)
      DEFINED_ON(mips_shared, arm, arm64, x86_shared);

  // Perform an integer division, returning the integer part rounded toward
  // zero. rhs must not be zero, and the division must not overflow. The
  // remainder is stored into the third argument register here.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  //
  // rhs is preserved, srcDest and remOutput are clobbered.
  void flexibleDivMod32(Register rhs, Register srcDest, Register remOutput,
                        bool isUnsigned,
                        const LiveRegisterSet& volatileLiveRegs)
      DEFINED_ON(mips_shared, arm, arm64, x86_shared);

  inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void inc64(AbsoluteAddress dest) PER_ARCH;

  inline void neg32(Register reg) PER_SHARED_ARCH;
  inline void neg64(Register64 reg) PER_ARCH;
  inline void negPtr(Register reg) PER_ARCH;

  inline void negateFloat(FloatRegister reg) PER_SHARED_ARCH;

  inline void negateDouble(FloatRegister reg) PER_SHARED_ARCH;

  inline void absFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void absDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void sqrtFloat32(FloatRegister src,
                          FloatRegister dest) PER_SHARED_ARCH;
  inline void sqrtDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  void floorFloat32ToInt32(FloatRegister src, Register dest,
                           Label* fail) PER_SHARED_ARCH;
  void floorDoubleToInt32(FloatRegister src, Register dest,
                          Label* fail) PER_SHARED_ARCH;

  void ceilFloat32ToInt32(FloatRegister src, Register dest,
                          Label* fail) PER_SHARED_ARCH;
  void ceilDoubleToInt32(FloatRegister src, Register dest,
                         Label* fail) PER_SHARED_ARCH;

  void roundFloat32ToInt32(FloatRegister src, Register dest, FloatRegister temp,
                           Label* fail) PER_SHARED_ARCH;
  void roundDoubleToInt32(FloatRegister src, Register dest, FloatRegister temp,
                          Label* fail) PER_SHARED_ARCH;

  // srcDest = {min,max}{Float32,Double}(srcDest, other)
  // For min and max, handle NaN specially if handleNaN is true.

  inline void minFloat32(FloatRegister other, FloatRegister srcDest,
                         bool handleNaN) PER_SHARED_ARCH;
  inline void minDouble(FloatRegister other, FloatRegister srcDest,
                        bool handleNaN) PER_SHARED_ARCH;

  inline void maxFloat32(FloatRegister other, FloatRegister srcDest,
                         bool handleNaN) PER_SHARED_ARCH;
  inline void maxDouble(FloatRegister other, FloatRegister srcDest,
                        bool handleNaN) PER_SHARED_ARCH;

  // Compute |pow(base, power)| and store the result in |dest|. If the result
  // exceeds the int32 range, jumps to |onOver|.
  // |base| and |power| are preserved, the other input registers are clobbered.
  void pow32(Register base, Register power, Register dest, Register temp1,
             Register temp2, Label* onOver);

  // ===============================================================
  // Shift functions

  // For shift-by-register there may be platform-specific variations, for
  // example, x86 will perform the shift mod 32 but ARM will perform the shift
  // mod 256.
  //
  // For shift-by-immediate the platform assembler may restrict the immediate,
  // for example, the ARM assembler requires the count for 32-bit shifts to be
  // in the range [0,31].

  inline void lshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
  inline void rshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
  inline void rshift32Arithmetic(Imm32 shift, Register srcDest) PER_SHARED_ARCH;

  inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void rshiftPtr(Imm32 imm, Register src, Register dest)
      DEFINED_ON(arm64);
  inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;

  inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
  inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
  inline void rshift64Arithmetic(Imm32 imm, Register64 dest) PER_ARCH;

  // On x86_shared these have the constraint that shift must be in CL.
  inline void lshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
  inline void rshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
  inline void rshift32Arithmetic(Register shift,
                                 Register srcDest) PER_SHARED_ARCH;

  // These variants may use the stack, but do not have the above constraint.
  inline void flexibleLshift32(Register shift,
                               Register srcDest) PER_SHARED_ARCH;
  inline void flexibleRshift32(Register shift,
                               Register srcDest) PER_SHARED_ARCH;
  inline void flexibleRshift32Arithmetic(Register shift,
                                         Register srcDest) PER_SHARED_ARCH;

  inline void lshift64(Register shift, Register64 srcDest) PER_ARCH;
  inline void rshift64(Register shift, Register64 srcDest) PER_ARCH;
  inline void rshift64Arithmetic(Register shift, Register64 srcDest) PER_ARCH;

  // ===============================================================
  // Rotation functions
  // Note: - on x86 and x64 the count register must be in CL.
  //       - on x64 the temp register should be InvalidReg.

  inline void rotateLeft(Imm32 count, Register input,
                         Register dest) PER_SHARED_ARCH;
  inline void rotateLeft(Register count, Register input,
                         Register dest) PER_SHARED_ARCH;
  inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest)
      DEFINED_ON(x64);
  inline void rotateLeft64(Register count, Register64 input, Register64 dest)
      DEFINED_ON(x64);
  inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest,
                           Register temp) PER_ARCH;
  inline void rotateLeft64(Register count, Register64 input, Register64 dest,
                           Register temp) PER_ARCH;

  inline void rotateRight(Imm32 count, Register input,
                          Register dest) PER_SHARED_ARCH;
  inline void rotateRight(Register count, Register input,
                          Register dest) PER_SHARED_ARCH;
  inline void rotateRight64(Imm32 count, Register64 input, Register64 dest)
      DEFINED_ON(x64);
  inline void rotateRight64(Register count, Register64 input, Register64 dest)
      DEFINED_ON(x64);
  inline void rotateRight64(Imm32 count, Register64 input, Register64 dest,
                            Register temp) PER_ARCH;
  inline void rotateRight64(Register count, Register64 input, Register64 dest,
                            Register temp) PER_ARCH;

  // ===============================================================
  // Bit counting functions

  // knownNotZero may be true only if the src is known not to be zero.
  inline void clz32(Register src, Register dest,
                    bool knownNotZero) PER_SHARED_ARCH;
  inline void ctz32(Register src, Register dest,
                    bool knownNotZero) PER_SHARED_ARCH;

  inline void clz64(Register64 src, Register dest) PER_ARCH;
  inline void ctz64(Register64 src, Register dest) PER_ARCH;

  // On x86_shared, temp may be Invalid only if the chip has the POPCNT
  // instruction. On ARM, temp may never be Invalid.
  inline void popcnt32(Register src, Register dest,
                       Register temp) PER_SHARED_ARCH;

  // temp may be invalid only if the chip has the POPCNT instruction.
  inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH;

  // ===============================================================
  // Condition functions

  template <typename T1, typename T2>
  inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);

  template <typename T1, typename T2>
  inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) PER_ARCH;

  // ===============================================================
  // Branch functions

  template <class L>
  inline void branch32(Condition cond, Register lhs, Register rhs,
                       L label) PER_SHARED_ARCH;
  template <class L>
  inline void branch32(Condition cond, Register lhs, Imm32 rhs,
                       L label) PER_SHARED_ARCH;

  inline void branch32(Condition cond, Register lhs, const Address& rhs,
                       Label* label) DEFINED_ON(arm64);

  inline void branch32(Condition cond, const Address& lhs, Register rhs,
                       Label* label) PER_SHARED_ARCH;
  inline void branch32(Condition cond, const Address& lhs, Imm32 rhs,
                       Label* label) PER_SHARED_ARCH;

  inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs,
                       Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
  inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs,
                       Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64);

  inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs,
                       Label* label) DEFINED_ON(x86_shared);
  inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
                       Label* label) PER_SHARED_ARCH;

  inline void branch32(Condition cond, const Operand& lhs, Register rhs,
                       Label* label) DEFINED_ON(x86_shared);
  inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs,
                       Label* label) DEFINED_ON(x86_shared);

  inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs,
                       Label* label)
      DEFINED_ON(arm, arm64, mips_shared, x86, x64);

  // The supported conditions are Equal, NotEqual, LessThan(orEqual),
  // GreaterThan(orEqual), Below(orEqual) and Above(orEqual). When a fail label
  // is not defined, execution will fall through to the next instruction;
  // otherwise it will jump to the fail label.
  inline void branch64(Condition cond, Register64 lhs, Imm64 val,
                       Label* success, Label* fail = nullptr) PER_ARCH;
  inline void branch64(Condition cond, Register64 lhs, Register64 rhs,
                       Label* success, Label* fail = nullptr) PER_ARCH;
  // On x86 and x64, the NotEqual and Equal conditions are allowed for the
  // branch64 variants with Address as lhs. On other platforms, only the
  // NotEqual condition is allowed.
  inline void branch64(Condition cond, const Address& lhs, Imm64 val,
                       Label* label) PER_ARCH;
1200 
1201   // Compare the value at |lhs| with the value at |rhs|.  The scratch
1202   // register *must not* be the base of |lhs| or |rhs|.
1203   inline void branch64(Condition cond, const Address& lhs, const Address& rhs,
1204                        Register scratch, Label* label) PER_ARCH;
1205 
1206   template <class L>
1207   inline void branchPtr(Condition cond, Register lhs, Register rhs,
1208                         L label) PER_SHARED_ARCH;
1209   inline void branchPtr(Condition cond, Register lhs, Imm32 rhs,
1210                         Label* label) PER_SHARED_ARCH;
1211   inline void branchPtr(Condition cond, Register lhs, ImmPtr rhs,
1212                         Label* label) PER_SHARED_ARCH;
1213   inline void branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
1214                         Label* label) PER_SHARED_ARCH;
1215   inline void branchPtr(Condition cond, Register lhs, ImmWord rhs,
1216                         Label* label) PER_SHARED_ARCH;
1217 
1218   template <class L>
1219   inline void branchPtr(Condition cond, const Address& lhs, Register rhs,
1220                         L label) PER_SHARED_ARCH;
1221   inline void branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
1222                         Label* label) PER_SHARED_ARCH;
1223   inline void branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
1224                         Label* label) PER_SHARED_ARCH;
1225   inline void branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
1226                         Label* label) PER_SHARED_ARCH;
1227 
1228   inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs,
1229                         Label* label) PER_SHARED_ARCH;
1230 
1231   inline void branchPtr(Condition cond, const AbsoluteAddress& lhs,
1232                         Register rhs, Label* label)
1233       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1234   inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs,
1235                         Label* label)
1236       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1237 
1238   inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs,
1239                         Label* label)
1240       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1241 
1242   // Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its
1243   // chunk trailer, or nullptr if it is in the tenured heap.
1244   void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH;
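
  // For example, a nursery check might use it like this (an illustrative
  // sketch; |masm|, the registers, and the label are hypothetical):
  //
  //   masm.loadStoreBuffer(cellReg, bufferReg);
  //   // bufferReg is nullptr for tenured cells.
  //   masm.branchTestPtr(Assembler::NonZero, bufferReg, bufferReg,
  //                      &inNursery);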
1245 
1246   void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
1247                                Label* label)
1248       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1249   void branchPtrInNurseryChunk(Condition cond, const Address& address,
1250                                Register temp, Label* label) DEFINED_ON(x86);
1251   void branchValueIsNurseryCell(Condition cond, const Address& address,
1252                                 Register temp, Label* label) PER_ARCH;
1253   void branchValueIsNurseryCell(Condition cond, ValueOperand value,
1254                                 Register temp, Label* label) PER_ARCH;
1255 
  // This function compares a Value (lhs), which holds a private pointer boxed
  // inside a js::Value, with a raw pointer (rhs).
1258   inline void branchPrivatePtr(Condition cond, const Address& lhs, Register rhs,
1259                                Label* label) PER_ARCH;
1260 
1261   inline void branchFloat(DoubleCondition cond, FloatRegister lhs,
1262                           FloatRegister rhs, Label* label) PER_SHARED_ARCH;
1263 
  // Truncate a double/float32 to int32; when the value doesn't fit in an
  // int32, jump to the failure label. This particular variant is allowed to
  // return the value modulo 2**32, which isn't implemented on all
  // architectures. E.g. the x64 variants will do this only in the int64_t
  // range.
1268   inline void branchTruncateFloat32MaybeModUint32(FloatRegister src,
1269                                                   Register dest, Label* fail)
1270       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1271   inline void branchTruncateDoubleMaybeModUint32(FloatRegister src,
1272                                                  Register dest, Label* fail)
1273       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
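
  // For example (an illustrative sketch; |masm|, |fpReg|, |intReg|, and the
  // label are hypothetical):
  //
  //   // After this, intReg holds the truncated value, possibly modulo 2**32;
  //   // out-of-range inputs may instead branch to |notAnInt32|.
  //   masm.branchTruncateDoubleMaybeModUint32(fpReg, intReg, &notAnInt32);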
1274 
  // Truncate a double/float32 to intptr; when the value doesn't fit, jump to
  // the failure label.
1277   inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest,
1278                                          Label* fail) DEFINED_ON(x86, x64);
1279   inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest,
1280                                         Label* fail) DEFINED_ON(x86, x64);
1281 
  // Truncate a double/float32 to int32; when the value doesn't fit, jump to
  // the failure label.
1284   inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest,
1285                                            Label* fail)
1286       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1287   inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest,
1288                                           Label* fail)
1289       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1290 
1291   inline void branchDouble(DoubleCondition cond, FloatRegister lhs,
1292                            FloatRegister rhs, Label* label) PER_SHARED_ARCH;
1293 
1294   inline void branchDoubleNotInInt64Range(Address src, Register temp,
1295                                           Label* fail);
1296   inline void branchDoubleNotInUInt64Range(Address src, Register temp,
1297                                            Label* fail);
1298   inline void branchFloat32NotInInt64Range(Address src, Register temp,
1299                                            Label* fail);
1300   inline void branchFloat32NotInUInt64Range(Address src, Register temp,
1301                                             Label* fail);
1302 
1303   template <typename T>
1304   inline void branchAdd32(Condition cond, T src, Register dest,
1305                           Label* label) PER_SHARED_ARCH;
1306   template <typename T>
1307   inline void branchSub32(Condition cond, T src, Register dest,
1308                           Label* label) PER_SHARED_ARCH;
1309   template <typename T>
1310   inline void branchMul32(Condition cond, T src, Register dest,
1311                           Label* label) PER_SHARED_ARCH;
1312   template <typename T>
1313   inline void branchRshift32(Condition cond, T src, Register dest,
1314                              Label* label) PER_SHARED_ARCH;
1315 
1316   inline void branchNeg32(Condition cond, Register reg,
1317                           Label* label) PER_SHARED_ARCH;
1318 
1319   inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
1320                            Label* label) PER_SHARED_ARCH;
1321 
1322   template <class L>
1323   inline void branchTest32(Condition cond, Register lhs, Register rhs,
1324                            L label) PER_SHARED_ARCH;
1325   template <class L>
1326   inline void branchTest32(Condition cond, Register lhs, Imm32 rhs,
1327                            L label) PER_SHARED_ARCH;
  inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
                           Label* label) PER_SHARED_ARCH;
1330   inline void branchTest32(Condition cond, const AbsoluteAddress& lhs,
1331                            Imm32 rhs, Label* label)
1332       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1333 
1334   template <class L>
1335   inline void branchTestPtr(Condition cond, Register lhs, Register rhs,
1336                             L label) PER_SHARED_ARCH;
1337   inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
1338                             Label* label) PER_SHARED_ARCH;
1339   inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs,
1340                             Label* label) PER_SHARED_ARCH;
1341 
1342   template <class L>
1343   inline void branchTest64(Condition cond, Register64 lhs, Register64 rhs,
1344                            Register temp, L label) PER_ARCH;
1345 
1346   // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
1347   template <class L>
1348   inline void branchIfFalseBool(Register reg, L label);
1349 
1350   // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
1351   inline void branchIfTrueBool(Register reg, Label* label);
1352 
1353   inline void branchIfRope(Register str, Label* label);
1354   inline void branchIfNotRope(Register str, Label* label);
1355 
1356   inline void branchLatin1String(Register string, Label* label);
1357   inline void branchTwoByteString(Register string, Label* label);
1358 
1359   inline void branchIfNegativeBigInt(Register bigInt, Label* label);
1360 
1361   inline void branchTestFunctionFlags(Register fun, uint32_t flags,
1362                                       Condition cond, Label* label);
1363 
1364   inline void branchIfFunctionHasNoJitEntry(Register fun, bool isConstructing,
1365                                             Label* label);
1366   inline void branchIfInterpreted(Register fun, bool isConstructing,
1367                                   Label* label);
1368 
1369   inline void branchIfScriptHasJitScript(Register script, Label* label);
1370   inline void branchIfScriptHasNoJitScript(Register script, Label* label);
1371   inline void loadJitScript(Register script, Register dest);
1372 
  // Loads the function length. This handles interpreted, native, and bound
  // functions. The caller is responsible for checking that the
  // INTERPRETED_LAZY and RESOLVED_LENGTH flags are not set.
1376   void loadFunctionLength(Register func, Register funFlags, Register output,
1377                           Label* slowPath);
1378 
1379   inline void branchFunctionKind(Condition cond,
1380                                  FunctionFlags::FunctionKind kind, Register fun,
1381                                  Register scratch, Label* label);
1382 
1383   inline void branchIfObjectEmulatesUndefined(Register objReg, Register scratch,
1384                                               Label* slowCheck, Label* label);
1385 
1386   // For all methods below: spectreRegToZero is a register that will be zeroed
1387   // on speculatively executed code paths (when the branch should be taken but
1388   // branch prediction speculates it isn't). Usually this will be the object
1389   // register but the caller may pass a different register.
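  //
  // For example (an illustrative sketch; |masm|, |obj|, |scratch|, and the
  // label are hypothetical):
  //
  //   // Branches to |&fail| if obj's class is not ArrayObject::class_, and
  //   // zeroes |obj| on misspeculated paths.
  //   masm.branchTestObjClass(Assembler::NotEqual, obj, &ArrayObject::class_,
  //                           scratch, obj, &fail);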
1390 
1391   inline void branchTestObjClass(Condition cond, Register obj,
1392                                  const JSClass* clasp, Register scratch,
1393                                  Register spectreRegToZero, Label* label);
1394   inline void branchTestObjClassNoSpectreMitigations(Condition cond,
1395                                                      Register obj,
1396                                                      const JSClass* clasp,
1397                                                      Register scratch,
1398                                                      Label* label);
1399 
1400   inline void branchTestObjClass(Condition cond, Register obj,
1401                                  const Address& clasp, Register scratch,
1402                                  Register spectreRegToZero, Label* label);
1403   inline void branchTestObjClassNoSpectreMitigations(Condition cond,
1404                                                      Register obj,
1405                                                      const Address& clasp,
1406                                                      Register scratch,
1407                                                      Label* label);
1408 
1409   inline void branchTestObjShape(Condition cond, Register obj,
1410                                  const Shape* shape, Register scratch,
1411                                  Register spectreRegToZero, Label* label);
1412   inline void branchTestObjShapeNoSpectreMitigations(Condition cond,
1413                                                      Register obj,
1414                                                      const Shape* shape,
1415                                                      Label* label);
1416 
1417   inline void branchTestObjShape(Condition cond, Register obj, Register shape,
1418                                  Register scratch, Register spectreRegToZero,
1419                                  Label* label);
1420   inline void branchTestObjShapeNoSpectreMitigations(Condition cond,
1421                                                      Register obj,
1422                                                      Register shape,
1423                                                      Label* label);
1424 
1425   inline void branchTestObjGroup(Condition cond, Register obj,
1426                                  const ObjectGroup* group, Register scratch,
1427                                  Register spectreRegToZero, Label* label);
1428   inline void branchTestObjGroupNoSpectreMitigations(Condition cond,
1429                                                      Register obj,
1430                                                      const ObjectGroup* group,
1431                                                      Label* label);
1432 
1433   inline void branchTestObjGroup(Condition cond, Register obj, Register group,
1434                                  Register scratch, Register spectreRegToZero,
1435                                  Label* label);
1436   inline void branchTestObjGroupNoSpectreMitigations(Condition cond,
1437                                                      Register obj,
1438                                                      Register group,
1439                                                      Label* label);
1440 
1441   void branchTestObjGroup(Condition cond, Register obj, const Address& group,
1442                           Register scratch, Register spectreRegToZero,
1443                           Label* label);
1444   void branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj,
1445                                               const Address& group,
1446                                               Register scratch, Label* label);
1447 
1448   // TODO: audit/fix callers to be Spectre safe.
1449   inline void branchTestObjShapeUnsafe(Condition cond, Register obj,
1450                                        Register shape, Label* label);
1451   inline void branchTestObjGroupUnsafe(Condition cond, Register obj,
1452                                        const ObjectGroup* group, Label* label);
1453 
1454   void branchTestObjCompartment(Condition cond, Register obj,
1455                                 const Address& compartment, Register scratch,
1456                                 Label* label);
1457   void branchTestObjCompartment(Condition cond, Register obj,
1458                                 const JS::Compartment* compartment,
1459                                 Register scratch, Label* label);
1460   void branchIfObjGroupHasNoAddendum(Register obj, Register scratch,
1461                                      Label* label);
1462   void branchIfPretenuredGroup(Register group, Label* label);
1463   void branchIfPretenuredGroup(const ObjectGroup* group, Register scratch,
1464                                Label* label);
1465 
1466   void branchIfNonNativeObj(Register obj, Register scratch, Label* label);
1467 
1468   void branchIfInlineTypedObject(Register obj, Register scratch, Label* label);
1469 
1470   inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);
1471 
1472   inline void branchTestObjectIsProxy(bool proxy, Register object,
1473                                       Register scratch, Label* label);
1474 
1475   inline void branchTestProxyHandlerFamily(Condition cond, Register proxy,
1476                                            Register scratch,
1477                                            const void* handlerp, Label* label);
1478 
1479   void copyObjGroupNoPreBarrier(Register sourceObj, Register destObj,
1480                                 Register scratch);
1481 
1482   void loadTypedObjectDescr(Register obj, Register dest);
1483   void loadTypedObjectLength(Register obj, Register dest);
1484 
  // Emit a type-case branch on a tag match only if the type tag of the
  // definition might actually be that type.
1487   void maybeBranchTestType(MIRType type, MDefinition* maybeDef, Register tag,
1488                            Label* label);
1489 
1490   inline void branchTestNeedsIncrementalBarrier(Condition cond, Label* label);
1491   inline void branchTestNeedsIncrementalBarrierAnyZone(Condition cond,
1492                                                        Label* label,
1493                                                        Register scratch);
1494 
  // Perform a type test on the tag of a Value (32-bit boxing), or on the
  // tagged value (64-bit boxing).
1497   inline void branchTestUndefined(Condition cond, Register tag,
1498                                   Label* label) PER_SHARED_ARCH;
1499   inline void branchTestInt32(Condition cond, Register tag,
1500                               Label* label) PER_SHARED_ARCH;
1501   inline void branchTestDouble(Condition cond, Register tag, Label* label)
1502       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1503   inline void branchTestNumber(Condition cond, Register tag,
1504                                Label* label) PER_SHARED_ARCH;
1505   inline void branchTestBoolean(Condition cond, Register tag,
1506                                 Label* label) PER_SHARED_ARCH;
1507   inline void branchTestString(Condition cond, Register tag,
1508                                Label* label) PER_SHARED_ARCH;
1509   inline void branchTestSymbol(Condition cond, Register tag,
1510                                Label* label) PER_SHARED_ARCH;
1511   inline void branchTestBigInt(Condition cond, Register tag,
1512                                Label* label) PER_SHARED_ARCH;
1513   inline void branchTestNull(Condition cond, Register tag,
1514                              Label* label) PER_SHARED_ARCH;
1515   inline void branchTestObject(Condition cond, Register tag,
1516                                Label* label) PER_SHARED_ARCH;
1517   inline void branchTestPrimitive(Condition cond, Register tag,
1518                                   Label* label) PER_SHARED_ARCH;
1519   inline void branchTestMagic(Condition cond, Register tag,
1520                               Label* label) PER_SHARED_ARCH;
1521 
  // Perform a type test on a Value, addressed by Address or BaseIndex, or
  // loaded into a ValueOperand.
  // The BaseIndex and ValueOperand variants clobber the ScratchReg on x64.
  // All variants clobber the ScratchReg on arm64.
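  //
  // For example (an illustrative sketch; |masm|, the ValueOperand |val|, and
  // the label are hypothetical):
  //
  //   masm.branchTestInt32(Assembler::NotEqual, val, &notInt32);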
1526   inline void branchTestUndefined(Condition cond, const Address& address,
1527                                   Label* label) PER_SHARED_ARCH;
1528   inline void branchTestUndefined(Condition cond, const BaseIndex& address,
1529                                   Label* label) PER_SHARED_ARCH;
1530   inline void branchTestUndefined(Condition cond, const ValueOperand& value,
1531                                   Label* label)
1532       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1533 
1534   inline void branchTestInt32(Condition cond, const Address& address,
1535                               Label* label) PER_SHARED_ARCH;
1536   inline void branchTestInt32(Condition cond, const BaseIndex& address,
1537                               Label* label) PER_SHARED_ARCH;
1538   inline void branchTestInt32(Condition cond, const ValueOperand& value,
1539                               Label* label)
1540       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1541 
1542   inline void branchTestDouble(Condition cond, const Address& address,
1543                                Label* label) PER_SHARED_ARCH;
1544   inline void branchTestDouble(Condition cond, const BaseIndex& address,
1545                                Label* label) PER_SHARED_ARCH;
1546   inline void branchTestDouble(Condition cond, const ValueOperand& value,
1547                                Label* label)
1548       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1549 
1550   inline void branchTestNumber(Condition cond, const ValueOperand& value,
1551                                Label* label)
1552       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1553 
1554   inline void branchTestBoolean(Condition cond, const Address& address,
1555                                 Label* label) PER_SHARED_ARCH;
1556   inline void branchTestBoolean(Condition cond, const BaseIndex& address,
1557                                 Label* label) PER_SHARED_ARCH;
1558   inline void branchTestBoolean(Condition cond, const ValueOperand& value,
1559                                 Label* label)
1560       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1561 
1562   inline void branchTestString(Condition cond, const Address& address,
1563                                Label* label) PER_SHARED_ARCH;
1564   inline void branchTestString(Condition cond, const BaseIndex& address,
1565                                Label* label) PER_SHARED_ARCH;
1566   inline void branchTestString(Condition cond, const ValueOperand& value,
1567                                Label* label)
1568       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1569 
1570   inline void branchTestSymbol(Condition cond, const Address& address,
1571                                Label* label) PER_SHARED_ARCH;
1572   inline void branchTestSymbol(Condition cond, const BaseIndex& address,
1573                                Label* label) PER_SHARED_ARCH;
1574   inline void branchTestSymbol(Condition cond, const ValueOperand& value,
1575                                Label* label)
1576       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1577 
1578   inline void branchTestBigInt(Condition cond, const Address& address,
1579                                Label* label) PER_SHARED_ARCH;
1580   inline void branchTestBigInt(Condition cond, const BaseIndex& address,
1581                                Label* label) PER_SHARED_ARCH;
1582   inline void branchTestBigInt(Condition cond, const ValueOperand& value,
1583                                Label* label)
1584       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1585 
1586   inline void branchTestNull(Condition cond, const Address& address,
1587                              Label* label) PER_SHARED_ARCH;
1588   inline void branchTestNull(Condition cond, const BaseIndex& address,
1589                              Label* label) PER_SHARED_ARCH;
1590   inline void branchTestNull(Condition cond, const ValueOperand& value,
1591                              Label* label)
1592       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1593 
1594   // Clobbers the ScratchReg on x64.
1595   inline void branchTestObject(Condition cond, const Address& address,
1596                                Label* label) PER_SHARED_ARCH;
1597   inline void branchTestObject(Condition cond, const BaseIndex& address,
1598                                Label* label) PER_SHARED_ARCH;
1599   inline void branchTestObject(Condition cond, const ValueOperand& value,
1600                                Label* label)
1601       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1602 
1603   inline void branchTestGCThing(Condition cond, const Address& address,
1604                                 Label* label) PER_SHARED_ARCH;
1605   inline void branchTestGCThing(Condition cond, const BaseIndex& address,
1606                                 Label* label) PER_SHARED_ARCH;
1607   inline void branchTestGCThing(Condition cond, const ValueOperand& value,
1608                                 Label* label) PER_SHARED_ARCH;
1609 
1610   inline void branchTestPrimitive(Condition cond, const ValueOperand& value,
1611                                   Label* label)
1612       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1613 
1614   inline void branchTestMagic(Condition cond, const Address& address,
1615                               Label* label) PER_SHARED_ARCH;
1616   inline void branchTestMagic(Condition cond, const BaseIndex& address,
1617                               Label* label) PER_SHARED_ARCH;
1618   template <class L>
1619   inline void branchTestMagic(Condition cond, const ValueOperand& value,
1620                               L label)
1621       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1622 
1623   inline void branchTestMagic(Condition cond, const Address& valaddr,
1624                               JSWhyMagic why, Label* label) PER_ARCH;
1625 
1626   inline void branchTestMagicValue(Condition cond, const ValueOperand& val,
1627                                    JSWhyMagic why, Label* label);
1628 
1629   void branchTestValue(Condition cond, const ValueOperand& lhs,
1630                        const Value& rhs, Label* label) PER_ARCH;
1631 
  // Checks whether the given Value evaluates to true or false in a boolean
  // context. The type of the value should match the type of the method.
1634   inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value,
1635                                     Label* label)
1636       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1637   inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg,
1638                                      Label* label) PER_SHARED_ARCH;
1639   inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value,
1640                                       Label* label) PER_ARCH;
1641   inline void branchTestStringTruthy(bool truthy, const ValueOperand& value,
1642                                      Label* label)
1643       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1644   inline void branchTestBigIntTruthy(bool truthy, const ValueOperand& value,
1645                                      Label* label)
1646       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
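
  // For example, to jump when a Value known to hold a boolean is false (an
  // illustrative sketch; |masm|, |val|, and the label are hypothetical):
  //
  //   masm.branchTestBooleanTruthy(false, val, &isFalse);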
1647 
  // Create an unconditional branch to the address given as an argument.
1649   inline void branchToComputedAddress(const BaseIndex& address) PER_ARCH;
1650 
1651  private:
1652   template <typename T, typename S, typename L>
1653   inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label)
1654       DEFINED_ON(x86_shared);
1655 
1656   void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
1657       DEFINED_ON(x86);
1658   template <typename T>
1659   void branchValueIsNurseryCellImpl(Condition cond, const T& value,
1660                                     Register temp, Label* label)
1661       DEFINED_ON(arm64, x64, mips64);
1662 
1663   template <typename T>
1664   inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
1665       DEFINED_ON(arm, arm64, x86_shared);
1666   template <typename T>
1667   inline void branchTestInt32Impl(Condition cond, const T& t, Label* label)
1668       DEFINED_ON(arm, arm64, x86_shared);
1669   template <typename T>
1670   inline void branchTestDoubleImpl(Condition cond, const T& t, Label* label)
1671       DEFINED_ON(arm, arm64, x86_shared);
1672   template <typename T>
1673   inline void branchTestNumberImpl(Condition cond, const T& t, Label* label)
1674       DEFINED_ON(arm, arm64, x86_shared);
1675   template <typename T>
1676   inline void branchTestBooleanImpl(Condition cond, const T& t, Label* label)
1677       DEFINED_ON(arm, arm64, x86_shared);
1678   template <typename T>
1679   inline void branchTestStringImpl(Condition cond, const T& t, Label* label)
1680       DEFINED_ON(arm, arm64, x86_shared);
1681   template <typename T>
1682   inline void branchTestSymbolImpl(Condition cond, const T& t, Label* label)
1683       DEFINED_ON(arm, arm64, x86_shared);
1684   template <typename T>
1685   inline void branchTestBigIntImpl(Condition cond, const T& t, Label* label)
1686       DEFINED_ON(arm, arm64, x86_shared);
1687   template <typename T>
1688   inline void branchTestNullImpl(Condition cond, const T& t, Label* label)
1689       DEFINED_ON(arm, arm64, x86_shared);
1690   template <typename T>
1691   inline void branchTestObjectImpl(Condition cond, const T& t, Label* label)
1692       DEFINED_ON(arm, arm64, x86_shared);
1693   template <typename T>
1694   inline void branchTestGCThingImpl(Condition cond, const T& t,
1695                                     Label* label) PER_SHARED_ARCH;
1696   template <typename T>
1697   inline void branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
1698       DEFINED_ON(arm, arm64, x86_shared);
1699   template <typename T, class L>
1700   inline void branchTestMagicImpl(Condition cond, const T& t, L label)
1701       DEFINED_ON(arm, arm64, x86_shared);
1702 
1703  public:
1704   // The fallibleUnbox* methods below combine a Value type check with an unbox.
1705   // Especially on 64-bit platforms this can be implemented more efficiently
1706   // than a separate branch + unbox.
1707   //
1708   // |src| and |dest| can be the same register, but |dest| may hold garbage on
1709   // failure.
1710   inline void fallibleUnboxPtr(const ValueOperand& src, Register dest,
1711                                JSValueType type, Label* fail) PER_ARCH;
1712   inline void fallibleUnboxPtr(const Address& src, Register dest,
1713                                JSValueType type, Label* fail) PER_ARCH;
1714   inline void fallibleUnboxPtr(const BaseIndex& src, Register dest,
1715                                JSValueType type, Label* fail) PER_ARCH;
1716   template <typename T>
1717   inline void fallibleUnboxInt32(const T& src, Register dest, Label* fail);
1718   template <typename T>
1719   inline void fallibleUnboxBoolean(const T& src, Register dest, Label* fail);
1720   template <typename T>
1721   inline void fallibleUnboxObject(const T& src, Register dest, Label* fail);
1722   template <typename T>
1723   inline void fallibleUnboxString(const T& src, Register dest, Label* fail);
1724   template <typename T>
1725   inline void fallibleUnboxSymbol(const T& src, Register dest, Label* fail);
1726   template <typename T>
1727   inline void fallibleUnboxBigInt(const T& src, Register dest, Label* fail);
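
  // For example (an illustrative sketch; |masm|, |val|, |out|, and the label
  // are hypothetical):
  //
  //   // Branches to |&notInt32| unless |val| holds an int32; otherwise
  //   // unboxes it into |out|. |out| may hold garbage on failure.
  //   masm.fallibleUnboxInt32(val, out, &notInt32);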
1728 
1729   inline void cmp32Move32(Condition cond, Register lhs, Register rhs,
1730                           Register src, Register dest)
1731       DEFINED_ON(arm, arm64, mips_shared, x86_shared);
1732 
1733   inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs,
1734                           Register src, Register dest)
1735       DEFINED_ON(arm, arm64, mips_shared, x86_shared);
1736 
1737   inline void cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
1738                             Register src, Register dest)
1739       DEFINED_ON(x64);
1740 
1741   inline void cmp32Load32(Condition cond, Register lhs, const Address& rhs,
1742                           const Address& src, Register dest)
1743       DEFINED_ON(arm, arm64, mips_shared, x86_shared);
1744 
1745   inline void cmp32Load32(Condition cond, Register lhs, Register rhs,
1746                           const Address& src, Register dest)
1747       DEFINED_ON(arm, arm64, mips_shared, x86_shared);
1748 
1749   inline void cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
1750                            const Address& src, Register dest)
1751       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1752 
1753   inline void cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
1754                            Register src, Register dest)
1755       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1756 
1757   inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask,
1758                             const Address& src, Register dest)
1759       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1760 
1761   inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask,
1762                             Register src, Register dest)
1763       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1764 
1765   // Conditional move for Spectre mitigations.
1766   inline void spectreMovePtr(Condition cond, Register src, Register dest)
1767       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1768 
1769   // Zeroes dest if the condition is true.
1770   inline void spectreZeroRegister(Condition cond, Register scratch,
1771                                   Register dest)
1772       DEFINED_ON(arm, arm64, mips_shared, x86_shared);
1773 
1774   // Performs a bounds check and zeroes the index register if out-of-bounds
1775   // (to mitigate Spectre).
1776  private:
1777   inline void spectreBoundsCheck32(Register index, const Operand& length,
1778                                    Register maybeScratch, Label* failure)
1779       DEFINED_ON(x86);
1780 
1781  public:
1782   inline void spectreBoundsCheck32(Register index, Register length,
1783                                    Register maybeScratch, Label* failure)
1784       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1785   inline void spectreBoundsCheck32(Register index, const Address& length,
1786                                    Register maybeScratch, Label* failure)
1787       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
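
  // For example (an illustrative sketch; |masm|, the registers, and the label
  // are hypothetical):
  //
  //   // Branches to |&oob| if index >= length; otherwise zeroes |index| on
  //   // misspeculated paths so speculative accesses stay in bounds.
  //   masm.spectreBoundsCheck32(index, length, scratch, &oob);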
1788 
1789   // ========================================================================
1790   // Canonicalization primitives.
1791   inline void canonicalizeDouble(FloatRegister reg);
1792   inline void canonicalizeDoubleIfDeterministic(FloatRegister reg);
1793 
1794   inline void canonicalizeFloat(FloatRegister reg);
1795   inline void canonicalizeFloatIfDeterministic(FloatRegister reg);
1796 
1797  public:
1798   // ========================================================================
1799   // Memory access primitives.
1800   inline void storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
1801       DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
1802   inline void storeUncanonicalizedDouble(FloatRegister src,
1803                                          const BaseIndex& dest)
1804       DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
1805   inline void storeUncanonicalizedDouble(FloatRegister src, const Operand& dest)
1806       DEFINED_ON(x86_shared);
1807 
1808   template <class T>
1809   inline void storeDouble(FloatRegister src, const T& dest);
1810 
1811   template <class T>
1812   inline void boxDouble(FloatRegister src, const T& dest);
1813 
1814   using MacroAssemblerSpecific::boxDouble;
1815 
1816   inline void storeUncanonicalizedFloat32(FloatRegister src,
1817                                           const Address& dest)
1818       DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
1819   inline void storeUncanonicalizedFloat32(FloatRegister src,
1820                                           const BaseIndex& dest)
1821       DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
1822   inline void storeUncanonicalizedFloat32(FloatRegister src,
1823                                           const Operand& dest)
1824       DEFINED_ON(x86_shared);
1825 
1826   template <class T>
1827   inline void storeFloat32(FloatRegister src, const T& dest);
1828 
1829   inline void storeFloat32x3(FloatRegister src,
1830                              const Address& dest) PER_SHARED_ARCH;
1831   inline void storeFloat32x3(FloatRegister src,
1832                              const BaseIndex& dest) PER_SHARED_ARCH;
1833 
1834   template <typename T>
1835   void storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
1836                          const T& dest, MIRType slotType) PER_ARCH;
1837 
1838   inline void memoryBarrier(MemoryBarrierBits barrier) PER_SHARED_ARCH;
1839 
1840  public:
1841   // ========================================================================
1842   // Wasm SIMD
1843   //
1844   // Naming is "operationSimd128" when operate on the whole vector, otherwise
1845   // it's "operation<Type><Size>x<Lanes>".
1846   //
1847   // For microarchitectural reasons we can in principle get a performance win by
1848   // using int or float specific instructions in the operationSimd128 case when
1849   // we know that subsequent operations on the result are int or float oriented.
1850   // In practice, we don't care about that yet.
1851   //
1852   // The order of operations here follows those in the SIMD overview document,
1853   // https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md.
1854   //
1855   // Since we must target Intel SSE indefinitely and SSE is one-address or
1856   // two-address, these porting interfaces are nearly all one-address or
1857   // two-address.  In the future, if we decide to target Intel AVX or other
1858   // three-address architectures from Ion we may add additional interfaces.
1859   //
1860   // Conventions for argument order and naming and semantics:
1861   //  - Condition codes come first.
1862   //  - Other immediates (masks, shift counts) come next.
1863   //  - Operands come next:
1864   //    - For a binary operator where the left-hand-side has the same type as
1865   //      the result, one register parameter is normally named `lhsDest` and is
1866   //      both the left-hand side and destination; the other parameter is named
1867   //      `rhs` and is the right-hand side.  `rhs` comes first, `lhsDest`
1868   //      second.  `rhs` and `lhsDest` may be the same register (if rhs is
1869   //      a register).
1870   //    - For a unary operator, the input is named `src` and the output is named
1871   //      `dest`.  `src` comes first, `dest` second.  `src` and `dest` may be
1872   //      the same register (if `src` is a register).
1873   //  - Temp registers follow operands and are named `temp` if there's only one,
1874   //    otherwise `temp1`, `temp2`, etc regardless of type.  GPR temps precede
1875   //    FPU temps.  If there are several temps then they must be distinct
1876   //    registers, and they must be distinct from the operand registers unless
1877   //    noted.
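  //
  // For example, following the `lhsDest` convention above (an illustrative
  // sketch; |masm| and the registers are hypothetical):
  //
  //   // lhsDest := lhsDest + rhs, lane-wise on int32x4.
  //   masm.addInt32x4(rhs, lhsDest);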
1878 
1879   // Moves
1880 
1881   inline void moveSimd128(FloatRegister src, FloatRegister dest)
1882       DEFINED_ON(x86_shared);
1883 
1884   // Constants
1885 
1886   inline void zeroSimd128(FloatRegister dest) DEFINED_ON(x86_shared);
1887 
1888   inline void loadConstantSimd128(const SimdConstant& v, FloatRegister dest)
1889       DEFINED_ON(x86_shared);
1890 
1891   // Splat
1892 
1893   inline void splatX16(Register src, FloatRegister dest) DEFINED_ON(x86_shared);
1894 
1895   inline void splatX8(Register src, FloatRegister dest) DEFINED_ON(x86_shared);
1896 
1897   inline void splatX4(Register src, FloatRegister dest) DEFINED_ON(x86_shared);
1898 
1899   inline void splatX4(FloatRegister src, FloatRegister dest)
1900       DEFINED_ON(x86_shared);
1901 
1902   inline void splatX2(Register64 src, FloatRegister dest) DEFINED_ON(x64);
1903 
1904   inline void splatX2(FloatRegister src, FloatRegister dest)
1905       DEFINED_ON(x86_shared);
1906 
1907   // Extract lane as scalar.  Float extraction does not canonicalize the value.
1908 
1909   inline void extractLaneInt8x16(uint32_t lane, FloatRegister src,
1910                                  Register dest) DEFINED_ON(x86_shared);
1911 
1912   inline void unsignedExtractLaneInt8x16(uint32_t lane, FloatRegister src,
1913                                          Register dest) DEFINED_ON(x86_shared);
1914 
1915   inline void extractLaneInt16x8(uint32_t lane, FloatRegister src,
1916                                  Register dest) DEFINED_ON(x86_shared);
1917 
1918   inline void unsignedExtractLaneInt16x8(uint32_t lane, FloatRegister src,
1919                                          Register dest) DEFINED_ON(x86_shared);
1920 
1921   inline void extractLaneInt32x4(uint32_t lane, FloatRegister src,
1922                                  Register dest) DEFINED_ON(x86_shared);
1923 
1924   inline void extractLaneInt64x2(uint32_t lane, FloatRegister src,
1925                                  Register64 dest) DEFINED_ON(x64);
1926 
1927   inline void extractLaneFloat32x4(uint32_t lane, FloatRegister src,
1928                                    FloatRegister dest) DEFINED_ON(x86_shared);
1929 
1930   inline void extractLaneFloat64x2(uint32_t lane, FloatRegister src,
1931                                    FloatRegister dest) DEFINED_ON(x86_shared);
1932 
1933   // Replace lane value
1934 
1935   inline void replaceLaneInt8x16(unsigned lane, Register rhs,
1936                                  FloatRegister lhsDest) DEFINED_ON(x86_shared);
1937 
1938   inline void replaceLaneInt16x8(unsigned lane, Register rhs,
1939                                  FloatRegister lhsDest) DEFINED_ON(x86_shared);
1940 
1941   inline void replaceLaneInt32x4(unsigned lane, Register rhs,
1942                                  FloatRegister lhsDest) DEFINED_ON(x86_shared);
1943 
1944   inline void replaceLaneInt64x2(unsigned lane, Register64 rhs,
1945                                  FloatRegister lhsDest) DEFINED_ON(x64);
1946 
1947   inline void replaceLaneFloat32x4(unsigned lane, FloatRegister rhs,
1948                                    FloatRegister lhsDest)
1949       DEFINED_ON(x86_shared);
1950 
1951   inline void replaceLaneFloat64x2(unsigned lane, FloatRegister rhs,
1952                                    FloatRegister lhsDest)
1953       DEFINED_ON(x86_shared);
1954 
1955   // Shuffle - blend and permute with immediate indices, and its many
1956   // specializations.  Lane values other than those mentioned are illegal.
1957 
1958   // lane values 0..31
1959   inline void shuffleInt8x16(const uint8_t lanes[16], FloatRegister rhs,
1960                              FloatRegister lhsDest, FloatRegister temp)
1961       DEFINED_ON(x86_shared);
1962 
1963   // lane values 0 (select from lhs) or FF (select from rhs).
1964   inline void blendInt8x16(const uint8_t lanes[16], FloatRegister rhs,
1965                            FloatRegister lhsDest, FloatRegister temp)
1966       DEFINED_ON(x86_shared);
1967 
1968   // lane values 0 (select from lhs) or FFFF (select from rhs).
1969   inline void blendInt16x8(const uint16_t lanes[8], FloatRegister rhs,
1970                            FloatRegister lhsDest) DEFINED_ON(x86_shared);
1971 
1972   inline void interleaveHighInt8x16(FloatRegister rhs, FloatRegister lhsDest)
1973       DEFINED_ON(x86_shared);
1974 
1975   inline void interleaveHighInt16x8(FloatRegister rhs, FloatRegister lhsDest)
1976       DEFINED_ON(x86_shared);
1977 
1978   inline void interleaveHighInt32x4(FloatRegister rhs, FloatRegister lhsDest)
1979       DEFINED_ON(x86_shared);
1980 
1981   inline void interleaveLowInt8x16(FloatRegister rhs, FloatRegister lhsDest)
1982       DEFINED_ON(x86_shared);
1983 
1984   inline void interleaveLowInt16x8(FloatRegister rhs, FloatRegister lhsDest)
1985       DEFINED_ON(x86_shared);
1986 
1987   inline void interleaveLowInt32x4(FloatRegister rhs, FloatRegister lhsDest)
1988       DEFINED_ON(x86_shared);
1989 
1990   // Permute - permute with immediate indices.
1991 
1992   // lane values 0..15
1993   inline void permuteInt8x16(const uint8_t lanes[16], FloatRegister src,
1994                              FloatRegister dest) DEFINED_ON(x86_shared);
1995 
1996   // lane values 0..3 [sic].
1997   inline void permuteHighInt16x8(const uint16_t lanes[4], FloatRegister src,
1998                                  FloatRegister dest) DEFINED_ON(x86_shared);
1999 
2000   // lane values 0..3.
2001   inline void permuteLowInt16x8(const uint16_t lanes[4], FloatRegister src,
2002                                 FloatRegister dest) DEFINED_ON(x86_shared);
2003 
2004   // lane values 0..3
2005   inline void permuteInt32x4(const uint32_t lanes[4], FloatRegister src,
2006                              FloatRegister dest) DEFINED_ON(x86_shared);
2007 
2008   // low_16_bytes_of((lhsDest ++ rhs) >> shift*8), shift must be < 32
2009   inline void concatAndRightShiftInt8x16(FloatRegister rhs,
2010                                          FloatRegister lhsDest, uint32_t shift)
2011       DEFINED_ON(x86_shared);
2012 
2013   // Shift bytes with immediate count, shifting in zeroes.  Shift count 0..15.
2014 
2015   inline void leftShiftSimd128(Imm32 count, FloatRegister src,
2016                                FloatRegister dest) DEFINED_ON(x86_shared);
2017 
2018   inline void rightShiftSimd128(Imm32 count, FloatRegister src,
2019                                 FloatRegister dest) DEFINED_ON(x86_shared);
2020 
2021   // Swizzle - permute with variable indices.  `rhs` holds the lanes parameter.
2022 
2023   inline void swizzleInt8x16(FloatRegister rhs, FloatRegister lhsDest,
2024                              FloatRegister temp) DEFINED_ON(x86_shared);
2025 
2026   // Integer Add
2027 
2028   inline void addInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2029       DEFINED_ON(x86_shared);
2030 
2031   inline void addInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2032       DEFINED_ON(x86_shared);
2033 
2034   inline void addInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2035       DEFINED_ON(x86_shared);
2036 
2037   inline void addInt64x2(FloatRegister rhs, FloatRegister lhsDest)
2038       DEFINED_ON(x86_shared);
2039 
2040   // Integer Subtract
2041 
2042   inline void subInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2043       DEFINED_ON(x86_shared);
2044 
2045   inline void subInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2046       DEFINED_ON(x86_shared);
2047 
2048   inline void subInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2049       DEFINED_ON(x86_shared);
2050 
2051   inline void subInt64x2(FloatRegister rhs, FloatRegister lhsDest)
2052       DEFINED_ON(x86_shared);
2053 
2054   // Integer Multiply
2055 
2056   inline void mulInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2057       DEFINED_ON(x86_shared);
2058 
2059   inline void mulInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2060       DEFINED_ON(x86_shared);
2061 
2062   inline void mulInt64x2(FloatRegister rhs, FloatRegister lhsDest,
2063                          Register64 temp) DEFINED_ON(x64);
2064 
2065   // Integer Negate
2066 
2067   inline void negInt8x16(FloatRegister src, FloatRegister dest)
2068       DEFINED_ON(x86_shared);
2069 
2070   inline void negInt16x8(FloatRegister src, FloatRegister dest)
2071       DEFINED_ON(x86_shared);
2072 
2073   inline void negInt32x4(FloatRegister src, FloatRegister dest)
2074       DEFINED_ON(x86_shared);
2075 
2076   inline void negInt64x2(FloatRegister src, FloatRegister dest)
2077       DEFINED_ON(x86_shared);
2078 
2079   // Saturating integer add
2080 
2081   inline void addSatInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2082       DEFINED_ON(x86_shared);
2083 
2084   inline void unsignedAddSatInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2085       DEFINED_ON(x86_shared);
2086 
2087   inline void addSatInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2088       DEFINED_ON(x86_shared);
2089 
2090   inline void unsignedAddSatInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2091       DEFINED_ON(x86_shared);
2092 
2093   // Saturating integer subtract
2094 
2095   inline void subSatInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2096       DEFINED_ON(x86_shared);
2097 
2098   inline void unsignedSubSatInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2099       DEFINED_ON(x86_shared);
2100 
2101   inline void subSatInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2102       DEFINED_ON(x86_shared);
2103 
2104   inline void unsignedSubSatInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2105       DEFINED_ON(x86_shared);
2106 
2107   // Lane-wise integer minimum
2108 
2109   inline void minInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2110       DEFINED_ON(x86_shared);
2111 
2112   inline void unsignedMinInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2113       DEFINED_ON(x86_shared);
2114 
2115   inline void minInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2116       DEFINED_ON(x86_shared);
2117 
2118   inline void unsignedMinInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2119       DEFINED_ON(x86_shared);
2120 
2121   inline void minInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2122       DEFINED_ON(x86_shared);
2123 
2124   inline void unsignedMinInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2125       DEFINED_ON(x86_shared);
2126 
2127   // Lane-wise integer maximum
2128 
2129   inline void maxInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2130       DEFINED_ON(x86_shared);
2131 
2132   inline void unsignedMaxInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2133       DEFINED_ON(x86_shared);
2134 
2135   inline void maxInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2136       DEFINED_ON(x86_shared);
2137 
2138   inline void unsignedMaxInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2139       DEFINED_ON(x86_shared);
2140 
2141   inline void maxInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2142       DEFINED_ON(x86_shared);
2143 
2144   inline void unsignedMaxInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2145       DEFINED_ON(x86_shared);
2146 
2147   // Lane-wise integer rounding average
2148 
2149   inline void averageInt8x16(FloatRegister rhs, FloatRegister lhsDest)
2150       DEFINED_ON(x86_shared);
2151 
2152   inline void averageInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2153       DEFINED_ON(x86_shared);
2154 
2155   // Lane-wise integer absolute value
2156 
2157   inline void absInt8x16(FloatRegister src, FloatRegister dest)
2158       DEFINED_ON(x86_shared);
2159 
2160   inline void absInt16x8(FloatRegister src, FloatRegister dest)
2161       DEFINED_ON(x86_shared);
2162 
2163   inline void absInt32x4(FloatRegister src, FloatRegister dest)
2164       DEFINED_ON(x86_shared);
2165 
2166   // Left shift by scalar.  Immediates must have been masked; shifts of zero
2167   // will work but may or may not generate code.
2168 
2169   inline void leftShiftInt8x16(Register rhs, FloatRegister lhsDest,
2170                                Register temp1, FloatRegister temp2)
2171       DEFINED_ON(x86_shared);
2172 
2173   inline void leftShiftInt8x16(Imm32 count, FloatRegister src,
2174                                FloatRegister dest) DEFINED_ON(x86_shared);
2175 
2176   inline void leftShiftInt16x8(Register rhs, FloatRegister lhsDest,
2177                                Register temp) DEFINED_ON(x86_shared);
2178 
2179   inline void leftShiftInt16x8(Imm32 count, FloatRegister src,
2180                                FloatRegister dest) DEFINED_ON(x86_shared);
2181 
2182   inline void leftShiftInt32x4(Register rhs, FloatRegister lhsDest,
2183                                Register temp) DEFINED_ON(x86_shared);
2184 
2185   inline void leftShiftInt32x4(Imm32 count, FloatRegister src,
2186                                FloatRegister dest) DEFINED_ON(x86_shared);
2187 
2188   inline void leftShiftInt64x2(Register rhs, FloatRegister lhsDest,
2189                                Register temp) DEFINED_ON(x86_shared);
2190 
2191   inline void leftShiftInt64x2(Imm32 count, FloatRegister src,
2192                                FloatRegister dest) DEFINED_ON(x86_shared);
2193 
2194   // Right shift by scalar.  Immediates must have been masked; shifts of zero
2195   // will work but may or may not generate code.
2196 
2197   inline void rightShiftInt8x16(Register rhs, FloatRegister lhsDest,
2198                                 Register temp1, FloatRegister temp2)
2199       DEFINED_ON(x86_shared);
2200 
2201   inline void rightShiftInt8x16(Imm32 count, FloatRegister src,
2202                                 FloatRegister dest, FloatRegister temp)
2203       DEFINED_ON(x86_shared);
2204 
2205   inline void unsignedRightShiftInt8x16(Register rhs, FloatRegister lhsDest,
2206                                         Register temp1, FloatRegister temp2)
2207       DEFINED_ON(x86_shared);
2208 
2209   inline void unsignedRightShiftInt8x16(Imm32 count, FloatRegister src,
2210                                         FloatRegister dest)
2211       DEFINED_ON(x86_shared);
2212 
2213   inline void rightShiftInt16x8(Register rhs, FloatRegister lhsDest,
2214                                 Register temp) DEFINED_ON(x86_shared);
2215 
2216   inline void rightShiftInt16x8(Imm32 count, FloatRegister src,
2217                                 FloatRegister dest) DEFINED_ON(x86_shared);
2218 
2219   inline void unsignedRightShiftInt16x8(Register rhs, FloatRegister lhsDest,
2220                                         Register temp) DEFINED_ON(x86_shared);
2221 
2222   inline void unsignedRightShiftInt16x8(Imm32 count, FloatRegister src,
2223                                         FloatRegister dest)
2224       DEFINED_ON(x86_shared);
2225 
2226   inline void rightShiftInt32x4(Register rhs, FloatRegister lhsDest,
2227                                 Register temp) DEFINED_ON(x86_shared);
2228 
2229   inline void rightShiftInt32x4(Imm32 count, FloatRegister src,
2230                                 FloatRegister dest) DEFINED_ON(x86_shared);
2231 
2232   inline void unsignedRightShiftInt32x4(Register rhs, FloatRegister lhsDest,
2233                                         Register temp) DEFINED_ON(x86_shared);
2234 
2235   inline void unsignedRightShiftInt32x4(Imm32 count, FloatRegister src,
2236                                         FloatRegister dest)
2237       DEFINED_ON(x86_shared);
2238 
2239   // `rhs` must be the CL register and it must have been masked so that its
2240   // value is <= 63.
2241   inline void rightShiftInt64x2(Register rhs, FloatRegister lhsDest)
2242       DEFINED_ON(x64);
2243 
2244   inline void rightShiftInt64x2(Imm32 count, FloatRegister src,
2245                                 FloatRegister dest) DEFINED_ON(x64);
2246 
2247   inline void unsignedRightShiftInt64x2(Register rhs, FloatRegister lhsDest,
2248                                         Register temp) DEFINED_ON(x86_shared);
2249 
2250   inline void unsignedRightShiftInt64x2(Imm32 count, FloatRegister src,
2251                                         FloatRegister dest)
2252       DEFINED_ON(x86_shared);
2253 
2254   // Bitwise and, or, xor, not
2255 
2256   inline void bitwiseAndSimd128(FloatRegister rhs, FloatRegister lhsDest)
2257       DEFINED_ON(x86_shared);
2258 
2259   inline void bitwiseOrSimd128(FloatRegister rhs, FloatRegister lhsDest)
2260       DEFINED_ON(x86_shared);
2261 
2262   inline void bitwiseXorSimd128(FloatRegister rhs, FloatRegister lhsDest)
2263       DEFINED_ON(x86_shared);
2264 
2265   inline void bitwiseNotSimd128(FloatRegister src, FloatRegister dest)
2266       DEFINED_ON(x86_shared);
2267 
  // Bitwise AND with complement: dest = ~lhs & rhs. Note this is not what
  // Wasm wants but what the hardware offers, hence the name.
2270 
2271   inline void bitwiseNotAndSimd128(FloatRegister rhs, FloatRegister lhsDest)
2272       DEFINED_ON(x86_shared);
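
  // For example, Wasm's v128.andnot(a, b) computes a & ~b; with this
  // operation that can be expressed by swapping the operands (an illustrative
  // sketch; |masm|, |a|, and |b| are hypothetical registers):
  //
  //   // b := ~b & a, which equals a & ~b.
  //   masm.bitwiseNotAndSimd128(a, b);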
2273 
2274   // Bitwise select
2275 
2276   inline void bitwiseSelectSimd128(FloatRegister mask, FloatRegister onTrue,
2277                                    FloatRegister onFalse, FloatRegister dest,
2278                                    FloatRegister temp) DEFINED_ON(x86_shared);
2279 
  // Any lane true, i.e., any bit set
2281 
2282   inline void anyTrueSimd128(FloatRegister src, Register dest) DEFINED_ON(x64);
2283 
2284   // All lanes true
2285 
2286   inline void allTrueInt8x16(FloatRegister src, Register dest)
2287       DEFINED_ON(x86_shared);
2288 
2289   inline void allTrueInt16x8(FloatRegister src, Register dest)
2290       DEFINED_ON(x86_shared);
2291 
2292   inline void allTrueInt32x4(FloatRegister src, Register dest)
2293       DEFINED_ON(x86_shared);
2294 
2295   // Comparisons (integer and floating-point)
2296 
2297   inline void compareInt8x16(Assembler::Condition cond, FloatRegister rhs,
2298                              FloatRegister lhsDest) DEFINED_ON(x86_shared);
2299 
2300   inline void unsignedCompareInt8x16(Assembler::Condition cond,
2301                                      FloatRegister rhs, FloatRegister lhsDest,
2302                                      FloatRegister temp1, FloatRegister temp2)
2303       DEFINED_ON(x86_shared);
2304 
2305   inline void compareInt16x8(Assembler::Condition cond, FloatRegister rhs,
2306                              FloatRegister lhsDest) DEFINED_ON(x86_shared);
2307 
2308   inline void unsignedCompareInt16x8(Assembler::Condition cond,
2309                                      FloatRegister rhs, FloatRegister lhsDest,
2310                                      FloatRegister temp1, FloatRegister temp2)
2311       DEFINED_ON(x86_shared);
2312 
2313   inline void compareInt32x4(Assembler::Condition cond, FloatRegister rhs,
2314                              FloatRegister lhsDest) DEFINED_ON(x86_shared);
2315 
2316   inline void unsignedCompareInt32x4(Assembler::Condition cond,
2317                                      FloatRegister rhs, FloatRegister lhsDest,
2318                                      FloatRegister temp1, FloatRegister temp2)
2319       DEFINED_ON(x86_shared);
2320 
2321   inline void compareFloat32x4(Assembler::Condition cond, FloatRegister rhs,
2322                                FloatRegister lhsDest) DEFINED_ON(x86_shared);
2323 
2324   inline void compareFloat64x2(Assembler::Condition cond, FloatRegister rhs,
2325                                FloatRegister lhsDest) DEFINED_ON(x86_shared);
2326 
2327   // Load
2328 
2329   inline void loadUnalignedSimd128(const Address& src, FloatRegister dest)
2330       DEFINED_ON(x86_shared);
2331 
2332   inline void loadUnalignedSimd128(const BaseIndex& src, FloatRegister dest)
2333       DEFINED_ON(x86_shared);
2334 
2335   // Store
2336 
2337   inline void storeUnalignedSimd128(FloatRegister src, const Address& dest)
2338       DEFINED_ON(x86_shared);
2339 
2340   inline void storeUnalignedSimd128(FloatRegister src, const BaseIndex& dest)
2341       DEFINED_ON(x86_shared);
2342 
2343   // Floating point negation.  The input and output registers must differ.
2344 
2345   inline void negFloat32x4(FloatRegister src, FloatRegister dest)
2346       DEFINED_ON(x86_shared);
2347 
2348   inline void negFloat64x2(FloatRegister src, FloatRegister dest)
2349       DEFINED_ON(x86_shared);
2350 
2351   // Floating point absolute value
2352 
2353   inline void absFloat32x4(FloatRegister src, FloatRegister dest)
2354       DEFINED_ON(x86_shared);
2355 
2356   inline void absFloat64x2(FloatRegister src, FloatRegister dest)
2357       DEFINED_ON(x86_shared);
2358 
2359   // NaN-propagating minimum
2360 
2361   inline void minFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
2362       DEFINED_ON(x86_shared);
2363 
2364   inline void minFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
2365       DEFINED_ON(x86_shared);
2366 
2367   // NaN-propagating maximum
2368 
2369   inline void maxFloat32x4(FloatRegister rhs, FloatRegister lhsDest,
2370                            FloatRegister temp) DEFINED_ON(x86_shared);
2371 
2372   inline void maxFloat64x2(FloatRegister rhs, FloatRegister lhsDest,
2373                            FloatRegister temp) DEFINED_ON(x86_shared);
2374 
2375   // Floating add
2376 
2377   inline void addFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
2378       DEFINED_ON(x86_shared);
2379 
2380   inline void addFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
2381       DEFINED_ON(x86_shared);
2382 
2383   // Floating subtract
2384 
2385   inline void subFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
2386       DEFINED_ON(x86_shared);
2387 
2388   inline void subFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
2389       DEFINED_ON(x86_shared);
2390 
2391   // Floating division
2392 
2393   inline void divFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
2394       DEFINED_ON(x86_shared);
2395 
2396   inline void divFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
2397       DEFINED_ON(x86_shared);
2398 
2399   // Floating multiply
2400 
2401   inline void mulFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
2402       DEFINED_ON(x86_shared);
2403 
2404   inline void mulFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
2405       DEFINED_ON(x86_shared);
2406 
2407   // Floating square root
2408 
2409   inline void sqrtFloat32x4(FloatRegister src, FloatRegister dest)
2410       DEFINED_ON(x86_shared);
2411 
2412   inline void sqrtFloat64x2(FloatRegister src, FloatRegister dest)
2413       DEFINED_ON(x86_shared);
2414 
2415   // Integer to floating point with rounding
2416 
2417   inline void convertInt32x4ToFloat32x4(FloatRegister src, FloatRegister dest)
2418       DEFINED_ON(x86_shared);
2419 
2420   inline void unsignedConvertInt32x4ToFloat32x4(FloatRegister src,
2421                                                 FloatRegister dest)
2422       DEFINED_ON(x86_shared);
2423 
2424   // Floating point to integer with saturation
2425 
2426   inline void truncSatFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest)
2427       DEFINED_ON(x86_shared);
2428 
2429   inline void unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
2430                                                  FloatRegister dest,
2431                                                  FloatRegister temp)
2432       DEFINED_ON(x86_shared);
2433 
2434   // Integer to integer narrowing
2435 
2436   inline void narrowInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2437       DEFINED_ON(x86_shared);
2438 
2439   inline void unsignedNarrowInt16x8(FloatRegister rhs, FloatRegister lhsDest)
2440       DEFINED_ON(x86_shared);
2441 
2442   inline void narrowInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2443       DEFINED_ON(x86_shared);
2444 
2445   inline void unsignedNarrowInt32x4(FloatRegister rhs, FloatRegister lhsDest)
2446       DEFINED_ON(x86_shared);
2447 
2448   // Integer to integer widening
2449 
2450   inline void widenLowInt8x16(FloatRegister src, FloatRegister dest)
2451       DEFINED_ON(x86_shared);
2452 
2453   inline void widenHighInt8x16(FloatRegister src, FloatRegister dest)
2454       DEFINED_ON(x86_shared);
2455 
2456   inline void unsignedWidenLowInt8x16(FloatRegister src, FloatRegister dest)
2457       DEFINED_ON(x86_shared);
2458 
2459   inline void unsignedWidenHighInt8x16(FloatRegister src, FloatRegister dest)
2460       DEFINED_ON(x86_shared);
2461 
2462   inline void widenLowInt16x8(FloatRegister src, FloatRegister dest)
2463       DEFINED_ON(x86_shared);
2464 
2465   inline void widenHighInt16x8(FloatRegister src, FloatRegister dest)
2466       DEFINED_ON(x86_shared);
2467 
2468   inline void unsignedWidenLowInt16x8(FloatRegister src, FloatRegister dest)
2469       DEFINED_ON(x86_shared);
2470 
2471   inline void unsignedWidenHighInt16x8(FloatRegister src, FloatRegister dest)
2472       DEFINED_ON(x86_shared);
2473 
2474   inline void widenLowInt32x4(FloatRegister src, FloatRegister dest)
2475       DEFINED_ON(x86_shared);
2476 
2477   inline void unsignedWidenLowInt32x4(FloatRegister src, FloatRegister dest)
2478       DEFINED_ON(x86_shared);
2479 
2480  public:
2481   // ========================================================================
2482   // Truncate floating point.
2483 
2484   // Undefined behaviour when truncation is outside Int64 range.
2485   // Needs a temp register if SSE3 is not present.
2486   inline void truncateFloat32ToInt64(Address src, Address dest, Register temp)
2487       DEFINED_ON(x86_shared);
2488   inline void truncateFloat32ToUInt64(Address src, Address dest, Register temp,
2489                                       FloatRegister floatTemp)
2490       DEFINED_ON(x86, x64);
2491   inline void truncateDoubleToInt64(Address src, Address dest, Register temp)
2492       DEFINED_ON(x86_shared);
2493   inline void truncateDoubleToUInt64(Address src, Address dest, Register temp,
2494                                      FloatRegister floatTemp)
2495       DEFINED_ON(x86, x64);
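
  // A minimal usage sketch (x86-shared), assuming `srcOffset`/`destOffset`
  // are stack-slot offsets and `temp` is a spare GPR; `temp` is only
  // consulted when SSE3 is unavailable:
  //
  //   masm.truncateDoubleToInt64(Address(StackPointer, srcOffset),
  //                              Address(StackPointer, destOffset), temp);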
2496 
2497  public:
2498   // ========================================================================
2499   // Convert floating point.
2500 
2501   // temp required on x86 and x64; must be undefined on mips64.
2502   void convertUInt64ToFloat32(Register64 src, FloatRegister dest, Register temp)
2503       DEFINED_ON(arm64, mips64, x64, x86);
2504 
2505   void convertInt64ToFloat32(Register64 src, FloatRegister dest)
2506       DEFINED_ON(arm64, mips64, x64, x86);
2507 
2508   bool convertUInt64ToDoubleNeedsTemp() PER_ARCH;
2509 
2510   // temp required when convertUInt64ToDoubleNeedsTemp() returns true.
2511   void convertUInt64ToDouble(Register64 src, FloatRegister dest,
2512                              Register temp) PER_ARCH;
2513 
2514   void convertInt64ToDouble(Register64 src, FloatRegister dest)
2515       DEFINED_ON(arm64, mips64, x64, x86);
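
  // A sketch of the temp-probing pattern, assuming `temp` was allocated up
  // front (platforms that do not need it accept Register::Invalid()):
  //
  //   Register maybeTemp = masm.convertUInt64ToDoubleNeedsTemp()
  //                            ? temp
  //                            : Register::Invalid();
  //   masm.convertUInt64ToDouble(src64, destDouble, maybeTemp);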
2516 
2517  public:
2518   // ========================================================================
2519   // wasm support
2520 
2521   CodeOffset wasmTrapInstruction() PER_SHARED_ARCH;
2522 
2523   void wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset);
2524   void wasmInterruptCheck(Register tls, wasm::BytecodeOffset bytecodeOffset);
2525 
2526   // Returns a pair: the offset of the undefined (trapping) instruction, and
2527   // the number of extra bytes of stack allocated prior to the trap
2528   // instruction proper.
2529   std::pair<CodeOffset, uint32_t> wasmReserveStackChecked(
2530       uint32_t amount, wasm::BytecodeOffset trapOffset);
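
  // Usage sketch:
  //
  //   std::pair<CodeOffset, uint32_t> trap =
  //       masm.wasmReserveStackChecked(frameSize, trapOffset);
  //   // trap.first locates the trapping instruction; trap.second is the
  //   // extra stack allocated before it.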
2531 
2532   // Emit a bounds check against the wasm heap limit, jumping to 'label' if
2533   // 'cond' holds. If JitOptions.spectreIndexMasking is true, in speculative
2534   // executions 'index' is saturated in-place to 'boundsCheckLimit'.
2535   void wasmBoundsCheck(Condition cond, Register index,
2536                        Register boundsCheckLimit, Label* label)
2537       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
2538 
2539   void wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit,
2540                        Label* label)
2541       DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
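
  // A typical pattern, as a sketch: branch out of line on an out-of-bounds
  // index and trap there.
  //
  //   Label oob;
  //   masm.wasmBoundsCheck(Assembler::AboveOrEqual, index, boundsCheckLimit,
  //                        &oob);
  //   // ... emit the guarded memory access ...
  //   masm.bind(&oob);
  //   masm.wasmTrap(wasm::Trap::OutOfBounds, bytecodeOffset);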
2542 
2543   // Each wasm load/store instruction appends its own wasm::Trap::OutOfBounds.
2544   void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
2545                 AnyRegister out) DEFINED_ON(x86, x64);
2546   void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr,
2547                    Register64 out) DEFINED_ON(x86, x64);
2548   void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
2549                  Operand dstAddr) DEFINED_ON(x86, x64);
2550   void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
2551                     Operand dstAddr) DEFINED_ON(x86);
2552 
2553   // For all the ARM and ARM64 wasmLoad and wasmStore functions, `ptr` MUST
2554   // equal `ptrScratch`, and that register will be updated based on conditions
2555   // listed below (where it is only mentioned as `ptr`).
2556 
2557   // `ptr` will be updated if access.offset() != 0 or access.type() ==
2558   // Scalar::Int64.
2559   void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
2560                 Register ptr, Register ptrScratch, AnyRegister output)
2561       DEFINED_ON(arm, arm64, mips_shared);
2562   void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
2563                    Register ptr, Register ptrScratch, Register64 output)
2564       DEFINED_ON(arm, arm64, mips32, mips64);
2565   void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
2566                  Register memoryBase, Register ptr, Register ptrScratch)
2567       DEFINED_ON(arm, arm64, mips_shared);
2568   void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
2569                     Register memoryBase, Register ptr, Register ptrScratch)
2570       DEFINED_ON(arm, arm64, mips32, mips64);
2571 
2572   // `ptr` will always be updated.
2573   void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
2574                          Register memoryBase, Register ptr, Register ptrScratch,
2575                          Register output, Register tmp)
2576       DEFINED_ON(arm, mips32, mips64);
2577 
2578   // ARM: `ptr` will always be updated and `tmp1` is always needed.  `tmp2` is
2579   // needed for Float32; `tmp2` and `tmp3` are needed for Float64.  Temps must
2580   // be Invalid when they are not needed.
2581   // MIPS: `ptr` will always be updated.
2582   void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
2583                            Register memoryBase, Register ptr,
2584                            Register ptrScratch, FloatRegister output,
2585                            Register tmp1, Register tmp2, Register tmp3)
2586       DEFINED_ON(arm, mips32, mips64);
2587 
2588   // `ptr` will always be updated.
2589   void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
2590                             Register memoryBase, Register ptr,
2591                             Register ptrScratch, Register64 output,
2592                             Register tmp) DEFINED_ON(arm, mips32, mips64);
2593 
2594   // ARM: `ptr` and `value` will always be updated.  'tmp' must be Invalid.
2595   // MIPS: `ptr` will always be updated.
2596   void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value,
2597                           Register memoryBase, Register ptr,
2598                           Register ptrScratch, Register tmp)
2599       DEFINED_ON(arm, mips32, mips64);
2600 
2601   // `ptr` will always be updated.
2602   void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
2603                             FloatRegister floatValue, Register memoryBase,
2604                             Register ptr, Register ptrScratch, Register tmp)
2605       DEFINED_ON(arm, mips32, mips64);
2606 
2607   // `ptr` will always be updated.
2608   void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
2609                              Register64 value, Register memoryBase,
2610                              Register ptr, Register ptrScratch, Register tmp)
2611       DEFINED_ON(arm, mips32, mips64);
2612 
2613   // Wasm-specific methods, used in both the wasm baseline compiler and Ion.
2614 
2615   // The truncate-to-int32 methods do not bind the rejoin label; clients must
2616   // do so if oolWasmTruncateCheckF64ToI32() can jump to it.
2617   void wasmTruncateDoubleToUInt32(FloatRegister input, Register output,
2618                                   bool isSaturating, Label* oolEntry) PER_ARCH;
2619   void wasmTruncateDoubleToInt32(FloatRegister input, Register output,
2620                                  bool isSaturating,
2621                                  Label* oolEntry) PER_SHARED_ARCH;
2622   void oolWasmTruncateCheckF64ToI32(FloatRegister input, Register output,
2623                                     TruncFlags flags, wasm::BytecodeOffset off,
2624                                     Label* rejoin)
2625       DEFINED_ON(arm, arm64, x86_shared, mips_shared);
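
  // A sketch of the inline/out-of-line split; `ool` stands for an
  // OutOfLineCode-style container with entry/rejoin labels (hypothetical
  // names here):
  //
  //   masm.wasmTruncateDoubleToInt32(input, output, isSaturating,
  //                                  ool->entry());
  //   masm.bind(ool->rejoin());  // the client binds the rejoin label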
2626 
2627   void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output,
2628                                    bool isSaturating, Label* oolEntry) PER_ARCH;
2629   void wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
2630                                   bool isSaturating,
2631                                   Label* oolEntry) PER_SHARED_ARCH;
2632   void oolWasmTruncateCheckF32ToI32(FloatRegister input, Register output,
2633                                     TruncFlags flags, wasm::BytecodeOffset off,
2634                                     Label* rejoin)
2635       DEFINED_ON(arm, arm64, x86_shared, mips_shared);
2636 
2637   // The truncate-to-int64 methods will always bind the `oolRejoin` label
2638   // after the last emitted instruction.
2639   void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output,
2640                                  bool isSaturating, Label* oolEntry,
2641                                  Label* oolRejoin, FloatRegister tempDouble)
2642       DEFINED_ON(arm64, x86, x64, mips64);
2643   void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output,
2644                                   bool isSaturating, Label* oolEntry,
2645                                   Label* oolRejoin, FloatRegister tempDouble)
2646       DEFINED_ON(arm64, x86, x64, mips64);
2647   void oolWasmTruncateCheckF64ToI64(FloatRegister input, Register64 output,
2648                                     TruncFlags flags, wasm::BytecodeOffset off,
2649                                     Label* rejoin)
2650       DEFINED_ON(arm, arm64, x86_shared, mips_shared);
2651 
2652   void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output,
2653                                   bool isSaturating, Label* oolEntry,
2654                                   Label* oolRejoin, FloatRegister tempDouble)
2655       DEFINED_ON(arm64, x86, x64, mips64);
2656   void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output,
2657                                    bool isSaturating, Label* oolEntry,
2658                                    Label* oolRejoin, FloatRegister tempDouble)
2659       DEFINED_ON(arm64, x86, x64, mips64);
2660   void oolWasmTruncateCheckF32ToI64(FloatRegister input, Register64 output,
2661                                     TruncFlags flags, wasm::BytecodeOffset off,
2662                                     Label* rejoin)
2663       DEFINED_ON(arm, arm64, x86_shared, mips_shared);
2664 
2665   // This function takes care of loading the callee's TLS and pinned regs but
2666   // it is the caller's responsibility to save/restore TLS or pinned regs.
2667   CodeOffset wasmCallImport(const wasm::CallSiteDesc& desc,
2668                             const wasm::CalleeDesc& callee);
2669 
2670   // WasmTableCallIndexReg must contain the index of the indirect call.
2671   CodeOffset wasmCallIndirect(const wasm::CallSiteDesc& desc,
2672                               const wasm::CalleeDesc& callee,
2673                               bool needsBoundsCheck);
2674 
2675   // This function takes care of loading the pointer to the current instance
2676   // as the implicit first argument. It preserves TLS and pinned registers.
2677   // (TLS & pinned regs are non-volatile registers in the system ABI).
2678   CodeOffset wasmCallBuiltinInstanceMethod(const wasm::CallSiteDesc& desc,
2679                                            const ABIArg& instanceArg,
2680                                            wasm::SymbolicAddress builtin,
2681                                            wasm::FailureMode failureMode);
2682 
2683   // As enterFakeExitFrame(), but using register conventions appropriate for
2684   // wasm stubs.
2685   void enterFakeExitFrameForWasm(Register cxreg, Register scratch,
2686                                  ExitFrameType type) PER_SHARED_ARCH;
2687 
2688  public:
2689   // ========================================================================
2690   // Barrier functions.
2691 
2692   void emitPreBarrierFastPath(JSRuntime* rt, MIRType type, Register temp1,
2693                               Register temp2, Register temp3, Label* noBarrier);
2694 
2695  public:
2696   // ========================================================================
2697   // Clamping functions.
2698 
2699   inline void clampIntToUint8(Register reg) PER_SHARED_ARCH;
2700 
2701  public:
2702   // ========================================================================
2703   // Primitive atomic operations.
2704   //
2705   // If the access is from JS and the eventual destination of the result is a
2706   // js::Value, it's probably best to use the JS-specific versions of these,
2707   // see further below.
2708   //
2709   // Temp registers must be defined unless otherwise noted in the per-function
2710   // constraints.
2711 
2712   // 8-bit, 16-bit, and 32-bit wide operations.
2713   //
2714   // The 8-bit and 16-bit operations zero-extend or sign-extend the result to
2715   // 32 bits, according to `type`. On 64-bit systems, the upper 32 bits of the
2716   // result will be zero on some platforms (e.g., on x64) and will be the sign
2717   // extension of the lower bits on other platforms (e.g., MIPS).
2718 
2719   // CompareExchange with memory.  Return the value that was in memory,
2720   // whether we wrote or not.
2721   //
2722   // x86-shared: `output` must be eax.
2723   // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
2724   // and 16-bit wide operations.
2725 
2726   void compareExchange(Scalar::Type type, const Synchronization& sync,
2727                        const Address& mem, Register expected,
2728                        Register replacement, Register output)
2729       DEFINED_ON(arm, arm64, x86_shared);
2730 
2731   void compareExchange(Scalar::Type type, const Synchronization& sync,
2732                        const BaseIndex& mem, Register expected,
2733                        Register replacement, Register output)
2734       DEFINED_ON(arm, arm64, x86_shared);
2735 
2736   void compareExchange(Scalar::Type type, const Synchronization& sync,
2737                        const Address& mem, Register expected,
2738                        Register replacement, Register valueTemp,
2739                        Register offsetTemp, Register maskTemp, Register output)
2740       DEFINED_ON(mips_shared);
2741 
2742   void compareExchange(Scalar::Type type, const Synchronization& sync,
2743                        const BaseIndex& mem, Register expected,
2744                        Register replacement, Register valueTemp,
2745                        Register offsetTemp, Register maskTemp, Register output)
2746       DEFINED_ON(mips_shared);
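
  // Usage sketch for a 32-bit CAS, assuming `mem` addresses an int32 cell;
  // recall that on x86-shared `output` must be eax:
  //
  //   masm.compareExchange(Scalar::Int32, sync, mem, expected, replacement,
  //                        output);
  //   // `output` now holds the old value; it equals `expected` iff the
  //   // store happened.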
2747 
2748   // x64: `output` must be rax.
2749   // ARM: Registers must be distinct; `replacement` and `output` must be
2750   // (even,odd) pairs.
2751 
2752   void compareExchange64(const Synchronization& sync, const Address& mem,
2753                          Register64 expected, Register64 replacement,
2754                          Register64 output) DEFINED_ON(arm, arm64, x64);
2755 
2756   // Exchange with memory.  Return the value initially in memory.
2757   // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
2758   // and 16-bit wide operations.
2759 
2760   void atomicExchange(Scalar::Type type, const Synchronization& sync,
2761                       const Address& mem, Register value, Register output)
2762       DEFINED_ON(arm, arm64, x86_shared);
2763 
2764   void atomicExchange(Scalar::Type type, const Synchronization& sync,
2765                       const BaseIndex& mem, Register value, Register output)
2766       DEFINED_ON(arm, arm64, x86_shared);
2767 
2768   void atomicExchange(Scalar::Type type, const Synchronization& sync,
2769                       const Address& mem, Register value, Register valueTemp,
2770                       Register offsetTemp, Register maskTemp, Register output)
2771       DEFINED_ON(mips_shared);
2772 
2773   void atomicExchange(Scalar::Type type, const Synchronization& sync,
2774                       const BaseIndex& mem, Register value, Register valueTemp,
2775                       Register offsetTemp, Register maskTemp, Register output)
2776       DEFINED_ON(mips_shared);
2777 
2778   void atomicExchange64(const Synchronization& sync, const Address& mem,
2779                         Register64 value, Register64 output)
2780       DEFINED_ON(arm64, x64);
2781 
2782   // Read-modify-write with memory.  Return the value in memory before the
2783   // operation.
2784   //
2785   // x86-shared:
2786   //   For 8-bit operations, `value` and `output` must have a byte subregister.
2787   //   For Add and Sub, `temp` must be invalid.
2788   //   For And, Or, and Xor, `output` must be eax and `temp` must have a byte
2789   //   subregister.
2790   //
2791   // ARM: Registers `value` and `output` must differ.
2792   // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
2793   // and 16-bit wide operations; `value` and `output` must differ.
2794 
2795   void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
2796                      AtomicOp op, Register value, const Address& mem,
2797                      Register temp, Register output)
2798       DEFINED_ON(arm, arm64, x86_shared);
2799 
2800   void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
2801                      AtomicOp op, Imm32 value, const Address& mem,
2802                      Register temp, Register output) DEFINED_ON(x86_shared);
2803 
2804   void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
2805                      AtomicOp op, Register value, const BaseIndex& mem,
2806                      Register temp, Register output)
2807       DEFINED_ON(arm, arm64, x86_shared);
2808 
2809   void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
2810                      AtomicOp op, Imm32 value, const BaseIndex& mem,
2811                      Register temp, Register output) DEFINED_ON(x86_shared);
2812 
2813   void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
2814                      AtomicOp op, Register value, const Address& mem,
2815                      Register valueTemp, Register offsetTemp, Register maskTemp,
2816                      Register output) DEFINED_ON(mips_shared);
2817 
2818   void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
2819                      AtomicOp op, Register value, const BaseIndex& mem,
2820                      Register valueTemp, Register offsetTemp, Register maskTemp,
2821                      Register output) DEFINED_ON(mips_shared);
2822 
2823   // x64:
2824   //   For Add and Sub, `temp` must be invalid.
2825   //   For And, Or, and Xor, `output` must be rax.
2827 
2828   void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
2829                        Register64 value, const Address& mem, Register64 temp,
2830                        Register64 output) DEFINED_ON(arm64, x64);
2831 
2832   // ========================================================================
2833   // Wasm atomic operations.
2834   //
2835   // Constraints, when omitted, are exactly as for the primitive operations
2836   // above.
2837 
2838   void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
2839                            const Address& mem, Register expected,
2840                            Register replacement, Register output)
2841       DEFINED_ON(arm, arm64, x86_shared);
2842 
2843   void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
2844                            const BaseIndex& mem, Register expected,
2845                            Register replacement, Register output)
2846       DEFINED_ON(arm, arm64, x86_shared);
2847 
2848   void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
2849                            const Address& mem, Register expected,
2850                            Register replacement, Register valueTemp,
2851                            Register offsetTemp, Register maskTemp,
2852                            Register output) DEFINED_ON(mips_shared);
2853 
2854   void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
2855                            const BaseIndex& mem, Register expected,
2856                            Register replacement, Register valueTemp,
2857                            Register offsetTemp, Register maskTemp,
2858                            Register output) DEFINED_ON(mips_shared);
2859 
2860   void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
2861                           const Address& mem, Register value, Register output)
2862       DEFINED_ON(arm, arm64, x86_shared);
2863 
2864   void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
2865                           const BaseIndex& mem, Register value, Register output)
2866       DEFINED_ON(arm, arm64, x86_shared);
2867 
2868   void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
2869                           const Address& mem, Register value,
2870                           Register valueTemp, Register offsetTemp,
2871                           Register maskTemp, Register output)
2872       DEFINED_ON(mips_shared);
2873 
2874   void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
2875                           const BaseIndex& mem, Register value,
2876                           Register valueTemp, Register offsetTemp,
2877                           Register maskTemp, Register output)
2878       DEFINED_ON(mips_shared);
2879 
2880   void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2881                          Register value, const Address& mem, Register temp,
2882                          Register output) DEFINED_ON(arm, arm64, x86_shared);
2883 
2884   void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2885                          Imm32 value, const Address& mem, Register temp,
2886                          Register output) DEFINED_ON(x86_shared);
2887 
2888   void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2889                          Register value, const BaseIndex& mem, Register temp,
2890                          Register output) DEFINED_ON(arm, arm64, x86_shared);
2891 
2892   void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2893                          Imm32 value, const BaseIndex& mem, Register temp,
2894                          Register output) DEFINED_ON(x86_shared);
2895 
2896   void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2897                          Register value, const Address& mem, Register valueTemp,
2898                          Register offsetTemp, Register maskTemp,
2899                          Register output) DEFINED_ON(mips_shared);
2900 
2901   void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2902                          Register value, const BaseIndex& mem,
2903                          Register valueTemp, Register offsetTemp,
2904                          Register maskTemp, Register output)
2905       DEFINED_ON(mips_shared);
2906 
2907   // Read-modify-write with memory.  Return no value.
2908   //
2909   // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
2910   // and 16-bit wide operations.
2911 
2912   void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2913                           Register value, const Address& mem, Register temp)
2914       DEFINED_ON(arm, arm64, x86_shared);
2915 
2916   void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2917                           Imm32 value, const Address& mem, Register temp)
2918       DEFINED_ON(x86_shared);
2919 
2920   void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2921                           Register value, const BaseIndex& mem, Register temp)
2922       DEFINED_ON(arm, arm64, x86_shared);
2923 
2924   void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2925                           Imm32 value, const BaseIndex& mem, Register temp)
2926       DEFINED_ON(x86_shared);
2927 
2928   void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2929                           Register value, const Address& mem,
2930                           Register valueTemp, Register offsetTemp,
2931                           Register maskTemp) DEFINED_ON(mips_shared);
2932 
2933   void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
2934                           Register value, const BaseIndex& mem,
2935                           Register valueTemp, Register offsetTemp,
2936                           Register maskTemp) DEFINED_ON(mips_shared);
2937 
2938   // 64-bit wide operations.
2939 
2940   // 64-bit atomic load.  On 64-bit systems, use regular wasm load with
2941   // Synchronization::Load, not this method.
2942   //
2943   // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
2944   // ARM: `temp` should be invalid; `output` must be (even,odd) pair.
2945   // MIPS32: `temp` should be invalid.
2946 
2947   void wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
2948                         const Address& mem, Register64 temp, Register64 output)
2949       DEFINED_ON(arm, mips32, x86);
2950 
2951   void wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
2952                         const BaseIndex& mem, Register64 temp,
2953                         Register64 output) DEFINED_ON(arm, mips32, x86);
2954 
2955   // x86: `expected` must be the same as `output`, and must be edx:eax.
2956   // x86: `replacement` must be ecx:ebx.
2957   // x64: `output` must be rax.
2958   // ARM: Registers must be distinct; `replacement` and `output` must be
2959   // (even,odd) pairs.
2960   // ARM64: The base register in `mem` must not overlap `output`.
2961   // MIPS: Registers must be distinct.
2962 
2963   void wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
2964                              const Address& mem, Register64 expected,
2965                              Register64 replacement,
2966                              Register64 output) PER_ARCH;
2967 
2968   void wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
2969                              const BaseIndex& mem, Register64 expected,
2970                              Register64 replacement,
2971                              Register64 output) PER_ARCH;
2972 
2973   // x86: `value` must be ecx:ebx; `output` must be edx:eax.
2974   // ARM: Registers must be distinct; `value` and `output` must be (even,odd)
2975   // pairs.
2976   // MIPS: Registers must be distinct.
2977 
2978   void wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
2979                             const Address& mem, Register64 value,
2980                             Register64 output) PER_ARCH;
2981 
2982   void wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
2983                             const BaseIndex& mem, Register64 value,
2984                             Register64 output) PER_ARCH;
2985 
2986   // x86: `output` must be edx:eax, `temp` must be ecx:ebx.
2987   // x64: For And, Or, and Xor, `output` must be rax.
2988   // ARM: Registers must be distinct; `temp` and `output` must be (even,odd)
2989   // pairs.
2990   // MIPS: Registers must be distinct.
2991   // MIPS32: `temp` should be invalid.
2992 
2993   void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
2994                            Register64 value, const Address& mem,
2995                            Register64 temp, Register64 output)
2996       DEFINED_ON(arm, arm64, mips32, mips64, x64);
2997 
2998   void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
2999                            Register64 value, const BaseIndex& mem,
3000                            Register64 temp, Register64 output)
3001       DEFINED_ON(arm, arm64, mips32, mips64, x64);
3002 
3003   void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
3004                            const Address& value, const Address& mem,
3005                            Register64 temp, Register64 output) DEFINED_ON(x86);
3006 
3007   void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
3008                            const Address& value, const BaseIndex& mem,
3009                            Register64 temp, Register64 output) DEFINED_ON(x86);
3010 
3011   // Here `value` can be any register.
3012 
3013   void wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
3014                             Register64 value, const BaseIndex& mem)
3015       DEFINED_ON(x64);
3016 
3017   // ========================================================================
3018   // JS atomic operations.
3019   //
3020   // Here the arrayType must be a type that is valid for JS.  As of 2017 that
3021   // is an 8-bit, 16-bit, or 32-bit integer type.
3022   //
3023   // If arrayType is Scalar::Uint32 then:
3024   //
3025   //   - `output` must be a float register (this is bug 1077305)
3026   //   - if the operation takes one temp register then `temp` must be defined
3027   //   - if the operation takes two temp registers then `temp2` must be defined.
3028   //
3029   // Otherwise `output` must be a GPR and `temp`/`temp2` should be InvalidReg.
3030   // (`temp1` must always be valid.)
3031   //
3032   // For additional register constraints, see the primitive 32-bit operations
3033   // and/or wasm operations above.
3034 
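  // For example (a sketch): a fetch-and-add on a Uint32 array must produce
  // its result in a float register (see above), so a caller passes
  // AnyRegister(floatOut) and both temps:
  //
  //   masm.atomicFetchOpJS(Scalar::Uint32, sync, AtomicFetchAddOp, value,
  //                        mem, temp1, temp2, AnyRegister(floatOut));
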
3035   void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
3036                          const Address& mem, Register expected,
3037                          Register replacement, Register temp,
3038                          AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
3039 
3040   void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
3041                          const BaseIndex& mem, Register expected,
3042                          Register replacement, Register temp,
3043                          AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
3044 
3045   void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
3046                          const Address& mem, Register expected,
3047                          Register replacement, Register valueTemp,
3048                          Register offsetTemp, Register maskTemp, Register temp,
3049                          AnyRegister output) DEFINED_ON(mips_shared);
3050 
3051   void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
3052                          const BaseIndex& mem, Register expected,
3053                          Register replacement, Register valueTemp,
3054                          Register offsetTemp, Register maskTemp, Register temp,
3055                          AnyRegister output) DEFINED_ON(mips_shared);
3056 
3057   void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
3058                         const Address& mem, Register value, Register temp,
3059                         AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
3060 
3061   void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
3062                         const BaseIndex& mem, Register value, Register temp,
3063                         AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
3064 
3065   void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
3066                         const Address& mem, Register value, Register valueTemp,
3067                         Register offsetTemp, Register maskTemp, Register temp,
3068                         AnyRegister output) DEFINED_ON(mips_shared);
3069 
3070   void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
3071                         const BaseIndex& mem, Register value,
3072                         Register valueTemp, Register offsetTemp,
3073                         Register maskTemp, Register temp, AnyRegister output)
3074       DEFINED_ON(mips_shared);
3075 
3076   void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
3077                        AtomicOp op, Register value, const Address& mem,
3078                        Register temp1, Register temp2, AnyRegister output)
3079       DEFINED_ON(arm, arm64, x86_shared);
3080 
3081   void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
3082                        AtomicOp op, Register value, const BaseIndex& mem,
3083                        Register temp1, Register temp2, AnyRegister output)
3084       DEFINED_ON(arm, arm64, x86_shared);
3085 
3086   void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
3087                        AtomicOp op, Imm32 value, const Address& mem,
3088                        Register temp1, Register temp2, AnyRegister output)
3089       DEFINED_ON(x86_shared);
3090 
3091   void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
3092                        AtomicOp op, Imm32 value, const BaseIndex& mem,
3093                        Register temp1, Register temp2, AnyRegister output)
3094       DEFINED_ON(x86_shared);
3095 
3096   void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
3097                        AtomicOp op, Register value, const Address& mem,
3098                        Register valueTemp, Register offsetTemp,
3099                        Register maskTemp, Register temp, AnyRegister output)
3100       DEFINED_ON(mips_shared);
3101 
3102   void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
3103                        AtomicOp op, Register value, const BaseIndex& mem,
3104                        Register valueTemp, Register offsetTemp,
3105                        Register maskTemp, Register temp, AnyRegister output)
3106       DEFINED_ON(mips_shared);
3107 
3108   void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
3109                         AtomicOp op, Register value, const Address& mem,
3110                         Register temp) DEFINED_ON(arm, arm64, x86_shared);
3111 
3112   void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
3113                         AtomicOp op, Register value, const BaseIndex& mem,
3114                         Register temp) DEFINED_ON(arm, arm64, x86_shared);
3115 
3116   void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
3117                         AtomicOp op, Imm32 value, const Address& mem,
3118                         Register temp) DEFINED_ON(x86_shared);
3119 
3120   void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
3121                         AtomicOp op, Imm32 value, const BaseIndex& mem,
3122                         Register temp) DEFINED_ON(x86_shared);
3123 
3124   void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
3125                         AtomicOp op, Register value, const Address& mem,
3126                         Register valueTemp, Register offsetTemp,
3127                         Register maskTemp) DEFINED_ON(mips_shared);
3128 
3129   void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
3130                         AtomicOp op, Register value, const BaseIndex& mem,
3131                         Register valueTemp, Register offsetTemp,
3132                         Register maskTemp) DEFINED_ON(mips_shared);
3133 
3134   // ========================================================================
3135   // Spectre Mitigations.
3136   //
3137   // Spectre attacks are side-channel attacks based on cache pollution or
3138   // slow execution of some instructions. Multiple Spectre mitigations are
3139   // possible:
3140   //
3141   //   - Stop speculative execution with memory barriers. Memory barriers
3142   //     force all branches depending on loads to be resolved, and thus
3143   //     resolve all mis-speculated paths.
3144   //
3145   //   - Use conditional move instructions. Some CPUs have a branch predictor
3146   //     but not a flag predictor. In such cases, using a conditional move
3147   //     instruction to zero some pointer/index is enough to add a
3148   //     data-dependency which prevents any further execution until the load
3149   //     is resolved.
3150 
3151   void spectreMaskIndex(Register index, Register length, Register output);
3152   void spectreMaskIndex(Register index, const Address& length, Register output);
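
  // A sketch of the usual pairing with an explicit bounds check:
  //
  //   masm.branch32(Assembler::BelowOrEqual, length, index, &failure);
  //   masm.spectreMaskIndex(index, length, maskedIndex);
  //   // On a mis-speculated fall-through, maskedIndex is clamped, so a
  //   // subsequent load cannot read out of bounds.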
3153 
3154   // The length must be a power of two. Performs a bounds check and Spectre
3155   // index masking.
3156   void boundsCheck32PowerOfTwo(Register index, uint32_t length, Label* failure);
3157 
3158   void speculationBarrier() PER_SHARED_ARCH;
3159 
3160   //}}} check_macroassembler_decl_style
3161  public:
3162   // Emits a test of a value against all types in a TypeSet. A scratch
3163   // register is required.
3164   template <typename Source>
3165   void guardTypeSet(const Source& address, const TypeSet* types,
3166                     BarrierKind kind, Register unboxScratch,
3167                     Register objScratch, Register spectreRegToZero,
3168                     Label* miss);
3169 
3170   void guardObjectType(Register obj, const TypeSet* types, Register scratch,
3171                        Register spectreRegToZero, Label* miss);
3172 
3173 #ifdef DEBUG
3174   void guardTypeSetMightBeIncomplete(const TypeSet* types, Register obj,
3175                                      Register scratch, Label* label);
3176 #endif
3177 
3178   // Unsafe here means the caller is responsible for Spectre mitigations if
3179   // needed. Prefer branchTestObjGroup or one of the other masm helpers!
3180   void loadObjGroupUnsafe(Register obj, Register dest) {
3181     loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
3182   }
3183   void loadObjClassUnsafe(Register obj, Register dest) {
3184     loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
3185     loadPtr(Address(dest, ObjectGroup::offsetOfClasp()), dest);
3186   }
3187 
3188   template <typename EmitPreBarrier>
3189   inline void storeObjGroup(Register group, Register obj,
3190                             EmitPreBarrier emitPreBarrier);
3191   template <typename EmitPreBarrier>
3192   inline void storeObjGroup(ObjectGroup* group, Register obj,
3193                             EmitPreBarrier emitPreBarrier);
3194   template <typename EmitPreBarrier>
3195   inline void storeObjShape(Register shape, Register obj,
3196                             EmitPreBarrier emitPreBarrier);
3197   template <typename EmitPreBarrier>
3198   inline void storeObjShape(Shape* shape, Register obj,
3199                             EmitPreBarrier emitPreBarrier);
3200 
3201   void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
3202     loadPtr(Address(obj, NativeObject::getPrivateDataOffset(nfixed)), dest);
3203   }
3204 
3205   void loadObjProto(Register obj, Register dest) {
3206     loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
3207     loadPtr(Address(dest, ObjectGroup::offsetOfProto()), dest);
3208   }
3209 
3210   void loadStringLength(Register str, Register dest) {
3211     load32(Address(str, JSString::offsetOfLength()), dest);
3212   }
3213 
3214   void loadStringChars(Register str, Register dest, CharEncoding encoding);
3215 
3216   void loadNonInlineStringChars(Register str, Register dest,
3217                                 CharEncoding encoding);
3218   void loadNonInlineStringCharsForStore(Register str, Register dest);
3219   void storeNonInlineStringChars(Register chars, Register str);
3220 
3221   void loadInlineStringChars(Register str, Register dest,
3222                              CharEncoding encoding);
3223   void loadInlineStringCharsForStore(Register str, Register dest);
3224 
3225   void loadStringChar(Register str, Register index, Register output,
3226                       Register scratch, Label* fail);
3227 
3228   void loadRopeLeftChild(Register str, Register dest);
3229   void storeRopeChildren(Register left, Register right, Register str);
3230 
3231   void loadDependentStringBase(Register str, Register dest);
3232   void storeDependentStringBase(Register base, Register str);
3233 
3234   void loadStringIndexValue(Register str, Register dest, Label* fail);
3235 
3236   /**
3237    * Store the character in |src| to |dest|.
3238    */
3239   template <typename T>
3240   void storeChar(const T& src, Address dest, CharEncoding encoding) {
3241     if (encoding == CharEncoding::Latin1) {
3242       store8(src, dest);
3243     } else {
3244       store16(src, dest);
3245     }
3246   }
3247 
3248   /**
3249    * Load the character at |src| into |dest|.
3250    */
3251   template <typename T>
3252   void loadChar(const T& src, Register dest, CharEncoding encoding) {
3253     if (encoding == CharEncoding::Latin1) {
3254       load8ZeroExtend(src, dest);
3255     } else {
3256       load16ZeroExtend(src, dest);
3257     }
3258   }
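
  // A sketch: copy one character between buffers while honoring the
  // encoding (one byte for Latin1, two bytes otherwise):
  //
  //   masm.loadChar(Address(srcChars, 0), temp, encoding);
  //   masm.storeChar(temp, Address(destChars, 0), encoding);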
3259 
3260   /**
3261    * Load the character at |chars[index + offset]| into |dest|. The optional
3262    * offset argument is not scaled to the character encoding.
3263    */
3264   void loadChar(Register chars, Register index, Register dest,
3265                 CharEncoding encoding, int32_t offset = 0);
3266 
3267   /**
3268    * Add |index| to |chars| so that |chars| now points at |chars[index]|.
3269    */
3270   void addToCharPtr(Register chars, Register index, CharEncoding encoding);
3271 
3272  private:
3273   void loadBigIntDigits(Register bigInt, Register digits);
3274 
3275  public:
3276   /**
3277    * Load the first [u]int64 value from |bigInt| into |dest|.
3278    */
3279   void loadBigInt64(Register bigInt, Register64 dest);
3280 
3281   /**
3282    * Load the first digit from |bigInt| into |dest|. Handles the case when the
3283    * BigInt digits length is zero.
3284    *
3285    * Note: A BigInt digit is a pointer-sized value.
3286    */
3287   void loadFirstBigIntDigitOrZero(Register bigInt, Register dest);
3288 
3289   /**
3290    * Initialize a BigInt from |val|. Clobbers |val|!
3291    */
3292   void initializeBigInt64(Scalar::Type type, Register bigInt, Register64 val);
3293 
3294   void loadJSContext(Register dest);
3295 
3296   void switchToRealm(Register realm);
3297   void switchToRealm(const void* realm, Register scratch);
3298   void switchToObjectRealm(Register obj, Register scratch);
3299   void switchToBaselineFrameRealm(Register scratch);
3300   void switchToWasmTlsRealm(Register scratch1, Register scratch2);
3301   void debugAssertContextRealm(const void* realm, Register scratch);
3302 
3303   void loadJitActivation(Register dest) {
3304     loadJSContext(dest);
3305     loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
3306   }
3307 
3308   void guardGroupHasUnanalyzedNewScript(Register group, Register scratch,
3309                                         Label* fail);
3310 
3311   void loadWasmTlsRegFromFrame(Register dest = WasmTlsReg);
3312 
3313   template <typename T>
3314   void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
3315     if (dest.hasValue()) {
3316       loadValue(src, dest.valueReg());
3317     } else {
3318       loadUnboxedValue(src, dest.type(), dest.typedReg());
3319     }
3320   }
3321 
3322   template <typename T>
3323   void loadElementTypedOrValue(const T& src, TypedOrValueRegister dest,
3324                                bool holeCheck, Label* hole) {
3325     if (dest.hasValue()) {
3326       loadValue(src, dest.valueReg());
3327       if (holeCheck) {
3328         branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
3329       }
3330     } else {
3331       if (holeCheck) {
3332         branchTestMagic(Assembler::Equal, src, hole);
3333       }
3334       loadUnboxedValue(src, dest.type(), dest.typedReg());
3335     }
3336   }
3337 
3338   template <typename T>
3339   void storeTypedOrValue(TypedOrValueRegister src, const T& dest) {
3340     if (src.hasValue()) {
3341       storeValue(src.valueReg(), dest);
3342     } else if (IsFloatingPointType(src.type())) {
3343       FloatRegister reg = src.typedReg().fpu();
3344       if (src.type() == MIRType::Float32) {
3345         ScratchDoubleScope fpscratch(*this);
3346         convertFloat32ToDouble(reg, fpscratch);
3347         boxDouble(fpscratch, dest);
3348       } else {
3349         boxDouble(reg, dest);
3350       }
3351     } else {
3352       storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
3353     }
3354   }
3355 
3356   template <typename T>
3357   inline void storeObjectOrNull(Register src, const T& dest);
3358 
3359   template <typename T>
3360   void storeConstantOrRegister(const ConstantOrRegister& src, const T& dest) {
3361     if (src.constant()) {
3362       storeValue(src.value(), dest);
3363     } else {
3364       storeTypedOrValue(src.reg(), dest);
3365     }
3366   }
3367 
3368   void storeCallPointerResult(Register reg) {
3369     if (reg != ReturnReg) {
3370       mov(ReturnReg, reg);
3371     }
3372   }
3373 
3374   inline void storeCallBoolResult(Register reg);
3375   inline void storeCallInt32Result(Register reg);
3376 
3377   void storeCallFloatResult(FloatRegister reg) {
3378     if (reg != ReturnDoubleReg) {
3379       moveDouble(ReturnDoubleReg, reg);
3380     }
3381   }
3382 
3383   inline void storeCallResultValue(AnyRegister dest, JSValueType type);
3384 
3385   void storeCallResultValue(ValueOperand dest) {
3386 #if defined(JS_NUNBOX32)
3387     // reshuffle the return registers used for a call result to store into
3388     // dest, using ReturnReg as a scratch register if necessary. This must
3389     // only be called after returning from a call, at a point when the
3390     // return register is not live. XXX would be better to allow wrappers
3391     // to store the return value to different places.
3392     if (dest.typeReg() == JSReturnReg_Data) {
3393       if (dest.payloadReg() == JSReturnReg_Type) {
3394         // swap the two registers.
3395         mov(JSReturnReg_Type, ReturnReg);
3396         mov(JSReturnReg_Data, JSReturnReg_Type);
3397         mov(ReturnReg, JSReturnReg_Data);
3398       } else {
3399         mov(JSReturnReg_Data, dest.payloadReg());
3400         mov(JSReturnReg_Type, dest.typeReg());
3401       }
3402     } else {
3403       mov(JSReturnReg_Type, dest.typeReg());
3404       mov(JSReturnReg_Data, dest.payloadReg());
3405     }
3406 #elif defined(JS_PUNBOX64)
3407     if (dest.valueReg() != JSReturnReg) {
3408       mov(JSReturnReg, dest.valueReg());
3409     }
3410 #else
3411 #  error "Bad architecture"
3412 #endif
3413   }
3414 
3415   inline void storeCallResultValue(TypedOrValueRegister dest);
3416 
3417  private:
3418   template <typename T>
3419   void unguardedCallPreBarrier(const T& address, MIRType type) {
3420     Label done;
3421     if (type == MIRType::Value) {
3422       branchTestGCThing(Assembler::NotEqual, address, &done);
3423     } else if (type == MIRType::Object || type == MIRType::String) {
3424       branchPtr(Assembler::Equal, address, ImmWord(0), &done);
3425     }
3426 
3427     Push(PreBarrierReg);
3428     computeEffectiveAddress(address, PreBarrierReg);
3429 
3430     const JitRuntime* rt = GetJitContext()->runtime->jitRuntime();
3431     TrampolinePtr preBarrier = rt->preBarrier(type);
3432 
3433     call(preBarrier);
3434     Pop(PreBarrierReg);
3435     bind(&done);
3436   }
3437 
3438  public:
3439   template <typename T>
3440   void guardedCallPreBarrier(const T& address, MIRType type) {
3441     Label done;
3442     branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);
3443     unguardedCallPreBarrier(address, type);
3444     bind(&done);
3445   }
3446 
3447   // Like guardedCallPreBarrier, but this one can also be called from
3448   // runtime-wide trampolines, because it loads cx->zone (instead of baking
3449   // in the current Zone) if JitContext::realm is nullptr.
3450   template <typename T>
3451   void guardedCallPreBarrierAnyZone(const T& address, MIRType type,
3452                                     Register scratch) {
3453     Label done;
3454     branchTestNeedsIncrementalBarrierAnyZone(Assembler::Zero, &done, scratch);
3455     unguardedCallPreBarrier(address, type);
3456     bind(&done);
3457   }
3458 
3459   template <typename T>
3460   void loadFromTypedArray(Scalar::Type arrayType, const T& src,
3461                           AnyRegister dest, Register temp, Label* fail);
3462 
3463   template <typename T>
3464   void loadFromTypedArray(Scalar::Type arrayType, const T& src,
3465                           const ValueOperand& dest, bool allowDouble,
3466                           Register temp, Label* fail);
3467 
3468   template <typename T>
3469   void loadFromTypedBigIntArray(Scalar::Type arrayType, const T& src,
3470                                 Register bigInt, Register64 temp);
3471 
3472   template <typename S, typename T>
3473   void storeToTypedIntArray(Scalar::Type arrayType, const S& value,
3474                             const T& dest) {
3475     switch (arrayType) {
3476       case Scalar::Int8:
3477       case Scalar::Uint8:
3478       case Scalar::Uint8Clamped:
3479         store8(value, dest);
3480         break;
3481       case Scalar::Int16:
3482       case Scalar::Uint16:
3483         store16(value, dest);
3484         break;
3485       case Scalar::Int32:
3486       case Scalar::Uint32:
3487         store32(value, dest);
3488         break;
3489       default:
3490         MOZ_CRASH("Invalid typed array type");
3491     }
3492   }

  void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                              const BaseIndex& dest);
  void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                              const Address& dest);

  void storeToTypedBigIntArray(Scalar::Type arrayType, Register64 value,
                               const BaseIndex& dest);
  void storeToTypedBigIntArray(Scalar::Type arrayType, Register64 value,
                               const Address& dest);

  void memoryBarrierBefore(const Synchronization& sync);
  void memoryBarrierAfter(const Synchronization& sync);

  void debugAssertIsObject(const ValueOperand& val);
  void debugAssertObjHasFixedSlots(Register obj, Register scratch);

  void branchIfNativeIteratorNotReusable(Register ni, Label* notReusable);

  void iteratorMore(Register obj, ValueOperand output, Register temp);
  void iteratorClose(Register obj, Register temp1, Register temp2,
                     Register temp3);

  using MacroAssemblerSpecific::extractTag;
  MOZ_MUST_USE Register extractTag(const TypedOrValueRegister& reg,
                                   Register scratch) {
    if (reg.hasValue()) {
      return extractTag(reg.valueReg(), scratch);
    }
    mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
    return scratch;
  }

  using MacroAssemblerSpecific::extractObject;
  MOZ_MUST_USE Register extractObject(const TypedOrValueRegister& reg,
                                      Register scratch) {
    if (reg.hasValue()) {
      return extractObject(reg.valueReg(), scratch);
    }
    MOZ_ASSERT(reg.type() == MIRType::Object);
    return reg.typedReg().gpr();
  }

  // Inline version of js_TypedArray_uint8_clamp_double.
  // This function clobbers the input register.
  void clampDoubleToUint8(FloatRegister input, Register output) PER_ARCH;

  using MacroAssemblerSpecific::ensureDouble;

  template <typename S>
  void ensureDouble(const S& source, FloatRegister dest, Label* failure) {
    Label isDouble, done;
    branchTestDouble(Assembler::Equal, source, &isDouble);
    branchTestInt32(Assembler::NotEqual, source, failure);

    convertInt32ToDouble(source, dest);
    jump(&done);

    bind(&isDouble);
    unboxDouble(source, dest);

    bind(&done);
  }
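
  // A minimal usage sketch (operand names are illustrative): coerce a Value
  // into a double register, converting int32 in place, unboxing a double,
  // and jumping to the failure label for any other type.
  //
  //   masm.ensureDouble(val, floatTemp, &bailout);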

  // Inline allocation.
 private:
  void checkAllocatorState(Label* fail);
  bool shouldNurseryAllocate(gc::AllocKind allocKind,
                             gc::InitialHeap initialHeap);
  void nurseryAllocateObject(Register result, Register temp,
                             gc::AllocKind allocKind, size_t nDynamicSlots,
                             Label* fail);
  void bumpPointerAllocate(Register result, Register temp, Label* fail,
                           CompileZone* zone, void* posAddr,
                           const void* curEndAddr, JS::TraceKind traceKind,
                           uint32_t size);

  void freeListAllocate(Register result, Register temp, gc::AllocKind allocKind,
                        Label* fail);
  void allocateObject(Register result, Register temp, gc::AllocKind allocKind,
                      uint32_t nDynamicSlots, gc::InitialHeap initialHeap,
                      Label* fail);
  void nurseryAllocateString(Register result, Register temp,
                             gc::AllocKind allocKind, Label* fail);
  void allocateString(Register result, Register temp, gc::AllocKind allocKind,
                      gc::InitialHeap initialHeap, Label* fail);
  void nurseryAllocateBigInt(Register result, Register temp, Label* fail);
  void allocateNonObject(Register result, Register temp,
                         gc::AllocKind allocKind, Label* fail);
  void copySlotsFromTemplate(Register obj,
                             const NativeTemplateObject& templateObj,
                             uint32_t start, uint32_t end);
  void fillSlotsWithConstantValue(Address addr, Register temp, uint32_t start,
                                  uint32_t end, const Value& v);
  void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start,
                              uint32_t end);
  void fillSlotsWithUninitialized(Address addr, Register temp, uint32_t start,
                                  uint32_t end);

  void initGCSlots(Register obj, Register temp,
                   const NativeTemplateObject& templateObj, bool initContents);

 public:
  void callFreeStub(Register slots);
  void createGCObject(Register result, Register temp,
                      const TemplateObject& templateObj,
                      gc::InitialHeap initialHeap, Label* fail,
                      bool initContents = true);

  void initGCThing(Register obj, Register temp,
                   const TemplateObject& templateObj, bool initContents = true);

  enum class TypedArrayLength { Fixed, Dynamic };

  void initTypedArraySlots(Register obj, Register temp, Register lengthReg,
                           LiveRegisterSet liveRegs, Label* fail,
                           TypedArrayObject* templateObj,
                           TypedArrayLength lengthKind);

  void newGCString(Register result, Register temp, Label* fail,
                   bool attemptNursery);
  void newGCFatInlineString(Register result, Register temp, Label* fail,
                            bool attemptNursery);

  void newGCBigInt(Register result, Register temp, Label* fail,
                   bool attemptNursery);

  // Compares two strings for equality based on the JSOp.
  // This checks for identical pointers, atoms, and string lengths, and jumps
  // to |fail| for everything else.
  void compareStrings(JSOp op, Register left, Register right, Register result,
                      Label* fail);
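
  // A hedged usage sketch (registers and labels are illustrative): decide
  // strict string equality inline, deferring to a slow path whenever the
  // fast checks cannot resolve the comparison.
  //
  //   masm.compareStrings(JSOp::StrictEq, lhs, rhs, result, &slowPath);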

  // Result of the typeof operation. Falls back to slow-path for proxies.
  void typeOfObject(Register objReg, Register scratch, Label* slow,
                    Label* isObject, Label* isCallable, Label* isUndefined);

  // Implementation of IsCallable. Doesn't handle proxies.
  void isCallable(Register obj, Register output, Label* isProxy) {
    isCallableOrConstructor(true, obj, output, isProxy);
  }
  void isConstructor(Register obj, Register output, Label* isProxy) {
    isCallableOrConstructor(false, obj, output, isProxy);
  }

 private:
  void isCallableOrConstructor(bool isCallable, Register obj, Register output,
                               Label* isProxy);

 public:
  // Generates code used to complete a bailout.
  void generateBailoutTail(Register scratch, Register bailoutInfo);

  void assertRectifierFrameParentType(Register frameType);

 public:
#ifndef JS_CODEGEN_ARM64
  // StackPointer manipulation functions.
  // On ARM64, the StackPointer is implemented as two synchronized registers.
  // Code shared across platforms must use these functions to be valid.
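  //
  // For example (a hedged sketch; the byte count is arbitrary), shared code
  // reserves and releases scratch stack space through these wrappers rather
  // than naming sp directly:
  //
  //   masm.subFromStackPtr(Imm32(16));
  //   // ... use the reserved space ...
  //   masm.addToStackPtr(Imm32(16));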
  template <typename T>
  inline void addToStackPtr(T t);
  template <typename T>
  inline void addStackPtrTo(T t);

  void subFromStackPtr(Imm32 imm32) DEFINED_ON(mips32, mips64, arm, x86, x64);
  void subFromStackPtr(Register reg);

  template <typename T>
  void subStackPtrFrom(T t) {
    subPtr(getStackPointer(), t);
  }

  template <typename T>
  void andToStackPtr(T t) {
    andPtr(t, getStackPointer());
  }
  template <typename T>
  void andStackPtrTo(T t) {
    andPtr(getStackPointer(), t);
  }

  template <typename T>
  void moveToStackPtr(T t) {
    movePtr(t, getStackPointer());
  }
  template <typename T>
  void moveStackPtrTo(T t) {
    movePtr(getStackPointer(), t);
  }

  template <typename T>
  void loadStackPtr(T t) {
    loadPtr(t, getStackPointer());
  }
  template <typename T>
  void storeStackPtr(T t) {
    storePtr(getStackPointer(), t);
  }

  // StackPointer testing functions.
  // On ARM64, sp can function as the zero register depending on context.
  // Code shared across platforms must use these functions to be valid.
  template <typename T>
  inline void branchTestStackPtr(Condition cond, T t, Label* label);
  template <typename T>
  inline void branchStackPtr(Condition cond, T rhs, Label* label);
  template <typename T>
  inline void branchStackPtrRhs(Condition cond, T lhs, Label* label);

  // Move the stack pointer based on the requested amount.
  inline void reserveStack(uint32_t amount);
#else  // !JS_CODEGEN_ARM64
  void reserveStack(uint32_t amount);
#endif

 public:
  void enableProfilingInstrumentation() {
    emitProfilingInstrumentation_ = true;
  }

 private:
  // This class is used to surround call sites throughout the assembler. It
  // is used by the callWithABI and callJit functions, except for the
  // variants suffixed with NoProfiler.
  class AutoProfilerCallInstrumentation {
    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;

   public:
    explicit AutoProfilerCallInstrumentation(
        MacroAssembler& masm MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
    ~AutoProfilerCallInstrumentation() = default;
  };
  friend class AutoProfilerCallInstrumentation;
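
  // A hedged sketch of how an instrumented call site is written inside this
  // class (the call target is a placeholder):
  //
  //   {
  //     AutoProfilerCallInstrumentation profiler(*this);
  //     call(someTarget);
  //   }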

  void appendProfilerCallSite(CodeOffset label) {
    propagateOOM(profilerCallSites_.append(label));
  }

  // Fix up the code pointers to be written for locations where profilerCallSite
  // emitted moves of RIP to a register.
  void linkProfilerCallSites(JitCode* code);

  // This field is used to manage profiling instrumentation output. If
  // provided and enabled, then instrumentation will be emitted around call
  // sites.
  bool emitProfilingInstrumentation_;

  // Record locations of the call sites.
  Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;

 public:
  void loadJitCodeRaw(Register func, Register dest);
  void loadJitCodeNoArgCheck(Register func, Register dest);

  void loadBaselineFramePtr(Register framePtr, Register dest);

  void pushBaselineFramePtr(Register framePtr, Register scratch) {
    loadBaselineFramePtr(framePtr, scratch);
    push(scratch);
  }

  void PushBaselineFramePtr(Register framePtr, Register scratch) {
    loadBaselineFramePtr(framePtr, scratch);
    Push(scratch);
  }

  using MacroAssemblerSpecific::movePtr;

  void movePtr(TrampolinePtr ptr, Register dest) {
    movePtr(ImmPtr(ptr.value), dest);
  }

 private:
  void handleFailure();

 public:
  Label* exceptionLabel() {
    // Exceptions are currently handled the same way as sequential failures.
    return &failureLabel_;
  }

  Label* failureLabel() { return &failureLabel_; }

  void finish();
  void link(JitCode* code);

  void assumeUnreachable(const char* output);

  template <typename T>
  void assertTestInt32(Condition cond, const T& value, const char* output);

  void printf(const char* output);
  void printf(const char* output, Register value);

#ifdef JS_TRACE_LOGGING
  void loadTraceLogger(Register logger) {
    loadJSContext(logger);
    loadPtr(Address(logger, offsetof(JSContext, traceLogger)), logger);
  }
  void tracelogStartId(Register logger, uint32_t textId, bool force = false);
  void tracelogStartId(Register logger, Register textId);
  void tracelogStartEvent(Register logger, Register event);
  void tracelogStopId(Register logger, uint32_t textId, bool force = false);
  void tracelogStopId(Register logger, Register textId);
#endif

#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2) \
  MOZ_ASSERT(IsFloatingPointType(type));                             \
  if (type == MIRType::Double)                                       \
    method##Double(arg1d, arg2);                                     \
  else                                                               \
    method##Float32(arg1f, arg2);

  void loadConstantFloatingPoint(double d, float f, FloatRegister dest,
                                 MIRType destType) {
    DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
  }
  void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest,
                                MIRType destType) {
    DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
  }
  void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest,
                                 MIRType destType) {
    DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
  }
  void convertInt32ToFloatingPoint(Register src, FloatRegister dest,
                                   MIRType destType) {
    DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
  }

#undef DISPATCH_FLOATING_POINT_OP
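
  // To illustrate the dispatch above (a hedged example): a call such as
  //
  //   masm.loadConstantFloatingPoint(5.0, 5.0f, dest, MIRType::Double);
  //
  // expands to loadConstantDouble(5.0, dest), while MIRType::Float32 would
  // select loadConstantFloat32(5.0f, dest) instead.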

  void convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
                                   Label* fail, MIRType outputType);

  void outOfLineTruncateSlow(FloatRegister src, Register dest,
                             bool widenFloatToDouble, bool compilingWasm,
                             wasm::BytecodeOffset callOffset);

  void convertInt32ValueToDouble(const Address& address, Register scratch,
                                 Label* done);
  void convertInt32ValueToDouble(ValueOperand val);

  void convertValueToDouble(ValueOperand value, FloatRegister output,
                            Label* fail) {
    convertValueToFloatingPoint(value, output, fail, MIRType::Double);
  }

  void convertValueToFloat(ValueOperand value, FloatRegister output,
                           Label* fail) {
    convertValueToFloatingPoint(value, output, fail, MIRType::Float32);
  }

  //
  // Functions for converting values to int.
  //
  void convertDoubleToInt(FloatRegister src, Register output,
                          FloatRegister temp, Label* truncateFail, Label* fail,
                          IntConversionBehavior behavior);

  // Strings may be handled by providing labels to jump to when the behavior
  // is truncation or clamping. The subroutine, usually an OOL call, is
  // passed the unboxed string in |stringReg| and should convert it to a
  // double and store it into |temp|.
  void convertValueToInt(
      ValueOperand value, MDefinition* input, Label* handleStringEntry,
      Label* handleStringRejoin, Label* truncateDoubleSlow, Register stringReg,
      FloatRegister temp, Register output, Label* fail,
      IntConversionBehavior behavior,
      IntConversionInputKind conversion = IntConversionInputKind::Any);

  // This carries over the MToNumberInt32 operation on the ValueOperand
  // input; see the comment at the top of this class.
  void convertValueToInt32(
      ValueOperand value, MDefinition* input, FloatRegister temp,
      Register output, Label* fail, bool negativeZeroCheck,
      IntConversionInputKind conversion = IntConversionInputKind::Any) {
    convertValueToInt(
        value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
        negativeZeroCheck ? IntConversionBehavior::NegativeZeroCheck
                          : IntConversionBehavior::Normal,
        conversion);
  }

  // This carries over the MTruncateToInt32 operation on the ValueOperand
  // input; see the comment at the top of this class.
  void truncateValueToInt32(ValueOperand value, MDefinition* input,
                            Label* handleStringEntry, Label* handleStringRejoin,
                            Label* truncateDoubleSlow, Register stringReg,
                            FloatRegister temp, Register output, Label* fail) {
    convertValueToInt(value, input, handleStringEntry, handleStringRejoin,
                      truncateDoubleSlow, stringReg, temp, output, fail,
                      IntConversionBehavior::Truncate);
  }

  void truncateValueToInt32(ValueOperand value, FloatRegister temp,
                            Register output, Label* fail) {
    truncateValueToInt32(value, nullptr, nullptr, nullptr, nullptr, InvalidReg,
                         temp, output, fail);
  }

  // Truncates, i.e. removes any fractional parts, but doesn't wrap around to
  // the int32 range.
  void truncateNoWrapValueToInt32(ValueOperand value, MDefinition* input,
                                  FloatRegister temp, Register output,
                                  Label* truncateDoubleSlow, Label* fail) {
    convertValueToInt(value, input, nullptr, nullptr, truncateDoubleSlow,
                      InvalidReg, temp, output, fail,
                      IntConversionBehavior::TruncateNoWrap);
  }

  // Convenience functions for clamping values to uint8.
  void clampValueToUint8(ValueOperand value, MDefinition* input,
                         Label* handleStringEntry, Label* handleStringRejoin,
                         Register stringReg, FloatRegister temp,
                         Register output, Label* fail) {
    convertValueToInt(value, input, handleStringEntry, handleStringRejoin,
                      nullptr, stringReg, temp, output, fail,
                      IntConversionBehavior::ClampToUint8);
  }

  MOZ_MUST_USE bool icBuildOOLFakeExitFrame(void* fakeReturnAddr,
                                            AutoSaveLiveRegisters& save);

  // Align the stack pointer based on the number of arguments which are pushed
  // on the stack, such that the JitFrameLayout would be correctly aligned on
  // the JitStackAlignment.
  void alignJitStackBasedOnNArgs(Register nargs, bool countIncludesThis);
  void alignJitStackBasedOnNArgs(uint32_t argc);
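
  // Worked example (a hedged sketch; exact padding is platform-dependent):
  // with JitStackAlignment == 16 and 8-byte Values, argc == 3 plus |this|
  // pushes 4 * 8 = 32 bytes of arguments, so these functions pad the stack
  // pointer such that the JitFrameLayout that follows starts on a 16-byte
  // boundary.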

  inline void assertStackAlignment(uint32_t alignment, int32_t offset = 0);

  void performPendingReadBarriers();

  void touchFrameValues(Register numStackValues, Register scratch1,
                        Register scratch2);

 private:
  // Methods to get a singleton object or object group from a type set without
  // a read barrier, and record the result so that we can perform the barrier
  // later.
  JSObject* getSingletonAndDelayBarrier(const TypeSet* types, size_t i);
  ObjectGroup* getGroupAndDelayBarrier(const TypeSet* types, size_t i);

  Vector<JSObject*, 0, SystemAllocPolicy> pendingObjectReadBarriers_;
  Vector<ObjectGroup*, 0, SystemAllocPolicy> pendingObjectGroupReadBarriers_;
};

// StackMacroAssembler checks no GC will happen while it's on the stack.
class MOZ_RAII StackMacroAssembler : public MacroAssembler {
  JS::AutoCheckCannotGC nogc;

 public:
  StackMacroAssembler() : MacroAssembler() {}
  explicit StackMacroAssembler(JSContext* cx) : MacroAssembler(cx) {}
};

// WasmMacroAssembler does not contain GC pointers, so it doesn't need the
// no-GC checking StackMacroAssembler has.
class MOZ_RAII WasmMacroAssembler : public MacroAssembler {
 public:
  explicit WasmMacroAssembler(TempAllocator& alloc, bool limitedSize = true)
      : MacroAssembler(WasmToken(), alloc) {
    if (!limitedSize) {
      setUnlimitedBuffer();
    }
  }
  ~WasmMacroAssembler() { assertNoGCThings(); }
};

// Heap-allocated MacroAssembler used for Ion off-thread code generation.
// GC cancels off-thread compilations.
class IonHeapMacroAssembler : public MacroAssembler {
 public:
  IonHeapMacroAssembler() : MacroAssembler() {
    MOZ_ASSERT(CurrentThreadIsIonCompiling());
  }
};
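
// A hedged sketch of choosing a subclass (variable names are illustrative):
//
//   StackMacroAssembler masm(cx);        // on-stack use; asserts no GC
//   WasmMacroAssembler wasmMasm(alloc);  // wasm compilation; no GC pointers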

//{{{ check_macroassembler_style
inline uint32_t MacroAssembler::framePushed() const { return framePushed_; }

inline void MacroAssembler::setFramePushed(uint32_t framePushed) {
  framePushed_ = framePushed;
}

inline void MacroAssembler::adjustFrame(int32_t value) {
  MOZ_ASSERT_IF(value < 0, framePushed_ >= uint32_t(-value));
  setFramePushed(framePushed_ + value);
}

inline void MacroAssembler::implicitPop(uint32_t bytes) {
  MOZ_ASSERT(bytes % sizeof(intptr_t) == 0);
  MOZ_ASSERT(bytes <= INT32_MAX);
  adjustFrame(-int32_t(bytes));
}
//}}} check_macroassembler_style

static inline Assembler::DoubleCondition JSOpToDoubleCondition(JSOp op) {
  switch (op) {
    case JSOp::Eq:
    case JSOp::StrictEq:
      return Assembler::DoubleEqual;
    case JSOp::Ne:
    case JSOp::StrictNe:
      return Assembler::DoubleNotEqualOrUnordered;
    case JSOp::Lt:
      return Assembler::DoubleLessThan;
    case JSOp::Le:
      return Assembler::DoubleLessThanOrEqual;
    case JSOp::Gt:
      return Assembler::DoubleGreaterThan;
    case JSOp::Ge:
      return Assembler::DoubleGreaterThanOrEqual;
    default:
      MOZ_CRASH("Unexpected comparison operation");
  }
}

// Note: the op may have been inverted during lowering (to put constants in a
// position where they can be immediates), so it is important to use
// lir->jsop() instead of mir->jsop() when it is present.
static inline Assembler::Condition JSOpToCondition(JSOp op, bool isSigned) {
  if (isSigned) {
    switch (op) {
      case JSOp::Eq:
      case JSOp::StrictEq:
        return Assembler::Equal;
      case JSOp::Ne:
      case JSOp::StrictNe:
        return Assembler::NotEqual;
      case JSOp::Lt:
        return Assembler::LessThan;
      case JSOp::Le:
        return Assembler::LessThanOrEqual;
      case JSOp::Gt:
        return Assembler::GreaterThan;
      case JSOp::Ge:
        return Assembler::GreaterThanOrEqual;
      default:
        MOZ_CRASH("Unrecognized comparison operation");
    }
  } else {
    switch (op) {
      case JSOp::Eq:
      case JSOp::StrictEq:
        return Assembler::Equal;
      case JSOp::Ne:
      case JSOp::StrictNe:
        return Assembler::NotEqual;
      case JSOp::Lt:
        return Assembler::Below;
      case JSOp::Le:
        return Assembler::BelowOrEqual;
      case JSOp::Gt:
        return Assembler::Above;
      case JSOp::Ge:
        return Assembler::AboveOrEqual;
      default:
        MOZ_CRASH("Unrecognized comparison operation");
    }
  }
}

static inline size_t StackDecrementForCall(uint32_t alignment,
                                           size_t bytesAlreadyPushed,
                                           size_t bytesToPush) {
  return bytesToPush +
         ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
}
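
// For example, with alignment == 16, bytesAlreadyPushed == 4, and
// bytesToPush == 20, ComputeByteAlignment(24, 16) returns 8, so the
// decrement is 20 + 8 = 28 bytes and the resulting total of 4 + 28 = 32
// bytes stays 16-byte aligned.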

static inline MIRType ToMIRType(MIRType t) { return t; }

static inline MIRType ToMIRType(ABIArgType argType) {
  switch (argType) {
    case ArgType_General:
      return MIRType::Pointer;
    case ArgType_Float64:
      return MIRType::Double;
    case ArgType_Float32:
      return MIRType::Float32;
    case ArgType_Int32:
      return MIRType::Int32;
    case ArgType_Int64:
      return MIRType::Int64;
    default:
      break;
  }
  MOZ_CRASH("unexpected argType");
}

template <class VecT>
class ABIArgIter {
  ABIArgGenerator gen_;
  const VecT& types_;
  unsigned i_;

  void settle() {
    if (!done()) gen_.next(ToMIRType(types_[i_]));
  }

 public:
  explicit ABIArgIter(const VecT& types) : types_(types), i_(0) { settle(); }
  void operator++(int) {
    MOZ_ASSERT(!done());
    i_++;
    settle();
  }
  bool done() const { return i_ == types_.length(); }

  ABIArg* operator->() {
    MOZ_ASSERT(!done());
    return &gen_.current();
  }
  ABIArg& operator*() {
    MOZ_ASSERT(!done());
    return gen_.current();
  }

  unsigned index() const {
    MOZ_ASSERT(!done());
    return i_;
  }
  MIRType mirType() const {
    MOZ_ASSERT(!done());
    return ToMIRType(types_[i_]);
  }
  uint32_t stackBytesConsumedSoFar() const {
    return gen_.stackBytesConsumedSoFar();
  }
};
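
// A hedged usage sketch (the vector type and the register handling are
// illustrative, not prescriptive): walk the ABI locations assigned to a
// list of argument types.
//
//   Vector<MIRType, 8, SystemAllocPolicy> args;
//   // ... append argument types ...
//   for (ABIArgIter<decltype(args)> iter(args); !iter.done(); iter++) {
//     if (iter->kind() == ABIArg::GPR) {
//       // iter->gpr() is the register carrying argument iter.index().
//     } else if (iter->kind() == ABIArg::Stack) {
//       // The argument lives at offset iter->offsetFromArgBase().
//     }
//   }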

}  // namespace jit
}  // namespace js

#endif /* jit_MacroAssembler_h */