1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2  * vim: set ts=8 sts=4 et sw=4 tw=99:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #ifndef jit_MacroAssembler_h
8 #define jit_MacroAssembler_h
9 
10 #include "mozilla/MacroForEach.h"
11 #include "mozilla/MathAlgorithms.h"
12 
13 #include "jscompartment.h"
14 
15 #if defined(JS_CODEGEN_X86)
16 # include "jit/x86/MacroAssembler-x86.h"
17 #elif defined(JS_CODEGEN_X64)
18 # include "jit/x64/MacroAssembler-x64.h"
19 #elif defined(JS_CODEGEN_ARM)
20 # include "jit/arm/MacroAssembler-arm.h"
21 #elif defined(JS_CODEGEN_ARM64)
22 # include "jit/arm64/MacroAssembler-arm64.h"
23 #elif defined(JS_CODEGEN_MIPS32)
24 # include "jit/mips32/MacroAssembler-mips32.h"
25 #elif defined(JS_CODEGEN_MIPS64)
26 # include "jit/mips64/MacroAssembler-mips64.h"
27 #elif defined(JS_CODEGEN_NONE)
28 # include "jit/none/MacroAssembler-none.h"
29 #else
30 # error "Unknown architecture!"
31 #endif
32 #include "jit/AtomicOp.h"
33 #include "jit/IonInstrumentation.h"
34 #include "jit/JitCompartment.h"
35 #include "jit/VMFunctions.h"
36 #include "vm/ProxyObject.h"
37 #include "vm/Shape.h"
38 #include "vm/UnboxedObject.h"
39 
40 // * How to read/write MacroAssembler method declarations:
41 //
42 // The following macros are made to avoid #ifdef around each method declaration
43 // of the Macro Assembler, and they are also used as a hint on the location of
44 // the implementations of each method.  For example, the following declaration
45 //
46 //   void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
47 //
48 // suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
49 // x86-shared/MacroAssembler-x86-shared.h, and also in arm/MacroAssembler-arm.h.
50 //
51 // - If there is no annotation, then there is only one generic definition in
52 //   MacroAssembler.cpp.
53 //
54 // - If the declaration is "inline", then the method definition(s) would be in
55 //   the "-inl.h" variant of the same file(s).
56 //
57 // The script check_macroassembler_style.py (check-masm target of the Makefile)
58 // is used to verify that method definitions are matching the annotation added
59 // to the method declarations.  If there is any difference, then you either
60 // forgot to define the method in one of the macro assembler, or you forgot to
61 // update the annotation of the macro assembler declaration.
62 //
63 // Some convenient short-cuts are used to avoid repeating the same list of
64 // architectures on each method declaration, such as PER_ARCH and
65 // PER_SHARED_ARCH.
66 
67 # define ALL_ARCH mips32, mips64, arm, arm64, x86, x64
68 # define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared
69 
70 // * How this macro works:
71 //
72 // DEFINED_ON is a macro which check if, for the current architecture, the
73 // method is defined on the macro assembler or not.
74 //
75 // For each architecture, we have a macro named DEFINED_ON_arch.  This macro is
76 // empty if this is not the current architecture.  Otherwise it must be either
77 // set to "define" or "crash" (only used for the none target so far).
78 //
79 // The DEFINED_ON macro maps the list of architecture names given as argument to
80 // a list of macro names.  For example,
81 //
82 //   DEFINED_ON(arm, x86_shared)
83 //
84 // is expanded to
85 //
86 //   DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
87 //
88 // which are later expanded on ARM, x86, x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
89 // to
90 //
91 //   define
92 //
93 // or if the JIT is disabled or set to no architecture to
94 //
95 //   crash
96 //
97 // or to nothing, if the current architecture is not listed in the list of
98 // arguments of DEFINED_ON.  Note, only one of the DEFINED_ON_arch macro
99 // contributes to the non-empty result, which is the macro of the current
100 // architecture if it is listed in the arguments of DEFINED_ON.
101 //
102 // This result is appended to DEFINED_ON_RESULT_ before expanding the macro,
103 // which result is either no annotation, a MOZ_CRASH(), or a "= delete"
104 // annotation on the method declaration.
105 
106 # define DEFINED_ON_x86
107 # define DEFINED_ON_x64
108 # define DEFINED_ON_x86_shared
109 # define DEFINED_ON_arm
110 # define DEFINED_ON_arm64
111 # define DEFINED_ON_mips32
112 # define DEFINED_ON_mips64
113 # define DEFINED_ON_mips_shared
114 # define DEFINED_ON_none
115 
116 // Specialize for each architecture.
117 #if defined(JS_CODEGEN_X86)
118 # undef DEFINED_ON_x86
119 # define DEFINED_ON_x86 define
120 # undef DEFINED_ON_x86_shared
121 # define DEFINED_ON_x86_shared define
122 #elif defined(JS_CODEGEN_X64)
123 # undef DEFINED_ON_x64
124 # define DEFINED_ON_x64 define
125 # undef DEFINED_ON_x86_shared
126 # define DEFINED_ON_x86_shared define
127 #elif defined(JS_CODEGEN_ARM)
128 # undef DEFINED_ON_arm
129 # define DEFINED_ON_arm define
130 #elif defined(JS_CODEGEN_ARM64)
131 # undef DEFINED_ON_arm64
132 # define DEFINED_ON_arm64 define
133 #elif defined(JS_CODEGEN_MIPS32)
134 # undef DEFINED_ON_mips32
135 # define DEFINED_ON_mips32 define
136 # undef DEFINED_ON_mips_shared
137 # define DEFINED_ON_mips_shared define
138 #elif defined(JS_CODEGEN_MIPS64)
139 # undef DEFINED_ON_mips64
140 # define DEFINED_ON_mips64 define
141 # undef DEFINED_ON_mips_shared
142 # define DEFINED_ON_mips_shared define
143 #elif defined(JS_CODEGEN_NONE)
144 # undef DEFINED_ON_none
145 # define DEFINED_ON_none crash
146 #else
147 # error "Unknown architecture!"
148 #endif
149 
150 # define DEFINED_ON_RESULT_crash   { MOZ_CRASH(); }
151 # define DEFINED_ON_RESULT_define
152 # define DEFINED_ON_RESULT_        = delete
153 
154 # define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) \
155     Macro ## Result
156 # define DEFINED_ON_DISPATCH_RESULT(...)     \
157     DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)
158 
159 // We need to let the evaluation of MOZ_FOR_EACH terminate.
160 # define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult)  \
161     DEFINED_ON_DISPATCH_RESULT ParenResult
162 # define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult)  \
163     DEFINED_ON_EXPAND_ARCH_RESULTS_3 (ParenResult)
164 # define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult)    \
165     DEFINED_ON_EXPAND_ARCH_RESULTS_2 (ParenResult)
166 
167 # define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_ ## Arch
168 # define DEFINED_ON_MAP_ON_ARCHS(ArchList)              \
169     DEFINED_ON_EXPAND_ARCH_RESULTS(                     \
170       (MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))
171 
172 # define DEFINED_ON(...)                                \
173     DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))
174 
175 # define PER_ARCH DEFINED_ON(ALL_ARCH)
176 # define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
177 
178 
// Adjust a 16-bit immediate for embedding in a 32-bit immediate: on
// little-endian targets the value is shifted into the high 16 bits; on
// big-endian targets it is used as-is.
//
// The argument and the whole expansion are parenthesized so the macro
// remains correct when X is a compound expression (e.g.
// IMM32_16ADJ(a + b)) or when the expansion appears inside a larger
// expression; the original `X << 16` mis-expanded in both cases.
#ifdef IS_LITTLE_ENDIAN
#define IMM32_16ADJ(X) ((X) << 16)
#else
#define IMM32_16ADJ(X) (X)
#endif
184 
185 namespace js {
186 namespace jit {
187 
188 // Defined in JitFrames.h
189 enum ExitFrameTokenValues;
190 
191 // The public entrypoint for emitting assembly. Note that a MacroAssembler can
192 // use cx->lifoAlloc, so take care not to interleave masm use with other
193 // lifoAlloc use if one will be destroyed before the other.
194 class MacroAssembler : public MacroAssemblerSpecific
195 {
thisFromCtor()196     MacroAssembler* thisFromCtor() {
        // Simply returns |this|. The name suggests it exists so member
        // initializers can receive the assembler's address during
        // construction -- confirm against callers.
197         return this;
198     }
199 
200   public:
201     class AutoRooter : public JS::AutoGCRooter
202     {
        // Back-pointer to the assembler being rooted (non-owning).
203         MacroAssembler* masm_;
204 
205       public:
        // Register with the context's GC rooter chain under the IONMASM
        // tag so the GC can reach the assembler through masm().
AutoRooter(JSContext * cx,MacroAssembler * masm)206         AutoRooter(JSContext* cx, MacroAssembler* masm)
207           : JS::AutoGCRooter(cx, IONMASM),
208             masm_(masm)
209         { }
210 
        // The assembler this rooter protects.
masm()211         MacroAssembler* masm() const {
212             return masm_;
213         }
214     };
215 
216     /*
217      * Base class for creating a branch.
218      */
219     class Branch
220     {
        // Whether this branch carries a real condition/register/target
        // (see isInitialized()).
221         bool init_;
        // Condition under which the branch is taken.
222         Condition cond_;
        // Target label jumped to when the condition holds.
223         Label* jump_;
        // Register tested by the branch.
224         Register reg_;
225 
226       public:
        // Default constructor produces an uninitialized branch; the
        // register value is arbitrary.
Branch()227         Branch()
228           : init_(false),
229             cond_(Equal),
230             jump_(nullptr),
231             reg_(Register::FromCode(0))      // Quell compiler warnings.
232         { }
233 
Branch(Condition cond,Register reg,Label * jump)234         Branch(Condition cond, Register reg, Label* jump)
235           : init_(true),
236             cond_(cond),
237             jump_(jump),
238             reg_(reg)
239         { }
240 
isInitialized()241         bool isInitialized() const {
242             return init_;
243         }
244 
cond()245         Condition cond() const {
246             return cond_;
247         }
248 
jump()249         Label* jump() const {
250             return jump_;
251         }
252 
reg()253         Register reg() const {
254             return reg_;
255         }
256 
        // Invert the stored condition via InvertCondition().
invertCondition()257         void invertCondition() {
258             cond_ = InvertCondition(cond_);
259         }
260 
        // Retarget the branch at a different label.
relink(Label * jump)261         void relink(Label* jump) {
262             jump_ = jump;
263         }
264 
        // Emit the actual branch instruction(s) into |masm|; implemented
        // by subclasses (BranchType, BranchGCPtr below).
265         virtual void emit(MacroAssembler& masm) = 0;
266     };
267 
268     /*
269      * Creates a branch based on a specific TypeSet::Type.
270      * Note: emits number test (int/double) for TypeSet::DoubleType()
271      */
272     class BranchType : public Branch
273     {
274         TypeSet::Type type_;
275 
276       public:
BranchType()277         BranchType()
278           : Branch(),
279             type_(TypeSet::UnknownType())
280         { }
281 
BranchType(Condition cond,Register reg,TypeSet::Type type,Label * jump)282         BranchType(Condition cond, Register reg, TypeSet::Type type, Label* jump)
283           : Branch(cond, reg, jump),
284             type_(type)
285         { }
286 
emit(MacroAssembler & masm)287         void emit(MacroAssembler& masm) {
288             MOZ_ASSERT(isInitialized());
289             MIRType mirType = MIRType_None;
290 
291             if (type_.isPrimitive()) {
292                 if (type_.isMagicArguments())
293                     mirType = MIRType_MagicOptimizedArguments;
294                 else
295                     mirType = MIRTypeFromValueType(type_.primitive());
296             } else if (type_.isAnyObject()) {
297                 mirType = MIRType_Object;
298             } else {
299                 MOZ_CRASH("Unknown conversion to mirtype");
300             }
301 
302             if (mirType == MIRType_Double)
303                 masm.branchTestNumber(cond(), reg(), jump());
304             else
305                 masm.branchTestMIRType(cond(), reg(), mirType, jump());
306         }
307 
308     };
309 
310     /*
311      * Creates a branch based on a GCPtr.
312      */
313     class BranchGCPtr : public Branch
314     {
315         ImmGCPtr ptr_;
316 
317       public:
BranchGCPtr()318         BranchGCPtr()
319           : Branch(),
320             ptr_(ImmGCPtr(nullptr))
321         { }
322 
BranchGCPtr(Condition cond,Register reg,ImmGCPtr ptr,Label * jump)323         BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label* jump)
324           : Branch(cond, reg, jump),
325             ptr_(ptr)
326         { }
327 
emit(MacroAssembler & masm)328         void emit(MacroAssembler& masm) {
329             MOZ_ASSERT(isInitialized());
330             masm.branchPtr(cond(), reg(), ptr_, jump());
331         }
332     };
333 
334     mozilla::Maybe<AutoRooter> autoRooter_;
335     mozilla::Maybe<JitContext> jitContext_;
336     mozilla::Maybe<AutoJitContextAlloc> alloc_;
337 
338   private:
339     // Labels for handling exceptions and failures.
340     NonAssertingLabel failureLabel_;
341 
342     // Asm failure labels
343     NonAssertingLabel asmStackOverflowLabel_;
344     NonAssertingLabel asmSyncInterruptLabel_;
345     NonAssertingLabel asmOnConversionErrorLabel_;
346     NonAssertingLabel asmOnOutOfBoundsLabel_;
347 
348   public:
MacroAssembler()349     MacroAssembler()
350       : framePushed_(0),
351 #ifdef DEBUG
352         inCall_(false),
353 #endif
354         emitProfilingInstrumentation_(false)
355     {
        // Requires an active JitContext (see the other constructor for the
        // no-JitContext case).
356         JitContext* jcx = GetJitContext();
357         JSContext* cx = jcx->cx;
        // cx may be null (presumably off-main-thread compilation); only
        // root the assembler when a context is present.
358         if (cx)
359             constructRoot(cx);
360 
        // If the JitContext has no temp allocator yet, create one from cx
        // and keep it alive in alloc_ for this assembler's lifetime.
361         if (!jcx->temp) {
362             MOZ_ASSERT(cx);
363             alloc_.emplace(cx);
364         }
365 
366         moveResolver_.setAllocator(*jcx->temp);
367 
        // ARM/ARM64 buffers need explicit initialization and a unique
        // assembler id.
368 #if defined(JS_CODEGEN_ARM)
369         initWithAllocator();
370         m_buffer.id = jcx->getNextAssemblerId();
371 #elif defined(JS_CODEGEN_ARM64)
372         initWithAllocator();
373         armbuffer_.id = jcx->getNextAssemblerId();
374 #endif
375     }
376 
377     // This constructor should only be used when there is no JitContext active
378     // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
379     explicit MacroAssembler(JSContext* cx, IonScript* ion = nullptr,
380                             JSScript* script = nullptr, jsbytecode* pc = nullptr);
381 
382     // asm.js compilation handles its own JitContext-pushing
383     struct AsmJSToken {};
MacroAssembler(AsmJSToken,TempAllocator * alloc)384     explicit MacroAssembler(AsmJSToken, TempAllocator *alloc)
385       : framePushed_(0),
386 #ifdef DEBUG
387         inCall_(false),
388 #endif
389         emitProfilingInstrumentation_(false)
390     {
        // No JitContext is consulted here: asm.js pushes its own (see the
        // comment on AsmJSToken above this constructor). |alloc| may be
        // null, in which case the move resolver gets no allocator.
391         if (alloc)
392             moveResolver_.setAllocator(*alloc);
393 
        // ARM/ARM64 buffers need explicit initialization; asm.js uses a
        // fixed buffer id of 0 instead of a per-context id.
394 #if defined(JS_CODEGEN_ARM)
395         initWithAllocator();
396         m_buffer.id = 0;
397 #elif defined(JS_CODEGEN_ARM64)
398         initWithAllocator();
399         armbuffer_.id = 0;
400 #endif
401     }
402 
constructRoot(JSContext * cx)403     void constructRoot(JSContext* cx) {
        // Emplace the AutoRooter so this assembler is reachable by the GC;
        // called from the default constructor when a JSContext exists.
404         autoRooter_.emplace(cx, this);
405     }
406 
moveResolver()407     MoveResolver& moveResolver() {
        // Accessor for the resolver that schedules register/stack moves
        // (e.g. for ABI argument passing; see passABIArg below).
408         return moveResolver_;
409     }
410 
instructionsSize()411     size_t instructionsSize() const {
        // Total size of the code generated so far, as reported by the
        // underlying assembler's size().
412         return size();
413     }
414 
415     //{{{ check_macroassembler_style
416   public:
417     // ===============================================================
418     // Frame manipulation functions.
419 
420     inline uint32_t framePushed() const;
421     inline void setFramePushed(uint32_t framePushed);
422     inline void adjustFrame(int32_t value);
423 
424     // Adjust the frame, to account for implicit modification of the stack
425     // pointer, such that callee can remove arguments on the behalf of the
426     // caller.
427     inline void implicitPop(uint32_t bytes);
428 
429   private:
430     // This field is used to statically (at compilation time) emulate a frame
431     // pointer by keeping track of stack manipulations.
432     //
433     // It is maintained by all stack manipulation functions below.
434     uint32_t framePushed_;
435 
436   public:
437     // ===============================================================
438     // Stack manipulation functions.
439 
440     void PushRegsInMask(LiveRegisterSet set)
441                             DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
442     void PushRegsInMask(LiveGeneralRegisterSet set);
443 
444     void PopRegsInMask(LiveRegisterSet set);
445     void PopRegsInMask(LiveGeneralRegisterSet set);
446     void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
447                                  DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
448 
449     void Push(const Operand op) DEFINED_ON(x86_shared);
450     void Push(Register reg) PER_SHARED_ARCH;
451     void Push(Register reg1, Register reg2, Register reg3, Register reg4) DEFINED_ON(arm64);
452     void Push(const Imm32 imm) PER_SHARED_ARCH;
453     void Push(const ImmWord imm) PER_SHARED_ARCH;
454     void Push(const ImmPtr imm) PER_SHARED_ARCH;
455     void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
456     void Push(FloatRegister reg) PER_SHARED_ARCH;
457     void Push(jsid id, Register scratchReg);
458     void Push(TypedOrValueRegister v);
459     void Push(ConstantOrRegister v);
460     void Push(const ValueOperand& val);
461     void Push(const Value& val);
462     void Push(JSValueType type, Register reg);
463     void PushValue(const Address& addr);
464     void PushEmptyRooted(VMFunction::RootType rootType);
465     inline CodeOffset PushWithPatch(ImmWord word);
466     inline CodeOffset PushWithPatch(ImmPtr imm);
467 
468     void Pop(const Operand op) DEFINED_ON(x86_shared);
469     void Pop(Register reg) PER_SHARED_ARCH;
470     void Pop(FloatRegister t) DEFINED_ON(x86_shared);
471     void Pop(const ValueOperand& val) PER_SHARED_ARCH;
472     void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand& valueReg);
473 
474     // Move the stack pointer based on the requested amount.
475     void adjustStack(int amount);
476     void reserveStack(uint32_t amount) PER_ARCH;
477     void freeStack(uint32_t amount);
478 
479     // Warning: This method does not update the framePushed() counter.
480     void freeStack(Register amount);
481 
482   private:
483     // ===============================================================
484     // Register allocation fields.
485 #ifdef DEBUG
486     friend AutoRegisterScope;
487     friend AutoFloatRegisterScope;
488     // Used to track register scopes for debug builds.
489     // Manipulated by the AutoGenericRegisterScope class.
490     AllocatableRegisterSet debugTrackedRegisters_;
491 #endif // DEBUG
492 
493   public:
494     // ===============================================================
495     // Simple call functions.
496 
497     CodeOffset call(Register reg) PER_SHARED_ARCH;
498     CodeOffset call(Label* label) PER_SHARED_ARCH;
499     void call(const Address& addr) DEFINED_ON(x86_shared);
500     void call(ImmWord imm) PER_SHARED_ARCH;
501     // Call a target native function, which is neither traceable nor movable.
502     void call(ImmPtr imm) PER_SHARED_ARCH;
503     void call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
504     // Call a target JitCode, which must be traceable, and may be movable.
505     void call(JitCode* c) PER_SHARED_ARCH;
506 
507     inline void call(const wasm::CallSiteDesc& desc, const Register reg);
508     inline void call(const wasm::CallSiteDesc& desc, Label* label);
509     inline void call(const wasm::CallSiteDesc& desc, AsmJSInternalCallee callee);
510 
511     CodeOffset callWithPatch() PER_SHARED_ARCH;
512     void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
513 
514     // Push the return address and make a call. On platforms where this function
515     // is not defined, push the link register (pushReturnAddress) at the entry
516     // point of the callee.
517     void callAndPushReturnAddress(Register reg) DEFINED_ON(mips_shared, x86_shared);
518     void callAndPushReturnAddress(Label* label) DEFINED_ON(mips_shared, x86_shared);
519 
520     void pushReturnAddress() DEFINED_ON(arm, arm64);
521 
522   public:
523     // ===============================================================
524     // ABI function calls.
525 
526     // Set up a call to C/C++ code, given the assumption that framePushed
527     // accurately defines the state of the stack, and that the top of the
528     // stack was properly aligned. Note that this only supports cdecl.
529     void setupAlignedABICall(); // CRASH_ON(arm64)
530 
531     // Setup an ABI call for when the alignment is not known. This may need a
532     // scratch register.
533     void setupUnalignedABICall(Register scratch) PER_ARCH;
534 
535     // Arguments must be assigned to a C/C++ call in order. They are moved
536     // in parallel immediately before performing the call. This process may
537     // temporarily use more stack, in which case esp-relative addresses will be
538     // automatically adjusted. It is extremely important that esp-relative
539     // addresses are computed *after* setupABICall(). Furthermore, no
540     // operations should be emitted while setting arguments.
541     void passABIArg(const MoveOperand& from, MoveOp::Type type);
542     inline void passABIArg(Register reg);
543     inline void passABIArg(FloatRegister reg, MoveOp::Type type);
544 
545     template <typename T>
546     inline void callWithABI(const T& fun, MoveOp::Type result = MoveOp::GENERAL);
547 
548   private:
549     // Reinitialize the variables which have to be cleared before making a call
550     // with callWithABI.
551     void setupABICall();
552 
553     // Reserve the stack and resolve the arguments move.
554     void callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS = false) PER_ARCH;
555 
556     // Emits a call to a C/C++ function, resolving all argument moves.
557     void callWithABINoProfiler(void* fun, MoveOp::Type result);
558     void callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type result);
559     void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
560     void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
561 
562     // Restore the stack to its state before the setup function call.
563     void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result) PER_ARCH;
564 
565     // Create the signature to be able to decode the arguments of a native
566     // function, when calling a function within the simulator.
567     inline void appendSignatureType(MoveOp::Type type);
568     inline ABIFunctionType signature() const;
569 
570     // Private variables used to handle moves between registers given as
571     // arguments to passABIArg and the list of ABI registers expected for the
572     // signature of the function.
573     MoveResolver moveResolver_;
574 
575     // Architecture specific implementation which specify how registers & stack
576     // offsets are used for calling a function.
577     ABIArgGenerator abiArgs_;
578 
579 #ifdef DEBUG
580     // Flag use to assert that we use ABI function in the right context.
581     bool inCall_;
582 #endif
583 
584     // If set by setupUnalignedABICall then callWithABI will pop the stack
585     // register which is on the stack.
586     bool dynamicAlignment_;
587 
588 #ifdef JS_SIMULATOR
589     // The signature is used to accumulate all types of arguments which are used
590     // by the caller. This is used by the simulators to decode the arguments
591     // properly, and cast the function pointer to the right type.
592     uint32_t signature_;
593 #endif
594 
595   public:
596     // ===============================================================
597     // Jit Frames.
598     //
599     // These functions are used to build the content of the Jit frames.  See
600     // CommonFrameLayout class, and all its derivatives. The content should be
601     // pushed in the opposite order as the fields of the structures, such that
602     // the structures can be used to interpret the content of the stack.
603 
604     // Call the Jit function, and push the return address (or let the callee
605     // push the return address).
606     //
607     // These functions return the offset of the return address, in order to use
608     // the return address to index the safepoints, which are used to list all
609     // live registers.
610     inline uint32_t callJitNoProfiler(Register callee);
611     inline uint32_t callJit(Register callee);
612     inline uint32_t callJit(JitCode* code);
613 
614     // The frame descriptor is the second field of all Jit frames, pushed before
615     // calling the Jit function.  It is a composite value defined in JitFrames.h
616     inline void makeFrameDescriptor(Register frameSizeReg, FrameType type);
617 
618     // Push the frame descriptor, based on the statically known framePushed.
619     inline void pushStaticFrameDescriptor(FrameType type);
620 
621     // Push the callee token of a JSFunction whose pointer is stored in the
622     // |callee| register. The callee token is packed with a |constructing| flag
623     // which corresponds to whether the JS function is called with "new"
624     // or not.
625     inline void PushCalleeToken(Register callee, bool constructing);
626 
627     // Unpack a callee token located at the |token| address, and return the
628     // JSFunction pointer in the |dest| register.
629     inline void loadFunctionFromCalleeToken(Address token, Register dest);
630 
631     // This function emulates a call by pushing an exit frame on the stack,
632     // except that the fake-function is inlined within the body of the caller.
633     //
634     // This function assumes that the current frame is an IonJS frame.
635     //
636     // This function returns the offset of the /fake/ return address, in order to use
637     // the return address to index the safepoints, which are used to list all
638     // live registers.
639     //
640     // This function should be balanced with a call to adjustStack, to pop the
641     // exit frame and emulate the return statement of the inlined function.
642     inline uint32_t buildFakeExitFrame(Register scratch);
643 
644   private:
645     // This function is used by buildFakeExitFrame to push a fake return address
646     // on the stack. This fake return address should never be used for resuming
647     // any execution, and can even be an invalid pointer into the instruction
648     // stream, as long as it does not alias any other.
649     uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;
650 
651   public:
652     // ===============================================================
653     // Exit frame footer.
654     //
655     // When calling outside the Jit we push an exit frame. To mark the stack
656     // correctly, we have to push additional information, called the Exit frame
657     // footer, which is used to identify how the stack is marked.
658     //
659     // See JitFrames.h, and MarkJitExitFrame in JitFrames.cpp.
660 
661     // If the current piece of code might be garbage collected, then the exit
662     // frame footer must contain a pointer to the current JitCode, such that the
663     // garbage collector can keep the code alive as long this code is on the
664     // stack. This function pushes a placeholder which is replaced when the code
665     // is linked.
666     inline void PushStubCode();
667 
668     // Return true if the code contains a self-reference which needs to be
669     // patched when the code is linked.
670     inline bool hasSelfReference() const;
671 
672     // Push stub code and the VMFunction pointer.
673     inline void enterExitFrame(const VMFunction* f = nullptr);
674 
675     // Push an exit frame token to identify which fake exit frame this footer
676     // corresponds to.
677     inline void enterFakeExitFrame(enum ExitFrameTokenValues token);
678 
679     // Push an exit frame token for a native call.
680     inline void enterFakeExitFrameForNative(bool isConstructing);
681 
682     // Pop ExitFrame footer in addition to the extra frame.
683     inline void leaveExitFrame(size_t extraFrame = 0);
684 
685   private:
686     // Save the top of the stack into PerThreadData::jitTop of the main thread,
687     // which should be the location of the latest exit frame.
688     void linkExitFrame();
689 
690     // Patch the value of PushStubCode with the pointer to the finalized code.
691     void linkSelfReference(JitCode* code);
692 
693     // If the JitCode that created this assembler needs to transition into the VM,
694     // we want to store the JitCode on the stack in order to mark it during a GC.
695     // This is a reference to a patch location where the JitCode* will be written.
696     CodeOffset selfReferencePatch_;
697 
698   public:
699     // ===============================================================
700     // Logical instructions
701 
702     inline void not32(Register reg) PER_SHARED_ARCH;
703 
704     inline void and32(Register src, Register dest) PER_SHARED_ARCH;
705     inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
706     inline void and32(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
707     inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
708     inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;
709 
710     inline void andPtr(Register src, Register dest) PER_ARCH;
711     inline void andPtr(Imm32 imm, Register dest) PER_ARCH;
712 
713     inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
714 
715     inline void or32(Register src, Register dest) PER_SHARED_ARCH;
716     inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
717     inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
718 
719     inline void orPtr(Register src, Register dest) PER_ARCH;
720     inline void orPtr(Imm32 imm, Register dest) PER_ARCH;
721 
722     inline void or64(Register64 src, Register64 dest) PER_ARCH;
723     inline void xor64(Register64 src, Register64 dest) PER_ARCH;
724 
725     inline void xor32(Register src, Register dest) DEFINED_ON(x86_shared);
726     inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
727 
728     inline void xorPtr(Register src, Register dest) PER_ARCH;
729     inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;
730 
731     // ===============================================================
732     // Arithmetic functions
733 
734     inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
735     inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
736     inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;
737 
738     inline void add64(Register64 src, Register64 dest) PER_ARCH;
739 
740     // ===============================================================
741     // Shift functions
742 
743     inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
744 
745     inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
746 
747     inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
748     inline void rshiftPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
749 
750     inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;
751 
752     inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
753 
754     //}}} check_macroassembler_style
755   public:
756 
757     // Emits a test of a value against all types in a TypeSet. A scratch
758     // register is required.
759     template <typename Source>
760     void guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind, Register scratch, Label* miss);
761 
762     void guardObjectType(Register obj, const TypeSet* types, Register scratch, Label* miss);
763 
764     template <typename TypeSet>
765     void guardTypeSetMightBeIncomplete(TypeSet* types, Register obj, Register scratch, Label* label);
766 
loadObjShape(Register objReg,Register dest)767     void loadObjShape(Register objReg, Register dest) {
768         loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
769     }
loadObjGroup(Register objReg,Register dest)770     void loadObjGroup(Register objReg, Register dest) {
771         loadPtr(Address(objReg, JSObject::offsetOfGroup()), dest);
772     }
loadBaseShape(Register objReg,Register dest)773     void loadBaseShape(Register objReg, Register dest) {
774         loadObjShape(objReg, dest);
775         loadPtr(Address(dest, Shape::offsetOfBase()), dest);
776     }
loadObjClass(Register objReg,Register dest)777     void loadObjClass(Register objReg, Register dest) {
778         loadObjGroup(objReg, dest);
779         loadPtr(Address(dest, ObjectGroup::offsetOfClasp()), dest);
780     }
branchTestObjClass(Condition cond,Register obj,Register scratch,const js::Class * clasp,Label * label)781     void branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class* clasp,
782                             Label* label) {
783         loadObjGroup(obj, scratch);
784         branchPtr(cond, Address(scratch, ObjectGroup::offsetOfClasp()), ImmPtr(clasp), label);
785     }
branchTestObjShape(Condition cond,Register obj,const Shape * shape,Label * label)786     void branchTestObjShape(Condition cond, Register obj, const Shape* shape, Label* label) {
787         branchPtr(cond, Address(obj, JSObject::offsetOfShape()), ImmGCPtr(shape), label);
788     }
branchTestObjShape(Condition cond,Register obj,Register shape,Label * label)789     void branchTestObjShape(Condition cond, Register obj, Register shape, Label* label) {
790         branchPtr(cond, Address(obj, JSObject::offsetOfShape()), shape, label);
791     }
branchTestObjGroup(Condition cond,Register obj,ObjectGroup * group,Label * label)792     void branchTestObjGroup(Condition cond, Register obj, ObjectGroup* group, Label* label) {
793         branchPtr(cond, Address(obj, JSObject::offsetOfGroup()), ImmGCPtr(group), label);
794     }
branchTestObjGroup(Condition cond,Register obj,Register group,Label * label)795     void branchTestObjGroup(Condition cond, Register obj, Register group, Label* label) {
796         branchPtr(cond, Address(obj, JSObject::offsetOfGroup()), group, label);
797     }
branchTestProxyHandlerFamily(Condition cond,Register proxy,Register scratch,const void * handlerp,Label * label)798     void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
799                                       const void* handlerp, Label* label) {
800         Address handlerAddr(proxy, ProxyObject::offsetOfHandler());
801         loadPtr(handlerAddr, scratch);
802         Address familyAddr(scratch, BaseProxyHandler::offsetOfFamily());
803         branchPtr(cond, familyAddr, ImmPtr(handlerp), label);
804     }
805 
806     template <typename Value>
branchTestMIRType(Condition cond,const Value & val,MIRType type,Label * label)807     void branchTestMIRType(Condition cond, const Value& val, MIRType type, Label* label) {
808         switch (type) {
809           case MIRType_Null:      return branchTestNull(cond, val, label);
810           case MIRType_Undefined: return branchTestUndefined(cond, val, label);
811           case MIRType_Boolean:   return branchTestBoolean(cond, val, label);
812           case MIRType_Int32:     return branchTestInt32(cond, val, label);
813           case MIRType_String:    return branchTestString(cond, val, label);
814           case MIRType_Symbol:    return branchTestSymbol(cond, val, label);
815           case MIRType_Object:    return branchTestObject(cond, val, label);
816           case MIRType_Double:    return branchTestDouble(cond, val, label);
817           case MIRType_MagicOptimizedArguments: // Fall through.
818           case MIRType_MagicIsConstructing:
819           case MIRType_MagicHole: return branchTestMagic(cond, val, label);
820           default:
821             MOZ_CRASH("Bad MIRType");
822         }
823     }
824 
825     // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
branchIfFalseBool(Register reg,Label * label)826     void branchIfFalseBool(Register reg, Label* label) {
827         // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
828         branchTest32(Assembler::Zero, reg, Imm32(0xFF), label);
829     }
830 
831     // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
branchIfTrueBool(Register reg,Label * label)832     void branchIfTrueBool(Register reg, Label* label) {
833         // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
834         branchTest32(Assembler::NonZero, reg, Imm32(0xFF), label);
835     }
836 
loadObjPrivate(Register obj,uint32_t nfixed,Register dest)837     void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
838         loadPtr(Address(obj, NativeObject::getPrivateDataOffset(nfixed)), dest);
839     }
840 
loadObjProto(Register obj,Register dest)841     void loadObjProto(Register obj, Register dest) {
842         loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
843         loadPtr(Address(dest, ObjectGroup::offsetOfProto()), dest);
844     }
845 
loadStringLength(Register str,Register dest)846     void loadStringLength(Register str, Register dest) {
847         load32(Address(str, JSString::offsetOfLength()), dest);
848     }
849 
850     void loadStringChars(Register str, Register dest);
851     void loadStringChar(Register str, Register index, Register output);
852 
branchIfRope(Register str,Label * label)853     void branchIfRope(Register str, Label* label) {
854         Address flags(str, JSString::offsetOfFlags());
855         static_assert(JSString::ROPE_FLAGS == 0, "Rope type flags must be 0");
856         branchTest32(Assembler::Zero, flags, Imm32(JSString::TYPE_FLAGS_MASK), label);
857     }
858 
loadJSContext(Register dest)859     void loadJSContext(Register dest) {
860         loadPtr(AbsoluteAddress(GetJitContext()->runtime->addressOfJSContext()), dest);
861     }
loadJitActivation(Register dest)862     void loadJitActivation(Register dest) {
863         loadPtr(AbsoluteAddress(GetJitContext()->runtime->addressOfActivation()), dest);
864     }
865 
866     template<typename T>
loadTypedOrValue(const T & src,TypedOrValueRegister dest)867     void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
868         if (dest.hasValue())
869             loadValue(src, dest.valueReg());
870         else
871             loadUnboxedValue(src, dest.type(), dest.typedReg());
872     }
873 
874     template<typename T>
loadElementTypedOrValue(const T & src,TypedOrValueRegister dest,bool holeCheck,Label * hole)875     void loadElementTypedOrValue(const T& src, TypedOrValueRegister dest, bool holeCheck,
876                                  Label* hole) {
877         if (dest.hasValue()) {
878             loadValue(src, dest.valueReg());
879             if (holeCheck)
880                 branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
881         } else {
882             if (holeCheck)
883                 branchTestMagic(Assembler::Equal, src, hole);
884             loadUnboxedValue(src, dest.type(), dest.typedReg());
885         }
886     }
887 
888     template <typename T>
storeTypedOrValue(TypedOrValueRegister src,const T & dest)889     void storeTypedOrValue(TypedOrValueRegister src, const T& dest) {
890         if (src.hasValue()) {
891             storeValue(src.valueReg(), dest);
892         } else if (IsFloatingPointType(src.type())) {
893             FloatRegister reg = src.typedReg().fpu();
894             if (src.type() == MIRType_Float32) {
895                 convertFloat32ToDouble(reg, ScratchDoubleReg);
896                 reg = ScratchDoubleReg;
897             }
898             storeDouble(reg, dest);
899         } else {
900             storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
901         }
902     }
903 
904     template <typename T>
storeObjectOrNull(Register src,const T & dest)905     void storeObjectOrNull(Register src, const T& dest) {
906         Label notNull, done;
907         branchTestPtr(Assembler::NonZero, src, src, &notNull);
908         storeValue(NullValue(), dest);
909         jump(&done);
910         bind(&notNull);
911         storeValue(JSVAL_TYPE_OBJECT, src, dest);
912         bind(&done);
913     }
914 
915     template <typename T>
storeConstantOrRegister(ConstantOrRegister src,const T & dest)916     void storeConstantOrRegister(ConstantOrRegister src, const T& dest) {
917         if (src.constant())
918             storeValue(src.value(), dest);
919         else
920             storeTypedOrValue(src.reg(), dest);
921     }
922 
storeCallResult(Register reg)923     void storeCallResult(Register reg) {
924         if (reg != ReturnReg)
925             mov(ReturnReg, reg);
926     }
927 
storeCallFloatResult(FloatRegister reg)928     void storeCallFloatResult(FloatRegister reg) {
929         if (reg != ReturnDoubleReg)
930             moveDouble(ReturnDoubleReg, reg);
931     }
932 
    // Unbox the boxed call result (left in the JS return register(s) by the
    // callee) into the typed register |dest|.
    void storeCallResultValue(AnyRegister dest) {
#if defined(JS_NUNBOX32)
        // 32-bit: the Value is split across a type and a payload register.
        unboxValue(ValueOperand(JSReturnReg_Type, JSReturnReg_Data), dest);
#elif defined(JS_PUNBOX64)
        // 64-bit: the whole Value fits in a single return register.
        unboxValue(ValueOperand(JSReturnReg), dest);
#else
#error "Bad architecture"
#endif
    }
942 
    // Move the boxed call result out of the JS return register(s) into |dest|,
    // taking care of every possible aliasing between source and destination
    // registers on NUNBOX32.
    void storeCallResultValue(ValueOperand dest) {
#if defined(JS_NUNBOX32)
        // reshuffle the return registers used for a call result to store into
        // dest, using ReturnReg as a scratch register if necessary. This must
        // only be called after returning from a call, at a point when the
        // return register is not live. XXX would be better to allow wrappers
        // to store the return value to different places.
        if (dest.typeReg() == JSReturnReg_Data) {
            if (dest.payloadReg() == JSReturnReg_Type) {
                // swap the two registers.
                mov(JSReturnReg_Type, ReturnReg);
                mov(JSReturnReg_Data, JSReturnReg_Type);
                mov(ReturnReg, JSReturnReg_Data);
            } else {
                // dest.typeReg() aliases JSReturnReg_Data, so copy the
                // payload out of it before overwriting it with the type.
                mov(JSReturnReg_Data, dest.payloadReg());
                mov(JSReturnReg_Type, dest.typeReg());
            }
        } else {
            // dest.typeReg() is free of aliasing with JSReturnReg_Data, so
            // the type can be moved first.
            mov(JSReturnReg_Type, dest.typeReg());
            mov(JSReturnReg_Data, dest.payloadReg());
        }
#elif defined(JS_PUNBOX64)
        if (dest.valueReg() != JSReturnReg)
            mov(JSReturnReg, dest.valueReg());
#else
#error "Bad architecture"
#endif
    }
971 
storeCallResultValue(TypedOrValueRegister dest)972     void storeCallResultValue(TypedOrValueRegister dest) {
973         if (dest.hasValue())
974             storeCallResultValue(dest.valueReg());
975         else
976             storeCallResultValue(dest.typedReg());
977     }
978 
979     template <typename T>
extractString(const T & source,Register scratch)980     Register extractString(const T& source, Register scratch) {
981         return extractObject(source, scratch);
982     }
983 
    // Branch to |label| when the INTERPRETED flag of |fun| is clear.
    void branchIfFunctionHasNoScript(Register fun, Label* label) {
        // 16-bit loads are slow and unaligned 32-bit loads may be too so
        // perform an aligned 32-bit load and adjust the bitmask accordingly.
        MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
        MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
        Address address(fun, JSFunction::offsetOfNargs());
        // IMM32_16ADJ positions the 16-bit flag bit within the combined
        // 32-bit nargs/flags word loaded above.
        int32_t bit = IMM32_16ADJ(JSFunction::INTERPRETED);
        branchTest32(Assembler::Zero, address, Imm32(bit), label);
    }
    // Branch to |label| when the INTERPRETED flag of |fun| is set.
    void branchIfInterpreted(Register fun, Label* label) {
        // 16-bit loads are slow and unaligned 32-bit loads may be too so
        // perform an aligned 32-bit load and adjust the bitmask accordingly.
        MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
        MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
        Address address(fun, JSFunction::offsetOfNargs());
        // IMM32_16ADJ positions the 16-bit flag bit within the combined
        // 32-bit nargs/flags word loaded above.
        int32_t bit = IMM32_16ADJ(JSFunction::INTERPRETED);
        branchTest32(Assembler::NonZero, address, Imm32(bit), label);
    }
1002 
1003     void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label);
1004 
bumpKey(Int32Key * key,int diff)1005     void bumpKey(Int32Key* key, int diff) {
1006         if (key->isRegister())
1007             add32(Imm32(diff), key->reg());
1008         else
1009             key->bumpConstant(diff);
1010     }
1011 
storeKey(const Int32Key & key,const Address & dest)1012     void storeKey(const Int32Key& key, const Address& dest) {
1013         if (key.isRegister())
1014             store32(key.reg(), dest);
1015         else
1016             store32(Imm32(key.constant()), dest);
1017     }
1018 
1019     template<typename T>
branchKey(Condition cond,const T & length,const Int32Key & key,Label * label)1020     void branchKey(Condition cond, const T& length, const Int32Key& key, Label* label) {
1021         if (key.isRegister())
1022             branch32(cond, length, key.reg(), label);
1023         else
1024             branch32(cond, length, Imm32(key.constant()), label);
1025     }
1026 
branchTestNeedsIncrementalBarrier(Condition cond,Label * label)1027     void branchTestNeedsIncrementalBarrier(Condition cond, Label* label) {
1028         MOZ_ASSERT(cond == Zero || cond == NonZero);
1029         CompileZone* zone = GetJitContext()->compartment->zone();
1030         AbsoluteAddress needsBarrierAddr(zone->addressOfNeedsIncrementalBarrier());
1031         branchTest32(cond, needsBarrierAddr, Imm32(0x1), label);
1032     }
1033 
    // Call the per-type GC pre-write barrier stub for the thing stored at
    // |address|. PreBarrierReg is saved and restored around the stub call.
    template <typename T>
    void callPreBarrier(const T& address, MIRType type) {
        Label done;

        // For Value slots, only GC things need a barrier; skip otherwise.
        if (type == MIRType_Value)
            branchTestGCThing(Assembler::NotEqual, address, &done);

        // The stub receives the slot address in PreBarrierReg.
        Push(PreBarrierReg);
        computeEffectiveAddress(address, PreBarrierReg);

        const JitRuntime* rt = GetJitContext()->runtime->jitRuntime();
        JitCode* preBarrier = rt->preBarrier(type);

        call(preBarrier);
        Pop(PreBarrierReg);

        bind(&done);
    }
1052 
    // Like callPreBarrier, but guarded by a toggleable jump so the whole
    // barrier sequence can be switched on or off after code generation.
    template <typename T>
    void patchableCallPreBarrier(const T& address, MIRType type) {
        Label done;

        // All barriers are off by default.
        // They are enabled if necessary at the end of CodeGenerator::generate().
        CodeOffset nopJump = toggledJump(&done);
        writePrebarrierOffset(nopJump);

        callPreBarrier(address, type);
        jump(&done);

        // NOTE(review): the alignment presumably keeps the toggled jump site
        // safely patchable — confirm against the toggledJump implementation.
        haltingAlign(8);
        bind(&done);
    }
1068 
canonicalizeDouble(FloatRegister reg)1069     void canonicalizeDouble(FloatRegister reg) {
1070         Label notNaN;
1071         branchDouble(DoubleOrdered, reg, reg, &notNaN);
1072         loadConstantDouble(JS::GenericNaN(), reg);
1073         bind(&notNaN);
1074     }
1075 
canonicalizeFloat(FloatRegister reg)1076     void canonicalizeFloat(FloatRegister reg) {
1077         Label notNaN;
1078         branchFloat(DoubleOrdered, reg, reg, &notNaN);
1079         loadConstantFloat32(float(JS::GenericNaN()), reg);
1080         bind(&notNaN);
1081     }
1082 
1083     template<typename T>
1084     void loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp, Label* fail,
1085                             bool canonicalizeDoubles = true, unsigned numElems = 0);
1086 
1087     template<typename T>
1088     void loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest, bool allowDouble,
1089                             Register temp, Label* fail);
1090 
1091     template<typename S, typename T>
storeToTypedIntArray(Scalar::Type arrayType,const S & value,const T & dest)1092     void storeToTypedIntArray(Scalar::Type arrayType, const S& value, const T& dest) {
1093         switch (arrayType) {
1094           case Scalar::Int8:
1095           case Scalar::Uint8:
1096           case Scalar::Uint8Clamped:
1097             store8(value, dest);
1098             break;
1099           case Scalar::Int16:
1100           case Scalar::Uint16:
1101             store16(value, dest);
1102             break;
1103           case Scalar::Int32:
1104           case Scalar::Uint32:
1105             store32(value, dest);
1106             break;
1107           default:
1108             MOZ_CRASH("Invalid typed array type");
1109         }
1110     }
1111 
1112     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
1113                                 unsigned numElems = 0);
1114     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
1115                                 unsigned numElems = 0);
1116 
1117     // Load a property from an UnboxedPlainObject or UnboxedArrayObject.
1118     template <typename T>
1119     void loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output);
1120 
1121     // Store a property to an UnboxedPlainObject, without triggering barriers.
1122     // If failure is null, the value definitely has a type suitable for storing
1123     // in the property.
1124     template <typename T>
1125     void storeUnboxedProperty(T address, JSValueType type,
1126                               ConstantOrRegister value, Label* failure);
1127 
1128     void checkUnboxedArrayCapacity(Register obj, const Int32Key& index, Register temp,
1129                                    Label* failure);
1130 
extractString(const Address & address,Register scratch)1131     Register extractString(const Address& address, Register scratch) {
1132         return extractObject(address, scratch);
1133     }
extractString(const ValueOperand & value,Register scratch)1134     Register extractString(const ValueOperand& value, Register scratch) {
1135         return extractObject(value, scratch);
1136     }
1137 
1138     using MacroAssemblerSpecific::extractTag;
extractTag(const TypedOrValueRegister & reg,Register scratch)1139     Register extractTag(const TypedOrValueRegister& reg, Register scratch) {
1140         if (reg.hasValue())
1141             return extractTag(reg.valueReg(), scratch);
1142         mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
1143         return scratch;
1144     }
1145 
1146     using MacroAssemblerSpecific::extractObject;
extractObject(const TypedOrValueRegister & reg,Register scratch)1147     Register extractObject(const TypedOrValueRegister& reg, Register scratch) {
1148         if (reg.hasValue())
1149             return extractObject(reg.valueReg(), scratch);
1150         MOZ_ASSERT(reg.type() == MIRType_Object);
1151         return reg.typedReg().gpr();
1152     }
1153 
1154     // Inline version of js_TypedArray_uint8_clamp_double.
1155     // This function clobbers the input register.
1156     void clampDoubleToUint8(FloatRegister input, Register output) PER_ARCH;
1157 
1158     using MacroAssemblerSpecific::ensureDouble;
1159 
1160     template <typename S>
ensureDouble(const S & source,FloatRegister dest,Label * failure)1161     void ensureDouble(const S& source, FloatRegister dest, Label* failure) {
1162         Label isDouble, done;
1163         branchTestDouble(Assembler::Equal, source, &isDouble);
1164         branchTestInt32(Assembler::NotEqual, source, failure);
1165 
1166         convertInt32ToDouble(source, dest);
1167         jump(&done);
1168 
1169         bind(&isDouble);
1170         unboxDouble(source, dest);
1171 
1172         bind(&done);
1173     }
1174 
1175     // Emit type case branch on tag matching if the type tag in the definition
1176     // might actually be that type.
1177     void branchEqualTypeIfNeeded(MIRType type, MDefinition* maybeDef, Register tag, Label* label);
1178 
1179     // Inline allocation.
1180   private:
1181     void checkAllocatorState(Label* fail);
1182     bool shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap);
1183     void nurseryAllocate(Register result, Register temp, gc::AllocKind allocKind,
1184                          size_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail);
1185     void freeListAllocate(Register result, Register temp, gc::AllocKind allocKind, Label* fail);
1186     void allocateObject(Register result, Register temp, gc::AllocKind allocKind,
1187                         uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail);
1188     void allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label* fail);
1189     void copySlotsFromTemplate(Register obj, const NativeObject* templateObj,
1190                                uint32_t start, uint32_t end);
1191     void fillSlotsWithConstantValue(Address addr, Register temp, uint32_t start, uint32_t end,
1192                                     const Value& v);
1193     void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start, uint32_t end);
1194     void fillSlotsWithUninitialized(Address addr, Register temp, uint32_t start, uint32_t end);
1195     void initGCSlots(Register obj, Register temp, NativeObject* templateObj, bool initContents);
1196 
1197   public:
1198     void callMallocStub(size_t nbytes, Register result, Label* fail);
1199     void callFreeStub(Register slots);
1200     void createGCObject(Register result, Register temp, JSObject* templateObj,
1201                         gc::InitialHeap initialHeap, Label* fail, bool initContents = true,
1202                         bool convertDoubleElements = false);
1203 
1204     void initGCThing(Register obj, Register temp, JSObject* templateObj,
1205                      bool initContents = true, bool convertDoubleElements = false);
1206 
1207     void initUnboxedObjectContents(Register object, UnboxedPlainObject* templateObject);
1208 
1209     void newGCString(Register result, Register temp, Label* fail);
1210     void newGCFatInlineString(Register result, Register temp, Label* fail);
1211 
1212     // Compares two strings for equality based on the JSOP.
1213     // This checks for identical pointers, atoms and length and fails for everything else.
1214     void compareStrings(JSOp op, Register left, Register right, Register result,
1215                         Label* fail);
1216 
1217   public:
1218     // Generates code used to complete a bailout.
1219     void generateBailoutTail(Register scratch, Register bailoutInfo);
1220 
branchTestObjectTruthy(bool truthy,Register objReg,Register scratch,Label * slowCheck,Label * checked)1221     void branchTestObjectTruthy(bool truthy, Register objReg, Register scratch,
1222                                 Label* slowCheck, Label* checked)
1223     {
1224         // The branches to out-of-line code here implement a conservative version
1225         // of the JSObject::isWrapper test performed in EmulatesUndefined.  If none
1226         // of the branches are taken, we can check class flags directly.
1227         loadObjClass(objReg, scratch);
1228         Address flags(scratch, Class::offsetOfFlags());
1229 
1230         branchTestClassIsProxy(true, scratch, slowCheck);
1231 
1232         Condition cond = truthy ? Assembler::Zero : Assembler::NonZero;
1233         branchTest32(cond, flags, Imm32(JSCLASS_EMULATES_UNDEFINED), checked);
1234     }
1235 
branchTestClassIsProxy(bool proxy,Register clasp,Label * label)1236     void branchTestClassIsProxy(bool proxy, Register clasp, Label* label)
1237     {
1238         branchTest32(proxy ? Assembler::NonZero : Assembler::Zero,
1239                      Address(clasp, Class::offsetOfFlags()),
1240                      Imm32(JSCLASS_IS_PROXY), label);
1241     }
1242 
    // Branch on whether |object| is a proxy. Clobbers |scratch| with the
    // object's class pointer.
    void branchTestObjectIsProxy(bool proxy, Register object, Register scratch, Label* label)
    {
        loadObjClass(object, scratch);
        branchTestClassIsProxy(proxy, scratch, label);
    }
1248 
1249     inline void branchFunctionKind(Condition cond, JSFunction::FunctionKind kind, Register fun,
1250                                    Register scratch, Label* label);
1251 
1252   public:
1253 #ifndef JS_CODEGEN_ARM64
1254     // StackPointer manipulation functions.
1255     // On ARM64, the StackPointer is implemented as two synchronized registers.
1256     // Code shared across platforms must use these functions to be valid.
    // Add |t| into the (pseudo) stack pointer register.
    template <typename T>
    void addToStackPtr(T t) { addPtr(t, getStackPointer()); }
    // Add the stack pointer into |t|.
    template <typename T>
    void addStackPtrTo(T t) { addPtr(getStackPointer(), t); }
1261 
    // Subtract |t| from the stack pointer.
    template <typename T>
    void subFromStackPtr(T t) { subPtr(t, getStackPointer()); }
    // Subtract the stack pointer from |t|.
    template <typename T>
    void subStackPtrFrom(T t) { subPtr(getStackPointer(), t); }
1266 
    // Bitwise-and |t| into the stack pointer (e.g. for alignment masks).
    template <typename T>
    void andToStackPtr(T t) { andPtr(t, getStackPointer()); }
    // Bitwise-and the stack pointer into |t|.
    template <typename T>
    void andStackPtrTo(T t) { andPtr(getStackPointer(), t); }
1271 
    // Move |t| into the stack pointer.
    template <typename T>
    void moveToStackPtr(T t) { movePtr(t, getStackPointer()); }
    // Move the stack pointer into |t|.
    template <typename T>
    void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); }
1276 
    // Load the stack pointer from memory at |t|.
    template <typename T>
    void loadStackPtr(T t) { loadPtr(t, getStackPointer()); }
    // Store the stack pointer to memory at |t|.
    template <typename T>
    void storeStackPtr(T t) { storePtr(getStackPointer(), t); }
1281 
1282     // StackPointer testing functions.
1283     // On ARM64, sp can function as the zero register depending on context.
1284     // Code shared across platforms must use these functions to be valid.
    // Bit-test the stack pointer against |t| and branch.
    template <typename T>
    void branchTestStackPtr(Condition cond, T t, Label* label) {
        branchTestPtr(cond, getStackPointer(), t, label);
    }
    // Compare the stack pointer (lhs) against |rhs| and branch.
    template <typename T>
    void branchStackPtr(Condition cond, T rhs, Label* label) {
        branchPtr(cond, getStackPointer(), rhs, label);
    }
    // Compare |lhs| against the stack pointer (rhs) and branch.
    template <typename T>
    void branchStackPtrRhs(Condition cond, T lhs, Label* label) {
        branchPtr(cond, lhs, getStackPointer(), label);
    }
1297 #endif // !JS_CODEGEN_ARM64
1298 
1299   public:
    // Request that profiling instrumentation be emitted around subsequent
    // instrumented call sites (see AutoProfilerCallInstrumentation below).
    void enableProfilingInstrumentation() {
        emitProfilingInstrumentation_ = true;
    }
1303 
1304   private:
1305     // This class is used to surround call sites throughout the assembler. This
1306     // is used by callWithABI, and callJit functions, except if suffixed by
1307     // NoProfiler.
    // RAII guard placed around instrumented call sites. NOTE(review): the
    // constructor is defined out of line; it is expected to emit the
    // profiling instrumentation for the surrounded call — confirm in the
    // .cpp. The destructor is intentionally a no-op.
    class AutoProfilerCallInstrumentation {
        MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;

      public:
        explicit AutoProfilerCallInstrumentation(MacroAssembler& masm
                                                 MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
        ~AutoProfilerCallInstrumentation() {}
    };
1316     friend class AutoProfilerCallInstrumentation;
1317 
    // Record a profiler call-site offset; an append failure is propagated as
    // an OOM on this assembler.
    void appendProfilerCallSite(CodeOffset label) {
        propagateOOM(profilerCallSites_.append(label));
    }
1321 
1322     // Fix up the code pointers to be written for locations where profilerCallSite
1323     // emitted moves of RIP to a register.
1324     void linkProfilerCallSites(JitCode* code);
1325 
1326     // This field is used to manage profiling instrumentation output. If
1327     // provided and enabled, then instrumentation will be emitted around call
1328     // sites.
1329     bool emitProfilingInstrumentation_;
1330 
1331     // Record locations of the call sites.
1332     Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;
1333 
1334   public:
1335     void loadBaselineOrIonRaw(Register script, Register dest, Label* failure);
1336     void loadBaselineOrIonNoArgCheck(Register callee, Register dest, Label* failure);
1337 
1338     void loadBaselineFramePtr(Register framePtr, Register dest);
1339 
    // Load the baseline frame pointer derived from |framePtr| and push it.
    // Clobbers |scratch|.
    void pushBaselineFramePtr(Register framePtr, Register scratch) {
        loadBaselineFramePtr(framePtr, scratch);
        push(scratch);
    }
1344 
1345   private:
1346     void handleFailure();
1347 
1348   public:
    // Label branched to on exception paths.
    Label* exceptionLabel() {
        // Exceptions are currently handled the same way as sequential failures.
        return &failureLabel_;
    }
1353 
    // Label branched to on sequential failure paths.
    Label* failureLabel() {
        return &failureLabel_;
    }
1357 
    // Accessors for asmSyncInterruptLabel_.
    Label* asmSyncInterruptLabel() {
        return &asmSyncInterruptLabel_;
    }
    const Label* asmSyncInterruptLabel() const {
        return &asmSyncInterruptLabel_;
    }
    // Accessors for asmStackOverflowLabel_.
    Label* asmStackOverflowLabel() {
        return &asmStackOverflowLabel_;
    }
    const Label* asmStackOverflowLabel() const {
        return &asmStackOverflowLabel_;
    }
    // Accessors for asmOnOutOfBoundsLabel_.
    Label* asmOnOutOfBoundsLabel() {
        return &asmOnOutOfBoundsLabel_;
    }
    const Label* asmOnOutOfBoundsLabel() const {
        return &asmOnOutOfBoundsLabel_;
    }
    // Accessors for asmOnConversionErrorLabel_.
    Label* asmOnConversionErrorLabel() {
        return &asmOnConversionErrorLabel_;
    }
    const Label* asmOnConversionErrorLabel() const {
        return &asmOnConversionErrorLabel_;
    }
1382 
1383     bool asmMergeWith(const MacroAssembler& masm);
1384     void finish();
1385     void link(JitCode* code);
1386 
1387     void assumeUnreachable(const char* output);
1388 
1389     template<typename T>
1390     void assertTestInt32(Condition cond, const T& value, const char* output);
1391 
1392     void printf(const char* output);
1393     void printf(const char* output, Register value);
1394 
1395 #ifdef JS_TRACE_LOGGING
1396     void tracelogStartId(Register logger, uint32_t textId, bool force = false);
1397     void tracelogStartId(Register logger, Register textId);
1398     void tracelogStartEvent(Register logger, Register event);
1399     void tracelogStopId(Register logger, uint32_t textId, bool force = false);
1400     void tracelogStopId(Register logger, Register textId);
1401 #endif
1402 
// Expands to a call of method##Double or method##Float32, selected by |type|.
// Used only by the floating-point convenience wrappers directly below.
#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2)    \
    MOZ_ASSERT(IsFloatingPointType(type));                              \
    if (type == MIRType_Double)                                         \
        method##Double(arg1d, arg2);                                    \
    else                                                                \
        method##Float32(arg1f, arg2);                                   \
1409 
    // Convenience wrappers dispatching to the Double or Float32 variant of
    // each operation based on |destType|.
    void loadConstantFloatingPoint(double d, float f, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
    }
    void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
    }
    void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
    }
    void convertInt32ToFloatingPoint(Register src, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
    }
1422 
1423 #undef DISPATCH_FLOATING_POINT_OP
1424 
1425     void convertValueToFloatingPoint(ValueOperand value, FloatRegister output, Label* fail,
1426                                      MIRType outputType);
1427     bool convertValueToFloatingPoint(JSContext* cx, const Value& v, FloatRegister output,
1428                                      Label* fail, MIRType outputType);
1429     bool convertConstantOrRegisterToFloatingPoint(JSContext* cx, ConstantOrRegister src,
1430                                                   FloatRegister output, Label* fail,
1431                                                   MIRType outputType);
1432     void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
1433                                             Label* fail, MIRType outputType);
1434 
1435     void convertInt32ValueToDouble(const Address& address, Register scratch, Label* done);
    // Convert the Value in |value| to a double in |output|; jumps to |fail|
    // on failure. Shorthand for convertValueToFloatingPoint with
    // MIRType_Double.
    void convertValueToDouble(ValueOperand value, FloatRegister output, Label* fail) {
        convertValueToFloatingPoint(value, output, fail, MIRType_Double);
    }
    // Same, for a statically known Value |v|; forwards the boolean result of
    // convertValueToFloatingPoint.
    bool convertValueToDouble(JSContext* cx, const Value& v, FloatRegister output, Label* fail) {
        return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Double);
    }
    // Same, for a ConstantOrRegister source.
    bool convertConstantOrRegisterToDouble(JSContext* cx, ConstantOrRegister src,
                                           FloatRegister output, Label* fail)
    {
        return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Double);
    }
    // Same, for a TypedOrValueRegister source.
    void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label* fail) {
        convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Double);
    }
1450 
    // Convert the Value in |value| to a float32 in |output|; jumps to |fail|
    // on failure. Shorthand for convertValueToFloatingPoint with
    // MIRType_Float32.
    void convertValueToFloat(ValueOperand value, FloatRegister output, Label* fail) {
        convertValueToFloatingPoint(value, output, fail, MIRType_Float32);
    }
    // Same, for a statically known Value |v|; forwards the boolean result of
    // convertValueToFloatingPoint.
    bool convertValueToFloat(JSContext* cx, const Value& v, FloatRegister output, Label* fail) {
        return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Float32);
    }
    // Same, for a ConstantOrRegister source.
    bool convertConstantOrRegisterToFloat(JSContext* cx, ConstantOrRegister src,
                                          FloatRegister output, Label* fail)
    {
        return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Float32);
    }
    // Same, for a TypedOrValueRegister source.
    void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label* fail) {
        convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Float32);
    }
1465 
    // Behavior selector for the value-to-int conversion helpers below.
    // (The names suggest: Normal = plain conversion, NegativeZeroCheck = also
    // reject -0, Truncate = ToInt32-style truncation, ClampToUint8 = clamp to
    // the uint8 range — confirm against convertDoubleToInt's implementation.)
    enum IntConversionBehavior {
        IntConversion_Normal,
        IntConversion_NegativeZeroCheck,
        IntConversion_Truncate,
        IntConversion_ClampToUint8,
    };

    // Restricts which kinds of input values the conversion accepts.
    enum IntConversionInputKind {
        IntConversion_NumbersOnly,
        IntConversion_NumbersOrBoolsOnly,
        IntConversion_Any
    };
1478 
1479     //
1480     // Functions for converting values to int.
1481     //
1482     void convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
1483                             Label* truncateFail, Label* fail, IntConversionBehavior behavior);
1484 
    // Strings may be handled by providing labels to jump to when the behavior
    // is truncation or clamping. The subroutine, usually an OOL call, is
    // passed the unboxed string in |stringReg| and should convert it to a
    // double, storing the result in |temp|.
1489     void convertValueToInt(ValueOperand value, MDefinition* input,
1490                            Label* handleStringEntry, Label* handleStringRejoin,
1491                            Label* truncateDoubleSlow,
1492                            Register stringReg, FloatRegister temp, Register output,
1493                            Label* fail, IntConversionBehavior behavior,
1494                            IntConversionInputKind conversion = IntConversion_Any);
    // Convenience overload with no MDefinition input and no string-handling
    // support: passes null out-of-line labels and InvalidReg for the string
    // register to the full convertValueToInt above.
    void convertValueToInt(ValueOperand value, FloatRegister temp, Register output, Label* fail,
                           IntConversionBehavior behavior)
    {
        convertValueToInt(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
                          fail, behavior);
    }
1501     bool convertValueToInt(JSContext* cx, const Value& v, Register output, Label* fail,
1502                            IntConversionBehavior behavior);
1503     bool convertConstantOrRegisterToInt(JSContext* cx, ConstantOrRegister src, FloatRegister temp,
1504                                         Register output, Label* fail, IntConversionBehavior behavior);
1505     void convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp, Register output,
1506                                   Label* fail, IntConversionBehavior behavior);
1507 
1508     //
1509     // Convenience functions for converting values to int32.
1510     //
    // Convert |value| to an int32 in |output|. |negativeZeroCheck| selects
    // IntConversion_NegativeZeroCheck instead of IntConversion_Normal.
    void convertValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label* fail,
                             bool negativeZeroCheck)
    {
        convertValueToInt(value, temp, output, fail, negativeZeroCheck
                          ? IntConversion_NegativeZeroCheck
                          : IntConversion_Normal);
    }
    // Same, with an MDefinition |input| and an input-kind restriction; string
    // handling is disabled (null labels / InvalidReg).
    void convertValueToInt32(ValueOperand value, MDefinition* input,
                             FloatRegister temp, Register output, Label* fail,
                             bool negativeZeroCheck, IntConversionInputKind conversion = IntConversion_Any)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          negativeZeroCheck
                          ? IntConversion_NegativeZeroCheck
                          : IntConversion_Normal,
                          conversion);
    }
    // Same, for a statically known Value |v|.
    bool convertValueToInt32(JSContext* cx, const Value& v, Register output, Label* fail,
                             bool negativeZeroCheck)
    {
        return convertValueToInt(cx, v, output, fail, negativeZeroCheck
                                 ? IntConversion_NegativeZeroCheck
                                 : IntConversion_Normal);
    }
    // Same, for a ConstantOrRegister source.
    bool convertConstantOrRegisterToInt32(JSContext* cx, ConstantOrRegister src, FloatRegister temp,
                                          Register output, Label* fail, bool negativeZeroCheck)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail, negativeZeroCheck
                                              ? IntConversion_NegativeZeroCheck
                                              : IntConversion_Normal);
    }
    // Same, for a TypedOrValueRegister source.
    void convertTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
                                    Label* fail, bool negativeZeroCheck)
    {
        convertTypedOrValueToInt(src, temp, output, fail, negativeZeroCheck
                                 ? IntConversion_NegativeZeroCheck
                                 : IntConversion_Normal);
    }
1549 
1550     //
1551     // Convenience functions for truncating values to int32.
1552     //
    // Truncate |value| to an int32 (IntConversion_Truncate) in |output|.
    void truncateValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label* fail) {
        convertValueToInt(value, temp, output, fail, IntConversion_Truncate);
    }
    // Same, with full string-handling support (see the comment above
    // convertValueToInt for the label/register protocol).
    void truncateValueToInt32(ValueOperand value, MDefinition* input,
                              Label* handleStringEntry, Label* handleStringRejoin,
                              Label* truncateDoubleSlow,
                              Register stringReg, FloatRegister temp, Register output, Label* fail)
    {
        convertValueToInt(value, input, handleStringEntry, handleStringRejoin, truncateDoubleSlow,
                          stringReg, temp, output, fail, IntConversion_Truncate);
    }
    // Same, with an MDefinition |input| but no string handling.
    void truncateValueToInt32(ValueOperand value, MDefinition* input,
                              FloatRegister temp, Register output, Label* fail)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          IntConversion_Truncate);
    }
    // Same, for a statically known Value |v|.
    bool truncateValueToInt32(JSContext* cx, const Value& v, Register output, Label* fail) {
        return convertValueToInt(cx, v, output, fail, IntConversion_Truncate);
    }
    // Same, for a ConstantOrRegister source.
    bool truncateConstantOrRegisterToInt32(JSContext* cx, ConstantOrRegister src, FloatRegister temp,
                                           Register output, Label* fail)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail, IntConversion_Truncate);
    }
    // Same, for a TypedOrValueRegister source.
    void truncateTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
                                     Label* fail)
    {
        convertTypedOrValueToInt(src, temp, output, fail, IntConversion_Truncate);
    }
1583 
1584     // Convenience functions for clamping values to uint8.
    // Clamp |value| to a uint8 (IntConversion_ClampToUint8) in |output|.
    void clampValueToUint8(ValueOperand value, FloatRegister temp, Register output, Label* fail) {
        convertValueToInt(value, temp, output, fail, IntConversion_ClampToUint8);
    }
    // Same, with string handling; nullptr is passed for the
    // truncateDoubleSlow label (unused for clamping).
    void clampValueToUint8(ValueOperand value, MDefinition* input,
                           Label* handleStringEntry, Label* handleStringRejoin,
                           Register stringReg, FloatRegister temp, Register output, Label* fail)
    {
        convertValueToInt(value, input, handleStringEntry, handleStringRejoin, nullptr,
                          stringReg, temp, output, fail, IntConversion_ClampToUint8);
    }
    // Same, with an MDefinition |input| but no string handling.
    void clampValueToUint8(ValueOperand value, MDefinition* input,
                           FloatRegister temp, Register output, Label* fail)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          IntConversion_ClampToUint8);
    }
    // Same, for a statically known Value |v|.
    bool clampValueToUint8(JSContext* cx, const Value& v, Register output, Label* fail) {
        return convertValueToInt(cx, v, output, fail, IntConversion_ClampToUint8);
    }
    // Same, for a ConstantOrRegister source.
    bool clampConstantOrRegisterToUint8(JSContext* cx, ConstantOrRegister src, FloatRegister temp,
                                        Register output, Label* fail)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail,
                                              IntConversion_ClampToUint8);
    }
    // Same, for a TypedOrValueRegister source.
    void clampTypedOrValueToUint8(TypedOrValueRegister src, FloatRegister temp, Register output,
                                  Label* fail)
    {
        convertTypedOrValueToInt(src, temp, output, fail, IntConversion_ClampToUint8);
    }
1615 
1616   public:
1617     class AfterICSaveLive {
1618         friend class MacroAssembler;
AfterICSaveLive(uint32_t initialStack)1619         explicit AfterICSaveLive(uint32_t initialStack)
1620 #ifdef JS_DEBUG
1621           : initialStack(initialStack)
1622 #endif
1623         {}
1624 
1625       public:
1626 #ifdef JS_DEBUG
1627         uint32_t initialStack;
1628 #endif
1629         uint32_t alignmentPadding;
1630     };
1631 
1632     void alignFrameForICArguments(AfterICSaveLive& aic) PER_ARCH;
1633     void restoreFrameAlignmentForICArguments(AfterICSaveLive& aic) PER_ARCH;
1634 
1635     AfterICSaveLive icSaveLive(LiveRegisterSet& liveRegs);
1636     bool icBuildOOLFakeExitFrame(void* fakeReturnAddr, AfterICSaveLive& aic);
1637     void icRestoreLive(LiveRegisterSet& liveRegs, AfterICSaveLive& aic);
1638 
1639     // Align the stack pointer based on the number of arguments which are pushed
1640     // on the stack, such that the JitFrameLayout would be correctly aligned on
1641     // the JitStackAlignment.
1642     void alignJitStackBasedOnNArgs(Register nargs);
1643     void alignJitStackBasedOnNArgs(uint32_t nargs);
1644 
    // Debug-only runtime check that the stack pointer satisfies
    // SP % alignment == offset (after |offset| is normalized into
    // [0, alignment)). On mismatch, execution hits a breakpoint. Compiles to
    // nothing in non-DEBUG builds. |alignment| must be a power of two.
    void assertStackAlignment(uint32_t alignment, int32_t offset = 0) {
#ifdef DEBUG
        Label ok, bad;
        MOZ_ASSERT(IsPowerOfTwo(alignment));

        // Wrap around the offset to be a non-negative number.
        offset %= alignment;
        if (offset < 0)
            offset += alignment;

        // Test if each bit from offset is set.
        uint32_t off = offset;
        while (off) {
            // 1 << CountTrailingZeroes32(off) isolates the lowest set bit of
            // |off|; that bit must also be set in SP, else jump to |bad|.
            uint32_t lowestBit = 1 << mozilla::CountTrailingZeroes32(off);
            branchTestStackPtr(Assembler::Zero, Imm32(lowestBit), &bad);
            off ^= lowestBit;
        }

        // Check that all remaining bits are zero.
        // (alignment - 1) ^ offset is the mask of low SP bits that must be
        // clear; if they are, jump to |ok|, otherwise fall through to |bad|.
        branchTestStackPtr(Assembler::Zero, Imm32((alignment - 1) ^ offset), &ok);

        bind(&bad);
        breakpoint();
        bind(&ok);
#endif
    }
1671 };
1672 
1673 static inline Assembler::DoubleCondition
JSOpToDoubleCondition(JSOp op)1674 JSOpToDoubleCondition(JSOp op)
1675 {
1676     switch (op) {
1677       case JSOP_EQ:
1678       case JSOP_STRICTEQ:
1679         return Assembler::DoubleEqual;
1680       case JSOP_NE:
1681       case JSOP_STRICTNE:
1682         return Assembler::DoubleNotEqualOrUnordered;
1683       case JSOP_LT:
1684         return Assembler::DoubleLessThan;
1685       case JSOP_LE:
1686         return Assembler::DoubleLessThanOrEqual;
1687       case JSOP_GT:
1688         return Assembler::DoubleGreaterThan;
1689       case JSOP_GE:
1690         return Assembler::DoubleGreaterThanOrEqual;
1691       default:
1692         MOZ_CRASH("Unexpected comparison operation");
1693     }
1694 }
1695 
1696 // Note: the op may have been inverted during lowering (to put constants in a
1697 // position where they can be immediates), so it is important to use the
1698 // lir->jsop() instead of the mir->jsop() when it is present.
1699 static inline Assembler::Condition
JSOpToCondition(JSOp op,bool isSigned)1700 JSOpToCondition(JSOp op, bool isSigned)
1701 {
1702     if (isSigned) {
1703         switch (op) {
1704           case JSOP_EQ:
1705           case JSOP_STRICTEQ:
1706             return Assembler::Equal;
1707           case JSOP_NE:
1708           case JSOP_STRICTNE:
1709             return Assembler::NotEqual;
1710           case JSOP_LT:
1711             return Assembler::LessThan;
1712           case JSOP_LE:
1713             return Assembler::LessThanOrEqual;
1714           case JSOP_GT:
1715             return Assembler::GreaterThan;
1716           case JSOP_GE:
1717             return Assembler::GreaterThanOrEqual;
1718           default:
1719             MOZ_CRASH("Unrecognized comparison operation");
1720         }
1721     } else {
1722         switch (op) {
1723           case JSOP_EQ:
1724           case JSOP_STRICTEQ:
1725             return Assembler::Equal;
1726           case JSOP_NE:
1727           case JSOP_STRICTNE:
1728             return Assembler::NotEqual;
1729           case JSOP_LT:
1730             return Assembler::Below;
1731           case JSOP_LE:
1732             return Assembler::BelowOrEqual;
1733           case JSOP_GT:
1734             return Assembler::Above;
1735           case JSOP_GE:
1736             return Assembler::AboveOrEqual;
1737           default:
1738             MOZ_CRASH("Unrecognized comparison operation");
1739         }
1740     }
1741 }
1742 
1743 static inline size_t
StackDecrementForCall(uint32_t alignment,size_t bytesAlreadyPushed,size_t bytesToPush)1744 StackDecrementForCall(uint32_t alignment, size_t bytesAlreadyPushed, size_t bytesToPush)
1745 {
1746     return bytesToPush +
1747            ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
1748 }
1749 
// Identity overload: lets ABIArgIter (below) accept containers that already
// hold MIRType values.
static inline MIRType
ToMIRType(MIRType t)
{
    return t;
}
1755 
// Iterates over the ABI argument locations for a sequence of argument types,
// using the platform-specific ABIArgGenerator. |VecT| must provide
// operator[] and length(), and its element type must be accepted by some
// ToMIRType overload.
template <class VecT>
class ABIArgIter
{
    ABIArgGenerator gen_;   // platform-specific argument location assigner
    const VecT& types_;     // argument types being iterated (not owned)
    unsigned i_;            // index of the current argument

    // Feed the current type to the generator, unless iteration is complete.
    void settle() { if (!done()) gen_.next(ToMIRType(types_[i_])); }

  public:
    explicit ABIArgIter(const VecT& types) : types_(types), i_(0) { settle(); }
    void operator++(int) { MOZ_ASSERT(!done()); i_++; settle(); }
    bool done() const { return i_ == types_.length(); }

    // Current argument's ABI location; only valid while !done().
    ABIArg* operator->() { MOZ_ASSERT(!done()); return &gen_.current(); }
    ABIArg& operator*() { MOZ_ASSERT(!done()); return gen_.current(); }

    unsigned index() const { MOZ_ASSERT(!done()); return i_; }
    MIRType mirType() const { MOZ_ASSERT(!done()); return ToMIRType(types_[i_]); }
    uint32_t stackBytesConsumedSoFar() const { return gen_.stackBytesConsumedSoFar(); }
};
1777 
1778 } // namespace jit
1779 } // namespace js
1780 
1781 #endif /* jit_MacroAssembler_h */
1782