1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "jit/CodeGenerator.h"
8 
9 #include "mozilla/Assertions.h"
10 #include "mozilla/Casting.h"
11 #include "mozilla/DebugOnly.h"
12 #include "mozilla/EndianUtils.h"
13 #include "mozilla/EnumeratedArray.h"
14 #include "mozilla/EnumeratedRange.h"
15 #include "mozilla/IntegerTypeTraits.h"
16 #include "mozilla/MathAlgorithms.h"
17 #include "mozilla/ScopeExit.h"
18 #include "mozilla/Tuple.h"
19 
20 #include <limits>
21 #include <type_traits>
22 #include <utility>
23 
24 #include "jslibmath.h"
25 #include "jsmath.h"
26 #include "jsnum.h"
27 
28 #include "builtin/Eval.h"
29 #include "builtin/MapObject.h"
30 #include "builtin/RegExp.h"
31 #include "builtin/SelfHostingDefines.h"
32 #include "builtin/String.h"
33 #include "gc/Nursery.h"
34 #include "irregexp/RegExpTypes.h"
35 #include "jit/BaselineCodeGen.h"
36 #include "jit/CompileInfo.h"
37 #include "jit/InlineScriptTree.h"
38 #include "jit/Invalidation.h"
39 #include "jit/IonIC.h"
40 #include "jit/IonScript.h"
41 #include "jit/JitcodeMap.h"
42 #include "jit/JitFrames.h"
43 #include "jit/JitRealm.h"
44 #include "jit/JitRuntime.h"
45 #include "jit/JitSpewer.h"
46 #include "jit/JitZone.h"
47 #include "jit/Linker.h"
48 #include "jit/Lowering.h"
49 #include "jit/MIRGenerator.h"
50 #include "jit/MoveEmitter.h"
51 #include "jit/RangeAnalysis.h"
52 #include "jit/SafepointIndex.h"
53 #include "jit/SharedICHelpers.h"
54 #include "jit/StackSlotAllocator.h"
55 #include "jit/VMFunctions.h"
56 #include "jit/WarpSnapshot.h"
57 #include "js/experimental/JitInfo.h"  // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
58 #include "js/friend/DOMProxy.h"  // JS::ExpandoAndGeneration
59 #include "js/RegExpFlags.h"      // JS::RegExpFlag
60 #include "js/ScalarType.h"       // js::Scalar::Type
61 #include "proxy/DOMProxy.h"
62 #include "util/CheckedArithmetic.h"
63 #include "util/Unicode.h"
64 #include "vm/ArrayBufferViewObject.h"
65 #include "vm/AsyncFunction.h"
66 #include "vm/AsyncIteration.h"
67 #include "vm/BuiltinObjectKind.h"
68 #include "vm/FunctionFlags.h"  // js::FunctionFlags
69 #include "vm/MatchPairs.h"
70 #include "vm/PlainObject.h"  // js::PlainObject
71 #include "vm/RegExpObject.h"
72 #include "vm/RegExpStatics.h"
73 #include "vm/StringObject.h"
74 #include "vm/StringType.h"
75 #include "vm/TraceLogging.h"
76 #include "vm/TypedArrayObject.h"
77 #ifdef MOZ_VTUNE
78 #  include "vtune/VTuneWrapper.h"
79 #endif
80 #include "wasm/WasmGC.h"
81 #include "wasm/WasmStubs.h"
82 
83 #include "builtin/Boolean-inl.h"
84 #include "jit/ABIFunctionList-inl.h"
85 #include "jit/MacroAssembler-inl.h"
86 #include "jit/shared/CodeGenerator-shared-inl.h"
87 #include "jit/shared/Lowering-shared-inl.h"
88 #include "jit/TemplateObject-inl.h"
89 #include "jit/VMFunctionList-inl.h"
90 #include "vm/Interpreter-inl.h"
91 #include "vm/JSScript-inl.h"
92 
93 using namespace js;
94 using namespace js::jit;
95 
96 using JS::GenericNaN;
97 using mozilla::AssertedCast;
98 using mozilla::DebugOnly;
99 using mozilla::FloatingPoint;
100 using mozilla::Maybe;
101 using mozilla::NegativeInfinity;
102 using mozilla::PositiveInfinity;
103 
104 using JS::ExpandoAndGeneration;
105 
106 namespace js {
107 namespace jit {
108 
109 #ifdef CHECK_OSIPOINT_REGISTERS
110 template <class Op>
111 static void HandleRegisterDump(Op op, MacroAssembler& masm,
112                                LiveRegisterSet liveRegs, Register activation,
113                                Register scratch) {
114   const size_t baseOffset = JitActivation::offsetOfRegs();
115 
116   // Handle live GPRs.
117   for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
118     Register reg = *iter;
119     Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
120 
121     if (reg == activation) {
122       // To use the original value of the activation register (that's
123       // now on top of the stack), we need the scratch register.
124       masm.push(scratch);
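      // After pushing |scratch|, the activation register's original value
      // (pushed by our caller) sits one word above the stack pointer, which
      // is why it is reloaded from sp + sizeof(uintptr_t) below.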
125       masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
126       op(scratch, dump);
127       masm.pop(scratch);
128     } else {
129       op(reg, dump);
130     }
131   }
132 
133   // Handle live FPRs.
134   for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
135     FloatRegister reg = *iter;
136     Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
137     op(reg, dump);
138   }
139 }
140 
141 class StoreOp {
142   MacroAssembler& masm;
143 
144  public:
145   explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
146 
147   void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
148   void operator()(FloatRegister reg, Address dump) {
149     if (reg.isDouble()) {
150       masm.storeDouble(reg, dump);
151     } else if (reg.isSingle()) {
152       masm.storeFloat32(reg, dump);
153     } else if (reg.isSimd128()) {
154       MOZ_CRASH("Unexpected case for SIMD");
155     } else {
156       MOZ_CRASH("Unexpected register type.");
157     }
158   }
159 };
160 
161 class VerifyOp {
162   MacroAssembler& masm;
163   Label* failure_;
164 
165  public:
166   VerifyOp(MacroAssembler& masm, Label* failure)
167       : masm(masm), failure_(failure) {}
168 
169   void operator()(Register reg, Address dump) {
170     masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
171   }
172   void operator()(FloatRegister reg, Address dump) {
173     if (reg.isDouble()) {
174       ScratchDoubleScope scratch(masm);
175       masm.loadDouble(dump, scratch);
176       masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
177     } else if (reg.isSingle()) {
178       ScratchFloat32Scope scratch(masm);
179       masm.loadFloat32(dump, scratch);
180       masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
181     } else if (reg.isSimd128()) {
182       MOZ_CRASH("Unexpected case for SIMD");
183     } else {
184       MOZ_CRASH("Unexpected register type.");
185     }
186   }
187 };
188 
189 void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
190   // Ensure the live registers stored by callVM did not change between
191   // the call and this OsiPoint. Try-catch relies on this invariant.
192 
193   // Load pointer to the JitActivation in a scratch register.
194   AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
195   Register scratch = allRegs.takeAny();
196   masm.push(scratch);
197   masm.loadJitActivation(scratch);
198 
199   // If we should not check registers (because the instruction did not call
200   // into the VM, or a GC happened), we're done.
201   Label failure, done;
202   Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
203   masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
204 
205   // Having more than one VM function call made in one visit function at
206   // runtime is a sec-critical error, because if we conservatively assume that
207   // one of the function calls can re-enter Ion, then the invalidation process
208   // will potentially add a call at a random location, by patching the code
209   // before the return address.
210   masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
211 
212   // Set checkRegs to 0, so that we don't try to verify registers after we
213   // return from this script to the caller.
214   masm.store32(Imm32(0), checkRegs);
215 
216   // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
217   // temps after calling into the VM. This is fine because no other
218   // instructions (including this OsiPoint) will depend on them. Backtracking
219   // can also use the same register for an input and an output.
220   // These are marked as clobbered and shouldn't get checked.
221   LiveRegisterSet liveRegs;
222   liveRegs.set() = RegisterSet::Intersect(
223       safepoint->liveRegs().set(),
224       RegisterSet::Not(safepoint->clobberedRegs().set()));
225 
226   VerifyOp op(masm, &failure);
227   HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
228 
229   masm.jump(&done);
230 
231   // Do not profile the callWithABI that occurs below.  This is to avoid a
232   // rare corner case that occurs when profiling interacts with itself:
233   //
234   // When slow profiling assertions are turned on, FunctionBoundary ops
235   // (which update the profiler pseudo-stack) may emit a callVM, which
236   // forces them to have an osi point associated with them.  The
237   // FunctionBoundary for inline function entry is added to the caller's
238   // graph with a PC from the caller's code, but during codegen it modifies
239   // Gecko Profiler instrumentation to add the callee as the current top-most
240   // script. When codegen gets to the OSIPoint, and the callWithABI below is
241   // emitted, the codegen thinks that the current frame is the callee, but
242   // the PC it's using from the OSIPoint refers to the caller.  This causes
243   // the profiler instrumentation of the callWithABI below to ASSERT, since
244   // the script and pc are mismatched.  To avoid this, we simply omit
245   // instrumentation for these callWithABIs.
246 
247   // Any live register captured by a safepoint (other than temp registers)
248   // must remain unchanged between the call and the OsiPoint instruction.
249   masm.bind(&failure);
250   masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
251 
252   masm.bind(&done);
253   masm.pop(scratch);
254 }
255 
256 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
257   if (!checkOsiPointRegisters) {
258     return false;
259   }
260 
261   if (safepoint->liveRegs().emptyGeneral() &&
262       safepoint->liveRegs().emptyFloat()) {
263     return false;  // No registers to check.
264   }
265 
266   return true;
267 }
268 
269 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
270   if (!shouldVerifyOsiPointRegs(safepoint)) {
271     return;
272   }
273 
274   // Set checkRegs to 0. If we perform a VM call, the instruction
275   // will set it to 1.
276   AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
277   Register scratch = allRegs.takeAny();
278   masm.push(scratch);
279   masm.loadJitActivation(scratch);
280   Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
281   masm.store32(Imm32(0), checkRegs);
282   masm.pop(scratch);
283 }
284 
285 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
286   // Store a copy of all live registers before performing the call.
287   // When we reach the OsiPoint, we can use this to check nothing
288   // modified them in the meantime.
289 
290   // Load pointer to the JitActivation in a scratch register.
291   AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
292   Register scratch = allRegs.takeAny();
293   masm.push(scratch);
294   masm.loadJitActivation(scratch);
295 
296   Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
297   masm.add32(Imm32(1), checkRegs);
298 
299   StoreOp op(masm);
300   HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
301 
302   masm.pop(scratch);
303 }
304 #endif  // CHECK_OSIPOINT_REGISTERS
305 
306 // Before making any call into C++ code, ensure that volatile registers are
307 // evicted by the register allocator.
308 void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins,
309                                    const Register* dynStack) {
310   TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
311   const VMFunctionData& fun = GetVMFunction(id);
312 
313   // Stack is:
314   //    ... frame ...
315   //    [args]
316 #ifdef DEBUG
317   MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
318   pushedArgs_ = 0;
319 #endif
320 
321 #ifdef CHECK_OSIPOINT_REGISTERS
322   if (shouldVerifyOsiPointRegs(ins->safepoint())) {
323     StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
324   }
325 #endif
326 
327 #ifdef DEBUG
328   if (ins->mirRaw()) {
329     MOZ_ASSERT(ins->mirRaw()->isInstruction());
330     MInstruction* mir = ins->mirRaw()->toInstruction();
331     MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
332 
333     // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
334     // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
335     // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
336     // interrupt callbacks can call JS (chrome JS or shell testing functions).
337     bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
338     if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
339       const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
340       masm.move32(Imm32(1), ReturnReg);
341       masm.store32(ReturnReg, AbsoluteAddress(addr));
342     }
343   }
344 #endif
345 
346   // Push an exit frame descriptor. If |dynStack| is a valid pointer to a
347   // register, then |framePushed()| is added to its value to fill the frame
348   // descriptor.
349   if (dynStack) {
350     masm.addPtr(Imm32(masm.framePushed()), *dynStack);
351     masm.makeFrameDescriptor(*dynStack, FrameType::IonJS,
352                              ExitFrameLayout::Size());
353     masm.Push(*dynStack);  // descriptor
354   } else {
355     masm.pushStaticFrameDescriptor(FrameType::IonJS, ExitFrameLayout::Size());
356   }
357 
358   // Call the wrapper function.  The wrapper is in charge of unwinding the
359   // stack when returning from the call.  Failures are handled with exceptions
360   // based on the return value of the C functions.  To guard the outcome of
361   // the returned value, use another LIR instruction.
362   uint32_t callOffset = masm.callJit(code);
363   markSafepointAt(callOffset, ins);
364 
365 #ifdef DEBUG
366   // Reset the disallowArbitraryCode flag after the call.
367   {
368     const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
369     masm.push(ReturnReg);
370     masm.move32(Imm32(0), ReturnReg);
371     masm.store32(ReturnReg, AbsoluteAddress(addr));
372     masm.pop(ReturnReg);
373   }
374 #endif
375 
376   // Remove the rest of the frame left on the stack. We remove the return
377   // address, which is implicitly popped when returning.
378   int framePop = sizeof(ExitFrameLayout) - sizeof(void*);
379 
380   // Pop arguments from framePushed.
381   masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
382   // Stack is:
383   //    ... frame ...
384 }
385 
386 template <typename Fn, Fn fn>
387 void CodeGenerator::callVM(LInstruction* ins, const Register* dynStack) {
388   VMFunctionId id = VMFunctionToId<Fn, fn>::id;
389   callVMInternal(id, ins, dynStack);
390 }
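
// A minimal sketch of a direct callVM use (the LIR instruction and VM function
// names below are hypothetical, for illustration only). Arguments are pushed
// last-to-first with pushArg, excluding the JSContext* and any trailing
// out-parameter; the wrapper leaves the result in the return register(s):
//
//   using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
//   pushArg(ToValue(lir, LMyInstruction::ValueIndex));  // last explicit arg
//   pushArg(ToRegister(lir->object()));                 // first explicit arg
//   callVM<Fn, MyVMFunction>(lir);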
391 
392 // ArgSeq stores arguments for OutOfLineCallVM.
393 //
394 // OutOfLineCallVM instances are created with the "oolCallVM" function. The
395 // third argument of this function is an instance of a class providing a
396 // "generate" method in charge of pushing the arguments, with "pushArg", for a
397 // VMFunction.
398 //
399 // Such a list of arguments can be created with the "ArgList" function, which
400 // creates one instance of "ArgSeq" with the argument types inferred.
401 //
402 // The list of arguments must be written in the same order as if you were
403 // calling the function in C++.
404 //
405 // Example:
406 //   ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
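//
// A fuller sketch of how ArgSeq, a store wrapper, and oolCallVM fit together
// (the VM function and LIR accessors below are hypothetical, shown only to
// illustrate the pattern used throughout this file):
//
//   using Fn = bool (*)(JSContext*, HandleString, MutableHandleValue);
//   auto* ool = oolCallVM<Fn, MyVMFunction>(
//       lir, ArgList(ToRegister(lir->string())), StoreValueTo(output));
//   // ... inline fast path ...
//   masm.jump(ool->entry());   // slow path: call the VM function out of line
//   masm.bind(ool->rejoin());  // the result is now in |output|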
407 
408 template <typename... ArgTypes>
409 class ArgSeq {
410   mozilla::Tuple<std::remove_reference_t<ArgTypes>...> args_;
411 
412   template <std::size_t... ISeq>
413   inline void generate(CodeGenerator* codegen,
414                        std::index_sequence<ISeq...>) const {
415     // Arguments are pushed in reverse order, from last argument to first
416     // argument.
417     (codegen->pushArg(mozilla::Get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
418   }
419 
420  public:
421   explicit ArgSeq(ArgTypes&&... args)
422       : args_(std::forward<ArgTypes>(args)...) {}
423 
424   inline void generate(CodeGenerator* codegen) const {
425     generate(codegen, std::index_sequence_for<ArgTypes...>{});
426   }
427 
428 #ifdef DEBUG
429   static constexpr size_t numArgs = sizeof...(ArgTypes);
430 #endif
431 };
432 
433 template <typename... ArgTypes>
434 inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
435   return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
436 }
437 
438 // Store wrappers, to generate the right move of data after the VM call.
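//
// Illustratively (the VM function shapes below are examples, not an exhaustive
// list), the wrapper is chosen to match how the VM function returns its
// result:
//
//   bool      (*)(JSContext*, ...)                     -> StoreNothing()
//   JSObject* (*)(JSContext*, ...)                     -> StoreRegisterTo(obj)
//   bool      (*)(JSContext*, ..., bool*)              -> StoreRegisterTo(reg)
//   bool      (*)(JSContext*, ..., MutableHandleValue) -> StoreValueTo(out)
//   bool      (*)(JSContext*, ..., double*)        -> StoreFloatRegisterTo(fp)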
439 
440 struct StoreNothing {
441   inline void generate(CodeGenerator* codegen) const {}
442   inline LiveRegisterSet clobbered() const {
443     return LiveRegisterSet();  // No register gets clobbered
444   }
445 };
446 
447 class StoreRegisterTo {
448  private:
449   Register out_;
450 
451  public:
452   explicit StoreRegisterTo(Register out) : out_(out) {}
453 
454   inline void generate(CodeGenerator* codegen) const {
455     // It's okay to use storePointerResultTo here - the VMFunction wrapper
456     // ensures the upper bytes are zero for bool/int32 return values.
457     codegen->storePointerResultTo(out_);
458   }
459   inline LiveRegisterSet clobbered() const {
460     LiveRegisterSet set;
461     set.add(out_);
462     return set;
463   }
464 };
465 
466 class StoreFloatRegisterTo {
467  private:
468   FloatRegister out_;
469 
470  public:
471   explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
472 
473   inline void generate(CodeGenerator* codegen) const {
474     codegen->storeFloatResultTo(out_);
475   }
476   inline LiveRegisterSet clobbered() const {
477     LiveRegisterSet set;
478     set.add(out_);
479     return set;
480   }
481 };
482 
483 template <typename Output>
484 class StoreValueTo_ {
485  private:
486   Output out_;
487 
488  public:
489   explicit StoreValueTo_(const Output& out) : out_(out) {}
490 
491   inline void generate(CodeGenerator* codegen) const {
492     codegen->storeResultValueTo(out_);
493   }
494   inline LiveRegisterSet clobbered() const {
495     LiveRegisterSet set;
496     set.add(out_);
497     return set;
498   }
499 };
500 
501 template <typename Output>
502 StoreValueTo_<Output> StoreValueTo(const Output& out) {
503   return StoreValueTo_<Output>(out);
504 }
505 
506 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
507 class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
508  private:
509   LInstruction* lir_;
510   ArgSeq args_;
511   StoreOutputTo out_;
512 
513  public:
514   OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
515                   const StoreOutputTo& out)
516       : lir_(lir), args_(args), out_(out) {}
517 
518   void accept(CodeGenerator* codegen) override {
519     codegen->visitOutOfLineCallVM(this);
520   }
521 
522   LInstruction* lir() const { return lir_; }
523   const ArgSeq& args() const { return args_; }
524   const StoreOutputTo& out() const { return out_; }
525 };
526 
527 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
528 OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
529                                         const StoreOutputTo& out) {
530   MOZ_ASSERT(lir->mirRaw());
531   MOZ_ASSERT(lir->mirRaw()->isInstruction());
532 
533 #ifdef DEBUG
534   VMFunctionId id = VMFunctionToId<Fn, fn>::id;
535   const VMFunctionData& fun = GetVMFunction(id);
536   MOZ_ASSERT(fun.explicitArgs == args.numArgs);
537   MOZ_ASSERT(fun.returnsData() !=
538              (std::is_same_v<StoreOutputTo, StoreNothing>));
539 #endif
540 
541   OutOfLineCode* ool = new (alloc())
542       OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
543   addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
544   return ool;
545 }
546 
547 template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
548 void CodeGenerator::visitOutOfLineCallVM(
549     OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
550   LInstruction* lir = ool->lir();
551 
552   saveLive(lir);
553   ool->args().generate(this);
554   callVM<Fn, fn>(lir);
555   ool->out().generate(this);
556   restoreLiveIgnore(lir, ool->out().clobbered());
557   masm.jump(ool->rejoin());
558 }
559 
560 class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
561  private:
562   LInstruction* lir_;
563   size_t cacheIndex_;
564   size_t cacheInfoIndex_;
565 
566  public:
567   OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
568                       size_t cacheInfoIndex)
569       : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
570 
571   void bind(MacroAssembler* masm) override {
572     // The binding of the initial jump is done in
573     // CodeGenerator::visitOutOfLineICFallback.
574   }
575 
576   size_t cacheIndex() const { return cacheIndex_; }
577   size_t cacheInfoIndex() const { return cacheInfoIndex_; }
578   LInstruction* lir() const { return lir_; }
579 
580   void accept(CodeGenerator* codegen) override {
581     codegen->visitOutOfLineICFallback(this);
582   }
583 };
584 
585 void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
586   if (cacheIndex == SIZE_MAX) {
587     masm.setOOM();
588     return;
589   }
590 
591   DataPtr<IonIC> cache(this, cacheIndex);
592   MInstruction* mir = lir->mirRaw()->toInstruction();
593   cache->setScriptedLocation(mir->block()->info().script(),
594                              mir->resumePoint()->pc());
595 
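  // Emit the IC's patchable entry jump: a placeholder word is loaded into
  // |temp| (icInfo_ records the offset so it can be patched later), and we
  // then jump through the pointer stored at that address.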
596   Register temp = cache->scratchRegisterForEntryJump();
597   icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
598   masm.jump(Address(temp, 0));
599 
600   MOZ_ASSERT(!icInfo_.empty());
601 
602   OutOfLineICFallback* ool =
603       new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
604   addOutOfLineCode(ool, mir);
605 
606   masm.bind(ool->rejoin());
607   cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
608 }
609 
610 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
611   LInstruction* lir = ool->lir();
612   size_t cacheIndex = ool->cacheIndex();
613   size_t cacheInfoIndex = ool->cacheInfoIndex();
614 
615   DataPtr<IonIC> ic(this, cacheIndex);
616 
617   // Register the location of the OOL path in the IC.
618   ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
619 
620   switch (ic->kind()) {
621     case CacheKind::GetProp:
622     case CacheKind::GetElem: {
623       IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
624 
625       saveLive(lir);
626 
627       pushArg(getPropIC->id());
628       pushArg(getPropIC->value());
629       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
630       pushArg(ImmGCPtr(gen->outerInfo().script()));
631 
632       using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
633                           HandleValue, HandleValue, MutableHandleValue);
634       callVM<Fn, IonGetPropertyIC::update>(lir);
635 
636       StoreValueTo(getPropIC->output()).generate(this);
637       restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
638 
639       masm.jump(ool->rejoin());
640       return;
641     }
642     case CacheKind::GetPropSuper:
643     case CacheKind::GetElemSuper: {
644       IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
645 
646       saveLive(lir);
647 
648       pushArg(getPropSuperIC->id());
649       pushArg(getPropSuperIC->receiver());
650       pushArg(getPropSuperIC->object());
651       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
652       pushArg(ImmGCPtr(gen->outerInfo().script()));
653 
654       using Fn =
655           bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
656                    HandleValue, HandleValue, MutableHandleValue);
657       callVM<Fn, IonGetPropSuperIC::update>(lir);
658 
659       StoreValueTo(getPropSuperIC->output()).generate(this);
660       restoreLiveIgnore(lir,
661                         StoreValueTo(getPropSuperIC->output()).clobbered());
662 
663       masm.jump(ool->rejoin());
664       return;
665     }
666     case CacheKind::SetProp:
667     case CacheKind::SetElem: {
668       IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
669 
670       saveLive(lir);
671 
672       pushArg(setPropIC->rhs());
673       pushArg(setPropIC->id());
674       pushArg(setPropIC->object());
675       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
676       pushArg(ImmGCPtr(gen->outerInfo().script()));
677 
678       using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
679                           HandleObject, HandleValue, HandleValue);
680       callVM<Fn, IonSetPropertyIC::update>(lir);
681 
682       restoreLive(lir);
683 
684       masm.jump(ool->rejoin());
685       return;
686     }
687     case CacheKind::GetName: {
688       IonGetNameIC* getNameIC = ic->asGetNameIC();
689 
690       saveLive(lir);
691 
692       pushArg(getNameIC->environment());
693       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
694       pushArg(ImmGCPtr(gen->outerInfo().script()));
695 
696       using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
697                           MutableHandleValue);
698       callVM<Fn, IonGetNameIC::update>(lir);
699 
700       StoreValueTo(getNameIC->output()).generate(this);
701       restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
702 
703       masm.jump(ool->rejoin());
704       return;
705     }
706     case CacheKind::BindName: {
707       IonBindNameIC* bindNameIC = ic->asBindNameIC();
708 
709       saveLive(lir);
710 
711       pushArg(bindNameIC->environment());
712       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
713       pushArg(ImmGCPtr(gen->outerInfo().script()));
714 
715       using Fn =
716           JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
717       callVM<Fn, IonBindNameIC::update>(lir);
718 
719       StoreRegisterTo(bindNameIC->output()).generate(this);
720       restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
721 
722       masm.jump(ool->rejoin());
723       return;
724     }
725     case CacheKind::GetIterator: {
726       IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
727 
728       saveLive(lir);
729 
730       pushArg(getIteratorIC->value());
731       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
732       pushArg(ImmGCPtr(gen->outerInfo().script()));
733 
734       using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
735                                HandleValue);
736       callVM<Fn, IonGetIteratorIC::update>(lir);
737 
738       StoreRegisterTo(getIteratorIC->output()).generate(this);
739       restoreLiveIgnore(lir,
740                         StoreRegisterTo(getIteratorIC->output()).clobbered());
741 
742       masm.jump(ool->rejoin());
743       return;
744     }
745     case CacheKind::OptimizeSpreadCall: {
746       auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
747 
748       saveLive(lir);
749 
750       pushArg(optimizeSpreadCallIC->value());
751       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
752       pushArg(ImmGCPtr(gen->outerInfo().script()));
753 
754       using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
755                           HandleValue, bool*);
756       callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
757 
758       StoreRegisterTo(optimizeSpreadCallIC->output()).generate(this);
759       restoreLiveIgnore(
760           lir, StoreRegisterTo(optimizeSpreadCallIC->output()).clobbered());
761 
762       masm.jump(ool->rejoin());
763       return;
764     }
765     case CacheKind::In: {
766       IonInIC* inIC = ic->asInIC();
767 
768       saveLive(lir);
769 
770       pushArg(inIC->object());
771       pushArg(inIC->key());
772       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
773       pushArg(ImmGCPtr(gen->outerInfo().script()));
774 
775       using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
776                           HandleObject, bool*);
777       callVM<Fn, IonInIC::update>(lir);
778 
779       StoreRegisterTo(inIC->output()).generate(this);
780       restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
781 
782       masm.jump(ool->rejoin());
783       return;
784     }
785     case CacheKind::HasOwn: {
786       IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
787 
788       saveLive(lir);
789 
790       pushArg(hasOwnIC->id());
791       pushArg(hasOwnIC->value());
792       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
793       pushArg(ImmGCPtr(gen->outerInfo().script()));
794 
795       using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
796                           HandleValue, int32_t*);
797       callVM<Fn, IonHasOwnIC::update>(lir);
798 
799       StoreRegisterTo(hasOwnIC->output()).generate(this);
800       restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
801 
802       masm.jump(ool->rejoin());
803       return;
804     }
805     case CacheKind::CheckPrivateField: {
806       IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
807 
808       saveLive(lir);
809 
810       pushArg(checkPrivateFieldIC->id());
811       pushArg(checkPrivateFieldIC->value());
812 
813       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
814       pushArg(ImmGCPtr(gen->outerInfo().script()));
815 
816       using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
817                           HandleValue, HandleValue, bool*);
818       callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
819 
820       StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
821       restoreLiveIgnore(
822           lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
823 
824       masm.jump(ool->rejoin());
825       return;
826     }
827     case CacheKind::InstanceOf: {
828       IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
829 
830       saveLive(lir);
831 
832       pushArg(hasInstanceOfIC->rhs());
833       pushArg(hasInstanceOfIC->lhs());
834       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
835       pushArg(ImmGCPtr(gen->outerInfo().script()));
836 
837       using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
838                           HandleValue lhs, HandleObject rhs, bool* res);
839       callVM<Fn, IonInstanceOfIC::update>(lir);
840 
841       StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
842       restoreLiveIgnore(lir,
843                         StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
844 
845       masm.jump(ool->rejoin());
846       return;
847     }
848     case CacheKind::UnaryArith: {
849       IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
850 
851       saveLive(lir);
852 
853       pushArg(unaryArithIC->input());
854       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
855       pushArg(ImmGCPtr(gen->outerInfo().script()));
856 
857       using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
858                           IonUnaryArithIC * stub, HandleValue val,
859                           MutableHandleValue res);
860       callVM<Fn, IonUnaryArithIC::update>(lir);
861 
862       StoreValueTo(unaryArithIC->output()).generate(this);
863       restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
864 
865       masm.jump(ool->rejoin());
866       return;
867     }
868     case CacheKind::ToPropertyKey: {
869       IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
870 
871       saveLive(lir);
872 
873       pushArg(toPropertyKeyIC->input());
874       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
875       pushArg(ImmGCPtr(gen->outerInfo().script()));
876 
877       using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
878                           IonToPropertyKeyIC * ic, HandleValue val,
879                           MutableHandleValue res);
880       callVM<Fn, IonToPropertyKeyIC::update>(lir);
881 
882       StoreValueTo(toPropertyKeyIC->output()).generate(this);
883       restoreLiveIgnore(lir,
884                         StoreValueTo(toPropertyKeyIC->output()).clobbered());
885 
886       masm.jump(ool->rejoin());
887       return;
888     }
889     case CacheKind::BinaryArith: {
890       IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
891 
892       saveLive(lir);
893 
894       pushArg(binaryArithIC->rhs());
895       pushArg(binaryArithIC->lhs());
896       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
897       pushArg(ImmGCPtr(gen->outerInfo().script()));
898 
899       using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
900                           IonBinaryArithIC * stub, HandleValue lhs,
901                           HandleValue rhs, MutableHandleValue res);
902       callVM<Fn, IonBinaryArithIC::update>(lir);
903 
904       StoreValueTo(binaryArithIC->output()).generate(this);
905       restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
906 
907       masm.jump(ool->rejoin());
908       return;
909     }
910     case CacheKind::Compare: {
911       IonCompareIC* compareIC = ic->asCompareIC();
912 
913       saveLive(lir);
914 
915       pushArg(compareIC->rhs());
916       pushArg(compareIC->lhs());
917       icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
918       pushArg(ImmGCPtr(gen->outerInfo().script()));
919 
920       using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
921                           IonCompareIC * stub, HandleValue lhs, HandleValue rhs,
922                           bool* res);
923       callVM<Fn, IonCompareIC::update>(lir);
924 
925       StoreRegisterTo(compareIC->output()).generate(this);
926       restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
927 
928       masm.jump(ool->rejoin());
929       return;
930     }
931     case CacheKind::Call:
932     case CacheKind::TypeOf:
933     case CacheKind::ToBool:
934     case CacheKind::GetIntrinsic:
935     case CacheKind::NewArray:
936     case CacheKind::NewObject:
937       MOZ_CRASH("Unsupported IC");
938   }
939   MOZ_CRASH();
940 }
941 
942 StringObject* MNewStringObject::templateObj() const {
943   return &templateObj_->as<StringObject>();
944 }
945 
946 CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
947                              MacroAssembler* masm)
948     : CodeGeneratorSpecific(gen, graph, masm),
949       ionScriptLabels_(gen->alloc()),
950       ionNurseryObjectLabels_(gen->alloc()),
951       scriptCounts_(nullptr),
952       realmStubsToReadBarrier_(0) {}
953 
954 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
955 
956 class OutOfLineZeroIfNaN : public OutOfLineCodeBase<CodeGenerator> {
957   LInstruction* lir_;
958   FloatRegister input_;
959   Register output_;
960 
961  public:
962   OutOfLineZeroIfNaN(LInstruction* lir, FloatRegister input, Register output)
963       : lir_(lir), input_(input), output_(output) {}
964 
965   void accept(CodeGenerator* codegen) override {
966     codegen->visitOutOfLineZeroIfNaN(this);
967   }
968   LInstruction* lir() const { return lir_; }
969   FloatRegister input() const { return input_; }
970   Register output() const { return output_; }
971 };
972 
973 void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
974   ValueOperand operand = ToValue(lir, LValueToInt32::Input);
975   Register output = ToRegister(lir->output());
976   FloatRegister temp = ToFloatRegister(lir->tempFloat());
977 
978   Label fails;
979   if (lir->mode() == LValueToInt32::TRUNCATE) {
980     OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
981 
982     // We can only handle strings in truncation contexts, like bitwise
983     // operations.
984     Register stringReg = ToRegister(lir->temp());
985     using Fn = bool (*)(JSContext*, JSString*, double*);
986     auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
987                                                     StoreFloatRegisterTo(temp));
988     Label* stringEntry = oolString->entry();
989     Label* stringRejoin = oolString->rejoin();
990 
991     masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
992                               oolDouble->entry(), stringReg, temp, output,
993                               &fails);
994     masm.bind(oolDouble->rejoin());
995   } else if (lir->mode() == LValueToInt32::TRUNCATE_NOWRAP) {
996     auto* ool = new (alloc()) OutOfLineZeroIfNaN(lir, temp, output);
997     addOutOfLineCode(ool, lir->mir());
998     masm.truncateNoWrapValueToInt32(operand, temp, output, ool->entry(),
999                                     &fails);
1000     masm.bind(ool->rejoin());
1001   } else {
1002     masm.convertValueToInt32(operand, temp, output, &fails,
1003                              lir->mirNormal()->needsNegativeZeroCheck(),
1004                              lir->mirNormal()->conversion());
1005   }
1006 
1007   bailoutFrom(&fails, lir->snapshot());
1008 }
1009 
1010 void CodeGenerator::visitOutOfLineZeroIfNaN(OutOfLineZeroIfNaN* ool) {
1011   FloatRegister input = ool->input();
1012   Register output = ool->output();
1013 
1014   // NaN triggers the failure path for branchTruncateDoubleToInt32() on x86,
1015   // x64, and ARM64, so handle it here. In all other cases bail out.
1016 
1017   Label fails;
1018   if (input.isSingle()) {
1019     masm.branchFloat(Assembler::DoubleOrdered, input, input, &fails);
1020   } else {
1021     masm.branchDouble(Assembler::DoubleOrdered, input, input, &fails);
1022   }
1023 
1024   // ToInteger(NaN) is 0.
1025   masm.move32(Imm32(0), output);
1026   masm.jump(ool->rejoin());
1027 
1028   bailoutFrom(&fails, ool->lir()->snapshot());
1029 }
1030 
1031 void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
1032   ValueOperand operand = ToValue(lir, LValueToDouble::Input);
1033   FloatRegister output = ToFloatRegister(lir->output());
1034 
1035   // Set if we can handle other primitives besides strings, as long as they're
1036   // guaranteed to never throw. This rules out symbols and BigInts, but allows
1037   // booleans, undefined, and null.
1038   bool hasNonStringPrimitives =
1039       lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1040 
1041   Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1042 
1043   {
1044     ScratchTagScope tag(masm, operand);
1045     masm.splitTagForTest(operand, tag);
1046 
1047     masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1048     masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1049 
1050     if (hasNonStringPrimitives) {
1051       masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1052       masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1053       masm.branchTestNull(Assembler::Equal, tag, &isNull);
1054     }
1055   }
1056 
1057   bailout(lir->snapshot());
1058 
1059   if (hasNonStringPrimitives) {
1060     masm.bind(&isNull);
1061     masm.loadConstantDouble(0.0, output);
1062     masm.jump(&done);
1063   }
1064 
1065   if (hasNonStringPrimitives) {
1066     masm.bind(&isUndefined);
1067     masm.loadConstantDouble(GenericNaN(), output);
1068     masm.jump(&done);
1069   }
1070 
1071   if (hasNonStringPrimitives) {
1072     masm.bind(&isBool);
1073     masm.boolValueToDouble(operand, output);
1074     masm.jump(&done);
1075   }
1076 
1077   masm.bind(&isInt32);
1078   masm.int32ValueToDouble(operand, output);
1079   masm.jump(&done);
1080 
1081   masm.bind(&isDouble);
1082   masm.unboxDouble(operand, output);
1083   masm.bind(&done);
1084 }
1085 
1086 void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
1087   ValueOperand operand = ToValue(lir, LValueToFloat32::Input);
1088   FloatRegister output = ToFloatRegister(lir->output());
1089 
1090   // Set if we can handle other primitives besides strings, as long as they're
1091   // guaranteed to never throw. This rules out symbols and BigInts, but allows
1092   // booleans, undefined, and null.
1093   bool hasNonStringPrimitives =
1094       lir->mir()->conversion() == MToFPInstruction::NonStringPrimitives;
1095 
1096   Label isDouble, isInt32, isBool, isNull, isUndefined, done;
1097 
1098   {
1099     ScratchTagScope tag(masm, operand);
1100     masm.splitTagForTest(operand, tag);
1101 
1102     masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
1103     masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
1104 
1105     if (hasNonStringPrimitives) {
1106       masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
1107       masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
1108       masm.branchTestNull(Assembler::Equal, tag, &isNull);
1109     }
1110   }
1111 
1112   bailout(lir->snapshot());
1113 
1114   if (hasNonStringPrimitives) {
1115     masm.bind(&isNull);
1116     masm.loadConstantFloat32(0.0f, output);
1117     masm.jump(&done);
1118   }
1119 
1120   if (hasNonStringPrimitives) {
1121     masm.bind(&isUndefined);
1122     masm.loadConstantFloat32(float(GenericNaN()), output);
1123     masm.jump(&done);
1124   }
1125 
1126   if (hasNonStringPrimitives) {
1127     masm.bind(&isBool);
1128     masm.boolValueToFloat32(operand, output);
1129     masm.jump(&done);
1130   }
1131 
1132   masm.bind(&isInt32);
1133   masm.int32ValueToFloat32(operand, output);
1134   masm.jump(&done);
1135 
1136   masm.bind(&isDouble);
1137   // ARM and MIPS may not have a double register available if we've
1138   // allocated output as a float32.
1139 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
1140   ScratchDoubleScope fpscratch(masm);
1141   masm.unboxDouble(operand, fpscratch);
1142   masm.convertDoubleToFloat32(fpscratch, output);
1143 #else
1144   masm.unboxDouble(operand, output);
1145   masm.convertDoubleToFloat32(output, output);
1146 #endif
1147   masm.bind(&done);
1148 }
1149 
1150 void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
1151   ValueOperand operand = ToValue(lir, LValueToBigInt::Input);
1152   Register output = ToRegister(lir->output());
1153 
1154   using Fn = BigInt* (*)(JSContext*, HandleValue);
1155   auto* ool =
1156       oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));
1157 
1158   Register tag = masm.extractTag(operand, output);
1159 
1160   Label notBigInt, done;
1161   masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
1162   masm.unboxBigInt(operand, output);
1163   masm.jump(&done);
1164   masm.bind(&notBigInt);
1165 
1166   masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
1167   masm.branchTestString(Assembler::Equal, tag, ool->entry());
1168 
1169   // ToBigInt(object) can have side-effects; all other types throw a TypeError.
1170   bailout(lir->snapshot());
1171 
1172   masm.bind(ool->rejoin());
1173   masm.bind(&done);
1174 }
1175 
1176 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
1177   masm.convertInt32ToDouble(ToRegister(lir->input()),
1178                             ToFloatRegister(lir->output()));
1179 }
1180 
1181 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
1182   masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
1183                               ToFloatRegister(lir->output()));
1184 }
1185 
1186 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
1187   masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
1188                               ToFloatRegister(lir->output()));
1189 }
1190 
1191 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
1192   masm.convertInt32ToFloat32(ToRegister(lir->input()),
1193                              ToFloatRegister(lir->output()));
1194 }
1195 
1196 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
1197   Label fail;
1198   FloatRegister input = ToFloatRegister(lir->input());
1199   Register output = ToRegister(lir->output());
1200   masm.convertDoubleToInt32(input, output, &fail,
1201                             lir->mir()->needsNegativeZeroCheck());
1202   bailoutFrom(&fail, lir->snapshot());
1203 }
1204 
1205 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
1206   Label fail;
1207   FloatRegister input = ToFloatRegister(lir->input());
1208   Register output = ToRegister(lir->output());
1209   masm.convertFloat32ToInt32(input, output, &fail,
1210                              lir->mir()->needsNegativeZeroCheck());
1211   bailoutFrom(&fail, lir->snapshot());
1212 }
1213 
1214 void CodeGenerator::visitDoubleToIntegerInt32(LDoubleToIntegerInt32* lir) {
1215   FloatRegister input = ToFloatRegister(lir->input());
1216   Register output = ToRegister(lir->output());
1217 
1218   auto* ool = new (alloc()) OutOfLineZeroIfNaN(lir, input, output);
1219   addOutOfLineCode(ool, lir->mir());
1220 
1221   masm.branchTruncateDoubleToInt32(input, output, ool->entry());
1222   masm.bind(ool->rejoin());
1223 }
1224 
1225 void CodeGenerator::visitFloat32ToIntegerInt32(LFloat32ToIntegerInt32* lir) {
1226   FloatRegister input = ToFloatRegister(lir->input());
1227   Register output = ToRegister(lir->output());
1228 
1229   auto* ool = new (alloc()) OutOfLineZeroIfNaN(lir, input, output);
1230   addOutOfLineCode(ool, lir->mir());
1231 
1232   masm.branchTruncateFloat32ToInt32(input, output, ool->entry());
1233   masm.bind(ool->rejoin());
1234 }
1235 
1236 void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
1237 #ifdef JS_64BIT
1238   Register output = ToRegister(lir->output());
1239 
1240   // This is a no-op if the input can't be negative. In debug builds assert
1241   // (1) the upper 32 bits are zero and (2) the value <= INT32_MAX so that sign
1242   // extension isn't needed.
1243   if (!lir->mir()->canBeNegative()) {
1244     MOZ_ASSERT(ToRegister(lir->input()) == output);
1245 #  ifdef DEBUG
1246     Label ok;
1247     masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(INT32_MAX), &ok);
1248     masm.assumeUnreachable("LInt32ToIntPtr: unexpected range for value");
1249     masm.bind(&ok);
1250 #  endif
1251     return;
1252   }
1253 
1254   const LAllocation* input = lir->input();
1255   if (input->isRegister()) {
1256     masm.move32SignExtendToPtr(ToRegister(input), output);
1257   } else {
1258     masm.load32SignExtendToPtr(ToAddress(input), output);
1259   }
1260 #else
1261   MOZ_CRASH("Not used on 32-bit platforms");
1262 #endif
1263 }
1264 
1265 void CodeGenerator::visitNonNegativeIntPtrToInt32(
1266     LNonNegativeIntPtrToInt32* lir) {
1267 #ifdef JS_64BIT
1268   Register output = ToRegister(lir->output());
1269   MOZ_ASSERT(ToRegister(lir->input()) == output);
1270 
1271   Label bail;
1272   masm.guardNonNegativeIntPtrToInt32(output, &bail);
1273   bailoutFrom(&bail, lir->snapshot());
1274 #else
1275   MOZ_CRASH("Not used on 32-bit platforms");
1276 #endif
1277 }
1278 
1279 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
1280   Register input = ToRegister(lir->input());
1281   FloatRegister output = ToFloatRegister(lir->output());
1282   masm.convertIntPtrToDouble(input, output);
1283 }
1284 
1285 void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
1286   Register output = ToRegister(lir->output());
1287   MOZ_ASSERT(ToRegister(lir->input()) == output);
1288 
1289   uint32_t byteSize = lir->mir()->byteSize();
1290 
1291 #ifdef DEBUG
1292   Label ok;
1293   masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
1294   masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
1295   masm.bind(&ok);
1296 #endif
1297 
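  // Worked example (illustrative): for a 4-byte access on a DataView with a
  // byteLength of 10, the valid offsets are 0..6, so the adjusted length is
  // 10 - (4 - 1) = 7. If the subtraction below goes negative, we bail out.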
1298   Label bail;
1299   masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
1300   bailoutFrom(&bail, lir->snapshot());
1301 }
1302 
1303 void CodeGenerator::emitOOLTestObject(Register objreg,
1304                                       Label* ifEmulatesUndefined,
1305                                       Label* ifDoesntEmulateUndefined,
1306                                       Register scratch) {
1307   saveVolatile(scratch);
1308   using Fn = bool (*)(JSObject * obj);
1309   masm.setupUnalignedABICall(scratch);
1310   masm.passABIArg(objreg);
1311   masm.callWithABI<Fn, js::EmulatesUndefined>();
1312   masm.storeCallBoolResult(scratch);
1313   restoreVolatile(scratch);
1314 
1315   masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
1316   masm.jump(ifDoesntEmulateUndefined);
1317 }
1318 
1319 // Base out-of-line code generator for all tests of the truthiness of an
1320 // object, where the object might not be truthy.  (Recall that per spec all
1321 // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
1322 // flag to permit objects to look like |undefined| in certain contexts,
1323 // including in object truthiness testing.)  We check truthiness inline except
1324 // when we're testing it on a proxy (or if TI guarantees us that the specified
1325 // object will never emulate |undefined|), in which case out-of-line code will
1326 // call EmulatesUndefined for a conclusive answer.
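//
// An illustrative use (a hypothetical caller, mirroring the pattern used for
// Value truthiness tests later in this file):
//
//   auto* ool = new (alloc()) OutOfLineTestObject();
//   addOutOfLineCode(ool, lir->mir());
//   Register obj = masm.extractObject(value, scratch1);
//   testObjectEmulatesUndefined(obj, ifFalsy, ifTruthy, scratch2, ool);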
1327 class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
1328   Register objreg_;
1329   Register scratch_;
1330 
1331   Label* ifEmulatesUndefined_;
1332   Label* ifDoesntEmulateUndefined_;
1333 
1334 #ifdef DEBUG
1335   bool initialized() { return ifEmulatesUndefined_ != nullptr; }
1336 #endif
1337 
1338  public:
1339   OutOfLineTestObject()
1340       : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
1341 
1342   void accept(CodeGenerator* codegen) final {
1343     MOZ_ASSERT(initialized());
1344     codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
1345                                ifDoesntEmulateUndefined_, scratch_);
1346   }
1347 
1348   // Specify the register where the object to be tested is found, labels to
1349   // jump to if the object is truthy or falsy, and a scratch register for
1350   // use in the out-of-line path.
1351   void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
1352                           Label* ifDoesntEmulateUndefined, Register scratch) {
1353     MOZ_ASSERT(!initialized());
1354     MOZ_ASSERT(ifEmulatesUndefined);
1355     objreg_ = objreg;
1356     scratch_ = scratch;
1357     ifEmulatesUndefined_ = ifEmulatesUndefined;
1358     ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
1359   }
1360 };
1361 
1362 // A subclass of OutOfLineTestObject containing two extra labels, for use when
1363 // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
1364 // code.  The user should bind these labels in inline code, and specify them as
1365 // targets via setInputAndTargets, as appropriate.
1366 class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
1367   Label label1_;
1368   Label label2_;
1369 
1370  public:
1371   OutOfLineTestObjectWithLabels() = default;
1372 
1373   Label* label1() { return &label1_; }
1374   Label* label2() { return &label2_; }
1375 };
1376 
1377 void CodeGenerator::testObjectEmulatesUndefinedKernel(
1378     Register objreg, Label* ifEmulatesUndefined,
1379     Label* ifDoesntEmulateUndefined, Register scratch,
1380     OutOfLineTestObject* ool) {
1381   ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
1382                           scratch);
1383 
1384   // Perform a fast-path check of the object's class flags if the object's
1385   // not a proxy.  Let out-of-line code handle the slow cases that require
1386   // saving registers, making a function call, and restoring registers.
1387   masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
1388                                        ifEmulatesUndefined);
1389 }
1390 
1391 void CodeGenerator::branchTestObjectEmulatesUndefined(
1392     Register objreg, Label* ifEmulatesUndefined,
1393     Label* ifDoesntEmulateUndefined, Register scratch,
1394     OutOfLineTestObject* ool) {
1395   MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
1396              "ifDoesntEmulateUndefined will be bound to the fallthrough path");
1397 
1398   testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1399                                     ifDoesntEmulateUndefined, scratch, ool);
1400   masm.bind(ifDoesntEmulateUndefined);
1401 }
1402 
1403 void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
1404                                                 Label* ifEmulatesUndefined,
1405                                                 Label* ifDoesntEmulateUndefined,
1406                                                 Register scratch,
1407                                                 OutOfLineTestObject* ool) {
1408   testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
1409                                     ifDoesntEmulateUndefined, scratch, ool);
1410   masm.jump(ifDoesntEmulateUndefined);
1411 }
1412 
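// Emit the truthiness check for a single value type. If the tag doesn't match
// |type| (and the type test isn't skipped), control falls through to the test
// emitted for the next candidate type.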
1413 void CodeGenerator::testValueTruthyForType(
1414     JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
1415     Register scratch1, Register scratch2, FloatRegister fr, Label* ifTruthy,
1416     Label* ifFalsy, OutOfLineTestObject* ool, bool skipTypeTest) {
1417 #ifdef DEBUG
1418   if (skipTypeTest) {
1419     Label expected;
1420     masm.branchTestType(Assembler::Equal, tag, type, &expected);
1421     masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
1422     masm.bind(&expected);
1423   }
1424 #endif
1425 
1426   // Handle irregular types first.
1427   switch (type) {
1428     case JSVAL_TYPE_UNDEFINED:
1429     case JSVAL_TYPE_NULL:
1430       // Undefined and null are falsy.
1431       if (!skipTypeTest) {
1432         masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
1433       } else {
1434         masm.jump(ifFalsy);
1435       }
1436       return;
1437     case JSVAL_TYPE_SYMBOL:
1438       // Symbols are truthy.
1439       if (!skipTypeTest) {
1440         masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
1441       } else {
1442         masm.jump(ifTruthy);
1443       }
1444       return;
1445     case JSVAL_TYPE_OBJECT: {
1446       Label notObject;
1447       if (!skipTypeTest) {
1448         masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
1449       }
1450       ScratchTagScopeRelease _(&tag);
1451       Register objreg = masm.extractObject(value, scratch1);
1452       testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, scratch2, ool);
1453       masm.bind(&notObject);
1454       return;
1455     }
1456     default:
1457       break;
1458   }
1459 
1460   // Check the type of the value (unless this is the last possible type).
1461   Label differentType;
1462   if (!skipTypeTest) {
1463     masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
1464   }
1465 
1466   // Branch if the value is falsy.
1467   ScratchTagScopeRelease _(&tag);
1468   switch (type) {
1469     case JSVAL_TYPE_BOOLEAN: {
1470       masm.branchTestBooleanTruthy(false, value, ifFalsy);
1471       break;
1472     }
1473     case JSVAL_TYPE_INT32: {
1474       masm.branchTestInt32Truthy(false, value, ifFalsy);
1475       break;
1476     }
1477     case JSVAL_TYPE_STRING: {
1478       masm.branchTestStringTruthy(false, value, ifFalsy);
1479       break;
1480     }
1481     case JSVAL_TYPE_BIGINT: {
1482       masm.branchTestBigIntTruthy(false, value, ifFalsy);
1483       break;
1484     }
1485     case JSVAL_TYPE_DOUBLE: {
1486       masm.unboxDouble(value, fr);
1487       masm.branchTestDoubleTruthy(false, fr, ifFalsy);
1488       break;
1489     }
1490     default:
1491       MOZ_CRASH("Unexpected value type");
1492   }
1493 
1494   // If we reach this point, the value is truthy.  We fall through for
1495   // truthy on the last test; otherwise, branch.
1496   if (!skipTypeTest) {
1497     masm.jump(ifTruthy);
1498   }
1499 
1500   masm.bind(&differentType);
1501 }
1502 
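// Emit truthiness checks for every possible value type, testing previously
// observed types first. Branches to |ifTruthy| or |ifFalsy|, and falls through
// when the value passes the last emitted test as truthy.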
1503 void CodeGenerator::testValueTruthyKernel(
1504     const ValueOperand& value, const LDefinition* scratch1,
1505     const LDefinition* scratch2, FloatRegister fr, TypeDataList observedTypes,
1506     Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool) {
1507   Register scratch1Reg = ToRegister(scratch1);
1508   Register scratch2Reg = ToRegister(scratch2);
1509   ScratchTagScope tag(masm, value);
1510   masm.splitTagForTest(value, tag);
1511 
1512   const uint32_t NumTypes = 9;
1513   const auto& defaultOrder = {
1514       JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN,
1515       JSVAL_TYPE_INT32,     JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
1516       JSVAL_TYPE_DOUBLE,    JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
1517   MOZ_ASSERT(defaultOrder.size() == NumTypes);
1518 
1519   Vector<JSValueType, NumTypes, SystemAllocPolicy> remaining;
1520   MOZ_ALWAYS_TRUE(remaining.reserve(defaultOrder.size()));
1521   remaining.infallibleAppend(defaultOrder.begin(), defaultOrder.end());
1522 
1523   uint32_t numRemaining = remaining.length();
1524 
1525   // Generate tests for previously observed types first.
1526   // The TypeDataList is sorted by descending frequency.
1527   for (auto& observed : observedTypes) {
1528     JSValueType type = observed.type();
1529 
1530     testValueTruthyForType(type, tag, value, scratch1Reg, scratch2Reg, fr,
1531                            ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
1532     MOZ_ASSERT(std::count(remaining.begin(), remaining.end(), type) == 1);
1533     remaining.eraseIfEqual(type);
1534     numRemaining--;
1535   }
1536 
1537   // Generate tests for remaining types.
1538   for (auto type : remaining) {
1539     // We don't need a type test for the last possible type.
1540     bool skipTypeTest = numRemaining == 1;
1541     testValueTruthyForType(type, tag, value, scratch1Reg, scratch2Reg, fr,
1542                            ifTruthy, ifFalsy, ool, skipTypeTest);
1543     numRemaining--;
1544   }
1545   MOZ_ASSERT(numRemaining == 0);
1546 
1547   // We fall through if the final test is truthy.
1548 }
1549 
1550 void CodeGenerator::testValueTruthy(const ValueOperand& value,
1551                                     const LDefinition* scratch1,
1552                                     const LDefinition* scratch2,
1553                                     FloatRegister fr,
1554                                     TypeDataList observedTypes, Label* ifTruthy,
1555                                     Label* ifFalsy, OutOfLineTestObject* ool) {
1556   testValueTruthyKernel(value, scratch1, scratch2, fr, observedTypes, ifTruthy,
1557                         ifFalsy, ool);
1558   masm.jump(ifTruthy);
1559 }
1560 
1561 void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
1562   Label* ifTrueLabel = getJumpLabelForBranch(lir->ifTrue());
1563   Label* ifFalseLabel = getJumpLabelForBranch(lir->ifFalse());
1564   Register input = ToRegister(lir->input());
1565 
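  // When one successor is the next block in emission order, branch only for
  // the other successor and fall through; otherwise emit both a branch and a
  // jump.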
1566   if (isNextBlock(lir->ifFalse()->lir())) {
1567     masm.branchIfBigIntIsNonZero(input, ifTrueLabel);
1568   } else if (isNextBlock(lir->ifTrue()->lir())) {
1569     masm.branchIfBigIntIsZero(input, ifFalseLabel);
1570   } else {
1571     masm.branchIfBigIntIsZero(input, ifFalseLabel);
1572     jumpToBlock(lir->ifTrue());
1573   }
1574 }
1575 
1576 void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
1577   Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1578   Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1579   Register input = ToRegister(lir->input());
1580 
1581   auto* ool = new (alloc()) OutOfLineTestObject();
1582   addOutOfLineCode(ool, lir->mir());
1583 
1584   testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
1585                               ool);
1586 }
1587 
1588 void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
1589   auto* ool = new (alloc()) OutOfLineTestObject();
1590   addOutOfLineCode(ool, lir->mir());
1591 
1592   Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
1593   Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
1594 
1595   testValueTruthy(ToValue(lir, LTestVAndBranch::Input), lir->temp1(),
1596                   lir->temp2(), ToFloatRegister(lir->tempFloat()),
1597                   lir->mir()->observedTypes(), truthy, falsy, ool);
1598 }
1599 
1600 void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
1601   Register input = ToRegister(lir->input());
1602   Register output = ToRegister(lir->output());
1603   const JSAtomState& names = gen->runtime->names();
1604   Label true_, done;
1605 
1606   masm.branchTest32(Assembler::NonZero, input, input, &true_);
1607   masm.movePtr(ImmGCPtr(names.false_), output);
1608   masm.jump(&done);
1609 
1610   masm.bind(&true_);
1611   masm.movePtr(ImmGCPtr(names.true_), output);
1612 
1613   masm.bind(&done);
1614 }
1615 
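// Load the static string for a small non-negative integer from the runtime's
// static strings table, or jump to |ool| if the value is out of range.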
1616 void CodeGenerator::emitIntToString(Register input, Register output,
1617                                     Label* ool) {
1618   masm.boundsCheck32PowerOfTwo(input, StaticStrings::INT_STATIC_LIMIT, ool);
1619 
1620   // Fast path for small integers.
1621   masm.movePtr(ImmPtr(&gen->runtime->staticStrings().intStaticTable), output);
1622   masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
1623 }
1624 
1625 void CodeGenerator::visitIntToString(LIntToString* lir) {
1626   Register input = ToRegister(lir->input());
1627   Register output = ToRegister(lir->output());
1628 
1629   using Fn = JSLinearString* (*)(JSContext*, int);
1630   OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
1631       lir, ArgList(input), StoreRegisterTo(output));
1632 
1633   emitIntToString(input, output, ool->entry());
1634 
1635   masm.bind(ool->rejoin());
1636 }
1637 
1638 void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
1639   FloatRegister input = ToFloatRegister(lir->input());
1640   Register temp = ToRegister(lir->tempInt());
1641   Register output = ToRegister(lir->output());
1642 
1643   using Fn = JSString* (*)(JSContext*, double);
1644   OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
1645       lir, ArgList(input), StoreRegisterTo(output));
1646 
1647   // Try double to integer conversion and run integer to string code.
1648   masm.convertDoubleToInt32(input, temp, ool->entry(), false);
1649   emitIntToString(temp, output, ool->entry());
1650 
1651   masm.bind(ool->rejoin());
1652 }
1653 
1654 void CodeGenerator::visitValueToString(LValueToString* lir) {
1655   ValueOperand input = ToValue(lir, LValueToString::Input);
1656   Register output = ToRegister(lir->output());
1657 
1658   using Fn = JSString* (*)(JSContext*, HandleValue);
1659   OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
1660       lir, ArgList(input), StoreRegisterTo(output));
1661 
1662   Label done;
1663   Register tag = masm.extractTag(input, output);
1664   const JSAtomState& names = gen->runtime->names();
1665 
1666   // String
1667   {
1668     Label notString;
1669     masm.branchTestString(Assembler::NotEqual, tag, &notString);
1670     masm.unboxString(input, output);
1671     masm.jump(&done);
1672     masm.bind(&notString);
1673   }
1674 
1675   // Integer
1676   {
1677     Label notInteger;
1678     masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
1679     Register unboxed = ToTempUnboxRegister(lir->tempToUnbox());
1680     unboxed = masm.extractInt32(input, unboxed);
1681     emitIntToString(unboxed, output, ool->entry());
1682     masm.jump(&done);
1683     masm.bind(&notInteger);
1684   }
1685 
1686   // Double
1687   {
1688     // Note: no fast path. It would need two extra registers and could only
1689     // convert doubles that are integers below StaticStrings::INT_STATIC_LIMIT.
1690     masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
1691   }
1692 
1693   // Undefined
1694   {
1695     Label notUndefined;
1696     masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
1697     masm.movePtr(ImmGCPtr(names.undefined), output);
1698     masm.jump(&done);
1699     masm.bind(&notUndefined);
1700   }
1701 
1702   // Null
1703   {
1704     Label notNull;
1705     masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
1706     masm.movePtr(ImmGCPtr(names.null), output);
1707     masm.jump(&done);
1708     masm.bind(&notNull);
1709   }
1710 
1711   // Boolean
1712   {
1713     Label notBoolean, true_;
1714     masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
1715     masm.branchTestBooleanTruthy(true, input, &true_);
1716     masm.movePtr(ImmGCPtr(names.false_), output);
1717     masm.jump(&done);
1718     masm.bind(&true_);
1719     masm.movePtr(ImmGCPtr(names.true_), output);
1720     masm.jump(&done);
1721     masm.bind(&notBoolean);
1722   }
1723 
1724   // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
1725   if (lir->mir()->mightHaveSideEffects()) {
1726     // Object
1727     if (lir->mir()->supportSideEffects()) {
1728       masm.branchTestObject(Assembler::Equal, tag, ool->entry());
1729     } else {
1730       // Bail.
1731       MOZ_ASSERT(lir->mir()->needsSnapshot());
1732       Label bail;
1733       masm.branchTestObject(Assembler::Equal, tag, &bail);
1734       bailoutFrom(&bail, lir->snapshot());
1735     }
1736 
1737     // Symbol
1738     if (lir->mir()->supportSideEffects()) {
1739       masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
1740     } else {
1741       // Bail.
1742       MOZ_ASSERT(lir->mir()->needsSnapshot());
1743       Label bail;
1744       masm.branchTestSymbol(Assembler::Equal, tag, &bail);
1745       bailoutFrom(&bail, lir->snapshot());
1746     }
1747   }
1748 
1749   // BigInt
1750   {
1751     // No fastpath currently implemented.
1752     masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
1753   }
1754 
1755   masm.assumeUnreachable("Unexpected type for LValueToString.");
1756 
1757   masm.bind(&done);
1758   masm.bind(ool->rejoin());
1759 }
1760 
1761 using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);
1762 
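// Helper for the string post-barrier paths below: calls |fun| on the store
// buffer to add or remove the cell address |holder + offset|, preserving only
// the registers in |liveVolatiles| across the ABI call.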
1763 static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
1764                                     size_t offset, Register buffer,
1765                                     LiveGeneralRegisterSet& liveVolatiles,
1766                                     StoreBufferMutationFn fun) {
1767   Label callVM;
1768   Label exit;
1769 
1770   // Call into the VM to barrier the write. The only registers that need to
1771   // be preserved are those in liveVolatiles, so once they are saved on the
1772   // stack all volatile registers are available for use.
1773   masm.bind(&callVM);
1774   masm.PushRegsInMask(liveVolatiles);
1775 
1776   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
1777   regs.takeUnchecked(buffer);
1778   regs.takeUnchecked(holder);
1779   Register addrReg = regs.takeAny();
1780 
1781   masm.computeEffectiveAddress(Address(holder, offset), addrReg);
1782 
1783   bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
1784   if (needExtraReg) {
1785     masm.push(holder);
1786     masm.setupUnalignedABICall(holder);
1787   } else {
1788     masm.setupUnalignedABICall(regs.takeAny());
1789   }
1790   masm.passABIArg(buffer);
1791   masm.passABIArg(addrReg);
1792   masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun), MoveOp::GENERAL,
1793                    CheckUnsafeCallWithABI::DontCheckOther);
1794 
1795   if (needExtraReg) {
1796     masm.pop(holder);
1797   }
1798   masm.PopRegsInMask(liveVolatiles);
1799   masm.bind(&exit);
1800 }
1801 
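// Emit the post-write barrier for storing the string |next| into
// |holder + offset| in place of |prev|: the address is added to the store
// buffer when it newly refers to a nursery string, and removed when it no
// longer does.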
1802 // Warning: this function modifies prev and next.
1803 static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
1804                                   size_t offset, Register prev, Register next,
1805                                   LiveGeneralRegisterSet& liveVolatiles) {
1806   Label exit;
1807   Label checkRemove, putCell;
1808 
1809   // if (next && (buffer = next->storeBuffer()))
1810   // but we never pass in nullptr for next.
1811   Register storebuffer = next;
1812   masm.loadStoreBuffer(next, storebuffer);
1813   masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);
1814 
1815   // if (prev && prev->storeBuffer())
1816   masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
1817   masm.loadStoreBuffer(prev, prev);
1818   masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);
1819 
1820   // buffer->putCell(cellp)
1821   masm.bind(&putCell);
1822   EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1823                           JSString::addCellAddressToStoreBuffer);
1824   masm.jump(&exit);
1825 
1826   // if (prev && (buffer = prev->storeBuffer()))
1827   masm.bind(&checkRemove);
1828   masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
1829   masm.loadStoreBuffer(prev, storebuffer);
1830   masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
1831   EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
1832                           JSString::removeCellAddressFromStoreBuffer);
1833 
1834   masm.bind(&exit);
1835 }
1836 
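// Clone the LRegExp's source object. If its RegExpShared is available, the
// clone is allocated inline from a template object; otherwise (or if inline
// allocation fails) CloneRegExpObject is called in the VM.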
1837 void CodeGenerator::visitRegExp(LRegExp* lir) {
1838   Register output = ToRegister(lir->output());
1839   Register temp = ToRegister(lir->temp());
1840   JSObject* source = lir->mir()->source();
1841 
1842   using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
1843   OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
1844       lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
1845   if (lir->mir()->hasShared()) {
1846     TemplateObject templateObject(source);
1847     masm.createGCObject(output, temp, templateObject, gc::DefaultHeap,
1848                         ool->entry());
1849   } else {
1850     masm.jump(ool->entry());
1851   }
1852   masm.bind(ool->rejoin());
1853 }
1854 
1855 static const size_t InputOutputDataSize = sizeof(irregexp::InputOutputData);
1856 
1857 // Amount of space to reserve on the stack when executing RegExps inline.
1858 static const size_t RegExpReservedStack =
1859     InputOutputDataSize + sizeof(MatchPairs) +
1860     RegExpObject::MaxPairCount * sizeof(MatchPair);
1861 
1862 static size_t RegExpPairsVectorStartOffset(size_t inputOutputDataStartOffset) {
1863   return inputOutputDataStartOffset + InputOutputDataSize + sizeof(MatchPairs);
1864 }
1865 
1866 static Address RegExpPairCountAddress(MacroAssembler& masm,
1867                                       size_t inputOutputDataStartOffset) {
1868   return Address(masm.getStackPointer(), inputOutputDataStartOffset +
1869                                              InputOutputDataSize +
1870                                              MatchPairs::offsetOfPairCount());
1871 }
1872 
1873 // When the unicode flag is set, if lastIndex points to a trail
1874 // surrogate, we should step back to the corresponding lead surrogate.
1875 // See ExecuteRegExp in builtin/RegExp.cpp for more detail.
1876 static void StepBackToLeadSurrogate(MacroAssembler& masm, Register regexpShared,
1877                                     Register input, Register lastIndex,
1878                                     Register temp1, Register temp2) {
1879   Label done;
1880 
1881   // If the unicode flag is not set, there is nothing to do.
1882   masm.branchTest32(Assembler::Zero,
1883                     Address(regexpShared, RegExpShared::offsetOfFlags()),
1884                     Imm32(int32_t(JS::RegExpFlag::Unicode)), &done);
1885 
1886   // If the input is latin1, there can't be any surrogates.
1887   masm.branchLatin1String(input, &done);
1888 
1889   // Check if |lastIndex > 0 && lastIndex < input->length()|.
1890   // lastIndex is already known to be non-negative here.
1891   masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done);
1892   masm.loadStringLength(input, temp1);
1893   masm.branch32(Assembler::AboveOrEqual, lastIndex, temp1, &done);
1894 
1895   // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
1896   // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following
1897   // equations hold.
1898   //
1899   //    SurrogateMin ≤ x ≤ SurrogateMax
1900   // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
1901   // <> ((x - SurrogateMin) >>> 10) = 0    where >>> is an unsigned-shift
1902   // See Hacker's Delight, section 4-1 for details.
1903   //
1904   //    ((x - SurrogateMin) >>> 10) = 0
1905   // <> floor((x - SurrogateMin) / 1024) = 0
1906   // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
1907   // <> floor(x / 1024) = SurrogateMin / 1024
1908   // <> floor(x / 1024) * 1024 = SurrogateMin
1909   // <> (x >>> 10) << 10 = SurrogateMin
1910   // <> x & ~(2^10 - 1) = SurrogateMin
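  //
  // For example, x = 0xDC37 gives 0xDC37 & 0xFC00 = 0xDC00 = TrailSurrogateMin,
  // so x is in the trail surrogate range.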
1911 
1912   constexpr char16_t SurrogateMask = 0xFC00;
1913 
1914   Register charsReg = temp1;
1915   masm.loadStringChars(input, charsReg, CharEncoding::TwoByte);
1916 
1917   // Check if input[lastIndex] is trail surrogate.
1918   masm.loadChar(charsReg, lastIndex, temp2, CharEncoding::TwoByte);
1919   masm.and32(Imm32(SurrogateMask), temp2);
1920   masm.branch32(Assembler::NotEqual, temp2, Imm32(unicode::TrailSurrogateMin),
1921                 &done);
1922 
1923   // Check if input[lastIndex-1] is lead surrogate.
1924   masm.loadChar(charsReg, lastIndex, temp2, CharEncoding::TwoByte,
1925                 -int32_t(sizeof(char16_t)));
1926   masm.and32(Imm32(SurrogateMask), temp2);
1927   masm.branch32(Assembler::NotEqual, temp2, Imm32(unicode::LeadSurrogateMin),
1928                 &done);
1929 
1930   // Move lastIndex back to lead surrogate.
1931   masm.sub32(Imm32(1), lastIndex);
1932 
1933   masm.bind(&done);
1934 }
1935 
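// After a successful match, fill in the lazy fields of the RegExpStatics
// (pending/matches input, lazy source, flags and index), emitting the pre- and
// post-barriers required for the stored strings.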
1936 static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
1937                                 Register input, Register lastIndex,
1938                                 Register staticsReg, Register temp1,
1939                                 Register temp2, bool stringsCanBeInNursery,
1940                                 LiveGeneralRegisterSet& volatileRegs) {
1941   Address pendingInputAddress(staticsReg,
1942                               RegExpStatics::offsetOfPendingInput());
1943   Address matchesInputAddress(staticsReg,
1944                               RegExpStatics::offsetOfMatchesInput());
1945   Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
1946   Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());
1947 
1948   masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
1949   masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
1950   masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);
1951 
1952   if (stringsCanBeInNursery) {
1953     // Writing into RegExpStatics tenured memory; must post-barrier.
1954     if (staticsReg.volatile_()) {
1955       volatileRegs.add(staticsReg);
1956     }
1957 
1958     masm.loadPtr(pendingInputAddress, temp1);
1959     masm.storePtr(input, pendingInputAddress);
1960     masm.movePtr(input, temp2);
1961     EmitPostWriteBarrierS(masm, staticsReg,
1962                           RegExpStatics::offsetOfPendingInput(),
1963                           temp1 /* prev */, temp2 /* next */, volatileRegs);
1964 
1965     masm.loadPtr(matchesInputAddress, temp1);
1966     masm.storePtr(input, matchesInputAddress);
1967     masm.movePtr(input, temp2);
1968     EmitPostWriteBarrierS(masm, staticsReg,
1969                           RegExpStatics::offsetOfMatchesInput(),
1970                           temp1 /* prev */, temp2 /* next */, volatileRegs);
1971   } else {
1972     masm.storePtr(input, pendingInputAddress);
1973     masm.storePtr(input, matchesInputAddress);
1974   }
1975 
1976   masm.storePtr(lastIndex,
1977                 Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
1978   masm.store32(
1979       Imm32(1),
1980       Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));
1981 
1982   masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
1983                                           RegExpObject::SHARED_SLOT)),
1984                       temp1, JSVAL_TYPE_PRIVATE_GCTHING);
1985   masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
1986   masm.storePtr(temp2, lazySourceAddress);
1987   masm.load32(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
1988   masm.store32(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
1989 }
1990 
1991 // Prepare an InputOutputData and optional MatchPairs, for which space has
1992 // been allocated on the stack, and try to execute a RegExp on a string input.
1993 // If the RegExp was successfully executed and matched the input, fall through.
1994 // Otherwise, jump to notFound or failure.
1995 static bool PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm,
1996                                     Register regexp, Register input,
1997                                     Register lastIndex, Register temp1,
1998                                     Register temp2, Register temp3,
1999                                     size_t inputOutputDataStartOffset,
2000                                     bool stringsCanBeInNursery, Label* notFound,
2001                                     Label* failure) {
2002   JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");
2003 
2004   using irregexp::InputOutputData;
2005 
2006   /*
2007    * [SMDOC] Stack layout for PrepareAndExecuteRegExp
2008    *
2009    * Before this function is called, the caller is responsible for
2010    * allocating enough stack space for the following data:
2011    *
2012    * inputOutputDataStartOffset +-----> +---------------+
2013    *                                    |InputOutputData|
2014    *          inputStartAddress +---------->  inputStart|
2015    *            inputEndAddress +---------->    inputEnd|
2016    *          startIndexAddress +---------->  startIndex|
2017    *             matchesAddress +---------->     matches|-----+
2018    *                                    +---------------+     |
2019    * matchPairs(Address|Offset) +-----> +---------------+  <--+
2020    *                                    |  MatchPairs   |
2021    *           pairCountAddress +---------->    count   |
2022    *        pairsPointerAddress +---------->    pairs   |-----+
2023    *                                    +---------------+     |
2024    * pairsArray(Address|Offset) +-----> +---------------+  <--+
2025    *                                    |   MatchPair   |
2026    *     firstMatchStartAddress +---------->    start   |  <--+
2027    *                                    |       limit   |     |
2028    *                                    +---------------+     |
2029    *                                           .              |
2030    *                                           .  Reserved space for
2031    *                                           .  RegExpObject::MaxPairCount
2032    *                                           .  MatchPair objects
2033    *                                           .              |
2034    *                                    +---------------+     |
2035    *                                    |   MatchPair   |     |
2036    *                                    |       start   |     |
2037    *                                    |       limit   |  <--+
2038    *                                    +---------------+
2039    */
2040 
2041   size_t ioOffset = inputOutputDataStartOffset;
2042   size_t matchPairsOffset = ioOffset + sizeof(InputOutputData);
2043   size_t pairsArrayOffset = matchPairsOffset + sizeof(MatchPairs);
2044 
2045   Address inputStartAddress(masm.getStackPointer(),
2046                             ioOffset + offsetof(InputOutputData, inputStart));
2047   Address inputEndAddress(masm.getStackPointer(),
2048                           ioOffset + offsetof(InputOutputData, inputEnd));
2049   Address startIndexAddress(masm.getStackPointer(),
2050                             ioOffset + offsetof(InputOutputData, startIndex));
2051   Address matchesAddress(masm.getStackPointer(),
2052                          ioOffset + offsetof(InputOutputData, matches));
2053 
2054   Address matchPairsAddress(masm.getStackPointer(), matchPairsOffset);
2055   Address pairCountAddress(masm.getStackPointer(),
2056                            matchPairsOffset + MatchPairs::offsetOfPairCount());
2057   Address pairsPointerAddress(masm.getStackPointer(),
2058                               matchPairsOffset + MatchPairs::offsetOfPairs());
2059 
2060   Address pairsArrayAddress(masm.getStackPointer(), pairsArrayOffset);
2061   Address firstMatchStartAddress(masm.getStackPointer(),
2062                                  pairsArrayOffset + offsetof(MatchPair, start));
2063 
2064   // First, fill in a skeletal MatchPairs instance on the stack. This will be
2065   // passed to the OOL stub in the caller if we aren't able to execute the
2066   // RegExp inline, and that stub needs to be able to determine whether the
2067   // execution finished successfully.
2068 
2069   // Initialize MatchPairs::pairCount to 1. The correct value can only
2070   // be determined after loading the RegExpShared. If the RegExpShared
2071   // has Kind::Atom, this is the correct pairCount.
2072   masm.store32(Imm32(1), pairCountAddress);
2073 
2074   // Initialize MatchPairs::pairs pointer
2075   masm.computeEffectiveAddress(pairsArrayAddress, temp1);
2076   masm.storePtr(temp1, pairsPointerAddress);
2077 
2078   // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
2079   masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);
2080 
2081   // Check for a linear input string.
2082   masm.branchIfRope(input, failure);
2083 
2084   // Load the RegExpShared.
2085   Register regexpReg = temp1;
2086   Address sharedSlot = Address(
2087       regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
2088   masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
2089   masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);
2090 
2091   // Handle Atom matches
2092   Label notAtom, checkSuccess;
2093   masm.branchPtr(Assembler::Equal,
2094                  Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
2095                  ImmWord(0), &notAtom);
2096   {
2097     LiveGeneralRegisterSet regsToSave(GeneralRegisterSet::Volatile());
2098     regsToSave.takeUnchecked(temp1);
2099     regsToSave.takeUnchecked(temp2);
2100     regsToSave.takeUnchecked(temp3);
2101 
2102     masm.computeEffectiveAddress(matchPairsAddress, temp3);
2103 
2104     masm.PushRegsInMask(regsToSave);
2105     using Fn = RegExpRunStatus (*)(RegExpShared * re, JSLinearString * input,
2106                                    size_t start, MatchPairs * matchPairs);
2107     masm.setupUnalignedABICall(temp2);
2108     masm.passABIArg(regexpReg);
2109     masm.passABIArg(input);
2110     masm.passABIArg(lastIndex);
2111     masm.passABIArg(temp3);
2112     masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();
2113 
2114     masm.storeCallInt32Result(temp1);
2115     masm.PopRegsInMask(regsToSave);
2116 
2117     masm.jump(&checkSuccess);
2118   }
2119   masm.bind(&notAtom);
2120 
2121   // Don't handle regexps with too many capture pairs.
2122   masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
2123   masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
2124                 failure);
2125 
2126   // Fill in the pair count in the MatchPairs on the stack.
2127   masm.store32(temp2, pairCountAddress);
2128 
2129   // Update lastIndex if necessary.
2130   StepBackToLeadSurrogate(masm, regexpReg, input, lastIndex, temp2, temp3);
2131 
2132   // Load code pointer and length of input (in bytes).
2133   // Store the input start in the InputOutputData.
2134   Register codePointer = temp1;  // Note: temp1 was previously regexpReg.
2135   Register byteLength = temp3;
2136   {
2137     Label isLatin1, done;
2138     masm.loadStringLength(input, byteLength);
2139 
2140     masm.branchLatin1String(input, &isLatin1);
2141 
2142     // Two-byte input
2143     masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
2144     masm.storePtr(temp2, inputStartAddress);
2145     masm.loadPtr(
2146         Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
2147         codePointer);
2148     masm.lshiftPtr(Imm32(1), byteLength);
2149     masm.jump(&done);
2150 
2151     // Latin1 input
2152     masm.bind(&isLatin1);
2153     masm.loadStringChars(input, temp2, CharEncoding::Latin1);
2154     masm.storePtr(temp2, inputStartAddress);
2155     masm.loadPtr(
2156         Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
2157         codePointer);
2158 
2159     masm.bind(&done);
2160 
2161     // Store end pointer
2162     masm.addPtr(byteLength, temp2);
2163     masm.storePtr(temp2, inputEndAddress);
2164   }
2165 
2166   // Guard that the RegExpShared has been compiled for this type of input.
2167   // If it has not been compiled, we fall back to the OOL case, which will
2168   // do a VM call into the interpreter.
2169   // TODO: add an interpreter trampoline?
2170   masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
2171   masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
2172 
2173   // Finish filling in the InputOutputData instance on the stack
2174   masm.computeEffectiveAddress(matchPairsAddress, temp2);
2175   masm.storePtr(temp2, matchesAddress);
2176   masm.storePtr(lastIndex, startIndexAddress);
2177 
2178   // Save any volatile inputs.
2179   LiveGeneralRegisterSet volatileRegs;
2180   if (lastIndex.volatile_()) {
2181     volatileRegs.add(lastIndex);
2182   }
2183   if (input.volatile_()) {
2184     volatileRegs.add(input);
2185   }
2186   if (regexp.volatile_()) {
2187     volatileRegs.add(regexp);
2188   }
2189 
2190 #ifdef JS_TRACE_LOGGING
2191   if (TraceLogTextIdEnabled(TraceLogger_IrregexpExecute)) {
2192     masm.loadTraceLogger(temp2);
2193     masm.tracelogStartId(temp2, TraceLogger_IrregexpExecute);
2194   }
2195 #endif
2196 
2197   // Execute the RegExp.
2198   masm.computeEffectiveAddress(
2199       Address(masm.getStackPointer(), inputOutputDataStartOffset), temp2);
2200   masm.PushRegsInMask(volatileRegs);
2201   masm.setupUnalignedABICall(temp3);
2202   masm.passABIArg(temp2);
2203   masm.callWithABI(codePointer);
2204   masm.storeCallInt32Result(temp1);
2205   masm.PopRegsInMask(volatileRegs);
2206 
2207 #ifdef JS_TRACE_LOGGING
2208   if (TraceLogTextIdEnabled(TraceLogger_IrregexpExecute)) {
2209     masm.loadTraceLogger(temp2);
2210     masm.tracelogStopId(temp2, TraceLogger_IrregexpExecute);
2211   }
2212 #endif
2213 
2214   Label success;
2215   masm.bind(&checkSuccess);
2216   masm.branch32(Assembler::Equal, temp1,
2217                 Imm32(RegExpRunStatus_Success_NotFound), notFound);
2218   masm.branch32(Assembler::Equal, temp1, Imm32(RegExpRunStatus_Error), failure);
2219 
2220   // Lazily update the RegExpStatics.
2221   RegExpStatics* res = GlobalObject::getRegExpStatics(cx, cx->global());
2222   if (!res) {
2223     return false;
2224   }
2225   masm.movePtr(ImmPtr(res), temp1);
2226   UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
2227                       stringsCanBeInNursery, volatileRegs);
2228 
2229   return true;
2230 }
2231 
2232 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
2233                             Register len, Register byteOpScratch,
2234                             CharEncoding encoding);
2235 
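// Helper for materializing a single match capture as a string: it reuses the
// empty string or the base string when possible, and otherwise allocates a
// (fat) inline string or a dependent string sharing the base's characters.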
2236 class CreateDependentString {
2237   CharEncoding encoding_;
2238   Register string_;
2239   Register temp1_;
2240   Register temp2_;
2241   Label* failure_;
2242 
2243   enum class FallbackKind : uint8_t {
2244     InlineString,
2245     FatInlineString,
2246     NotInlineString,
2247     Count
2248   };
2249   mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
2250       joins_;
2251 
2252  public:
2253   CreateDependentString(CharEncoding encoding, Register string, Register temp1,
2254                         Register temp2, Label* failure)
2255       : encoding_(encoding),
2256         string_(string),
2257         temp1_(temp1),
2258         temp2_(temp2),
2259         failure_(failure) {}
2260 
2261   Register string() const { return string_; }
2262   CharEncoding encoding() const { return encoding_; }
2263 
2264   // Generate code that creates the DependentString.
2265   // The caller should call generateFallback after masm.ret() to generate the
2266   // fallback path.
2267   void generate(MacroAssembler& masm, const JSAtomState& names,
2268                 CompileRuntime* runtime, Register base,
2269                 BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
2270                 bool stringsCanBeInNursery);
2271 
2272   // Generate fallback path for creating DependentString.
2273   void generateFallback(MacroAssembler& masm);
2274 };
2275 
2276 void CreateDependentString::generate(MacroAssembler& masm,
2277                                      const JSAtomState& names,
2278                                      CompileRuntime* runtime, Register base,
2279                                      BaseIndex startIndexAddress,
2280                                      BaseIndex limitIndexAddress,
2281                                      bool stringsCanBeInNursery) {
2282   JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
2283           (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
2284 
2285   auto newGCString = [&](FallbackKind kind) {
2286     uint32_t flags = kind == FallbackKind::InlineString
2287                          ? JSString::INIT_THIN_INLINE_FLAGS
2288                      : kind == FallbackKind::FatInlineString
2289                          ? JSString::INIT_FAT_INLINE_FLAGS
2290                          : JSString::INIT_DEPENDENT_FLAGS;
2291     if (encoding_ == CharEncoding::Latin1) {
2292       flags |= JSString::LATIN1_CHARS_BIT;
2293     }
2294 
2295     if (kind != FallbackKind::FatInlineString) {
2296       masm.newGCString(string_, temp2_, &fallbacks_[kind],
2297                        stringsCanBeInNursery);
2298     } else {
2299       masm.newGCFatInlineString(string_, temp2_, &fallbacks_[kind],
2300                                 stringsCanBeInNursery);
2301     }
2302     masm.bind(&joins_[kind]);
2303     masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
2304   };
2305 
2306   // Compute the string length.
2307   masm.load32(startIndexAddress, temp2_);
2308   masm.load32(limitIndexAddress, temp1_);
2309   masm.sub32(temp2_, temp1_);
2310 
2311   Label done, nonEmpty;
2312 
2313   // Zero length matches use the empty string.
2314   masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
2315   masm.movePtr(ImmGCPtr(names.empty), string_);
2316   masm.jump(&done);
2317 
2318   masm.bind(&nonEmpty);
2319 
2320   // Complete matches use the base string.
2321   Label nonBaseStringMatch;
2322   masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
2323   masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
2324                 temp1_, &nonBaseStringMatch);
2325   masm.movePtr(base, string_);
2326   masm.jump(&done);
2327 
2328   masm.bind(&nonBaseStringMatch);
2329 
2330   Label notInline;
2331 
2332   int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
2333                                 ? JSFatInlineString::MAX_LENGTH_LATIN1
2334                                 : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
2335   masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
2336   {
2337     // Make a thin or fat inline string.
2338     Label stringAllocated, fatInline;
2339 
2340     int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
2341                                       ? JSThinInlineString::MAX_LENGTH_LATIN1
2342                                       : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
2343     masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
2344                   &fatInline);
2345     if (encoding_ == CharEncoding::Latin1) {
2346       // One character Latin-1 strings can be loaded directly from the
2347       // static strings table.
2348       Label thinInline;
2349       masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
2350       {
2351         static_assert(
2352             StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
2353             "Latin-1 strings can be loaded from static strings");
2354 
2355         masm.loadStringChars(base, temp1_, encoding_);
2356         masm.loadChar(temp1_, temp2_, temp1_, encoding_);
2357 
2358         masm.movePtr(ImmPtr(&runtime->staticStrings().unitStaticTable),
2359                      string_);
2360         masm.loadPtr(BaseIndex(string_, temp1_, ScalePointer), string_);
2361 
2362         masm.jump(&done);
2363       }
2364       masm.bind(&thinInline);
2365     }
2366     {
2367       newGCString(FallbackKind::InlineString);
2368       masm.jump(&stringAllocated);
2369     }
2370     masm.bind(&fatInline);
2371     { newGCString(FallbackKind::FatInlineString); }
2372     masm.bind(&stringAllocated);
2373 
2374     masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
2375 
2376     masm.push(string_);
2377     masm.push(base);
2378 
2379     // Adjust the start index address for the above pushes.
2380     MOZ_ASSERT(startIndexAddress.base == masm.getStackPointer());
2381     BaseIndex newStartIndexAddress = startIndexAddress;
2382     newStartIndexAddress.offset += 2 * sizeof(void*);
2383 
2384     // Load chars pointer for the new string.
2385     masm.loadInlineStringCharsForStore(string_, string_);
2386 
2387     // Load the source characters pointer.
2388     masm.loadStringChars(base, temp2_, encoding_);
2389     masm.load32(newStartIndexAddress, base);
2390     masm.addToCharPtr(temp2_, base, encoding_);
2391 
2392     CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);
2393 
2394     masm.pop(base);
2395     masm.pop(string_);
2396 
2397     masm.jump(&done);
2398   }
2399 
2400   masm.bind(&notInline);
2401 
2402   {
2403     // Make a dependent string.
2404     // Warning: string may be tenured (if the fallback case is hit), so
2405     // stores into it must be post barriered.
2406     newGCString(FallbackKind::NotInlineString);
2407 
2408     masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
2409 
2410     masm.loadNonInlineStringChars(base, temp1_, encoding_);
2411     masm.load32(startIndexAddress, temp2_);
2412     masm.addToCharPtr(temp1_, temp2_, encoding_);
2413     masm.storeNonInlineStringChars(temp1_, string_);
2414     masm.storeDependentStringBase(base, string_);
2415     masm.movePtr(base, temp1_);
2416 
2417     // Follow any base pointer if the input is itself a dependent string.
2418     // Watch for undepended strings, which have a base pointer but don't
2419     // actually share their characters with it.
2420     Label noBase;
2421     masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
2422     masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
2423     masm.branchTest32(Assembler::Zero, temp2_, Imm32(JSString::DEPENDENT_BIT),
2424                       &noBase);
2425     masm.loadDependentStringBase(base, temp1_);
2426     masm.storeDependentStringBase(temp1_, string_);
2427     masm.bind(&noBase);
2428 
2429     // Post-barrier the base store, whether it was the direct or indirect
2430     // base (both will end up in temp1 here).
2431     masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
2432     masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);
2433 
2434     LiveRegisterSet regsToSave(RegisterSet::Volatile());
2435     regsToSave.takeUnchecked(temp1_);
2436     regsToSave.takeUnchecked(temp2_);
2437 
2438     masm.PushRegsInMask(regsToSave);
2439 
2440     masm.mov(ImmPtr(runtime), temp1_);
2441 
2442     using Fn = void (*)(JSRuntime * rt, js::gc::Cell * cell);
2443     masm.setupUnalignedABICall(temp2_);
2444     masm.passABIArg(temp1_);
2445     masm.passABIArg(string_);
2446     masm.callWithABI<Fn, PostWriteBarrier>();
2447 
2448     masm.PopRegsInMask(regsToSave);
2449   }
2450 
2451   masm.bind(&done);
2452 }
2453 
2454 void CreateDependentString::generateFallback(MacroAssembler& masm) {
2455   JitSpew(JitSpew_Codegen,
2456           "# Emitting CreateDependentString fallback (encoding=%s)",
2457           (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
2458 
2459   LiveRegisterSet regsToSave(RegisterSet::Volatile());
2460   regsToSave.takeUnchecked(string_);
2461   regsToSave.takeUnchecked(temp2_);
2462 
2463   for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
2464     masm.bind(&fallbacks_[kind]);
2465 
2466     masm.PushRegsInMask(regsToSave);
2467 
2468     using Fn = void* (*)(JSContext * cx);
2469     masm.setupUnalignedABICall(string_);
2470     masm.loadJSContext(string_);
2471     masm.passABIArg(string_);
2472     if (kind == FallbackKind::FatInlineString) {
2473       masm.callWithABI<Fn, AllocateFatInlineString>();
2474     } else {
2475       masm.callWithABI<Fn, AllocateString>();
2476     }
2477     masm.storeCallPointerResult(string_);
2478 
2479     masm.PopRegsInMask(regsToSave);
2480 
2481     masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);
2482 
2483     masm.jump(&joins_[kind]);
2484   }
2485 }
2486 
2487 static void CreateMatchResultFallback(MacroAssembler& masm, Register object,
2488                                       Register temp1, Register temp2,
2489                                       const TemplateObject& templateObject,
2490                                       Label* fail) {
2491   JitSpew(JitSpew_Codegen, "# Emitting CreateMatchResult fallback");
2492 
2493   MOZ_ASSERT(templateObject.isArrayObject());
2494 
2495   LiveRegisterSet regsToSave(RegisterSet::Volatile());
2496   regsToSave.takeUnchecked(object);
2497   regsToSave.takeUnchecked(temp1);
2498   regsToSave.takeUnchecked(temp2);
2499 
2500   masm.PushRegsInMask(regsToSave);
2501 
2502   using Fn =
2503       void* (*)(JSContext * cx, gc::AllocKind kind, size_t nDynamicSlots);
2504   masm.setupUnalignedABICall(object);
2505 
2506   masm.loadJSContext(object);
2507   masm.passABIArg(object);
2508   masm.move32(Imm32(int32_t(templateObject.getAllocKind())), temp1);
2509   masm.passABIArg(temp1);
2510   masm.move32(
2511       Imm32(int32_t(templateObject.asTemplateNativeObject().numDynamicSlots())),
2512       temp2);
2513   masm.passABIArg(temp2);
2514   masm.callWithABI<Fn, CreateMatchResultFallbackFunc>();
2515   masm.storeCallPointerResult(object);
2516 
2517   masm.PopRegsInMask(regsToSave);
2518 
2519   masm.branchPtr(Assembler::Equal, object, ImmWord(0), fail);
2520 
2521   masm.initGCThing(object, temp1, templateObject, true);
2522 }
2523 
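// Generate the stub shared by LRegExpMatcher instructions: it runs the RegExp
// via PrepareAndExecuteRegExp and builds the match result object inline,
// returning null when there is no match and jumping to the out-of-line entry
// for the cases the stub cannot handle inline.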
2524 JitCode* JitRealm::generateRegExpMatcherStub(JSContext* cx) {
2525   JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
2526 
2527   Register regexp = RegExpMatcherRegExpReg;
2528   Register input = RegExpMatcherStringReg;
2529   Register lastIndex = RegExpMatcherLastIndexReg;
2530   ValueOperand result = JSReturnOperand;
2531 
2532   // We are free to clobber all registers, as LRegExpMatcher is a call
2533   // instruction.
2534   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2535   regs.take(input);
2536   regs.take(regexp);
2537   regs.take(lastIndex);
2538 
2539   Register temp1 = regs.takeAny();
2540   Register temp2 = regs.takeAny();
2541   Register temp3 = regs.takeAny();
2542   Register temp4 = regs.takeAny();
2543   Register maybeTemp5 = InvalidReg;
2544   if (!regs.empty()) {
2545     // There are not enough registers on x86.
2546     maybeTemp5 = regs.takeAny();
2547   }
2548 
2549   ArrayObject* templateObject =
2550       cx->realm()->regExps.getOrCreateMatchResultTemplateObject(cx);
2551   if (!templateObject) {
2552     return nullptr;
2553   }
2554   TemplateObject templateObj(templateObject);
2555   const TemplateNativeObject& nativeTemplateObj =
2556       templateObj.asTemplateNativeObject();
2557 
2558   // The template object should have enough space for the maximum number of
2559   // pairs this stub can handle.
2560   MOZ_ASSERT(ObjectElements::VALUES_PER_HEADER + RegExpObject::MaxPairCount ==
2561              gc::GetGCKindSlots(templateObj.getAllocKind()));
2562 
2563   StackMacroAssembler masm(cx);
2564 
2565 #ifdef JS_USE_LINK_REGISTER
2566   masm.pushReturnAddress();
2567 #endif
2568 
2569   // The InputOutputData is placed above the return address on the stack.
2570   size_t inputOutputDataStartOffset = sizeof(void*);
2571 
2572   Label notFound, oolEntry;
2573   if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
2574                                temp3, inputOutputDataStartOffset,
2575                                stringsCanBeInNursery, &notFound, &oolEntry)) {
2576     return nullptr;
2577   }
2578 
2579   // If a regexp has named captures, fall back to the OOL stub, which
2580   // will end up calling CreateRegExpMatchResults.
2581   Register shared = temp2;
2582   masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
2583                                           RegExpObject::SHARED_SLOT)),
2584                       shared, JSVAL_TYPE_PRIVATE_GCTHING);
2585   masm.branchPtr(Assembler::NotEqual,
2586                  Address(shared, RegExpShared::offsetOfGroupsTemplate()),
2587                  ImmWord(0), &oolEntry);
2588 
2589   // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
2590   masm.branchTest32(Assembler::NonZero,
2591                     Address(shared, RegExpShared::offsetOfFlags()),
2592                     Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);
2593 
2594   // Construct the result.
2595   Register object = temp1;
2596   Label matchResultFallback, matchResultJoin;
2597   masm.createGCObject(object, temp2, templateObj, gc::DefaultHeap,
2598                       &matchResultFallback);
2599   masm.bind(&matchResultJoin);
2600 
2601   MOZ_ASSERT(nativeTemplateObj.numFixedSlots() == 0);
2602   // Dynamic slot count is always one less than a power of 2.
2603   MOZ_ASSERT(nativeTemplateObj.numDynamicSlots() == 3);
2604   static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
2605                 "First slot holds the 'index' property");
2606   static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
2607                 "Second slot holds the 'input' property");
2608   static_assert(RegExpRealm::MatchResultObjectGroupsSlot == 2,
2609                 "Third slot holds the 'groups' property");
2610 
2611   // Initialize the slots of the result object with the dummy values
2612   // defined in createMatchResultTemplateObject.
2613   masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
2614   masm.storeValue(
2615       nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectIndexSlot),
2616       Address(temp2, RegExpRealm::offsetOfMatchResultObjectIndexSlot()));
2617   masm.storeValue(
2618       nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectInputSlot),
2619       Address(temp2, RegExpRealm::offsetOfMatchResultObjectInputSlot()));
2620   masm.storeValue(
2621       nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectGroupsSlot),
2622       Address(temp2, RegExpRealm::offsetOfMatchResultObjectGroupsSlot()));
2623 
2624   // clang-format off
2625    /*
2626     * [SMDOC] Stack layout for the RegExpMatcher stub
2627     *
2628     *                                    +---------------+
2629     *                                    |Return-Address |
2630     *                                    +---------------+
2631     * inputOutputDataStartOffset +-----> +---------------+
2632     *                                    |InputOutputData|
2633     *                                    +---------------+
2634     *                                    +---------------+
2635     *                                    |  MatchPairs   |
2636     *           pairsCountAddress +----------->  count   |
2637     *                                    |       pairs   |
2638     *                                    |               |
2639     *                                    +---------------+
2640     *     pairsVectorStartOffset +-----> +---------------+
2641     *                                    |   MatchPair   |
2642     *             matchPairStart +------------>  start   |  <-------+
2643     *             matchPairLimit +------------>  limit   |          | Reserved space for
2644     *                                    +---------------+          | `RegExpObject::MaxPairCount`
2645     *                                           .                   | MatchPair objects.
2646     *                                           .                   |
2647     *                                           .                   | `count` objects will be
2648     *                                    +---------------+          | initialized and can be
2649     *                                    |   MatchPair   |          | accessed below.
2650     *                                    |       start   |  <-------+
2651     *                                    |       limit   |
2652     *                                    +---------------+
2653     */
2654   // clang-format on
2655 
2656   static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
2657                 "MatchPair consists of two int32 values representing the start "
2658                 "and the end offset of the match");
2659 
2660   Address pairCountAddress =
2661       RegExpPairCountAddress(masm, inputOutputDataStartOffset);
2662 
2663   size_t pairsVectorStartOffset =
2664       RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
2665   Address firstMatchPairStartAddress(
2666       masm.getStackPointer(),
2667       pairsVectorStartOffset + offsetof(MatchPair, start));
2668 
2669   // Incremented by one below for each match pair.
2670   Register matchIndex = temp2;
2671   masm.move32(Imm32(0), matchIndex);
2672 
2673   // The element in which to store the result of the current match.
2674   size_t elementsOffset = NativeObject::offsetOfFixedElements();
2675   BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);
2676 
2677   // The current match pair's "start" and "limit" member.
2678   BaseIndex matchPairStart(masm.getStackPointer(), matchIndex, TimesEight,
2679                            pairsVectorStartOffset + offsetof(MatchPair, start));
2680   BaseIndex matchPairLimit(masm.getStackPointer(), matchIndex, TimesEight,
2681                            pairsVectorStartOffset + offsetof(MatchPair, limit));
2682 
2683   Register temp5;
2684   if (maybeTemp5 == InvalidReg) {
2685     // We don't have enough registers for a fifth temporary. Reuse
2686     // |lastIndex| as a temporary. We don't need to restore its value,
2687     // because |lastIndex| is no longer used after a successful match.
2688     // (Neither here nor in the OOL path, cf. js::RegExpMatcherRaw.)
2689     temp5 = lastIndex;
2690   } else {
2691     temp5 = maybeTemp5;
2692   }
2693 
2694   // Loop to construct the match strings. There are two different loops,
2695   // depending on whether the input is a Two-Byte or a Latin-1 string.
2696   CreateDependentString depStrs[]{
2697       {CharEncoding::TwoByte, temp3, temp4, temp5, &oolEntry},
2698       {CharEncoding::Latin1, temp3, temp4, temp5, &oolEntry},
2699   };
2700 
2701   {
2702     Label isLatin1, done;
2703     masm.branchLatin1String(input, &isLatin1);
2704 
2705     for (auto& depStr : depStrs) {
2706       if (depStr.encoding() == CharEncoding::Latin1) {
2707         masm.bind(&isLatin1);
2708       }
2709 
2710       Label matchLoop;
2711       masm.bind(&matchLoop);
2712 
2713       static_assert(MatchPair::NoMatch == -1,
2714                     "MatchPair::start is negative if no match was found");
2715 
2716       Label isUndefined, storeDone;
2717       masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
2718                     &isUndefined);
2719       {
2720         depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
2721                         input, matchPairStart, matchPairLimit,
2722                         stringsCanBeInNursery);
2723 
2724         // Storing into nursery-allocated results object's elements; no post
2725         // barrier.
2726         masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
2727         masm.jump(&storeDone);
2728       }
2729       masm.bind(&isUndefined);
2730       { masm.storeValue(UndefinedValue(), objectMatchElement); }
2731       masm.bind(&storeDone);
2732 
2733       masm.add32(Imm32(1), matchIndex);
2734       masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
2735                     &done);
2736       masm.jump(&matchLoop);
2737     }
2738 
2739 #ifdef DEBUG
2740     masm.assumeUnreachable("The match string loop doesn't fall through.");
2741 #endif
2742 
2743     masm.bind(&done);
2744   }
2745 
2746   // Fill in the rest of the output object.
2747   masm.store32(
2748       matchIndex,
2749       Address(object,
2750               elementsOffset + ObjectElements::offsetOfInitializedLength()));
2751   masm.store32(
2752       matchIndex,
2753       Address(object, elementsOffset + ObjectElements::offsetOfLength()));
2754 
2755   masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
2756 
2757   masm.load32(firstMatchPairStartAddress, temp3);
2758   masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
2759 
2760   // No post barrier needed (address is within nursery object).
2761   masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
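  // Interpretation of the two dynamic-slot stores above (an assumption based
  // on the usual RegExp match-result shape, not asserted by this code):
  // slot 0 receives the int32 start index of the overall match and slot 1
  // receives the input string.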
2762 
2763   // All done!
2764   masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
2765   masm.ret();
2766 
2767   masm.bind(&notFound);
2768   masm.moveValue(NullValue(), result);
2769   masm.ret();
2770 
2771   // Fallback paths for CreateDependentString.
2772   for (auto& depStr : depStrs) {
2773     depStr.generateFallback(masm);
2774   }
2775 
2776   // Fallback path for createGCObject.
2777   masm.bind(&matchResultFallback);
2778   CreateMatchResultFallback(masm, object, temp2, temp3, templateObj, &oolEntry);
2779   masm.jump(&matchResultJoin);
2780 
2781   // Use an undefined value to signal to the caller that the OOL stub needs to
2782   // be called.
2783   masm.bind(&oolEntry);
2784   masm.moveValue(UndefinedValue(), result);
2785   masm.ret();
2786 
2787   Linker linker(masm);
2788   JitCode* code = linker.newCode(cx, CodeKind::Other);
2789   if (!code) {
2790     return nullptr;
2791   }
2792 
2793 #ifdef JS_ION_PERF
2794   writePerfSpewerJitCodeProfile(code, "RegExpMatcherStub");
2795 #endif
2796 #ifdef MOZ_VTUNE
2797   vtune::MarkStub(code, "RegExpMatcherStub");
2798 #endif
2799 
2800   return code;
2801 }
2802 
2803 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
2804   LRegExpMatcher* lir_;
2805 
2806  public:
2807   explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}
2808 
2809   void accept(CodeGenerator* codegen) override {
2810     codegen->visitOutOfLineRegExpMatcher(this);
2811   }
2812 
2813   LRegExpMatcher* lir() const { return lir_; }
2814 };
2815 
2816 void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
2817   LRegExpMatcher* lir = ool->lir();
2818   Register lastIndex = ToRegister(lir->lastIndex());
2819   Register input = ToRegister(lir->string());
2820   Register regexp = ToRegister(lir->regexp());
2821 
2822   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2823   regs.take(lastIndex);
2824   regs.take(input);
2825   regs.take(regexp);
2826   Register temp = regs.takeAny();
2827 
2828   masm.computeEffectiveAddress(
2829       Address(masm.getStackPointer(), InputOutputDataSize), temp);
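  // Per the stack layout documented in the stub, the MatchPairs structure
  // sits immediately after the InputOutputData, so |temp| now holds the
  // MatchPairs* that is passed as the |pairs| argument below.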
2830 
2831   pushArg(temp);
2832   pushArg(lastIndex);
2833   pushArg(input);
2834   pushArg(regexp);
2835 
2836   // We are not using oolCallVM because we are in a Call, and the live
2837   // registers are already saved by the register allocator.
2838   using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
2839                       int32_t lastIndex, MatchPairs* pairs,
2840                       MutableHandleValue output);
2841   callVM<Fn, RegExpMatcherRaw>(lir);
2842 
2843   masm.jump(ool->rejoin());
2844 }
2845 
2846 void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
2847   MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
2848   MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
2849   MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
2850   MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
2851 
2852 #if defined(JS_NUNBOX32)
2853   static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
2854   static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
2855   static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
2856   static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
2857   static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
2858   static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
2859 #elif defined(JS_PUNBOX64)
2860   static_assert(RegExpMatcherRegExpReg != JSReturnReg);
2861   static_assert(RegExpMatcherStringReg != JSReturnReg);
2862   static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
2863 #endif
2864 
2865   masm.reserveStack(RegExpReservedStack);
2866 
2867   OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
2868   addOutOfLineCode(ool, lir->mir());
2869 
2870   const JitRealm* jitRealm = gen->realm->jitRealm();
2871   JitCode* regExpMatcherStub =
2872       jitRealm->regExpMatcherStubNoBarrier(&realmStubsToReadBarrier_);
2873   masm.call(regExpMatcherStub);
2874   masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
2875   masm.bind(ool->rejoin());
2876 
2877   masm.freeStack(RegExpReservedStack);
2878 }
2879 
2880 static const int32_t RegExpSearcherResultNotFound = -1;
2881 static const int32_t RegExpSearcherResultFailed = -2;
2882 
2883 JitCode* JitRealm::generateRegExpSearcherStub(JSContext* cx) {
2884   JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");
2885 
2886   Register regexp = RegExpTesterRegExpReg;
2887   Register input = RegExpTesterStringReg;
2888   Register lastIndex = RegExpTesterLastIndexReg;
2889   Register result = ReturnReg;
2890 
2891   // We are free to clobber all registers, as LRegExpSearcher is a call
2892   // instruction.
2893   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
2894   regs.take(input);
2895   regs.take(regexp);
2896   regs.take(lastIndex);
2897 
2898   Register temp1 = regs.takeAny();
2899   Register temp2 = regs.takeAny();
2900   Register temp3 = regs.takeAny();
2901 
2902   StackMacroAssembler masm(cx);
2903 
2904 #ifdef JS_USE_LINK_REGISTER
2905   masm.pushReturnAddress();
2906 #endif
2907 
2908   // The InputOutputData is placed above the return address on the stack.
2909   size_t inputOutputDataStartOffset = sizeof(void*);
2910 
2911   Label notFound, oolEntry;
2912   if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
2913                                temp3, inputOutputDataStartOffset,
2914                                stringsCanBeInNursery, &notFound, &oolEntry)) {
2915     return nullptr;
2916   }
2917 
2918   // clang-format off
2919     /*
2920      * [SMDOC] Stack layout for the RegExpSearcher stub
2921      *
2922      *                                    +---------------+
2923      *                                    |Return-Address |
2924      *                                    +---------------+
2925      * inputOutputDataStartOffset +-----> +---------------+
2926      *                                    |InputOutputData|
2927      *                                    +---------------+
2928      *                                    +---------------+
2929      *                                    |  MatchPairs   |
2930      *                                    |       count   |
2931      *                                    |       pairs   |
2932      *                                    |               |
2933      *                                    +---------------+
2934      *     pairsVectorStartOffset +-----> +---------------+
2935      *                                    |   MatchPair   |
2936      *             matchPairStart +------------>  start   |  <-------+
2937      *             matchPairLimit +------------>  limit   |          | Reserved space for
2938      *                                    +---------------+          | `RegExpObject::MaxPairCount`
2939      *                                           .                   | MatchPair objects.
2940      *                                           .                   |
2941      *                                           .                   | Only a single object will
2942      *                                    +---------------+          | be initialized and can be
2943      *                                    |   MatchPair   |          | accessed below.
2944      *                                    |       start   |  <-------+
2945      *                                    |       limit   |
2946      *                                    +---------------+
2947      */
2948   // clang-format on
2949 
2950   size_t pairsVectorStartOffset =
2951       RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
2952   Address matchPairStart(masm.getStackPointer(),
2953                          pairsVectorStartOffset + offsetof(MatchPair, start));
2954   Address matchPairLimit(masm.getStackPointer(),
2955                          pairsVectorStartOffset + offsetof(MatchPair, limit));
2956 
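  // Packing sketch (illustrative; the decoding side lives outside this file
  // and is an assumption): the searcher folds both MatchPair fields into the
  // single int32 result as
  //
  //   result = matchPairStart | (matchPairLimit << 15);
  //
  // e.g. start = 3 and limit = 7 yield 3 | (7 << 15) == 229379, and the
  // caller is expected to split the two fields apart again.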
2957   masm.load32(matchPairStart, result);
2958   masm.load32(matchPairLimit, input);
2959   masm.lshiftPtr(Imm32(15), input);
2960   masm.or32(input, result);
2961   masm.ret();
2962 
2963   masm.bind(&notFound);
2964   masm.move32(Imm32(RegExpSearcherResultNotFound), result);
2965   masm.ret();
2966 
2967   masm.bind(&oolEntry);
2968   masm.move32(Imm32(RegExpSearcherResultFailed), result);
2969   masm.ret();
2970 
2971   Linker linker(masm);
2972   JitCode* code = linker.newCode(cx, CodeKind::Other);
2973   if (!code) {
2974     return nullptr;
2975   }
2976 
2977 #ifdef JS_ION_PERF
2978   writePerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
2979 #endif
2980 #ifdef MOZ_VTUNE
2981   vtune::MarkStub(code, "RegExpSearcherStub");
2982 #endif
2983 
2984   return code;
2985 }
2986 
2987 class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
2988   LRegExpSearcher* lir_;
2989 
2990  public:
2991   explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}
2992 
2993   void accept(CodeGenerator* codegen) override {
2994     codegen->visitOutOfLineRegExpSearcher(this);
2995   }
2996 
2997   LRegExpSearcher* lir() const { return lir_; }
2998 };
2999 
3000 void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
3001   LRegExpSearcher* lir = ool->lir();
3002   Register lastIndex = ToRegister(lir->lastIndex());
3003   Register input = ToRegister(lir->string());
3004   Register regexp = ToRegister(lir->regexp());
3005 
3006   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
3007   regs.take(lastIndex);
3008   regs.take(input);
3009   regs.take(regexp);
3010   Register temp = regs.takeAny();
3011 
3012   masm.computeEffectiveAddress(
3013       Address(masm.getStackPointer(), InputOutputDataSize), temp);
3014 
3015   pushArg(temp);
3016   pushArg(lastIndex);
3017   pushArg(input);
3018   pushArg(regexp);
3019 
3020   // We are not using oolCallVM because we are in a Call, and the live
3021   // registers are already saved by the register allocator.
3022   using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
3023                       int32_t lastIndex, MatchPairs* pairs, int32_t* result);
3024   callVM<Fn, RegExpSearcherRaw>(lir);
3025 
3026   masm.jump(ool->rejoin());
3027 }
3028 
3029 void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
3030   MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg);
3031   MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg);
3032   MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg);
3033   MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
3034 
3035   static_assert(RegExpTesterRegExpReg != ReturnReg);
3036   static_assert(RegExpTesterStringReg != ReturnReg);
3037   static_assert(RegExpTesterLastIndexReg != ReturnReg);
3038 
3039   masm.reserveStack(RegExpReservedStack);
3040 
3041   OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
3042   addOutOfLineCode(ool, lir->mir());
3043 
3044   const JitRealm* jitRealm = gen->realm->jitRealm();
3045   JitCode* regExpSearcherStub =
3046       jitRealm->regExpSearcherStubNoBarrier(&realmStubsToReadBarrier_);
3047   masm.call(regExpSearcherStub);
3048   masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
3049                 ool->entry());
3050   masm.bind(ool->rejoin());
3051 
3052   masm.freeStack(RegExpReservedStack);
3053 }
3054 
3055 static const int32_t RegExpTesterResultNotFound = -1;
3056 static const int32_t RegExpTesterResultFailed = -2;
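// Summary of the tester stub's int32 result (derived from the code below and
// from visitRegExpTester): a value >= 0 is the end index of the match, used
// to update lastIndex; RegExpTesterResultNotFound reports no match; and
// RegExpTesterResultFailed makes visitRegExpTester branch to its OOL path,
// which calls RegExpTesterRaw instead.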
3057 
3058 JitCode* JitRealm::generateRegExpTesterStub(JSContext* cx) {
3059   JitSpew(JitSpew_Codegen, "# Emitting RegExpTester stub");
3060 
3061   Register regexp = RegExpTesterRegExpReg;
3062   Register input = RegExpTesterStringReg;
3063   Register lastIndex = RegExpTesterLastIndexReg;
3064   Register result = ReturnReg;
3065 
3066   StackMacroAssembler masm(cx);
3067 
3068 #ifdef JS_USE_LINK_REGISTER
3069   masm.pushReturnAddress();
3070 #endif
3071 
3072   // We are free to clobber all registers, as LRegExpTester is a call
3073   // instruction.
3074   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
3075   regs.take(input);
3076   regs.take(regexp);
3077   regs.take(lastIndex);
3078 
3079   Register temp1 = regs.takeAny();
3080   Register temp2 = regs.takeAny();
3081   Register temp3 = regs.takeAny();
3082 
3083   masm.reserveStack(RegExpReservedStack);
3084 
3085   Label notFound, oolEntry;
3086   if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
3087                                temp3, 0, stringsCanBeInNursery, &notFound,
3088                                &oolEntry)) {
3089     return nullptr;
3090   }
3091 
3092   Label done;
3093 
3094   // In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
3095   // before calling the stub. For RegExpTester we call the stub before reserving
3096   // stack space, so the offset of the InputOutputData is 0.
3097   size_t inputOutputDataStartOffset = 0;
3098 
3099   size_t pairsVectorStartOffset =
3100       RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
3101   Address matchPairLimit(masm.getStackPointer(),
3102                          pairsVectorStartOffset + offsetof(MatchPair, limit));
3103 
3104   // RegExpTester returns the end index of the match to update lastIndex.
3105   masm.load32(matchPairLimit, result);
3106   masm.jump(&done);
3107 
3108   masm.bind(&notFound);
3109   masm.move32(Imm32(RegExpTesterResultNotFound), result);
3110   masm.jump(&done);
3111 
3112   masm.bind(&oolEntry);
3113   masm.move32(Imm32(RegExpTesterResultFailed), result);
3114 
3115   masm.bind(&done);
3116   masm.freeStack(RegExpReservedStack);
3117   masm.ret();
3118 
3119   Linker linker(masm);
3120   JitCode* code = linker.newCode(cx, CodeKind::Other);
3121   if (!code) {
3122     return nullptr;
3123   }
3124 
3125 #ifdef JS_ION_PERF
3126   writePerfSpewerJitCodeProfile(code, "RegExpTesterStub");
3127 #endif
3128 #ifdef MOZ_VTUNE
3129   vtune::MarkStub(code, "RegExpTesterStub");
3130 #endif
3131 
3132   return code;
3133 }
3134 
3135 class OutOfLineRegExpTester : public OutOfLineCodeBase<CodeGenerator> {
3136   LRegExpTester* lir_;
3137 
3138  public:
3139   explicit OutOfLineRegExpTester(LRegExpTester* lir) : lir_(lir) {}
3140 
3141   void accept(CodeGenerator* codegen) override {
3142     codegen->visitOutOfLineRegExpTester(this);
3143   }
3144 
3145   LRegExpTester* lir() const { return lir_; }
3146 };
3147 
3148 void CodeGenerator::visitOutOfLineRegExpTester(OutOfLineRegExpTester* ool) {
3149   LRegExpTester* lir = ool->lir();
3150   Register lastIndex = ToRegister(lir->lastIndex());
3151   Register input = ToRegister(lir->string());
3152   Register regexp = ToRegister(lir->regexp());
3153 
3154   pushArg(lastIndex);
3155   pushArg(input);
3156   pushArg(regexp);
3157 
3158   // We are not using oolCallVM because we are in a Call, and the live
3159   // registers are already saved by the register allocator.
3160   using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
3161                       int32_t lastIndex, int32_t* result);
3162   callVM<Fn, RegExpTesterRaw>(lir);
3163 
3164   masm.jump(ool->rejoin());
3165 }
3166 
3167 void CodeGenerator::visitRegExpTester(LRegExpTester* lir) {
3168   MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg);
3169   MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg);
3170   MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg);
3171   MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
3172 
3173   static_assert(RegExpTesterRegExpReg != ReturnReg);
3174   static_assert(RegExpTesterStringReg != ReturnReg);
3175   static_assert(RegExpTesterLastIndexReg != ReturnReg);
3176 
3177   OutOfLineRegExpTester* ool = new (alloc()) OutOfLineRegExpTester(lir);
3178   addOutOfLineCode(ool, lir->mir());
3179 
3180   const JitRealm* jitRealm = gen->realm->jitRealm();
3181   JitCode* regExpTesterStub =
3182       jitRealm->regExpTesterStubNoBarrier(&realmStubsToReadBarrier_);
3183   masm.call(regExpTesterStub);
3184 
3185   masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpTesterResultFailed),
3186                 ool->entry());
3187   masm.bind(ool->rejoin());
3188 }
3189 
3190 class OutOfLineRegExpPrototypeOptimizable
3191     : public OutOfLineCodeBase<CodeGenerator> {
3192   LRegExpPrototypeOptimizable* ins_;
3193 
3194  public:
3195   explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
3196       : ins_(ins) {}
3197 
3198   void accept(CodeGenerator* codegen) override {
3199     codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
3200   }
3201   LRegExpPrototypeOptimizable* ins() const { return ins_; }
3202 };
3203 
3204 void CodeGenerator::visitRegExpPrototypeOptimizable(
3205     LRegExpPrototypeOptimizable* ins) {
3206   Register object = ToRegister(ins->object());
3207   Register output = ToRegister(ins->output());
3208   Register temp = ToRegister(ins->temp());
3209 
3210   OutOfLineRegExpPrototypeOptimizable* ool =
3211       new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
3212   addOutOfLineCode(ool, ins->mir());
3213 
3214   masm.branchIfNotRegExpPrototypeOptimizable(object, temp, ool->entry());
3215   masm.move32(Imm32(0x1), output);
3216 
3217   masm.bind(ool->rejoin());
3218 }
3219 
3220 void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
3221     OutOfLineRegExpPrototypeOptimizable* ool) {
3222   LRegExpPrototypeOptimizable* ins = ool->ins();
3223   Register object = ToRegister(ins->object());
3224   Register output = ToRegister(ins->output());
3225 
3226   saveVolatile(output);
3227 
3228   using Fn = bool (*)(JSContext* cx, JSObject* proto);
3229   masm.setupUnalignedABICall(output);
3230   masm.loadJSContext(output);
3231   masm.passABIArg(output);
3232   masm.passABIArg(object);
3233   masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
3234   masm.storeCallBoolResult(output);
3235 
3236   restoreVolatile(output);
3237 
3238   masm.jump(ool->rejoin());
3239 }
3240 
3241 class OutOfLineRegExpInstanceOptimizable
3242     : public OutOfLineCodeBase<CodeGenerator> {
3243   LRegExpInstanceOptimizable* ins_;
3244 
3245  public:
3246   explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
3247       : ins_(ins) {}
3248 
3249   void accept(CodeGenerator* codegen) override {
3250     codegen->visitOutOfLineRegExpInstanceOptimizable(this);
3251   }
3252   LRegExpInstanceOptimizable* ins() const { return ins_; }
3253 };
3254 
3255 void CodeGenerator::visitRegExpInstanceOptimizable(
3256     LRegExpInstanceOptimizable* ins) {
3257   Register object = ToRegister(ins->object());
3258   Register output = ToRegister(ins->output());
3259   Register temp = ToRegister(ins->temp());
3260 
3261   OutOfLineRegExpInstanceOptimizable* ool =
3262       new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
3263   addOutOfLineCode(ool, ins->mir());
3264 
3265   masm.branchIfNotRegExpInstanceOptimizable(object, temp, ool->entry());
3266   masm.move32(Imm32(0x1), output);
3267 
3268   masm.bind(ool->rejoin());
3269 }
3270 
3271 void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
3272     OutOfLineRegExpInstanceOptimizable* ool) {
3273   LRegExpInstanceOptimizable* ins = ool->ins();
3274   Register object = ToRegister(ins->object());
3275   Register proto = ToRegister(ins->proto());
3276   Register output = ToRegister(ins->output());
3277 
3278   saveVolatile(output);
3279 
3280   using Fn = bool (*)(JSContext* cx, JSObject* obj, JSObject* proto);
3281   masm.setupUnalignedABICall(output);
3282   masm.loadJSContext(output);
3283   masm.passABIArg(output);
3284   masm.passABIArg(object);
3285   masm.passABIArg(proto);
3286   masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
3287   masm.storeCallBoolResult(output);
3288 
3289   restoreVolatile(output);
3290 
3291   masm.jump(ool->rejoin());
3292 }
3293 
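// The generated loop below is equivalent to this scalar sketch (illustrative
// only; CharT stands for Latin1Char or char16_t depending on |encoding|):
//
//   template <typename CharT>
//   int32_t FirstDollarIndex(const CharT* chars, int32_t len) {
//     for (int32_t i = 0; i < len; i++) {
//       if (chars[i] == '$') {
//         return i;
//       }
//     }
//     return -1;
//   }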
3294 static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
3295                                  Register len, Register temp0, Register temp1,
3296                                  Register output, CharEncoding encoding) {
3297 #ifdef DEBUG
3298   Label ok;
3299   masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
3300   masm.assumeUnreachable("Length should be greater than 0.");
3301   masm.bind(&ok);
3302 #endif
3303 
3304   Register chars = temp0;
3305   masm.loadStringChars(str, chars, encoding);
3306 
3307   masm.move32(Imm32(0), output);
3308 
3309   Label start, done;
3310   masm.bind(&start);
3311 
3312   Register currentChar = temp1;
3313   masm.loadChar(chars, output, currentChar, encoding);
3314   masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);
3315 
3316   masm.add32(Imm32(1), output);
3317   masm.branch32(Assembler::NotEqual, output, len, &start);
3318 
3319   masm.move32(Imm32(-1), output);
3320 
3321   masm.bind(&done);
3322 }
3323 
3324 void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
3325   Register str = ToRegister(ins->str());
3326   Register output = ToRegister(ins->output());
3327   Register temp0 = ToRegister(ins->temp0());
3328   Register temp1 = ToRegister(ins->temp1());
3329   Register len = ToRegister(ins->temp2());
3330 
3331   using Fn = bool (*)(JSContext*, JSString*, int32_t*);
3332   OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
3333       ins, ArgList(str), StoreRegisterTo(output));
3334 
3335   masm.branchIfRope(str, ool->entry());
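  // Ropes have no contiguous character buffer for the inline loops below to
  // iterate over, so they take the VM path above, which is expected to
  // linearize the string first (an assumption about GetFirstDollarIndexRaw).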
3336   masm.loadStringLength(str, len);
3337 
3338   Label isLatin1, done;
3339   masm.branchLatin1String(str, &isLatin1);
3340   {
3341     FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
3342                          CharEncoding::TwoByte);
3343     masm.jump(&done);
3344   }
3345   masm.bind(&isLatin1);
3346   {
3347     FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
3348                          CharEncoding::Latin1);
3349   }
3350   masm.bind(&done);
3351   masm.bind(ool->rejoin());
3352 }
3353 
3354 void CodeGenerator::visitStringReplace(LStringReplace* lir) {
3355   if (lir->replacement()->isConstant()) {
3356     pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
3357   } else {
3358     pushArg(ToRegister(lir->replacement()));
3359   }
3360 
3361   if (lir->pattern()->isConstant()) {
3362     pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
3363   } else {
3364     pushArg(ToRegister(lir->pattern()));
3365   }
3366 
3367   if (lir->string()->isConstant()) {
3368     pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
3369   } else {
3370     pushArg(ToRegister(lir->string()));
3371   }
3372 
3373   using Fn =
3374       JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
3375   if (lir->mir()->isFlatReplacement()) {
3376     callVM<Fn, StringFlatReplaceString>(lir);
3377   } else {
3378     callVM<Fn, StringReplace>(lir);
3379   }
3380 }
3381 
3382 void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
3383   LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3384   TypedOrValueRegister lhs =
3385       TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsInput));
3386   TypedOrValueRegister rhs =
3387       TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsInput));
3388   ValueOperand output = ToOutValue(lir);
3389 
3390   JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
3391 
3392   switch (jsop) {
3393     case JSOp::Add:
3394     case JSOp::Sub:
3395     case JSOp::Mul:
3396     case JSOp::Div:
3397     case JSOp::Mod:
3398     case JSOp::Pow:
3399     case JSOp::BitAnd:
3400     case JSOp::BitOr:
3401     case JSOp::BitXor:
3402     case JSOp::Lsh:
3403     case JSOp::Rsh:
3404     case JSOp::Ursh: {
3405       IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
3406       addIC(lir, allocateIC(ic));
3407       return;
3408     }
3409     default:
3410       MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
3411   }
3412 }
3413 
3414 void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
3415   LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3416   TypedOrValueRegister lhs =
3417       TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsInput));
3418   TypedOrValueRegister rhs =
3419       TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsInput));
3420   Register output = ToRegister(lir->output());
3421 
3422   JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
3423 
3424   switch (jsop) {
3425     case JSOp::Lt:
3426     case JSOp::Le:
3427     case JSOp::Gt:
3428     case JSOp::Ge:
3429     case JSOp::Eq:
3430     case JSOp::Ne:
3431     case JSOp::StrictEq:
3432     case JSOp::StrictNe: {
3433       IonCompareIC ic(liveRegs, lhs, rhs, output);
3434       addIC(lir, allocateIC(ic));
3435       return;
3436     }
3437     default:
3438       MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
3439   }
3440 }
3441 
3442 void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
3443   LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
3444   TypedOrValueRegister input =
3445       TypedOrValueRegister(ToValue(lir, LUnaryCache::Input));
3446   ValueOperand output = ToOutValue(lir);
3447 
3448   IonUnaryArithIC ic(liveRegs, input, output);
3449   addIC(lir, allocateIC(ic));
3450 }
3451 
3452 void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
3453   pushArg(ImmPtr(lir->mir()->module()));
3454 
3455   using Fn = JSObject* (*)(JSContext*, HandleObject);
3456   callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
3457 }
3458 
3459 void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
3460   pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
3461   pushArg(ImmGCPtr(current->mir()->info().script()));
3462 
3463   using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue);
3464   callVM<Fn, js::StartDynamicModuleImport>(lir);
3465 }
3466 
3467 void CodeGenerator::visitLambda(LLambda* lir) {
3468   Register envChain = ToRegister(lir->environmentChain());
3469   Register output = ToRegister(lir->output());
3470   Register tempReg = ToRegister(lir->temp());
3471   const LambdaFunctionInfo& info = lir->mir()->info();
3472 
3473   using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
3474   OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
3475       lir, ArgList(ImmGCPtr(info.funUnsafe()), envChain),
3476       StoreRegisterTo(output));
3477 
3478   TemplateObject templateObject(info.funUnsafe());
3479   masm.createGCObject(output, tempReg, templateObject, gc::DefaultHeap,
3480                       ool->entry());
3481 
3482   emitLambdaInit(output, envChain, info);
3483 
3484   if (info.flags.isExtended()) {
3485     MOZ_ASSERT(info.flags.allowSuperProperty() ||
3486                info.flags.isSelfHostedBuiltin());
3487     static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2,
3488                   "All slots must be initialized");
3489     masm.storeValue(UndefinedValue(),
3490                     Address(output, FunctionExtended::offsetOfExtendedSlot(0)));
3491     masm.storeValue(UndefinedValue(),
3492                     Address(output, FunctionExtended::offsetOfExtendedSlot(1)));
3493   }
3494 
3495   masm.bind(ool->rejoin());
3496 }
3497 
3498 void CodeGenerator::visitLambdaArrow(LLambdaArrow* lir) {
3499   Register envChain = ToRegister(lir->environmentChain());
3500   ValueOperand newTarget = ToValue(lir, LLambdaArrow::NewTargetValue);
3501   Register output = ToRegister(lir->output());
3502   Register temp = ToRegister(lir->temp());
3503   const LambdaFunctionInfo& info = lir->mir()->info();
3504 
3505   using Fn =
3506       JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleValue);
3507   OutOfLineCode* ool = oolCallVM<Fn, LambdaArrow>(
3508       lir, ArgList(ImmGCPtr(info.funUnsafe()), envChain, newTarget),
3509       StoreRegisterTo(output));
3510 
3511   TemplateObject templateObject(info.funUnsafe());
3512   masm.createGCObject(output, temp, templateObject, gc::DefaultHeap,
3513                       ool->entry());
3514 
3515   emitLambdaInit(output, envChain, info);
3516 
3517   // Initialize extended slots. Lexical |this| is stored in the first one.
3518   MOZ_ASSERT(info.flags.isExtended());
3519   static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2,
3520                 "All slots must be initialized");
3521   static_assert(FunctionExtended::ARROW_NEWTARGET_SLOT == 0,
3522                 "|new.target| must be stored in first slot");
3523   masm.storeValue(newTarget,
3524                   Address(output, FunctionExtended::offsetOfExtendedSlot(0)));
3525   masm.storeValue(UndefinedValue(),
3526                   Address(output, FunctionExtended::offsetOfExtendedSlot(1)));
3527 
3528   masm.bind(ool->rejoin());
3529 }
3530 
3531 void CodeGenerator::emitLambdaInit(Register output, Register envChain,
3532                                    const LambdaFunctionInfo& info) {
3533   // Initialize nargs and flags. We do this with a single uint32 to avoid
3534   // 16-bit writes.
3535   union {
3536     struct S {
3537       uint16_t nargs;
3538       uint16_t flags;
3539     } s;
3540     uint32_t word;
3541   } u;
3542   u.s.nargs = info.nargs;
3543   u.s.flags = info.flags.toRaw();
3544 
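  // Why a single 32-bit store is endian-safe (explanatory note): u.word is
  // the in-memory layout of the two uint16_t fields read back in native byte
  // order, and store32 writes native byte order, so nargs and flags end up at
  // offsetOfNargs() and offsetOfFlags() exactly as two separate 16-bit stores
  // would place them. E.g. nargs = 2 and flags = 0x0041 give
  // u.word == 0x00410002 on little-endian and 0x00020041 on big-endian, but
  // the same bytes in memory either way.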
3545   static_assert(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2,
3546                 "the code below needs to be adapted");
3547   masm.store32(Imm32(u.word), Address(output, JSFunction::offsetOfNargs()));
3548   masm.storePtr(ImmGCPtr(info.baseScript),
3549                 Address(output, JSFunction::offsetOfBaseScript()));
3550   masm.storePtr(envChain, Address(output, JSFunction::offsetOfEnvironment()));
3551   // No post barrier needed because output is guaranteed to be allocated in
3552   // the nursery.
3553   masm.storePtr(ImmGCPtr(info.funUnsafe()->displayAtom()),
3554                 Address(output, JSFunction::offsetOfAtom()));
3555 }
3556 
3557 void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
3558   Register envChain = ToRegister(lir->environmentChain());
3559   Register prototype = ToRegister(lir->prototype());
3560 
3561   pushArg(prototype);
3562   pushArg(envChain);
3563   pushArg(ImmGCPtr(lir->mir()->function()));
3564 
3565   using Fn =
3566       JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
3567   callVM<Fn, js::FunWithProtoOperation>(lir);
3568 }
3569 
3570 void CodeGenerator::visitSetFunName(LSetFunName* lir) {
3571   pushArg(Imm32(lir->mir()->prefixKind()));
3572   pushArg(ToValue(lir, LSetFunName::NameValue));
3573   pushArg(ToRegister(lir->fun()));
3574 
3575   using Fn =
3576       bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
3577   callVM<Fn, js::SetFunctionName>(lir);
3578 }
3579 
3580 void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
3581   // Note: markOsiPoint ensures enough space exists between the last
3582   // LOsiPoint and this one to patch adjacent call instructions.
3583 
3584   MOZ_ASSERT(masm.framePushed() == frameSize());
3585 
3586   uint32_t osiCallPointOffset = markOsiPoint(lir);
3587 
3588   LSafepoint* safepoint = lir->associatedSafepoint();
3589   MOZ_ASSERT(!safepoint->osiCallPointOffset());
3590   safepoint->setOsiCallPointOffset(osiCallPointOffset);
3591 
3592 #ifdef DEBUG
3593   // There should be no movegroups or other instructions between
3594   // an instruction and its OsiPoint. This is necessary because
3595   // we use the OsiPoint's snapshot from within VM calls.
3596   for (LInstructionReverseIterator iter(current->rbegin(lir));
3597        iter != current->rend(); iter++) {
3598     if (*iter == lir) {
3599       continue;
3600     }
3601     MOZ_ASSERT(!iter->isMoveGroup());
3602     MOZ_ASSERT(iter->safepoint() == safepoint);
3603     break;
3604   }
3605 #endif
3606 
3607 #ifdef CHECK_OSIPOINT_REGISTERS
3608   if (shouldVerifyOsiPointRegs(safepoint)) {
3609     verifyOsiPointRegs(safepoint);
3610   }
3611 #endif
3612 }
3613 
3614 void CodeGenerator::visitPhi(LPhi* lir) {
3615   MOZ_CRASH("Unexpected LPhi in CodeGenerator");
3616 }
3617 
3618 void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
3619 
3620 void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
3621   MTableSwitch* mir = ins->mir();
3622   Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
3623   const LAllocation* temp;
3624 
3625   if (mir->getOperand(0)->type() != MIRType::Int32) {
3626     temp = ins->tempInt()->output();
3627 
3628     // The input is a double, so try to convert it to an integer.
3629     // If it does not fit in an integer, take the default case.
3630     masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
3631                               defaultcase, false);
3632   } else {
3633     temp = ins->index();
3634   }
3635 
3636   emitTableSwitchDispatch(mir, ToRegister(temp),
3637                           ToRegisterOrInvalid(ins->tempPointer()));
3638 }
3639 
3640 void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
3641   MTableSwitch* mir = ins->mir();
3642   Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
3643 
3644   Register index = ToRegister(ins->tempInt());
3645   ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
3646   Register tag = masm.extractTag(value, index);
3647   masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);
3648 
3649   Label unboxInt, isInt;
3650   masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
3651   {
3652     FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
3653     masm.unboxDouble(value, floatIndex);
3654     masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
3655     masm.jump(&isInt);
3656   }
3657 
3658   masm.bind(&unboxInt);
3659   masm.unboxInt32(value, index);
3660 
3661   masm.bind(&isInt);
3662 
3663   emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
3664 }
3665 
3666 void CodeGenerator::visitParameter(LParameter* lir) {}
3667 
3668 void CodeGenerator::visitCallee(LCallee* lir) {
3669   Register callee = ToRegister(lir->output());
3670   Address ptr(masm.getStackPointer(),
3671               frameSize() + JitFrameLayout::offsetOfCalleeToken());
3672 
3673   masm.loadFunctionFromCalleeToken(ptr, callee);
3674 }
3675 
3676 void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
3677   Register output = ToRegister(lir->output());
3678   Address calleeToken(masm.getStackPointer(),
3679                       frameSize() + JitFrameLayout::offsetOfCalleeToken());
3680   masm.loadPtr(calleeToken, output);
3681 
3682   // We must be inside a function.
3683   MOZ_ASSERT(current->mir()->info().script()->function());
3684 
3685   // The low bit indicates whether this call is constructing; just clear the
3686   // other bits.
3687   static_assert(CalleeToken_Function == 0x0,
3688                 "CalleeTokenTag value should match");
3689   static_assert(CalleeToken_FunctionConstructing == 0x1,
3690                 "CalleeTokenTag value should match");
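  // Equivalent C sketch (illustrative): output = uintptr_t(token) & 0x1,
  // which is 1 for CalleeToken_FunctionConstructing and 0 for a plain
  // CalleeToken_Function.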
3691   masm.andPtr(Imm32(0x1), output);
3692 }
3693 
3694 void CodeGenerator::visitReturn(LReturn* lir) {
3695 #if defined(JS_NUNBOX32)
3696   DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
3697   DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
3698   MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
3699   MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
3700 #elif defined(JS_PUNBOX64)
3701   DebugOnly<LAllocation*> result = lir->getOperand(0);
3702   MOZ_ASSERT(ToRegister(result) == JSReturnReg);
3703 #endif
3704   // Don't emit a jump to the return label if this is the last block, as
3705   // it'll fall through to the epilogue.
3706   //
3707   // This is -not- true however for a Generator-return, which may appear in the
3708   // middle of the last block, so we should always emit the jump there.
3709   if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
3710     masm.jump(&returnLabel_);
3711   }
3712 }
3713 
3714 void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
3715   Register temp = ToRegister(lir->temp());
3716 
3717   // Remember the OSR entry offset into the code buffer.
3718   masm.flushBuffer();
3719   setOsrEntryOffset(masm.size());
3720 
3721 #ifdef JS_TRACE_LOGGING
3722   if (JS::TraceLoggerSupported()) {
3723     emitTracelogStopEvent(TraceLogger_Baseline);
3724     emitTracelogStartEvent(TraceLogger_IonMonkey);
3725   }
3726 #endif
3727 
3728   // If profiling, save the current frame pointer to a per-thread global field.
3729   if (isProfilerInstrumentationEnabled()) {
3730     masm.profilerEnterFrame(masm.getStackPointer(), temp);
3731   }
3732 
3733   // Allocate the full frame for this function.
3734   // Note we have a new entry here, so we reset MacroAssembler::framePushed()
3735   // to 0 before reserving the stack.
3736   MOZ_ASSERT(masm.framePushed() == frameSize());
3737   masm.setFramePushed(0);
3738 
3739   // Ensure that the Ion frame is properly aligned.
3740   masm.assertStackAlignment(JitStackAlignment, 0);
3741 
3742   masm.reserveStack(frameSize());
3743 }
3744 
3745 void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
3746   const LAllocation* frame = lir->getOperand(0);
3747   const LDefinition* object = lir->getDef(0);
3748 
3749   const ptrdiff_t frameOffset =
3750       BaselineFrame::reverseOffsetOfEnvironmentChain();
3751 
3752   masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3753 }
3754 
3755 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
3756   const LAllocation* frame = lir->getOperand(0);
3757   const LDefinition* object = lir->getDef(0);
3758 
3759   const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
3760 
3761   masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
3762 }
3763 
3764 void CodeGenerator::visitOsrValue(LOsrValue* value) {
3765   const LAllocation* frame = value->getOperand(0);
3766   const ValueOperand out = ToOutValue(value);
3767 
3768   const ptrdiff_t frameOffset = value->mir()->frameOffset();
3769 
3770   masm.loadValue(Address(ToRegister(frame), frameOffset), out);
3771 }
3772 
3773 void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
3774   const LAllocation* frame = lir->getOperand(0);
3775   const ValueOperand out = ToOutValue(lir);
3776 
3777   Address flags =
3778       Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
3779   Address retval =
3780       Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
3781 
3782   masm.moveValue(UndefinedValue(), out);
3783 
3784   Label done;
3785   masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
3786                     &done);
3787   masm.loadValue(retval, out);
3788   masm.bind(&done);
3789 }
3790 
3791 void CodeGenerator::visitStackArgT(LStackArgT* lir) {
3792   const LAllocation* arg = lir->getArgument();
3793   MIRType argType = lir->type();
3794   uint32_t argslot = lir->argslot();
3795   MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3796 
3797   int32_t stack_offset = StackOffsetOfPassedArg(argslot);
3798   Address dest(masm.getStackPointer(), stack_offset);
3799 
3800   if (arg->isFloatReg()) {
3801     masm.boxDouble(ToFloatRegister(arg), dest);
3802   } else if (arg->isRegister()) {
3803     masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
3804   } else {
3805     masm.storeValue(arg->toConstant()->toJSValue(), dest);
3806   }
3807 }
3808 
3809 void CodeGenerator::visitStackArgV(LStackArgV* lir) {
3810   ValueOperand val = ToValue(lir, 0);
3811   uint32_t argslot = lir->argslot();
3812   MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
3813 
3814   int32_t stack_offset = StackOffsetOfPassedArg(argslot);
3815 
3816   masm.storeValue(val, Address(masm.getStackPointer(), stack_offset));
3817 }
3818 
3819 void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
3820   if (!group->numMoves()) {
3821     return;
3822   }
3823 
3824   MoveResolver& resolver = masm.moveResolver();
3825 
3826   for (size_t i = 0; i < group->numMoves(); i++) {
3827     const LMove& move = group->getMove(i);
3828 
3829     LAllocation from = move.from();
3830     LAllocation to = move.to();
3831     LDefinition::Type type = move.type();
3832 
3833     // No bogus moves.
3834     MOZ_ASSERT(from != to);
3835     MOZ_ASSERT(!from.isConstant());
3836     MoveOp::Type moveType;
3837     switch (type) {
3838       case LDefinition::OBJECT:
3839       case LDefinition::SLOTS:
3840 #ifdef JS_NUNBOX32
3841       case LDefinition::TYPE:
3842       case LDefinition::PAYLOAD:
3843 #else
3844       case LDefinition::BOX:
3845 #endif
3846       case LDefinition::GENERAL:
3847       case LDefinition::STACKRESULTS:
3848         moveType = MoveOp::GENERAL;
3849         break;
3850       case LDefinition::INT32:
3851         moveType = MoveOp::INT32;
3852         break;
3853       case LDefinition::FLOAT32:
3854         moveType = MoveOp::FLOAT32;
3855         break;
3856       case LDefinition::DOUBLE:
3857         moveType = MoveOp::DOUBLE;
3858         break;
3859       case LDefinition::SIMD128:
3860         moveType = MoveOp::SIMD128;
3861         break;
3862       default:
3863         MOZ_CRASH("Unexpected move type");
3864     }
3865 
3866     masm.propagateOOM(
3867         resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
3868   }
3869 
3870   masm.propagateOOM(resolver.resolve());
3871   if (masm.oom()) {
3872     return;
3873   }
3874 
3875   MoveEmitter emitter(masm);
3876 
3877 #ifdef JS_CODEGEN_X86
3878   if (group->maybeScratchRegister().isGeneralReg()) {
3879     emitter.setScratchRegister(
3880         group->maybeScratchRegister().toGeneralReg()->reg());
3881   } else {
3882     resolver.sortMemoryToMemoryMoves();
3883   }
3884 #endif
3885 
3886   emitter.emit(resolver);
3887   emitter.finish();
3888 }
3889 
3890 void CodeGenerator::visitInteger(LInteger* lir) {
3891   masm.move32(Imm32(lir->getValue()), ToRegister(lir->output()));
3892 }
3893 
3894 void CodeGenerator::visitInteger64(LInteger64* lir) {
3895   masm.move64(Imm64(lir->getValue()), ToOutRegister64(lir));
3896 }
3897 
3898 void CodeGenerator::visitPointer(LPointer* lir) {
3899   masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
3900 }
3901 
3902 void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
3903   Register output = ToRegister(lir->output());
3904   uint32_t nurseryIndex = lir->mir()->nurseryIndex();
3905 
3906   // Load a pointer to the entry in IonScript's nursery objects list.
3907   CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
3908   masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));
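  // The ImmWord(uintptr_t(-1)) written above is only a placeholder: recording
  // the CodeOffset together with |nurseryIndex| allows the move to be patched
  // later (presumably at link time, outside this function) to point at the
  // IonScript's entry for that nursery object, which the loadPtr below then
  // dereferences to get the current JSObject*.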
3909 
3910   // Load the JSObject*.
3911   masm.loadPtr(Address(output, 0), output);
3912 }
3913 
3914 void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
3915   // No-op.
3916 }
3917 
3918 void CodeGenerator::visitSlots(LSlots* lir) {
3919   Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
3920   masm.loadPtr(slots, ToRegister(lir->output()));
3921 }
3922 
3923 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
3924   ValueOperand dest = ToOutValue(lir);
3925   Register base = ToRegister(lir->input());
3926   int32_t offset = lir->mir()->slot() * sizeof(js::Value);
3927 
3928   masm.loadValue(Address(base, offset), dest);
3929 }
3930 
3931 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
3932                                                MIRType valueType) {
3933   if (value->isConstant()) {
3934     return ConstantOrRegister(value->toConstant()->toJSValue());
3935   }
3936   return TypedOrValueRegister(valueType, ToAnyRegister(value));
3937 }
3938 
3939 void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
3940   Register base = ToRegister(lir->slots());
3941   int32_t offset = lir->mir()->slot() * sizeof(js::Value);
3942   Address dest(base, offset);
3943 
3944   if (lir->mir()->needsBarrier()) {
3945     emitPreBarrier(dest);
3946   }
3947 
3948   MIRType valueType = lir->mir()->value()->type();
3949   ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
3950   masm.storeUnboxedValue(value, valueType, dest, lir->mir()->slotType());
3951 }
3952 
3953 void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
3954   Register base = ToRegister(lir->slots());
3955   int32_t offset = lir->mir()->slot() * sizeof(Value);
3956 
3957   const ValueOperand value = ToValue(lir, LStoreDynamicSlotV::Value);
3958 
3959   if (lir->mir()->needsBarrier()) {
3960     emitPreBarrier(Address(base, offset));
3961   }
3962 
3963   masm.storeValue(value, Address(base, offset));
3964 }
3965 
3966 void CodeGenerator::visitElements(LElements* lir) {
3967   Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
3968   masm.loadPtr(elements, ToRegister(lir->output()));
3969 }
3970 
3971 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
3972   Address environment(ToRegister(lir->function()),
3973                       JSFunction::offsetOfEnvironment());
3974   masm.loadPtr(environment, ToRegister(lir->output()));
3975 }
3976 
3977 void CodeGenerator::visitHomeObject(LHomeObject* lir) {
3978   Address homeObject(ToRegister(lir->function()),
3979                      FunctionExtended::offsetOfMethodHomeObjectSlot());
3980 #ifdef DEBUG
3981   Label isObject;
3982   masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
3983   masm.assumeUnreachable("[[HomeObject]] must be Object");
3984   masm.bind(&isObject);
3985 #endif
3986   masm.unboxObject(homeObject, ToRegister(lir->output()));
3987 }
3988 
3989 void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
3990   Register homeObject = ToRegister(lir->homeObject());
3991   Register output = ToRegister(lir->output());
3992 
3993   using Fn = bool (*)(JSContext*);
3994   OutOfLineCode* ool =
3995       oolCallVM<Fn, ThrowHomeObjectNotObject>(lir, ArgList(), StoreNothing());
3996 
3997   masm.loadObjProto(homeObject, output);
3998 
3999 #ifdef DEBUG
4000   // We won't encounter a lazy proto, because the prototype is guaranteed to
4001   // either be a JSFunction or a PlainObject, and only proxy objects can have a
4002   // lazy proto.
4003   MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
4004 
4005   Label proxyCheckDone;
4006   masm.branchPtr(Assembler::NotEqual, output, ImmWord(1), &proxyCheckDone);
4007   masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
4008   masm.bind(&proxyCheckDone);
4009 #endif
4010 
4011   masm.branchPtr(Assembler::Equal, output, ImmWord(0), ool->entry());
4012   masm.bind(ool->rejoin());
4013 }
4014 
4015 void CodeGenerator::visitNewLexicalEnvironmentObject(
4016     LNewLexicalEnvironmentObject* lir) {
4017   pushArg(Imm32(gc::DefaultHeap));
4018   pushArg(ToRegister(lir->enclosing()));
4019   pushArg(ImmGCPtr(lir->mir()->scope()));
4020 
4021   using Fn =
4022       BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>,
4023                                          HandleObject, gc::InitialHeap);
4024   callVM<Fn, BlockLexicalEnvironmentObject::create>(lir);
4025 }
4026 
4027 void CodeGenerator::visitNewClassBodyEnvironmentObject(
4028     LNewClassBodyEnvironmentObject* lir) {
4029   pushArg(Imm32(gc::DefaultHeap));
4030   pushArg(ToRegister(lir->enclosing()));
4031   pushArg(ImmGCPtr(lir->mir()->scope()));
4032 
4033   using Fn =
4034       ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
4035                                              Handle<ClassBodyScope*>,
4036                                              HandleObject, gc::InitialHeap);
4037   callVM<Fn, ClassBodyLexicalEnvironmentObject::create>(lir);
4038 }
4039 
4040 void CodeGenerator::visitCopyLexicalEnvironmentObject(
4041     LCopyLexicalEnvironmentObject* lir) {
4042   pushArg(Imm32(lir->mir()->copySlots()));
4043   pushArg(ToRegister(lir->env()));
4044 
4045   using Fn = JSObject* (*)(JSContext*, HandleObject, bool);
4046   callVM<Fn, jit::CopyLexicalEnvironmentObject>(lir);
4047 }
4048 
4049 void CodeGenerator::visitGuardShape(LGuardShape* guard) {
4050   Register obj = ToRegister(guard->input());
4051   Register temp = ToTempRegisterOrInvalid(guard->temp());
4052   Label bail;
4053   masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
4054                           obj, &bail);
4055   bailoutFrom(&bail, guard->snapshot());
4056 }
4057 
4058 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
4059   Register obj = ToRegister(guard->object());
4060   Register expected = ToRegister(guard->expected());
4061   Register temp = ToRegister(guard->temp());
4062 
4063   masm.loadObjProto(obj, temp);
4064 
4065   Label bail;
4066   masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
4067   bailoutFrom(&bail, guard->snapshot());
4068 }
4069 
4070 void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
4071   Register obj = ToRegister(guard->input());
4072   Register temp = ToRegister(guard->temp());
4073 
4074   masm.loadObjProto(obj, temp);
4075 
4076   Label bail;
4077   masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
4078   bailoutFrom(&bail, guard->snapshot());
4079 }
4080 
4081 void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
4082   Register obj = ToRegister(guard->input());
4083   Register temp = ToRegister(guard->temp());
4084 
4085   Label bail;
4086   masm.branchIfNonNativeObj(obj, temp, &bail);
4087   bailoutFrom(&bail, guard->snapshot());
4088 }
4089 
4090 void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
4091   Register obj = ToRegister(guard->input());
4092   Register temp = ToRegister(guard->temp());
4093 
4094   Label bail;
4095   masm.branchTestObjectIsProxy(false, obj, temp, &bail);
4096   bailoutFrom(&bail, guard->snapshot());
4097 }
4098 
4099 void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
4100   Register obj = ToRegister(guard->input());
4101   Register temp = ToRegister(guard->temp());
4102 
4103   Label bail;
4104   masm.branchTestObjectIsProxy(true, obj, temp, &bail);
4105   bailoutFrom(&bail, guard->snapshot());
4106 }
4107 
4108 void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
4109   Register proxy = ToRegister(guard->proxy());
4110   Register temp = ToRegister(guard->temp());
4111 
4112   Label bail;
4113   masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
4114                                     GetDOMProxyHandlerFamily(), &bail);
4115   bailoutFrom(&bail, guard->snapshot());
4116 }
4117 
4118 void CodeGenerator::visitProxyGet(LProxyGet* lir) {
4119   Register proxy = ToRegister(lir->proxy());
4120   Register temp = ToRegister(lir->temp());
4121 
4122   pushArg(lir->mir()->id(), temp);
4123   pushArg(proxy);
4124 
4125   using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
4126   callVM<Fn, ProxyGetProperty>(lir);
4127 }
4128 
4129 void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
4130   Register proxy = ToRegister(lir->proxy());
4131   ValueOperand idVal = ToValue(lir, LProxyGetByValue::IdIndex);
4132 
4133   pushArg(idVal);
4134   pushArg(proxy);
4135 
4136   using Fn =
4137       bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
4138   callVM<Fn, ProxyGetPropertyByValue>(lir);
4139 }
4140 
4141 void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
4142   Register proxy = ToRegister(lir->proxy());
4143   ValueOperand idVal = ToValue(lir, LProxyHasProp::IdIndex);
4144 
4145   pushArg(idVal);
4146   pushArg(proxy);
4147 
4148   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
4149   if (lir->mir()->hasOwn()) {
4150     callVM<Fn, ProxyHasOwn>(lir);
4151   } else {
4152     callVM<Fn, ProxyHas>(lir);
4153   }
4154 }
4155 
4156 void CodeGenerator::visitProxySet(LProxySet* lir) {
4157   Register proxy = ToRegister(lir->proxy());
4158   ValueOperand rhs = ToValue(lir, LProxySet::RhsIndex);
4159   Register temp = ToRegister(lir->temp());
4160 
4161   pushArg(Imm32(lir->mir()->strict()));
4162   pushArg(rhs);
4163   pushArg(lir->mir()->id(), temp);
4164   pushArg(proxy);
4165 
4166   using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
4167   callVM<Fn, ProxySetProperty>(lir);
4168 }
4169 
4170 void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
4171   Register proxy = ToRegister(lir->proxy());
4172   ValueOperand idVal = ToValue(lir, LProxySetByValue::IdIndex);
4173   ValueOperand rhs = ToValue(lir, LProxySetByValue::RhsIndex);
4174 
4175   pushArg(Imm32(lir->mir()->strict()));
4176   pushArg(rhs);
4177   pushArg(idVal);
4178   pushArg(proxy);
4179 
4180   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
4181   callVM<Fn, ProxySetPropertyByValue>(lir);
4182 }
4183 
4184 void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
4185   Register obj = ToRegister(lir->obj());
4186   ValueOperand rhs = ToValue(lir, LCallSetArrayLength::RhsIndex);
4187 
4188   pushArg(Imm32(lir->mir()->strict()));
4189   pushArg(rhs);
4190   pushArg(obj);
4191 
4192   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
4193   callVM<Fn, jit::SetArrayLength>(lir);
4194 }
4195 
4196 void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
4197   Register obj = ToRegister(lir->object());
4198   Register temp1 = ToRegister(lir->temp1());
4199   Register temp2 = ToRegister(lir->temp2());
4200   Register temp3 = ToRegister(lir->temp3());
4201   ValueOperand output = ToOutValue(lir);
4202 
4203   Label bail;
4204   masm.branchIfNonNativeObj(obj, temp1, &bail);
4205 
4206   masm.pushValue(UndefinedValue());
4207   masm.moveStackPtrTo(temp3);
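  // Note: the pushValue(UndefinedValue()) above reserves a Value slot that
  // serves as the |vp| outparam; temp3 now points at it, and on success
  // GetNativeDataPropertyPure writes the property value there for the
  // popValue(output) below.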
4208 
4209   using Fn =
4210       bool (*)(JSContext * cx, JSObject * obj, PropertyName * name, Value * vp);
4211   masm.setupUnalignedABICall(temp1);
4212   masm.loadJSContext(temp1);
4213   masm.passABIArg(temp1);
4214   masm.passABIArg(obj);
4215   masm.movePtr(ImmGCPtr(lir->mir()->name()), temp2);
4216   masm.passABIArg(temp2);
4217   masm.passABIArg(temp3);
4218 
4219   masm.callWithABI<Fn, GetNativeDataPropertyPure>();
4220 
4221   MOZ_ASSERT(!output.aliases(ReturnReg));
4222   masm.popValue(output);
4223 
4224   masm.branchIfFalseBool(ReturnReg, &bail);
4225 
4226   if (JitOptions.spectreJitToCxxCalls) {
4227     masm.speculationBarrier();
4228   }
4229 
4230   bailoutFrom(&bail, lir->snapshot());
4231 }
4232 
4233 void CodeGenerator::visitMegamorphicLoadSlotByValue(
4234     LMegamorphicLoadSlotByValue* lir) {
4235   Register obj = ToRegister(lir->object());
4236   ValueOperand idVal = ToValue(lir, LMegamorphicLoadSlotByValue::IdIndex);
4237   Register temp1 = ToRegister(lir->temp1());
4238   Register temp2 = ToRegister(lir->temp2());
4239   ValueOperand output = ToOutValue(lir);
4240 
4241   Label bail;
4242   masm.branchIfNonNativeObj(obj, temp1, &bail);
4243 
4244   // idVal will be in vp[0], result will be stored in vp[1].
4245   masm.subFromStackPtr(Imm32(sizeof(Value)));
4246   masm.pushValue(idVal);
4247   masm.moveStackPtrTo(temp1);
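  // Note: at this point the stack holds vp[1] (the reserved result slot)
  // and vp[0] (the copy of idVal), and temp1 points at vp[0], matching the
  // Value* vp parameter of Fn below.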
4248 
4249   using Fn = bool (*)(JSContext * cx, JSObject * obj, Value * vp);
4250   masm.setupUnalignedABICall(temp2);
4251   masm.loadJSContext(temp2);
4252   masm.passABIArg(temp2);
4253   masm.passABIArg(obj);
4254   masm.passABIArg(temp1);
4255   masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
4256 
4257   MOZ_ASSERT(!idVal.aliases(temp1));
4258   masm.mov(ReturnReg, temp1);
4259   masm.popValue(idVal);
4260 
4261   Label ok;
4262   masm.branchIfTrueBool(temp1, &ok);
4263   masm.addToStackPtr(Imm32(sizeof(Value)));  // Discard result Value.
4264   masm.jump(&bail);
4265 
4266   masm.bind(&ok);
4267   if (JitOptions.spectreJitToCxxCalls) {
4268     masm.speculationBarrier();
4269   }
4270   masm.popValue(output);
4271 
4272   bailoutFrom(&bail, lir->snapshot());
4273 }
4274 
4275 void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
4276   Register obj = ToRegister(lir->object());
4277   ValueOperand rhs = ToValue(lir, LMegamorphicStoreSlot::RhsIndex);
4278   Register temp1 = ToRegister(lir->temp1());
4279   Register temp2 = ToRegister(lir->temp2());
4280   Register temp3 = ToRegister(lir->temp3());
4281 
4282   masm.pushValue(rhs);
4283   masm.moveStackPtrTo(temp1);
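  // Note: rhs is spilled to the stack so the pure setter can be handed a
  // Value* to it; temp1 holds that stack address and is passed as |val|.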
4284 
4285   using Fn = bool (*)(JSContext * cx, JSObject * obj, PropertyName * name,
4286                       Value * val);
4287   masm.setupUnalignedABICall(temp2);
4288   masm.loadJSContext(temp2);
4289   masm.passABIArg(temp2);
4290   masm.passABIArg(obj);
4291   masm.movePtr(ImmGCPtr(lir->mir()->name()), temp3);
4292   masm.passABIArg(temp3);
4293   masm.passABIArg(temp1);
4294   masm.callWithABI<Fn, SetNativeDataPropertyPure>();
4295 
4296   MOZ_ASSERT(!rhs.aliases(temp1));
4297   masm.mov(ReturnReg, temp1);
4298   masm.popValue(rhs);
4299 
4300   Label bail;
4301   masm.branchIfFalseBool(temp1, &bail);
4302   bailoutFrom(&bail, lir->snapshot());
4303 }
4304 
4305 void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
4306   Register obj = ToRegister(lir->object());
4307   ValueOperand idVal = ToValue(lir, LMegamorphicHasProp::IdIndex);
4308   Register temp1 = ToRegister(lir->temp1());
4309   Register temp2 = ToRegister(lir->temp2());
4310   Register output = ToRegister(lir->output());
4311 
4312   // idVal will be in vp[0], result will be stored in vp[1].
4313   masm.subFromStackPtr(Imm32(sizeof(Value)));
4314   masm.pushValue(idVal);
4315   masm.moveStackPtrTo(temp1);
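  // Note: as in visitMegamorphicLoadSlotByValue, vp[0] holds idVal and
  // vp[1] is the reserved result slot; after the call, popValue(idVal)
  // leaves the stack pointer at vp[1], where the boolean result is unboxed.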
4316 
4317   using Fn = bool (*)(JSContext * cx, JSObject * obj, Value * vp);
4318   masm.setupUnalignedABICall(temp2);
4319   masm.loadJSContext(temp2);
4320   masm.passABIArg(temp2);
4321   masm.passABIArg(obj);
4322   masm.passABIArg(temp1);
4323   if (lir->mir()->hasOwn()) {
4324     masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
4325   } else {
4326     masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
4327   }
4328 
4329   MOZ_ASSERT(!idVal.aliases(temp1));
4330   masm.mov(ReturnReg, temp1);
4331   masm.popValue(idVal);
4332 
4333   Label bail, ok;
4334   masm.branchIfTrueBool(temp1, &ok);
4335   masm.addToStackPtr(Imm32(sizeof(Value)));  // Discard result Value.
4336   masm.jump(&bail);
4337 
4338   masm.bind(&ok);
4339   masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
4340   masm.addToStackPtr(Imm32(sizeof(Value)));
4341 
4342   bailoutFrom(&bail, lir->snapshot());
4343 }
4344 
4345 void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
4346     LGuardIsNotArrayBufferMaybeShared* guard) {
4347   Register obj = ToRegister(guard->input());
4348   Register temp = ToRegister(guard->temp());
4349 
4350   Label bail;
4351   masm.loadObjClassUnsafe(obj, temp);
4352   masm.branchPtr(Assembler::Equal, temp, ImmPtr(&ArrayBufferObject::class_),
4353                  &bail);
4354   masm.branchPtr(Assembler::Equal, temp,
4355                  ImmPtr(&SharedArrayBufferObject::class_), &bail);
4356   bailoutFrom(&bail, guard->snapshot());
4357 }
4358 
4359 void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
4360   Register obj = ToRegister(guard->input());
4361   Register temp = ToRegister(guard->temp());
4362 
4363   Label bail;
4364   masm.loadObjClassUnsafe(obj, temp);
4365   masm.branchIfClassIsNotTypedArray(temp, &bail);
4366   bailoutFrom(&bail, guard->snapshot());
4367 }
4368 
4369 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
4370   Register input = ToRegister(guard->input());
4371   Register expected = ToRegister(guard->expected());
4372 
4373   Assembler::Condition cond =
4374       guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4375   bailoutCmpPtr(cond, input, expected, guard->snapshot());
4376 }
4377 
4378 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
4379   Register input = ToRegister(guard->input());
4380   Register expected = ToRegister(guard->expected());
4381 
4382   bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
4383 }
4384 
4385 void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
4386   Register str = ToRegister(guard->str());
4387   Register scratch = ToRegister(guard->temp());
4388 
4389   LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
4390   volatileRegs.takeUnchecked(scratch);
4391 
4392   Label bail;
4393   masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
4394                          &bail);
4395   bailoutFrom(&bail, guard->snapshot());
4396 }
4397 
4398 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
4399   Register symbol = ToRegister(guard->symbol());
4400 
4401   bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
4402                 guard->snapshot());
4403 }
4404 
4405 void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
4406   Register str = ToRegister(lir->string());
4407   Register output = ToRegister(lir->output());
4408 
4409   Label bail, vmCall, done;
4410   masm.loadStringIndexValue(str, output, &vmCall);
4411   masm.jump(&done);
4412 
4413   {
4414     masm.bind(&vmCall);
4415 
4416     LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4417     volatileRegs.takeUnchecked(output);
4418     masm.PushRegsInMask(volatileRegs);
4419 
4420     using Fn = int32_t (*)(JSString * str);
4421     masm.setupUnalignedABICall(output);
4422     masm.passABIArg(str);
4423     masm.callWithABI<Fn, GetIndexFromString>();
4424     masm.storeCallInt32Result(output);
4425 
4426     masm.PopRegsInMask(volatileRegs);
4427 
4428     // GetIndexFromString returns a negative value on failure.
4429     masm.branchTest32(Assembler::Signed, output, output, &bail);
4430   }
4431 
4432   masm.bind(&done);
4433 
4434   bailoutFrom(&bail, lir->snapshot());
4435 }
4436 
4437 void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
4438   Register str = ToRegister(lir->string());
4439   Register output = ToRegister(lir->output());
4440   Register temp = ToRegister(lir->temp());
4441 
4442   LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4443 
4444   Label bail;
4445   masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
4446   bailoutFrom(&bail, lir->snapshot());
4447 }
4448 
4449 void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
4450   Register str = ToRegister(lir->string());
4451   FloatRegister output = ToFloatRegister(lir->output());
4452   Register temp1 = ToRegister(lir->temp1());
4453   Register temp2 = ToRegister(lir->temp2());
4454 
4455   Label bail, vmCall, done;
4456   // Use indexed value as fast path if possible.
4457   masm.loadStringIndexValue(str, temp1, &vmCall);
4458   masm.convertInt32ToDouble(temp1, output);
4459   masm.jump(&done);
4460   {
4461     masm.bind(&vmCall);
4462 
4463     // Reserve stack for holding the result value of the call.
4464     masm.reserveStack(sizeof(double));
4465     masm.moveStackPtrTo(temp1);
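    // Note: temp1 now points at the reserved slot, which acts as the
    // double* outparam of StringToNumberPure; on success the result is
    // popped straight into the output FloatRegister below.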
4466 
4467     LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
4468     volatileRegs.takeUnchecked(temp1);
4469     volatileRegs.takeUnchecked(temp2);
4470     masm.PushRegsInMask(volatileRegs);
4471 
4472     using Fn = bool (*)(JSContext * cx, JSString * str, double* result);
4473     masm.setupUnalignedABICall(temp2);
4474     masm.loadJSContext(temp2);
4475     masm.passABIArg(temp2);
4476     masm.passABIArg(str);
4477     masm.passABIArg(temp1);
4478     masm.callWithABI<Fn, StringToNumberPure>();
4479     masm.mov(ReturnReg, temp1);
4480 
4481     masm.PopRegsInMask(volatileRegs);
4482 
4483     Label ok;
4484     masm.branchIfTrueBool(temp1, &ok);
4485     {
4486       // OOM path, recovered by StringToNumberPure.
4487       //
4488       // Use addToStackPtr instead of freeStack as freeStack tracks stack height
4489       // flow-insensitively, and using it here would confuse the stack height
4490       // tracking.
4491       masm.addToStackPtr(Imm32(sizeof(double)));
4492       masm.jump(&bail);
4493     }
4494     masm.bind(&ok);
4495     masm.Pop(output);
4496   }
4497   masm.bind(&done);
4498 
4499   bailoutFrom(&bail, lir->snapshot());
4500 }
4501 
4502 void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
4503   Register obj = ToRegister(guard->input());
4504   Register temp = ToRegister(guard->temp());
4505 
4506   // Load obj->elements.
4507   masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
4508 
4509   // Make sure there are no dense elements.
4510   Address initLength(temp, ObjectElements::offsetOfInitializedLength());
4511   bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
4512 }
4513 
4514 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
4515   Register input = ToRegister(lir->input());
4516   Register64 output = ToOutRegister64(lir);
4517 
4518   masm.move32To64ZeroExtend(input, output);
4519 }
4520 
4521 void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
4522                                       Register64 output) {
4523   Register temp = output.scratchReg();
4524 
4525   saveLive(lir);
4526 
4527   masm.reserveStack(sizeof(uint64_t));
4528   masm.moveStackPtrTo(temp);
4529   pushArg(temp);
4530   pushArg(input);
4531 
4532   using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
4533   callVM<Fn, DoStringToInt64>(lir);
4534 
4535   masm.load64(Address(masm.getStackPointer(), 0), output);
4536   masm.freeStack(sizeof(uint64_t));
4537 
4538   restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
4539 }
4540 
4541 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
4542   Register input = ToRegister(lir->input());
4543   Register64 output = ToOutRegister64(lir);
4544 
4545   emitStringToInt64(lir, input, output);
4546 }
4547 
4548 void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
4549   ValueOperand input = ToValue(lir, LValueToInt64::Input);
4550   Register temp = ToRegister(lir->temp());
4551   Register64 output = ToOutRegister64(lir);
4552 
4553   int checks = 3;
4554 
4555   Label fail, done;
4556   // Jump to fail if this is the last check and we fail it,
4557   // otherwise to the next test.
4558   auto emitTestAndUnbox = [&](auto testAndUnbox) {
4559     MOZ_ASSERT(checks > 0);
4560 
4561     checks--;
4562     Label notType;
4563     Label* target = checks ? &notType : &fail;
4564 
4565     testAndUnbox(target);
4566 
4567     if (checks) {
4568       masm.jump(&done);
4569       masm.bind(&notType);
4570     }
4571   };
4572 
4573   Register tag = masm.extractTag(input, temp);
4574 
4575   // BigInt.
4576   emitTestAndUnbox([&](Label* target) {
4577     masm.branchTestBigInt(Assembler::NotEqual, tag, target);
4578     masm.unboxBigInt(input, temp);
4579     masm.loadBigInt64(temp, output);
4580   });
4581 
4582   // Boolean
4583   emitTestAndUnbox([&](Label* target) {
4584     masm.branchTestBoolean(Assembler::NotEqual, tag, target);
4585     masm.unboxBoolean(input, temp);
4586     masm.move32To64ZeroExtend(temp, output);
4587   });
4588 
4589   // String
4590   emitTestAndUnbox([&](Label* target) {
4591     masm.branchTestString(Assembler::NotEqual, tag, target);
4592     masm.unboxString(input, temp);
4593     emitStringToInt64(lir, temp, output);
4594   });
4595 
4596   MOZ_ASSERT(checks == 0);
4597 
4598   bailoutFrom(&fail, lir->snapshot());
4599   masm.bind(&done);
4600 }
4601 
4602 void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
4603   Register operand = ToRegister(lir->input());
4604   Register64 output = ToOutRegister64(lir);
4605 
4606   masm.loadBigInt64(operand, output);
4607 }
4608 
4609 OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
4610                                                     Scalar::Type type,
4611                                                     Register64 input,
4612                                                     Register output) {
4613 #if JS_BITS_PER_WORD == 32
4614   using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
4615   auto args = ArgList(input.low, input.high);
4616 #else
4617   using Fn = BigInt* (*)(JSContext*, uint64_t);
4618   auto args = ArgList(input);
4619 #endif
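  // Note: on 32-bit targets a Register64 is a (high, low) register pair, so
  // the 64-bit input is passed to the VM function as two uint32_t halves;
  // on 64-bit targets it is passed as a single uint64_t.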
4620 
4621   if (type == Scalar::BigInt64) {
4622     return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
4623                                                      StoreRegisterTo(output));
4624   }
4625   MOZ_ASSERT(type == Scalar::BigUint64);
4626   return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
4627                                                     StoreRegisterTo(output));
4628 }
4629 
4630 void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
4631                                      Register64 input, Register output,
4632                                      Register maybeTemp) {
4633   OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);
4634 
4635   if (maybeTemp != InvalidReg) {
4636     masm.newGCBigInt(output, maybeTemp, ool->entry(), bigIntsCanBeInNursery());
4637   } else {
4638     AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
4639     regs.take(input);
4640     regs.take(output);
4641 
4642     Register temp = regs.takeAny();
4643 
4644     masm.push(temp);
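    // Note: |temp| was not allocated to this instruction, so it is
    // preserved with an explicit push/pop around newGCBigInt on both the
    // success and failure paths.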
4645 
4646     Label fail, ok;
4647     masm.newGCBigInt(output, temp, &fail, bigIntsCanBeInNursery());
4648     masm.pop(temp);
4649     masm.jump(&ok);
4650     masm.bind(&fail);
4651     masm.pop(temp);
4652     masm.jump(ool->entry());
4653     masm.bind(&ok);
4654   }
4655   masm.initializeBigInt64(type, output, input);
4656   masm.bind(ool->rejoin());
4657 }
4658 
4659 void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
4660   Register64 input = ToRegister64(lir->input());
4661   Register temp = ToRegister(lir->temp());
4662   Register output = ToRegister(lir->output());
4663 
4664   emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp);
4665 }
4666 
4667 void CodeGenerator::visitGuardValue(LGuardValue* lir) {
4668   ValueOperand input = ToValue(lir, LGuardValue::Input);
4669   Value expected = lir->mir()->expected();
4670   Label bail;
4671   masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
4672   bailoutFrom(&bail, lir->snapshot());
4673 }
4674 
4675 void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
4676   ValueOperand input = ToValue(lir, LGuardNullOrUndefined::Input);
4677 
4678   ScratchTagScope tag(masm, input);
4679   masm.splitTagForTest(input, tag);
4680 
4681   Label done;
4682   masm.branchTestNull(Assembler::Equal, tag, &done);
4683 
4684   Label bail;
4685   masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
4686   bailoutFrom(&bail, lir->snapshot());
4687 
4688   masm.bind(&done);
4689 }
4690 
4691 void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
4692   Register function = ToRegister(lir->function());
4693 
4694   Label bail;
4695   if (uint16_t flags = lir->mir()->expectedFlags()) {
4696     masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
4697   }
4698   if (uint16_t flags = lir->mir()->unexpectedFlags()) {
4699     masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
4700   }
4701   bailoutFrom(&bail, lir->snapshot());
4702 }
4703 
4704 void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
4705     LGuardFunctionIsNonBuiltinCtor* lir) {
4706   Register function = ToRegister(lir->function());
4707   Register temp = ToRegister(lir->temp());
4708 
4709   Label bail;
4710   masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
4711   bailoutFrom(&bail, lir->snapshot());
4712 }
4713 
4714 void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
4715   Register function = ToRegister(lir->function());
4716   Register temp = ToRegister(lir->temp());
4717 
4718   Assembler::Condition cond =
4719       lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
4720 
4721   Label bail;
4722   masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
4723   bailoutFrom(&bail, lir->snapshot());
4724 }
4725 
4726 void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
4727   Register function = ToRegister(lir->function());
4728 
4729   Label bail;
4730   Address scriptAddr(function, JSFunction::offsetOfBaseScript());
4731   masm.branchPtr(Assembler::NotEqual, scriptAddr,
4732                  ImmGCPtr(lir->mir()->expected()), &bail);
4733   bailoutFrom(&bail, lir->snapshot());
4734 }
4735 
4736 // Out-of-line path to update the store buffer.
4737 class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
4738   LInstruction* lir_;
4739   const LAllocation* object_;
4740 
4741  public:
4742   OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
4743       : lir_(lir), object_(object) {}
4744 
4745   void accept(CodeGenerator* codegen) override {
4746     codegen->visitOutOfLineCallPostWriteBarrier(this);
4747   }
4748 
4749   LInstruction* lir() const { return lir_; }
4750   const LAllocation* object() const { return object_; }
4751 };
4752 
4753 static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
4754                                             const gc::TenuredCell* cell,
4755                                             AllocatableGeneralRegisterSet& regs,
4756                                             Label* exit, Label* callVM) {
4757   Register temp = regs.takeAny();
4758 
4759   gc::Arena* arena = cell->arena();
4760 
4761   Register cells = temp;
4762   masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);
4763 
4764   size_t index = gc::ArenaCellSet::getCellIndex(cell);
4765   size_t word;
4766   uint32_t mask;
4767   gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
4768   size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);
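  // Note: bufferedCells() is the arena's ArenaCellSet; |word| and |mask|
  // select this cell's bit in its bitmap, so the test below exits early if
  // the cell is already recorded in the store buffer.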
4769 
4770   masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
4771                     exit);
4772 
4773   // Check whether this is the sentinel set and if so call the VM to allocate
4774   // one for this arena.
4775   masm.branchPtr(Assembler::Equal,
4776                  Address(cells, gc::ArenaCellSet::offsetOfArena()),
4777                  ImmPtr(nullptr), callVM);
4778 
4779   // Add the cell to the set.
4780   masm.or32(Imm32(mask), Address(cells, offset));
4781   masm.jump(exit);
4782 
4783   regs.add(temp);
4784 }
4785 
4786 static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
4787                                  Register objreg, JSObject* maybeConstant,
4788                                  bool isGlobal,
4789                                  AllocatableGeneralRegisterSet& regs) {
4790   MOZ_ASSERT_IF(isGlobal, maybeConstant);
4791 
4792   Label callVM;
4793   Label exit;
4794 
4795   // We already have a fast path to check whether a global is in the store
4796   // buffer.
4797   if (!isGlobal && maybeConstant) {
4798     EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
4799                                     &exit, &callVM);
4800   }
4801 
4802   // Call into the VM to barrier the write.
4803   masm.bind(&callVM);
4804 
4805   Register runtimereg = regs.takeAny();
4806   masm.mov(ImmPtr(runtime), runtimereg);
4807 
4808   masm.setupUnalignedABICall(regs.takeAny());
4809   masm.passABIArg(runtimereg);
4810   masm.passABIArg(objreg);
4811   if (isGlobal) {
4812     using Fn = void (*)(JSRuntime * rt, GlobalObject * obj);
4813     masm.callWithABI<Fn, PostGlobalWriteBarrier>();
4814   } else {
4815     using Fn = void (*)(JSRuntime * rt, js::gc::Cell * obj);
4816     masm.callWithABI<Fn, PostWriteBarrier>();
4817   }
4818 
4819   masm.bind(&exit);
4820 }
4821 
4822 void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
4823   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
4824 
4825   Register objreg;
4826   JSObject* object = nullptr;
4827   bool isGlobal = false;
4828   if (obj->isConstant()) {
4829     object = &obj->toConstant()->toObject();
4830     isGlobal = isGlobalObject(object);
4831     objreg = regs.takeAny();
4832     masm.movePtr(ImmGCPtr(object), objreg);
4833   } else {
4834     objreg = ToRegister(obj);
4835     regs.takeUnchecked(objreg);
4836   }
4837 
4838   EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
4839 }
4840 
4841 void CodeGenerator::emitPostWriteBarrier(Register objreg) {
4842   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
4843   regs.takeUnchecked(objreg);
4844   EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
4845 }
4846 
4847 void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
4848     OutOfLineCallPostWriteBarrier* ool) {
4849   saveLiveVolatile(ool->lir());
4850   const LAllocation* obj = ool->object();
4851   emitPostWriteBarrier(obj);
4852   restoreLiveVolatile(ool->lir());
4853 
4854   masm.jump(ool->rejoin());
4855 }
4856 
4857 void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
4858                                                 OutOfLineCode* ool) {
4859   // Check whether an object is a global that we have already barriered before
4860   // calling into the VM.
4861   //
4862   // We only check for the script's global, not other globals within the same
4863   // compartment, because we bake in a pointer to realm->globalWriteBarriered
4864   // and doing that would be invalid for other realms because they could be
4865   // collected before the Ion code is discarded.
4866 
4867   if (!maybeGlobal->isConstant()) {
4868     return;
4869   }
4870 
4871   JSObject* obj = &maybeGlobal->toConstant()->toObject();
4872   if (gen->realm->maybeGlobal() != obj) {
4873     return;
4874   }
4875 
4876   const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
4877   masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
4878                 ool->rejoin());
4879 }
4880 
4881 template <class LPostBarrierType, MIRType nurseryType>
4882 void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
4883                                                 OutOfLineCode* ool) {
4884   static_assert(NeedsPostBarrier(nurseryType));
4885 
4886   addOutOfLineCode(ool, lir->mir());
4887 
4888   Register temp = ToTempRegisterOrInvalid(lir->temp());
4889 
4890   if (lir->object()->isConstant()) {
4891     // Constant nursery objects cannot appear here, see
4892     // LIRGenerator::visitPostWriteElementBarrier.
4893     MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
4894   } else {
4895     masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
4896                                  temp, ool->rejoin());
4897   }
4898 
4899   maybeEmitGlobalBarrierCheck(lir->object(), ool);
4900 
4901   Register value = ToRegister(lir->value());
4902   if constexpr (nurseryType == MIRType::Object) {
4903     MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
4904   } else if constexpr (nurseryType == MIRType::String) {
4905     MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
4906   } else {
4907     static_assert(nurseryType == MIRType::BigInt);
4908     MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
4909   }
4910   masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());
4911 
4912   masm.bind(ool->rejoin());
4913 }
4914 
4915 template <class LPostBarrierType>
4916 void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
4917                                                  OutOfLineCode* ool) {
4918   addOutOfLineCode(ool, lir->mir());
4919 
4920   Register temp = ToTempRegisterOrInvalid(lir->temp());
4921 
4922   if (lir->object()->isConstant()) {
4923     // Constant nursery objects cannot appear here, see
4924     // LIRGenerator::visitPostWriteElementBarrier.
4925     MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
4926   } else {
4927     masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
4928                                  temp, ool->rejoin());
4929   }
4930 
4931   maybeEmitGlobalBarrierCheck(lir->object(), ool);
4932 
4933   ValueOperand value = ToValue(lir, LPostBarrierType::Input);
4934   masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());
4935 
4936   masm.bind(ool->rejoin());
4937 }
4938 
4939 void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
4940   auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
4941   visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
4942 }
4943 
4944 void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
4945   auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
4946   visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
4947 }
4948 
4949 void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
4950   auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
4951   visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
4952 }
4953 
4954 void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
4955   auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
4956   visitPostWriteBarrierCommonV(lir, ool);
4957 }
4958 
4959 // Out-of-line path to update the store buffer.
4960 class OutOfLineCallPostWriteElementBarrier
4961     : public OutOfLineCodeBase<CodeGenerator> {
4962   LInstruction* lir_;
4963   const LAllocation* object_;
4964   const LAllocation* index_;
4965 
4966  public:
4967   OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
4968                                        const LAllocation* object,
4969                                        const LAllocation* index)
4970       : lir_(lir), object_(object), index_(index) {}
4971 
4972   void accept(CodeGenerator* codegen) override {
4973     codegen->visitOutOfLineCallPostWriteElementBarrier(this);
4974   }
4975 
4976   LInstruction* lir() const { return lir_; }
4977 
4978   const LAllocation* object() const { return object_; }
4979 
4980   const LAllocation* index() const { return index_; }
4981 };
4982 
4983 void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
4984     OutOfLineCallPostWriteElementBarrier* ool) {
4985   saveLiveVolatile(ool->lir());
4986 
4987   const LAllocation* obj = ool->object();
4988   const LAllocation* index = ool->index();
4989 
4990   Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
4991   Register indexreg = ToRegister(index);
4992 
4993   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
4994   regs.takeUnchecked(indexreg);
4995 
4996   if (obj->isConstant()) {
4997     objreg = regs.takeAny();
4998     masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
4999   } else {
5000     regs.takeUnchecked(objreg);
5001   }
5002 
5003   Register runtimereg = regs.takeAny();
5004   using Fn = void (*)(JSRuntime * rt, JSObject * obj, int32_t index);
5005   masm.setupUnalignedABICall(runtimereg);
5006   masm.mov(ImmPtr(gen->runtime), runtimereg);
5007   masm.passABIArg(runtimereg);
5008   masm.passABIArg(objreg);
5009   masm.passABIArg(indexreg);
5010   masm.callWithABI<Fn, PostWriteElementBarrier<IndexInBounds::Maybe>>();
5011 
5012   restoreLiveVolatile(ool->lir());
5013 
5014   masm.jump(ool->rejoin());
5015 }
5016 
5017 void CodeGenerator::visitPostWriteElementBarrierO(
5018     LPostWriteElementBarrierO* lir) {
5019   auto ool = new (alloc())
5020       OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5021   visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
5022                                                                           ool);
5023 }
5024 
5025 void CodeGenerator::visitPostWriteElementBarrierS(
5026     LPostWriteElementBarrierS* lir) {
5027   auto ool = new (alloc())
5028       OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5029   visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
5030                                                                           ool);
5031 }
5032 
5033 void CodeGenerator::visitPostWriteElementBarrierBI(
5034     LPostWriteElementBarrierBI* lir) {
5035   auto ool = new (alloc())
5036       OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5037   visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
5038                                                                            ool);
5039 }
5040 
5041 void CodeGenerator::visitPostWriteElementBarrierV(
5042     LPostWriteElementBarrierV* lir) {
5043   auto ool = new (alloc())
5044       OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
5045   visitPostWriteBarrierCommonV(lir, ool);
5046 }
5047 
5048 void CodeGenerator::visitCallNative(LCallNative* call) {
5049   WrappedFunction* target = call->getSingleTarget();
5050   MOZ_ASSERT(target);
5051   MOZ_ASSERT(target->isNativeWithoutJitEntry());
5052 
5053   int callargslot = call->argslot();
5054   int unusedStack = StackOffsetOfPassedArg(callargslot);
5055 
5056   // Registers used for callWithABI() argument-passing.
5057   const Register argContextReg = ToRegister(call->getArgContextReg());
5058   const Register argUintNReg = ToRegister(call->getArgUintNReg());
5059   const Register argVpReg = ToRegister(call->getArgVpReg());
5060 
5061   // Misc. temporary registers.
5062   const Register tempReg = ToRegister(call->getTempReg());
5063 
5064   DebugOnly<uint32_t> initialStack = masm.framePushed();
5065 
5066   masm.checkStackAlignment();
5067 
5068   // Native functions have the signature:
5069   //  bool (*)(JSContext*, unsigned, Value* vp)
5070   // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
5071   // are the function arguments.
5072 
5073   // Allocate space for the outparam, moving the StackPointer to what will be
5074   // &vp[1].
5075   masm.adjustStack(unusedStack);
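  // Note: the arguments and |this| were already pushed by earlier LIR, so
  // the layout being completed here is vp[0] = callee (pushed next),
  // vp[1] = |this|, vp[2..] = arguments.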
5076 
5077   // Push a Value containing the callee object: natives are allowed to access
5078   // their callee before setting the return value. The StackPointer is moved
5079   // to &vp[0].
5080   masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5081 
5082   // Preload arguments into registers.
5083   masm.loadJSContext(argContextReg);
5084   masm.move32(Imm32(call->numActualArgs()), argUintNReg);
5085   masm.moveStackPtrTo(argVpReg);
5086 
5087   masm.Push(argUintNReg);
5088 
5089   if (call->mir()->maybeCrossRealm()) {
5090     masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
5091     masm.switchToObjectRealm(tempReg, tempReg);
5092   }
5093 
5094   // Construct native exit frame.
5095   uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
5096   masm.enterFakeExitFrameForNative(argContextReg, tempReg,
5097                                    call->mir()->isConstructing());
5098 
5099   markSafepointAt(safepointOffset, call);
5100 
5101   if (JS::TraceLoggerSupported()) {
5102     emitTracelogStartEvent(TraceLogger_Call);
5103   }
5104 
5105   // Construct and execute call.
5106   masm.setupUnalignedABICall(tempReg);
5107   masm.passABIArg(argContextReg);
5108   masm.passABIArg(argUintNReg);
5109   masm.passABIArg(argVpReg);
5110   JSNative native = target->native();
5111   if (call->ignoresReturnValue() && target->hasJitInfo()) {
5112     const JSJitInfo* jitInfo = target->jitInfo();
5113     if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
5114       native = jitInfo->ignoresReturnValueMethod;
5115     }
5116   }
5117   masm.callWithABI(DynamicFunction<JSNative>(native), MoveOp::GENERAL,
5118                    CheckUnsafeCallWithABI::DontCheckHasExitFrame);
5119 
5120   if (JS::TraceLoggerSupported()) {
5121     emitTracelogStopEvent(TraceLogger_Call);
5122   }
5123 
5124   // Test for failure.
5125   masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
5126 
5127   if (call->mir()->maybeCrossRealm()) {
5128     masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5129   }
5130 
5131   // Load the outparam vp[0] into output register(s).
5132   masm.loadValue(
5133       Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
5134       JSReturnOperand);
5135 
5136   // Until C++ code is instrumented against Spectre, prevent speculative
5137   // execution from returning any private data.
5138   if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
5139       call->mir()->hasLiveDefUses()) {
5140     masm.speculationBarrier();
5141   }
5142 
5143   // The next instruction is removing the footer of the exit frame, so there
5144   // is no need for leaveFakeExitFrame.
5145 
5146   // Move the StackPointer back to its original location, unwinding the native
5147   // exit frame.
5148   masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
5149   MOZ_ASSERT(masm.framePushed() == initialStack);
5150 }
5151 
5152 static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
5153                            DOMObjectKind kind) {
5154   // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
5155   // will be in the first slot but may be fixed or non-fixed.
5156   MOZ_ASSERT(obj != priv);
5157 
5158   // Check if it's a proxy.
5159   Label isProxy, done;
5160   if (kind == DOMObjectKind::Unknown) {
5161     masm.branchTestObjectIsProxy(true, obj, priv, &isProxy);
5162   }
5163 
5164   if (kind != DOMObjectKind::Proxy) {
5165     // If it's a native object, the value must be in a fixed slot.
5166     masm.debugAssertObjHasFixedSlots(obj, priv);
5167     masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
5168     if (kind == DOMObjectKind::Unknown) {
5169       masm.jump(&done);
5170     }
5171   }
5172 
5173   if (kind != DOMObjectKind::Native) {
5174     masm.bind(&isProxy);
5175 #ifdef DEBUG
5176     // Sanity check: it must be a DOM proxy.
5177     Label isDOMProxy;
5178     masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, priv,
5179                                       GetDOMProxyHandlerFamily(), &isDOMProxy);
5180     masm.assumeUnreachable("Expected a DOM proxy");
5181     masm.bind(&isDOMProxy);
5182 #endif
5183     masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
5184     masm.loadPrivate(
5185         Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
5186   }
5187 
5188   masm.bind(&done);
5189 }
5190 
5191 void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
5192   WrappedFunction* target = call->getSingleTarget();
5193   MOZ_ASSERT(target);
5194   MOZ_ASSERT(target->isNativeWithoutJitEntry());
5195   MOZ_ASSERT(target->hasJitInfo());
5196   MOZ_ASSERT(call->mir()->isCallDOMNative());
5197 
5198   int callargslot = call->argslot();
5199   int unusedStack = StackOffsetOfPassedArg(callargslot);
5200 
5201   // Registers used for callWithABI() argument-passing.
5202   const Register argJSContext = ToRegister(call->getArgJSContext());
5203   const Register argObj = ToRegister(call->getArgObj());
5204   const Register argPrivate = ToRegister(call->getArgPrivate());
5205   const Register argArgs = ToRegister(call->getArgArgs());
5206 
5207   DebugOnly<uint32_t> initialStack = masm.framePushed();
5208 
5209   masm.checkStackAlignment();
5210 
5211   // DOM methods have the signature:
5212   //  bool (*)(JSContext*, HandleObject, void* private, const
5213   //  JSJitMethodCallArgs& args)
5214   // Where args is initialized from an argc and a vp, vp[0] is space for an
5215   // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
5216   // function arguments.  Note that args stores the argv, not the vp, and
5217   // argv == vp + 2.
5218 
5219   // Nestle the stack up against the pushed arguments, leaving StackPointer at
5220   // &vp[1]
5221   masm.adjustStack(unusedStack);
5222   // argObj is filled with the extracted object, then returned.
5223   Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
5224   MOZ_ASSERT(obj == argObj);
5225 
5226   // Push a Value containing the callee object: natives are allowed to access
5227   // their callee before setting the return value. After this the StackPointer
5228   // points to &vp[0].
5229   masm.Push(ObjectValue(*target->rawNativeJSFunction()));
5230 
5231   // Now compute the argv value.  Since StackPointer is pointing to &vp[0] and
5232   // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
5233   // StackPointer.
5234   static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
5235   static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
5236                 IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
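  // Note: these static_asserts are presumably what allow the argc/argv pair
  // pushed further below to be viewed in place both as a JSJitMethodCallArgs
  // and as part of the IonDOMMethodExitFrameLayout.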
5237   masm.computeEffectiveAddress(
5238       Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);
5239 
5240   LoadDOMPrivate(masm, obj, argPrivate,
5241                  static_cast<MCallDOMNative*>(call->mir())->objectKind());
5242 
5243   // Push argc from the call instruction into what will become the IonExitFrame
5244   masm.Push(Imm32(call->numActualArgs()));
5245 
5246   // Push our argv onto the stack
5247   masm.Push(argArgs);
5248   // And store our JSJitMethodCallArgs* in argArgs.
5249   masm.moveStackPtrTo(argArgs);
5250 
5251   // Push |this| object for passing HandleObject. We push after argc to
5252   // maintain the same sp-relative location of the object pointer with other
5253   // DOMExitFrames.
5254   masm.Push(argObj);
5255   masm.moveStackPtrTo(argObj);
5256 
5257   if (call->mir()->maybeCrossRealm()) {
5258     // We use argJSContext as scratch register here.
5259     masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
5260     masm.switchToObjectRealm(argJSContext, argJSContext);
5261   }
5262 
5263   // Construct native exit frame.
5264   uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
5265   masm.loadJSContext(argJSContext);
5266   masm.enterFakeExitFrame(argJSContext, argJSContext,
5267                           ExitFrameType::IonDOMMethod);
5268 
5269   markSafepointAt(safepointOffset, call);
5270 
5271   // Construct and execute call.
5272   masm.setupUnalignedABICall(argJSContext);
5273   masm.loadJSContext(argJSContext);
5274   masm.passABIArg(argJSContext);
5275   masm.passABIArg(argObj);
5276   masm.passABIArg(argPrivate);
5277   masm.passABIArg(argArgs);
5278   masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
5279                    MoveOp::GENERAL,
5280                    CheckUnsafeCallWithABI::DontCheckHasExitFrame);
5281 
5282   if (target->jitInfo()->isInfallible) {
5283     masm.loadValue(Address(masm.getStackPointer(),
5284                            IonDOMMethodExitFrameLayout::offsetOfResult()),
5285                    JSReturnOperand);
5286   } else {
5287     // Test for failure.
5288     masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
5289 
5290     // Load the outparam vp[0] into output register(s).
5291     masm.loadValue(Address(masm.getStackPointer(),
5292                            IonDOMMethodExitFrameLayout::offsetOfResult()),
5293                    JSReturnOperand);
5294   }
5295 
5296   // Switch back to the current realm if needed. Note: if the DOM method threw
5297   // an exception, the exception handler will do this.
5298   if (call->mir()->maybeCrossRealm()) {
5299     static_assert(!JSReturnOperand.aliases(ReturnReg),
5300                   "Clobbering ReturnReg should not affect the return value");
5301     masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5302   }
5303 
5304   // Until C++ code is instrumented against Spectre, prevent speculative
5305   // execution from returning any private data.
5306   if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
5307     masm.speculationBarrier();
5308   }
5309 
5310   // The next instruction is removing the footer of the exit frame, so there
5311   // is no need for leaveFakeExitFrame.
5312 
5313   // Move the StackPointer back to its original location, unwinding the native
5314   // exit frame.
5315   masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
5316   MOZ_ASSERT(masm.framePushed() == initialStack);
5317 }
5318 
5319 void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
5320   pushArg(ImmGCPtr(lir->mir()->name()));
5321 
5322   using Fn = bool (*)(JSContext * cx, HandlePropertyName, MutableHandleValue);
5323   callVM<Fn, GetIntrinsicValue>(lir);
5324 }
5325 
5326 void CodeGenerator::emitCallInvokeFunction(
5327     LInstruction* call, Register calleereg, bool constructing,
5328     bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
5329   // Nestle %esp up to the argument vector.
5330   // Each path must account for framePushed_ separately, for callVM to be valid.
5331   masm.freeStack(unusedStack);
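  // Note: freeStack() releases the unused part of the frame so the stack
  // pointer lands on the pushed argument vector; that address is passed as
  // argv, and the matching reserveStack() at the end restores framePushed.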
5332 
5333   pushArg(masm.getStackPointer());  // argv.
5334   pushArg(Imm32(argc));             // argc.
5335   pushArg(Imm32(ignoresReturnValue));
5336   pushArg(Imm32(constructing));  // constructing.
5337   pushArg(calleereg);            // JSFunction*.
5338 
5339   using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5340                       MutableHandleValue);
5341   callVM<Fn, jit::InvokeFunction>(call);
5342 
5343   // Un-nestle %esp from the argument vector. No prefix was pushed.
5344   masm.reserveStack(unusedStack);
5345 }
5346 
5347 void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
5348   Register calleereg = ToRegister(call->getFunction());
5349   Register objreg = ToRegister(call->getTempObject());
5350   Register nargsreg = ToRegister(call->getNargsReg());
5351   uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
5352   Label invoke, thunk, makeCall, end;
5353 
5354   // Known-target case is handled by LCallKnown.
5355   MOZ_ASSERT(!call->hasSingleTarget());
5356 
5357   masm.checkStackAlignment();
5358 
5359   // Guard that calleereg is actually a function object.
5360   if (call->mir()->needsClassCheck()) {
5361     masm.branchTestObjClass(Assembler::NotEqual, calleereg, &JSFunction::class_,
5362                             nargsreg, calleereg, &invoke);
5363   }
5364 
5365   // Guard that callee allows the [[Call]] or [[Construct]] operation required.
5366   if (call->mir()->isConstructing()) {
5367     masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
5368                                  Assembler::Zero, &invoke);
5369   } else {
5370     masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5371                             calleereg, objreg, &invoke);
5372   }
5373 
5374   // Use the slow path if CreateThis was unable to create the |this| object.
5375   if (call->mir()->needsThisCheck()) {
5376     MOZ_ASSERT(call->mir()->isConstructing());
5377     Address thisAddr(masm.getStackPointer(), unusedStack);
5378     masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
5379   } else {
5380 #ifdef DEBUG
5381     if (call->mir()->isConstructing()) {
5382       Address thisAddr(masm.getStackPointer(), unusedStack);
5383       Label ok;
5384       masm.branchTestNull(Assembler::NotEqual, thisAddr, &ok);
5385       masm.assumeUnreachable("Unexpected null this-value");
5386       masm.bind(&ok);
5387     }
5388 #endif
5389   }
5390 
5391   // Load jitCodeRaw for callee if it exists.
5392   masm.branchIfFunctionHasNoJitEntry(calleereg, call->mir()->isConstructing(),
5393                                      &invoke);
5394   masm.loadJitCodeRaw(calleereg, objreg);
5395 
5396   // Target may be a different realm even if same compartment.
5397   if (call->mir()->maybeCrossRealm()) {
5398     masm.switchToObjectRealm(calleereg, nargsreg);
5399   }
5400 
5401   // Nestle the StackPointer up to the argument vector.
5402   masm.freeStack(unusedStack);
5403 
5404   // Construct the IonFramePrefix.
5405   uint32_t descriptor = MakeFrameDescriptor(
5406       masm.framePushed(), FrameType::IonJS, JitFrameLayout::Size());
5407   masm.Push(Imm32(call->numActualArgs()));
5408   masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
5409   masm.Push(Imm32(descriptor));
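  // Note: the three pushes above (actual argc, callee token, frame
  // descriptor) appear to form the JitFrameLayout prefix that callJit() and
  // the callee expect above the pushed arguments.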
5410 
5411   // Check whether the provided arguments satisfy target argc.
5412   // We cannot have lowered to LCallGeneric with a known target. Assert that we
5413   // didn't add any undefineds in WarpBuilder. NB: MCall::numStackArgs includes
5414   // |this|.
5415   DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
5416   MOZ_ASSERT(call->numActualArgs() ==
5417              call->mir()->numStackArgs() - numNonArgsOnStack);
5418   masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()),
5419                         nargsreg);
5420   masm.branch32(Assembler::Above, nargsreg, Imm32(call->numActualArgs()),
5421                 &thunk);
5422   masm.jump(&makeCall);
5423 
5424   // Argument fixup needed. Load the ArgumentsRectifier.
5425   masm.bind(&thunk);
5426   {
5427     TrampolinePtr argumentsRectifier =
5428         gen->jitRuntime()->getArgumentsRectifier();
5429     masm.movePtr(argumentsRectifier, objreg);
5430   }
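  // Note: the rectifier trampoline presumably pads the missing formal
  // arguments with |undefined| before entering the callee's JIT code, which
  // is why underflowing calls are routed through it.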
5431 
5432   // Finally call the function in objreg.
5433   masm.bind(&makeCall);
5434   uint32_t callOffset = masm.callJit(objreg);
5435   markSafepointAt(callOffset, call);
5436 
5437   if (call->mir()->maybeCrossRealm()) {
5438     static_assert(!JSReturnOperand.aliases(ReturnReg),
5439                   "ReturnReg available as scratch after scripted calls");
5440     masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5441   }
5442 
5443   // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
5444   // The return address has already been removed from the Ion frame.
5445   int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
5446   masm.adjustStack(prefixGarbage - unusedStack);
5447   masm.jump(&end);
5448 
5449   // Handle uncompiled or native functions.
5450   masm.bind(&invoke);
5451   emitCallInvokeFunction(call, calleereg, call->isConstructing(),
5452                          call->ignoresReturnValue(), call->numActualArgs(),
5453                          unusedStack);
5454 
5455   masm.bind(&end);
5456 
5457   // If the return value of the constructing function is Primitive,
5458   // replace the return value with the Object from CreateThis.
5459   if (call->mir()->isConstructing()) {
5460     Label notPrimitive;
5461     masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5462                              &notPrimitive);
5463     masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5464                    JSReturnOperand);
5465     masm.bind(&notPrimitive);
5466   }
5467 }
5468 
5469 void CodeGenerator::visitCallKnown(LCallKnown* call) {
5470   Register calleereg = ToRegister(call->getFunction());
5471   Register objreg = ToRegister(call->getTempObject());
5472   uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
5473   WrappedFunction* target = call->getSingleTarget();
5474 
5475   // Native single targets (except wasm) are handled by LCallNative.
5476   MOZ_ASSERT(target->hasJitEntry());
5477 
5478   // Missing arguments must have been explicitly appended by WarpBuilder.
5479   DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
5480   MOZ_ASSERT(target->nargs() <=
5481              call->mir()->numStackArgs() - numNonArgsOnStack);
5482 
5483   MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());
5484 
5485   masm.checkStackAlignment();
5486 
5487   if (target->isClassConstructor() && !call->isConstructing()) {
5488     emitCallInvokeFunction(call, calleereg, call->isConstructing(),
5489                            call->ignoresReturnValue(), call->numActualArgs(),
5490                            unusedStack);
5491     return;
5492   }
5493 
5494   MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());
5495 
5496   MOZ_ASSERT(!call->mir()->needsThisCheck());
5497 
5498   if (call->mir()->maybeCrossRealm()) {
5499     masm.switchToObjectRealm(calleereg, objreg);
5500   }
5501 
5502   masm.loadJitCodeRaw(calleereg, objreg);
5503 
5504   // Nestle the StackPointer up to the argument vector.
5505   masm.freeStack(unusedStack);
5506 
5507   // Construct the IonFramePrefix.
5508   uint32_t descriptor = MakeFrameDescriptor(
5509       masm.framePushed(), FrameType::IonJS, JitFrameLayout::Size());
5510   masm.Push(Imm32(call->numActualArgs()));
5511   masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
5512   masm.Push(Imm32(descriptor));
5513 
5514   // Finally call the function in objreg.
5515   uint32_t callOffset = masm.callJit(objreg);
5516   markSafepointAt(callOffset, call);
5517 
5518   if (call->mir()->maybeCrossRealm()) {
5519     static_assert(!JSReturnOperand.aliases(ReturnReg),
5520                   "ReturnReg available as scratch after scripted calls");
5521     masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
5522   }
5523 
5524   // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
5525   // The return address has already been removed from the Ion frame.
5526   int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
5527   masm.adjustStack(prefixGarbage - unusedStack);
5528 
5529   // If the return value of the constructing function is Primitive,
5530   // replace the return value with the Object from CreateThis.
5531   if (call->mir()->isConstructing()) {
5532     Label notPrimitive;
5533     masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5534                              &notPrimitive);
5535     masm.loadValue(Address(masm.getStackPointer(), unusedStack),
5536                    JSReturnOperand);
5537     masm.bind(&notPrimitive);
5538   }
5539 }
5540 
5541 template <typename T>
5542 void CodeGenerator::emitCallInvokeFunction(T* apply, Register extraStackSize) {
5543   Register objreg = ToRegister(apply->getTempObject());
5544   MOZ_ASSERT(objreg != extraStackSize);
5545 
5546   // Push the space used by the arguments.
5547   masm.moveStackPtrTo(objreg);
5548   masm.Push(extraStackSize);
5549 
5550   pushArg(objreg);                                     // argv.
5551   pushArg(ToRegister(apply->getArgc()));               // argc.
5552   pushArg(Imm32(apply->mir()->ignoresReturnValue()));  // ignoresReturnValue.
5553   pushArg(Imm32(apply->mir()->isConstructing()));      // isConstructing.
5554   pushArg(ToRegister(apply->getFunction()));           // JSFunction*.
5555 
5556   // This specialization of callVM restores the extraStackSize after the call.
5557   using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
5558                       MutableHandleValue);
5559   callVM<Fn, jit::InvokeFunction>(apply, &extraStackSize);
5560 
5561   masm.Pop(extraStackSize);
5562 }
5563 
5564 // Do not bail out after the execution of this function, since the stack no
5565 // longer corresponds to what is expected by the snapshots.
5566 void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
5567                                               Register extraStackSpace) {
5568   // Initialize the loop counter and compute the stack usage (valid even if
       // argc == 0).
5569   masm.movePtr(argcreg, extraStackSpace);
5570 
5571   // Align the JitFrameLayout on the JitStackAlignment.
5572   if (JitStackValueAlignment > 1) {
5573     MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
5574                "Stack padding assumes that the frameSize is correct");
5575     MOZ_ASSERT(JitStackValueAlignment == 2);
5576     Label noPaddingNeeded;
5577     // If the number of arguments is odd, then we do not need any padding.
5578     masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
5579     masm.addPtr(Imm32(1), extraStackSpace);
5580     masm.bind(&noPaddingNeeded);
5581   }
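  // Illustrative example (editorial, assuming JitStackValueAlignment == 2):
  // for argc == 3, the three argument slots plus the |this| value pushed later
  // already total four Values, so no padding is added; for argc == 2, one
  // extra slot is reserved so that the copied arguments plus |this| still
  // occupy a multiple of two Values.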
5582 
5583   // Reserve space for copying the arguments.
5584   NativeObject::elementsSizeMustNotOverflow();
5585   masm.lshiftPtr(Imm32(ValueShift), extraStackSpace);
5586   masm.subFromStackPtr(extraStackSpace);
5587 
5588 #ifdef DEBUG
5589   // Put a magic value in the space reserved for padding. Note, this code
5590   // cannot be merged with the previous test, as not all architectures can
5591   // write below their stack pointers.
5592   if (JitStackValueAlignment > 1) {
5593     MOZ_ASSERT(JitStackValueAlignment == 2);
5594     Label noPaddingNeeded;
5595     // If the number of arguments is odd, then we do not need any padding.
5596     masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
5597     BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
5598     masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
5599     masm.bind(&noPaddingNeeded);
5600   }
5601 #endif
5602 }
5603 
5604 // Do not bail out after the execution of this function, since the stack no
5605 // longer corresponds to what is expected by the snapshots.
5606 void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
5607     Register argcreg, Register newTargetAndExtraStackSpace) {
5608   // Align the JitFrameLayout on the JitStackAlignment. Contrary to
5609   // |emitAllocateSpaceForApply()|, we're always pushing a magic value, because
5610   // we can't write to |newTargetAndExtraStackSpace| before |new.target| has
5611   // been pushed onto the stack.
5612   if (JitStackValueAlignment > 1) {
5613     MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
5614                "Stack padding assumes that the frameSize is correct");
5615     MOZ_ASSERT(JitStackValueAlignment == 2);
5616 
5617     Label noPaddingNeeded;
5618     // If the number of arguments is even, then we do not need any padding.
5619     masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
5620     masm.pushValue(MagicValue(JS_ARG_POISON));
5621     masm.bind(&noPaddingNeeded);
5622   }
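  // Illustrative example (editorial, assuming JitStackValueAlignment == 2):
  // for argc == 1, the poison padding slot, |new.target|, the single argument,
  // and the later-pushed |this| add up to four Values; for argc == 2, no
  // padding is needed because the four non-padding Values are already aligned.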
5623 
5624   // Push |new.target| after the padding value, but before any arguments.
5625   masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndExtraStackSpace);
5626 
5627   // Initialize the loop counter AND compute the stack usage.
5628   masm.movePtr(argcreg, newTargetAndExtraStackSpace);
5629 
5630   // Reserve space for copying the arguments.
5631   NativeObject::elementsSizeMustNotOverflow();
5632   masm.lshiftPtr(Imm32(ValueShift), newTargetAndExtraStackSpace);
5633   masm.subFromStackPtr(newTargetAndExtraStackSpace);
5634 
5635   // Account for |new.target| which has already been pushed onto the stack.
5636   masm.addPtr(Imm32(sizeof(Value)), newTargetAndExtraStackSpace);
5637 
5638   // And account for the padding.
5639   if (JitStackValueAlignment > 1) {
5640     MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
5641                "Stack padding assumes that the frameSize is correct");
5642     MOZ_ASSERT(JitStackValueAlignment == 2);
5643 
5644     Label noPaddingNeeded;
5645     // If the number of arguments is even, then we do not need any padding.
5646     masm.branchTestPtr(Assembler::Zero, argcreg, Imm32(1), &noPaddingNeeded);
5647     masm.addPtr(Imm32(sizeof(Value)), newTargetAndExtraStackSpace);
5648     masm.bind(&noPaddingNeeded);
5649   }
5650 }
5651 
5652 // Destroys argvIndex and copyreg.
5653 void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
5654                                            Register argvIndex, Register copyreg,
5655                                            size_t argvSrcOffset,
5656                                            size_t argvDstOffset) {
5657   Label loop;
5658   masm.bind(&loop);
5659 
5660   // As argvIndex is off by 1, and we use the decBranchPtr instruction
5661   // to loop back, we have to subtract the size of the words that are
5662   // copied.
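  // For example (editorial note), on a 64-bit target where sizeof(Value) ==
  // sizeof(void*), the first iteration (argvIndex == argc) copies the Value at
  // [argvSrcBase + (argc - 1) * sizeof(Value) + argvSrcOffset], and the last
  // iteration (argvIndex == 1) copies the Value at [argvSrcBase + argvSrcOffset].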
5663   BaseValueIndex srcPtr(argvSrcBase, argvIndex, argvSrcOffset - sizeof(void*));
5664   BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
5665                         argvDstOffset - sizeof(void*));
5666   masm.loadPtr(srcPtr, copyreg);
5667   masm.storePtr(copyreg, dstPtr);
5668 
5669   // Handle 32 bits architectures.
5670   if (sizeof(Value) == 2 * sizeof(void*)) {
5671     BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
5672                              argvSrcOffset - 2 * sizeof(void*));
5673     BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
5674                              argvDstOffset - 2 * sizeof(void*));
5675     masm.loadPtr(srcPtrLow, copyreg);
5676     masm.storePtr(copyreg, dstPtrLow);
5677   }
5678 
5679   masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
5680 }
5681 
5682 void CodeGenerator::emitPopArguments(Register extraStackSpace) {
5683   // Pop |this| and Arguments.
5684   masm.freeStack(extraStackSpace);
5685 }
5686 
5687 void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
5688                                       Register extraStackSpace) {
5689   // Holds the argument count. Initially the number of args to the caller.
5690   Register argcreg = ToRegister(apply->getArgc());
5691   Register copyreg = ToRegister(apply->getTempObject());
5692 
5693   Label end;
5694   emitAllocateSpaceForApply(argcreg, extraStackSpace);
5695 
5696   // Skip the copy of arguments if there are none.
5697   masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
5698 
5699   // clang-format off
5700   //
5701   // We are making a copy of the arguments which are above the JitFrameLayout
5702   // of the current Ion frame.
5703   //
5704   // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
5705   //
5706   // clang-format on
5707 
5708   // Compute the source and destination offsets into the stack.
5709   size_t argvSrcOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
5710   size_t argvDstOffset = 0;
5711 
5712   // Save the extra stack space, and re-use the register as a base.
5713   masm.push(extraStackSpace);
5714   Register argvSrcBase = extraStackSpace;
5715   argvSrcOffset += sizeof(void*);
5716   argvDstOffset += sizeof(void*);
5717 
5718   // Save the actual number of arguments, and re-use the register as an index
5719   // register.
5720   masm.push(argcreg);
5721   Register argvIndex = argcreg;
5722   argvSrcOffset += sizeof(void*);
5723   argvDstOffset += sizeof(void*);
5724 
5725   // srcPtr = (StackPointer + extraStackSpace) + argvSrcOffset
5726   // dstPtr = (StackPointer                  ) + argvDstOffset
5727   masm.addStackPtrTo(argvSrcBase);
5728 
5729   // Copy arguments.
5730   emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
5731                          argvDstOffset);
5732 
5733   // Restore argcreg and the extra stack space counter.
5734   masm.pop(argcreg);
5735   masm.pop(extraStackSpace);
5736 
5737   // Join with all arguments copied and the extra stack usage computed.
5738   masm.bind(&end);
5739 
5740   // Push |this|.
5741   masm.addPtr(Imm32(sizeof(Value)), extraStackSpace);
5742   masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
5743 }
5744 
5745 void CodeGenerator::emitPushArguments(LApplyArgsObj* apply,
5746                                       Register extraStackSpace) {
5747   // argc and argsObj are mapped to the same calltemp register.
5748   MOZ_ASSERT(apply->getArgsObj() == apply->getArgc());
5749 
5750   Register tmpArgc = ToRegister(apply->getTempObject());
5751   Register argsObj = ToRegister(apply->getArgsObj());
5752 
5753   // Load argc into tmpArgc.
5754   Address lengthAddr(argsObj, ArgumentsObject::getInitialLengthSlotOffset());
5755   masm.unboxInt32(lengthAddr, tmpArgc);
5756   masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), tmpArgc);
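  // (The initial-length slot packs flag bits into its low
  // ArgumentsObject::PACKED_BITS_COUNT bits; the shift above strips them,
  // leaving the plain argument count in tmpArgc.)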
5757 
5758   // Allocate space on the stack for arguments. This modifies extraStackSpace.
5759   emitAllocateSpaceForApply(tmpArgc, extraStackSpace);
5760 
5761   // Load arguments data
5762   masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
5763                    argsObj);
5764   size_t argsSrcOffset = ArgumentsData::offsetOfArgs();
5765 
5766   // This is the end of the lifetime of argsObj.
5767   // After this call, the argsObj register holds the argument count instead.
5768   emitPushArrayAsArguments(tmpArgc, argsObj, extraStackSpace, argsSrcOffset);
5769 
5770   masm.addPtr(Imm32(sizeof(Value)), extraStackSpace);
5771   masm.pushValue(ToValue(apply, LApplyArgsObj::ThisIndex));
5772 }
5773 
5774 void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
5775                                              Register srcBaseAndArgc,
5776                                              Register scratch,
5777                                              size_t argvSrcOffset) {
5778   // Preconditions:
5779   // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
5780   //    the stack to hold arguments.
5781   // 2. |srcBaseAndArgc| + |argvSrcOffset| points to an array of |tmpArgc| values.
5782   //
5783   // Postconditions:
5784   // 1. The arguments at |srcBaseAndArgc| + |argvSrcOffset| have been copied into
5785   //    the allocated space.
5786   // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
5787   //
5788   // |scratch| is used as a temp register within this function. It is
5789   // restored before returning.
5790 
5791   Label noCopy, epilogue;
5792 
5793   // Skip the copy of arguments if there are none.
5794   masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
5795 
5796   // Copy the values.  This code is skipped entirely if there are
5797   // no values.
5798   size_t argvDstOffset = 0;
5799 
5800   Register argvSrcBase = srcBaseAndArgc;
5801 
5802   masm.push(scratch);
5803   Register copyreg = scratch;
5804   argvDstOffset += sizeof(void*);
5805 
5806   masm.push(tmpArgc);
5807   Register argvIndex = tmpArgc;
5808   argvDstOffset += sizeof(void*);
5809 
5810   // Copy
5811   emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
5812                          argvDstOffset);
5813 
5814   // Restore.
5815   masm.pop(srcBaseAndArgc);  // srcBaseAndArgc now contains argc.
5816   masm.pop(scratch);
5817   masm.jump(&epilogue);
5818 
5819   // Clear argc if we skipped the copy step.
5820   masm.bind(&noCopy);
5821   masm.movePtr(ImmWord(0), srcBaseAndArgc);
5822 
5823   // Join with all arguments copied and the extra stack usage computed.
5824   // Note, "srcBase" has become "argc".
5825   masm.bind(&epilogue);
5826 }
5827 
5828 void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply,
5829                                       Register extraStackSpace) {
5830   Register tmpArgc = ToRegister(apply->getTempObject());
5831   Register elementsAndArgc = ToRegister(apply->getElements());
5832 
5833   // Invariants guarded in the caller:
5834   //  - the array is not too long
5835   //  - the array length equals its initialized length
5836 
5837   // The array length is our argc for the purposes of allocating space.
5838   Address length(ToRegister(apply->getElements()),
5839                  ObjectElements::offsetOfLength());
5840   masm.load32(length, tmpArgc);
5841 
5842   // Allocate space for the values.
5843   emitAllocateSpaceForApply(tmpArgc, extraStackSpace);
5844 
5845   // After this call "elements" has become "argc".
5846   size_t elementsOffset = 0;
5847   emitPushArrayAsArguments(tmpArgc, elementsAndArgc, extraStackSpace,
5848                            elementsOffset);
5849 
5850   // Push |this|.
5851   masm.addPtr(Imm32(sizeof(Value)), extraStackSpace);
5852   masm.pushValue(ToValue(apply, LApplyArrayGeneric::ThisIndex));
5853 }
5854 
5855 void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct,
5856                                       Register extraStackSpace) {
5857   MOZ_ASSERT(extraStackSpace == ToRegister(construct->getNewTarget()));
5858 
5859   Register tmpArgc = ToRegister(construct->getTempObject());
5860   Register elementsAndArgc = ToRegister(construct->getElements());
5861 
5862   // Invariants guarded in the caller:
5863   //  - the array is not too long
5864   //  - the array length equals its initialized length
5865 
5866   // The array length is our argc for the purposes of allocating space.
5867   Address length(ToRegister(construct->getElements()),
5868                  ObjectElements::offsetOfLength());
5869   masm.load32(length, tmpArgc);
5870 
5871   // Allocate space for the values.
5872   emitAllocateSpaceForConstructAndPushNewTarget(tmpArgc, extraStackSpace);
5873 
5874   // After this call "elements" has become "argc" and "newTarget" has become
5875   // "extraStackSpace".
5876   size_t elementsOffset = 0;
5877   emitPushArrayAsArguments(tmpArgc, elementsAndArgc, extraStackSpace,
5878                            elementsOffset);
5879 
5880   // Push |this|.
5881   masm.addPtr(Imm32(sizeof(Value)), extraStackSpace);
5882   masm.pushValue(ToValue(construct, LConstructArrayGeneric::ThisIndex));
5883 }
5884 
5885 template <typename T>
5886 void CodeGenerator::emitApplyGeneric(T* apply) {
5887   // Holds the function object.
5888   Register calleereg = ToRegister(apply->getFunction());
5889 
5890   // Temporary register for modifying the function object.
5891   Register objreg = ToRegister(apply->getTempObject());
5892   Register extraStackSpace = ToRegister(apply->getTempStackCounter());
5893 
5894   // Holds the argument count, computed in the invoker or (for ApplyArray,
5895   // ConstructArray, or ApplyArgsObj) in the argument pusher.
5896   Register argcreg = ToRegister(apply->getArgc());
5897 
5898   // Copy the arguments of the current function.
5899   //
5900   // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also
5901   // compute argc. The argc register and the elements/argsObj register
5902   // are the same; argc must not be referenced before the call to
5903   // emitPushArguments() and elements/argsObj must not be referenced
5904   // after it returns.
5905   //
5906   // In the case of ConstructArray, also overwrite newTarget with
5907   // extraStackSpace; newTarget must not be referenced after this point.
5908   //
5909   // objreg is dead across this call.
5910   //
5911   // extraStackSpace is garbage on entry (for ApplyArray and ApplyArgs) and
5912   // defined on exit.
5913   emitPushArguments(apply, extraStackSpace);
5914 
5915   masm.checkStackAlignment();
5916 
5917   bool constructing = apply->mir()->isConstructing();
5918 
5919   // If the function is native, only emit the call to InvokeFunction.
5920   if (apply->hasSingleTarget() &&
5921       apply->getSingleTarget()->isNativeWithoutJitEntry()) {
5922     emitCallInvokeFunction(apply, extraStackSpace);
5923 
5924 #ifdef DEBUG
5925     // Native constructors are guaranteed to return an Object value, so we never
5926     // have to replace a primitive result with the previously allocated Object
5927     // from CreateThis.
5928     if (constructing) {
5929       Label notPrimitive;
5930       masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
5931                                &notPrimitive);
5932       masm.assumeUnreachable("native constructors don't return primitives");
5933       masm.bind(&notPrimitive);
5934     }
5935 #endif
5936 
5937     emitPopArguments(extraStackSpace);
5938     return;
5939   }
5940 
5941   Label end, invoke;
5942 
5943   // Unless already known, guard that calleereg is actually a function object.
5944   if (!apply->hasSingleTarget()) {
5945     masm.branchTestObjClass(Assembler::NotEqual, calleereg, &JSFunction::class_,
5946                             objreg, calleereg, &invoke);
5947   }
5948 
5949   // Guard that calleereg is an interpreted function with a JSScript.
5950   masm.branchIfFunctionHasNoJitEntry(calleereg, constructing, &invoke);
5951 
5952   // Guard that callee allows the [[Call]] or [[Construct]] operation required.
5953   if (constructing) {
5954     masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
5955                                  Assembler::Zero, &invoke);
5956   } else {
5957     masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
5958                             calleereg, objreg, &invoke);
5959   }
5960 
5961   // Use the slow path if CreateThis was unable to create the |this| object.
5962   if (constructing) {
5963     Address thisAddr(masm.getStackPointer(), 0);
5964     masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
5965   }
5966 
5967   // Call with an Ion frame or a rectifier frame.
5968   {
5969     if (apply->mir()->maybeCrossRealm()) {
5970       masm.switchToObjectRealm(calleereg, objreg);
5971     }
5972 
5973     // Knowing that calleereg is a non-native function, load jitcode.
5974     masm.loadJitCodeRaw(calleereg, objreg);
5975 
5976     // Create the frame descriptor.
5977     unsigned pushed = masm.framePushed();
5978     Register stackSpace = extraStackSpace;
5979     masm.addPtr(Imm32(pushed), stackSpace);
5980     masm.makeFrameDescriptor(stackSpace, FrameType::IonJS,
5981                              JitFrameLayout::Size());
5982 
5983     masm.Push(argcreg);
5984     masm.PushCalleeToken(calleereg, constructing);
5985     masm.Push(stackSpace);  // descriptor
5986 
5987     Label underflow, rejoin;
5988 
5989     // Check whether the provided arguments satisfy target argc.
5990     if (!apply->hasSingleTarget()) {
5991       Register nformals = extraStackSpace;
5992       masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()),
5993                             nformals);
5994       masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
5995     } else {
5996       masm.branch32(Assembler::Below, argcreg,
5997                     Imm32(apply->getSingleTarget()->nargs()), &underflow);
5998     }
5999 
6000     // Skip the construction of the rectifier frame because we have no
6001     // underflow.
6002     masm.jump(&rejoin);
6003 
6004     // Argument fixup needed. Get ready to call the argumentsRectifier.
6005     {
6006       masm.bind(&underflow);
6007 
6008       // Hardcode the address of the argumentsRectifier code.
6009       TrampolinePtr argumentsRectifier =
6010           gen->jitRuntime()->getArgumentsRectifier();
6011       masm.movePtr(argumentsRectifier, objreg);
6012     }
6013 
6014     masm.bind(&rejoin);
6015 
6016     // Finally call the function in objreg, as assigned by one of the paths
6017     // above.
6018     uint32_t callOffset = masm.callJit(objreg);
6019     markSafepointAt(callOffset, apply);
6020 
6021     if (apply->mir()->maybeCrossRealm()) {
6022       static_assert(!JSReturnOperand.aliases(ReturnReg),
6023                     "ReturnReg available as scratch after scripted calls");
6024       masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
6025     }
6026 
6027     // Recover the extra stack space used by the arguments from the descriptor.
6028     masm.loadPtr(Address(masm.getStackPointer(), 0), stackSpace);
6029     masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), stackSpace);
6030     masm.subPtr(Imm32(pushed), stackSpace);
6031 
6032     // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
6033     // The return address has already been removed from the Ion frame.
6034     int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
6035     masm.adjustStack(prefixGarbage);
6036     masm.jump(&end);
6037   }
6038 
6039   // Handle uncompiled or native functions.
6040   {
6041     masm.bind(&invoke);
6042     emitCallInvokeFunction(apply, extraStackSpace);
6043   }
6044 
6045   masm.bind(&end);
6046 
6047   // If the return value of the constructing function is Primitive,
6048   // replace the return value with the Object from CreateThis.
6049   if (constructing) {
6050     Label notPrimitive;
6051     masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
6052                              &notPrimitive);
6053     masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);
6054     masm.bind(&notPrimitive);
6055   }
6056 
6057   // Pop arguments and continue.
6058   emitPopArguments(extraStackSpace);
6059 }
6060 
6061 void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
6062   LSnapshot* snapshot = apply->snapshot();
6063   Register argcreg = ToRegister(apply->getArgc());
6064 
6065   // Ensure that we have a reasonable number of arguments.
6066   bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6067 
6068   emitApplyGeneric(apply);
6069 }
6070 
6071 void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
6072   Register argsObj = ToRegister(apply->getArgsObj());
6073   Register temp = ToRegister(apply->getTempObject());
6074 
6075   Label bail;
6076   masm.loadArgumentsObjectLength(argsObj, temp, &bail);
6077   masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
6078   bailoutFrom(&bail, apply->snapshot());
6079 
6080   emitApplyGeneric(apply);
6081 }
6082 
6083 void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
6084   LSnapshot* snapshot = apply->snapshot();
6085   Register tmp = ToRegister(apply->getTempObject());
6086 
6087   Address length(ToRegister(apply->getElements()),
6088                  ObjectElements::offsetOfLength());
6089   masm.load32(length, tmp);
6090 
6091   // Ensure that we have a reasonable number of arguments.
6092   bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6093 
6094   // Ensure that the array does not contain an uninitialized tail.
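  // (tmp is set below to length - initializedLength; any nonzero difference
  // means the dense elements have an uninitialized tail, so we bail out.)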
6095 
6096   Address initializedLength(ToRegister(apply->getElements()),
6097                             ObjectElements::offsetOfInitializedLength());
6098   masm.sub32(initializedLength, tmp);
6099   bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6100 
6101   emitApplyGeneric(apply);
6102 }
6103 
6104 void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
6105   LSnapshot* snapshot = lir->snapshot();
6106   Register tmp = ToRegister(lir->getTempObject());
6107 
6108   Address length(ToRegister(lir->getElements()),
6109                  ObjectElements::offsetOfLength());
6110   masm.load32(length, tmp);
6111 
6112   // Ensure that we have a reasonable number of arguments.
6113   bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
6114 
6115   // Ensure that the array does not contain an uninitialized tail.
6116 
6117   Address initializedLength(ToRegister(lir->getElements()),
6118                             ObjectElements::offsetOfInitializedLength());
6119   masm.sub32(initializedLength, tmp);
6120   bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
6121 
6122   emitApplyGeneric(lir);
6123 }
6124 
6125 void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
6126 
6127 void CodeGenerator::visitUnreachable(LUnreachable* lir) {
6128   masm.assumeUnreachable("end-of-block assumed unreachable");
6129 }
6130 
6131 void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
6132   encode(lir->snapshot());
6133 }
6134 
6135 void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
6136   masm.assumeUnreachable("must be unreachable");
6137 }
6138 
6139 void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
6140   masm.assumeUnreachable("must be unreachable");
6141 }
6142 
6143 // Out-of-line path to report over-recursed error and fail.
6144 class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> {
6145   LInstruction* lir_;
6146 
6147  public:
6148   explicit CheckOverRecursedFailure(LInstruction* lir) : lir_(lir) {}
6149 
6150   void accept(CodeGenerator* codegen) override {
6151     codegen->visitCheckOverRecursedFailure(this);
6152   }
6153 
6154   LInstruction* lir() const { return lir_; }
6155 };
6156 
6157 void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
6158   // If we don't push anything on the stack, skip the check.
6159   if (omitOverRecursedCheck()) {
6160     return;
6161   }
6162 
6163   // Ensure that this frame will not cross the stack limit.
6164   // This is a weak check, justified by Ion using the C stack: we must always
6165   // be some distance away from the actual limit, since if the limit is
6166   // crossed, an error must be thrown, which requires more frames.
6167   //
6168   // It must always be possible to trespass past the stack limit.
6169   // Ion may legally place frames very close to the limit. Calling additional
6170   // C functions may then violate the limit without any checking.
6171   //
6172   // Since Ion frames exist on the C stack, the stack limit may be
6173   // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
6174 
6175   CheckOverRecursedFailure* ool = new (alloc()) CheckOverRecursedFailure(lir);
6176   addOutOfLineCode(ool, lir->mir());
6177 
6178   // Conditional forward (unlikely) branch to failure.
6179   const void* limitAddr = gen->runtime->addressOfJitStackLimit();
6180   masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
6181                          ool->entry());
6182   masm.bind(ool->rejoin());
6183 }
6184 
6185 void CodeGenerator::visitCheckOverRecursedFailure(
6186     CheckOverRecursedFailure* ool) {
6187   // The OOL path is hit if the recursion depth has been exceeded.
6188   // Throw an InternalError for over-recursion.
6189 
6190   // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
6191   // to save all live registers to avoid crashes if CheckOverRecursed triggers
6192   // a GC.
6193   saveLive(ool->lir());
6194 
6195   using Fn = bool (*)(JSContext*);
6196   callVM<Fn, CheckOverRecursed>(ool->lir());
6197 
6198   restoreLive(ool->lir());
6199   masm.jump(ool->rejoin());
6200 }
6201 
6202 IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
6203   // If scripts are being profiled, create a new IonScriptCounts for the
6204   // profiling data, which will be attached to the associated JSScript or
6205   // wasm module after code generation finishes.
6206   if (!gen->hasProfilingScripts()) {
6207     return nullptr;
6208   }
6209 
6210   // This test inhibits IonScriptCounts creation for wasm code; script count
6211   // codegen is currently incompatible with wasm for two reasons: (1) wasm code
6212   // must be serializable and script count codegen bakes in absolute
6213   // addresses, (2) wasm code does not have a JSScript with which to associate
6214   // code coverage data.
6215   JSScript* script = gen->outerInfo().script();
6216   if (!script) {
6217     return nullptr;
6218   }
6219 
6220   auto counts = MakeUnique<IonScriptCounts>();
6221   if (!counts || !counts->init(graph.numBlocks())) {
6222     return nullptr;
6223   }
6224 
6225   for (size_t i = 0; i < graph.numBlocks(); i++) {
6226     MBasicBlock* block = graph.getBlock(i)->mir();
6227 
6228     uint32_t offset = 0;
6229     char* description = nullptr;
6230     if (MResumePoint* resume = block->entryResumePoint()) {
6231       // Find a PC offset in the outermost script to use. If this
6232       // block is from an inlined script, find a location in the
6233       // outer script to associate information about the inlining
6234       // with.
6235       while (resume->caller()) {
6236         resume = resume->caller();
6237       }
6238       offset = script->pcToOffset(resume->pc());
6239 
6240       if (block->entryResumePoint()->caller()) {
6241         // Get the filename and line number of the inner script.
6242         JSScript* innerScript = block->info().script();
6243         description = js_pod_calloc<char>(200);
6244         if (description) {
6245           snprintf(description, 200, "%s:%u", innerScript->filename(),
6246                    innerScript->lineno());
6247         }
6248       }
6249     }
6250 
6251     if (!counts->block(i).init(block->id(), offset, description,
6252                                block->numSuccessors())) {
6253       return nullptr;
6254     }
6255 
6256     for (size_t j = 0; j < block->numSuccessors(); j++) {
6257       counts->block(i).setSuccessor(
6258           j, skipTrivialBlocks(block->getSuccessor(j))->id());
6259     }
6260   }
6261 
6262   scriptCounts_ = counts.release();
6263   return scriptCounts_;
6264 }
6265 
6266 // Structure for managing the state tracked for a block by script counters.
6267 struct ScriptCountBlockState {
6268   IonBlockCounts& block;
6269   MacroAssembler& masm;
6270 
6271   Sprinter printer;
6272 
6273  public:
6274   ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
6275       : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}
6276 
6277   bool init() {
6278     if (!printer.init()) {
6279       return false;
6280     }
6281 
6282     // Bump the hit count for the block at the start. This code is not
6283     // included in either the text for the block or the instruction byte
6284     // counts.
6285     masm.inc64(AbsoluteAddress(block.addressOfHitCount()));
6286 
6287     // Collect human readable assembly for the code generated in the block.
6288     masm.setPrinter(&printer);
6289 
6290     return true;
6291   }
6292 
6293   void visitInstruction(LInstruction* ins) {
6294 #ifdef JS_JITSPEW
6295     // Prefix stream of assembly instructions with their LIR instruction
6296     // name and any associated high level info.
6297     if (const char* extra = ins->getExtraName()) {
6298       printer.printf("[%s:%s]\n", ins->opName(), extra);
6299     } else {
6300       printer.printf("[%s]\n", ins->opName());
6301     }
6302 #endif
6303   }
6304 
6305   ~ScriptCountBlockState() {
6306     masm.setPrinter(nullptr);
6307 
6308     if (!printer.hadOutOfMemory()) {
6309       block.setCode(printer.string());
6310     }
6311   }
6312 };
6313 
6314 void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
6315   CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
6316   masm.propagateOOM(ionScriptLabels_.append(label));
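  // (The -1 placeholder written above is patched with the IonScript* once the
  // script is linked; ionScriptLabels_ records the patch location.)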
6317 
6318   // If IonScript::invalidationCount_ != 0, the script has been invalidated.
6319   masm.branch32(Assembler::NotEqual,
6320                 Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
6321                 invalidated);
6322 }
6323 
6324 #ifdef DEBUG
6325 void CodeGenerator::emitAssertGCThingResult(Register input,
6326                                             const MDefinition* mir) {
6327   MIRType type = mir->type();
6328   MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
6329              type == MIRType::Symbol || type == MIRType::BigInt);
6330 
6331   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
6332   regs.take(input);
6333 
6334   Register temp = regs.takeAny();
6335   masm.push(temp);
6336 
6337   // Don't check if the script has been invalidated. In that case invalid
6338   // types are expected (until we reach the OsiPoint and bailout).
6339   Label done;
6340   branchIfInvalidated(temp, &done);
6341 
6342 #  ifndef JS_SIMULATOR
6343   // Check that we have a valid GC pointer.
6344   // Disable for wasm because we don't have a context on wasm compilation
6345   // threads and this needs a context.
6346   // Also disable for simulator builds because the C++ call is a lot slower
6347   // there than on actual hardware.
6348   if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
6349     saveVolatile();
6350     masm.setupUnalignedABICall(temp);
6351     masm.loadJSContext(temp);
6352     masm.passABIArg(temp);
6353     masm.passABIArg(input);
6354 
6355     switch (type) {
6356       case MIRType::Object: {
6357         using Fn = void (*)(JSContext * cx, JSObject * obj);
6358         masm.callWithABI<Fn, AssertValidObjectPtr>();
6359         break;
6360       }
6361       case MIRType::String: {
6362         using Fn = void (*)(JSContext * cx, JSString * str);
6363         masm.callWithABI<Fn, AssertValidStringPtr>();
6364         break;
6365       }
6366       case MIRType::Symbol: {
6367         using Fn = void (*)(JSContext * cx, JS::Symbol * sym);
6368         masm.callWithABI<Fn, AssertValidSymbolPtr>();
6369         break;
6370       }
6371       case MIRType::BigInt: {
6372         using Fn = void (*)(JSContext * cx, JS::BigInt * bi);
6373         masm.callWithABI<Fn, AssertValidBigIntPtr>();
6374         break;
6375       }
6376       default:
6377         MOZ_CRASH();
6378     }
6379 
6380     restoreVolatile();
6381   }
6382 #  endif
6383 
6384   masm.bind(&done);
6385   masm.pop(temp);
6386 }
6387 
6388 void CodeGenerator::emitAssertResultV(const ValueOperand input,
6389                                       const MDefinition* mir) {
6390   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
6391   regs.take(input);
6392 
6393   Register temp1 = regs.takeAny();
6394   Register temp2 = regs.takeAny();
6395   masm.push(temp1);
6396   masm.push(temp2);
6397 
6398   // Don't check if the script has been invalidated. In that case invalid
6399   // types are expected (until we reach the OsiPoint and bailout).
6400   Label done;
6401   branchIfInvalidated(temp1, &done);
6402 
6403   // Check that we have a valid GC pointer.
6404   if (JitOptions.fullDebugChecks) {
6405     saveVolatile();
6406 
6407     masm.pushValue(input);
6408     masm.moveStackPtrTo(temp1);
6409 
6410     using Fn = void (*)(JSContext * cx, Value * v);
6411     masm.setupUnalignedABICall(temp2);
6412     masm.loadJSContext(temp2);
6413     masm.passABIArg(temp2);
6414     masm.passABIArg(temp1);
6415     masm.callWithABI<Fn, AssertValidValue>();
6416     masm.popValue(input);
6417     restoreVolatile();
6418   }
6419 
6420   masm.bind(&done);
6421   masm.pop(temp2);
6422   masm.pop(temp1);
6423 }
6424 
6425 void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
6426                                             MDefinition* mir) {
6427   if (lir->numDefs() == 0) {
6428     return;
6429   }
6430 
6431   MOZ_ASSERT(lir->numDefs() == 1);
6432   if (lir->getDef(0)->isBogusTemp()) {
6433     return;
6434   }
6435 
6436   Register output = ToRegister(lir->getDef(0));
6437   emitAssertGCThingResult(output, mir);
6438 }
6439 
6440 void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
6441   if (lir->numDefs() == 0) {
6442     return;
6443   }
6444 
6445   MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
6446   if (!lir->getDef(0)->output()->isRegister()) {
6447     return;
6448   }
6449 
6450   ValueOperand output = ToOutValue(lir);
6451 
6452   emitAssertResultV(output, mir);
6453 }
6454 
6455 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
6456   // In debug builds, check that LIR instructions return valid values.
6457 
6458   MDefinition* mir = ins->mirRaw();
6459   if (!mir) {
6460     return;
6461   }
6462 
6463   switch (mir->type()) {
6464     case MIRType::Object:
6465     case MIRType::String:
6466     case MIRType::Symbol:
6467     case MIRType::BigInt:
6468       emitGCThingResultChecks(ins, mir);
6469       break;
6470     case MIRType::Value:
6471       emitValueResultChecks(ins, mir);
6472       break;
6473     default:
6474       break;
6475   }
6476 }
6477 
6478 void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
6479   if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
6480     return;
6481   }
6482   if (!lir->snapshot()) {
6483     return;
6484   }
6485   if (lir->isOsiPoint()) {
6486     return;
6487   }
6488 
6489   masm.comment("emitDebugForceBailing");
6490   const void* bailAfterCounterAddr =
6491       gen->runtime->addressOfIonBailAfterCounter();
6492 
6493   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
6494 
6495   Label done, notBail, bail;
6496   masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
6497                 Imm32(0), &done);
6498   {
6499     Register temp = regs.takeAny();
6500 
6501     masm.push(temp);
6502     masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
6503     masm.sub32(Imm32(1), temp);
6504     masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));
6505 
6506     masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
6507     {
6508       masm.pop(temp);
6509       masm.jump(&bail);
6510       bailoutFrom(&bail, lir->snapshot());
6511     }
6512     masm.bind(&notBail);
6513     masm.pop(temp);
6514   }
6515   masm.bind(&done);
6516 }
6517 #endif
6518 
6519 bool CodeGenerator::generateBody() {
6520   JitSpewCont(JitSpew_Codegen, "\n");
6521   JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
6522   IonScriptCounts* counts = maybeCreateScriptCounts();
6523 
6524   const bool compilingWasm = gen->compilingWasm();
6525 
6526 #if defined(JS_ION_PERF)
6527   PerfSpewer* perfSpewer = &perfSpewer_;
6528   if (compilingWasm) {
6529     perfSpewer = &gen->perfSpewer();
6530   }
6531 #endif
6532 
6533   for (size_t i = 0; i < graph.numBlocks(); i++) {
6534     current = graph.getBlock(i);
6535 
6536     // Don't emit any code for trivial blocks, containing just a goto. Such
6537     // blocks are created to split critical edges, and if we didn't end up
6538     // putting any instructions in them, we can skip them.
6539     if (current->isTrivial()) {
6540       continue;
6541     }
6542 
6543 #ifdef JS_JITSPEW
6544     const char* filename = nullptr;
6545     size_t lineNumber = 0;
6546     unsigned columnNumber = 0;
6547     if (current->mir()->info().script()) {
6548       filename = current->mir()->info().script()->filename();
6549       if (current->mir()->pc()) {
6550         lineNumber = PCToLineNumber(current->mir()->info().script(),
6551                                     current->mir()->pc(), &columnNumber);
6552       }
6553     } else {
6554 #  ifdef DEBUG
6555       lineNumber = current->mir()->lineno();
6556       columnNumber = current->mir()->columnIndex();
6557 #  endif
6558     }
6559     JitSpew(JitSpew_Codegen, "--------------------------------");
6560     JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i,
6561             filename ? filename : "?", lineNumber, columnNumber,
6562             current->mir()->isLoopHeader() ? " (loop header)" : "");
6563 #endif
6564 
6565     if (current->mir()->isLoopHeader() && compilingWasm) {
6566       masm.nopAlign(CodeAlignment);
6567     }
6568 
6569     masm.bind(current->label());
6570 
6571     mozilla::Maybe<ScriptCountBlockState> blockCounts;
6572     if (counts) {
6573       blockCounts.emplace(&counts->block(i), &masm);
6574       if (!blockCounts->init()) {
6575         return false;
6576       }
6577     }
6578 
6579 #if defined(JS_ION_PERF)
6580     if (!perfSpewer->startBasicBlock(current->mir(), masm)) {
6581       return false;
6582     }
6583 #endif
6584 
6585     for (LInstructionIterator iter = current->begin(); iter != current->end();
6586          iter++) {
6587       if (!alloc().ensureBallast()) {
6588         return false;
6589       }
6590 
6591 #ifdef JS_JITSPEW
6592       JitSpewStart(JitSpew_Codegen, "                                # LIR=%s",
6593                    iter->opName());
6594       if (const char* extra = iter->getExtraName()) {
6595         JitSpewCont(JitSpew_Codegen, ":%s", extra);
6596       }
6597       JitSpewFin(JitSpew_Codegen);
6598 #endif
6599 
6600       if (counts) {
6601         blockCounts->visitInstruction(*iter);
6602       }
6603 
6604 #ifdef CHECK_OSIPOINT_REGISTERS
6605       if (iter->safepoint() && !compilingWasm) {
6606         resetOsiPointRegs(iter->safepoint());
6607       }
6608 #endif
6609 
6610       if (!compilingWasm) {
6611         if (MDefinition* mir = iter->mirRaw()) {
6612           if (!addNativeToBytecodeEntry(mir->trackedSite())) {
6613             return false;
6614           }
6615         }
6616       }
6617 
6618       setElement(*iter);  // needed to encode correct snapshot location.
6619 
6620 #ifdef DEBUG
6621       emitDebugForceBailing(*iter);
6622 #endif
6623 
6624       switch (iter->op()) {
6625 #ifndef JS_CODEGEN_NONE
6626 #  define LIROP(op)              \
6627     case LNode::Opcode::op:      \
6628       visit##op(iter->to##op()); \
6629       break;
6630         LIR_OPCODE_LIST(LIROP)
6631 #  undef LIROP
6632 #endif
6633         case LNode::Opcode::Invalid:
6634         default:
6635           MOZ_CRASH("Invalid LIR op");
6636       }
6637 
6638 #ifdef DEBUG
6639       if (!counts) {
6640         emitDebugResultChecks(*iter);
6641       }
6642 #endif
6643     }
6644     if (masm.oom()) {
6645       return false;
6646     }
6647 
6648 #if defined(JS_ION_PERF)
6649     perfSpewer->endBasicBlock(masm);
6650 #endif
6651   }
6652 
6653   JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
6654   return true;
6655 }
6656 
6657 // Out-of-line object allocation for LNewArray.
6658 class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> {
6659   LNewArray* lir_;
6660 
6661  public:
6662   explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) {}
6663 
6664   void accept(CodeGenerator* codegen) override {
6665     codegen->visitOutOfLineNewArray(this);
6666   }
6667 
6668   LNewArray* lir() const { return lir_; }
6669 };
6670 
6671 void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
6672   Register objReg = ToRegister(lir->output());
6673 
6674   MOZ_ASSERT(!lir->isCall());
6675   saveLive(lir);
6676 
6677   JSObject* templateObject = lir->mir()->templateObject();
6678 
6679   if (templateObject) {
6680     pushArg(ImmGCPtr(templateObject->shape()));
6681     pushArg(Imm32(lir->mir()->length()));
6682 
6683     using Fn = ArrayObject* (*)(JSContext*, uint32_t, HandleShape);
6684     callVM<Fn, NewArrayWithShape>(lir);
6685   } else {
6686     pushArg(Imm32(GenericObject));
6687     pushArg(Imm32(lir->mir()->length()));
6688 
6689     using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
6690     callVM<Fn, NewArrayOperation>(lir);
6691   }
6692 
6693   if (ReturnReg != objReg) {
6694     masm.movePtr(ReturnReg, objReg);
6695   }
6696 
6697   restoreLive(lir);
6698 }
6699 
6700 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
6701   Register temp = ToRegister(lir->temp());
6702   FloatRegister y = ToFloatRegister(lir->y());
6703   FloatRegister x = ToFloatRegister(lir->x());
6704 
6705   using Fn = double (*)(double x, double y);
6706   masm.setupUnalignedABICall(temp);
6707   masm.passABIArg(y, MoveOp::DOUBLE);
6708   masm.passABIArg(x, MoveOp::DOUBLE);
6709   masm.callWithABI<Fn, ecmaAtan2>(MoveOp::DOUBLE);
6710 
6711   MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
6712 }
6713 
6714 void CodeGenerator::visitHypot(LHypot* lir) {
6715   Register temp = ToRegister(lir->temp());
6716   uint32_t numArgs = lir->numArgs();
6717   masm.setupUnalignedABICall(temp);
6718 
6719   for (uint32_t i = 0; i < numArgs; ++i) {
6720     masm.passABIArg(ToFloatRegister(lir->getOperand(i)), MoveOp::DOUBLE);
6721   }
6722 
6723   switch (numArgs) {
6724     case 2: {
6725       using Fn = double (*)(double x, double y);
6726       masm.callWithABI<Fn, ecmaHypot>(MoveOp::DOUBLE);
6727       break;
6728     }
6729     case 3: {
6730       using Fn = double (*)(double x, double y, double z);
6731       masm.callWithABI<Fn, hypot3>(MoveOp::DOUBLE);
6732       break;
6733     }
6734     case 4: {
6735       using Fn = double (*)(double x, double y, double z, double w);
6736       masm.callWithABI<Fn, hypot4>(MoveOp::DOUBLE);
6737       break;
6738     }
6739     default:
6740       MOZ_CRASH("Unexpected number of arguments to hypot function.");
6741   }
6742   MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
6743 }
6744 
6745 void CodeGenerator::visitNewArray(LNewArray* lir) {
6746   Register objReg = ToRegister(lir->output());
6747   Register tempReg = ToRegister(lir->temp());
6748   DebugOnly<uint32_t> length = lir->mir()->length();
6749 
6750   MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
6751 
6752   if (lir->mir()->isVMCall()) {
6753     visitNewArrayCallVM(lir);
6754     return;
6755   }
6756 
6757   OutOfLineNewArray* ool = new (alloc()) OutOfLineNewArray(lir);
6758   addOutOfLineCode(ool, lir->mir());
6759 
6760   TemplateObject templateObject(lir->mir()->templateObject());
6761 #ifdef DEBUG
6762   size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
6763                              ObjectElements::VALUES_PER_HEADER;
6764   MOZ_ASSERT(length <= numInlineElements,
6765              "Inline allocation only supports inline elements");
6766 #endif
6767   masm.createGCObject(objReg, tempReg, templateObject,
6768                       lir->mir()->initialHeap(), ool->entry());
6769 
6770   masm.bind(ool->rejoin());
6771 }
6772 
6773 void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) {
6774   visitNewArrayCallVM(ool->lir());
6775   masm.jump(ool->rejoin());
6776 }
6777 
6778 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
6779   Register lengthReg = ToRegister(lir->length());
6780   Register objReg = ToRegister(lir->output());
6781   Register tempReg = ToRegister(lir->temp());
6782 
6783   JSObject* templateObject = lir->mir()->templateObject();
6784   gc::InitialHeap initialHeap = lir->mir()->initialHeap();
6785 
6786   using Fn = ArrayObject* (*)(JSContext*, HandleArrayObject, int32_t length);
6787   OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
6788       lir, ArgList(ImmGCPtr(templateObject), lengthReg),
6789       StoreRegisterTo(objReg));
6790 
6791   bool canInline = true;
6792   size_t inlineLength = 0;
6793   if (templateObject->as<ArrayObject>().hasFixedElements()) {
6794     size_t numSlots =
6795         gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
6796     inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
6797   } else {
6798     canInline = false;
6799   }
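  // Illustrative example (editorial, assuming ObjectElements::VALUES_PER_HEADER
  // is 2): a template object whose alloc kind provides 8 slots yields
  // inlineLength == 6, so requested lengths up to 6 take the inline path below.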
6800 
6801   if (canInline) {
6802     // Try to do the allocation inline if the template object is big enough
6803     // for the length in lengthReg. If the length is bigger we could still
6804     // use the template object and not allocate the elements, but it's more
6805     // efficient to do a single big allocation than (repeatedly) reallocating
6806     // the array later on when filling it.
6807     masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
6808                   ool->entry());
6809 
6810     TemplateObject templateObj(templateObject);
6811     masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
6812                         ool->entry());
6813 
6814     size_t lengthOffset = NativeObject::offsetOfFixedElements() +
6815                           ObjectElements::offsetOfLength();
6816     masm.store32(lengthReg, Address(objReg, lengthOffset));
6817   } else {
6818     masm.jump(ool->entry());
6819   }
6820 
6821   masm.bind(ool->rejoin());
6822 }
6823 
6824 void CodeGenerator::visitNewIterator(LNewIterator* lir) {
6825   Register objReg = ToRegister(lir->output());
6826   Register tempReg = ToRegister(lir->temp());
6827 
6828   OutOfLineCode* ool;
6829   switch (lir->mir()->type()) {
6830     case MNewIterator::ArrayIterator: {
6831       using Fn = ArrayIteratorObject* (*)(JSContext*);
6832       ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
6833                                             StoreRegisterTo(objReg));
6834       break;
6835     }
6836     case MNewIterator::StringIterator: {
6837       using Fn = StringIteratorObject* (*)(JSContext*);
6838       ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
6839                                              StoreRegisterTo(objReg));
6840       break;
6841     }
6842     case MNewIterator::RegExpStringIterator: {
6843       using Fn = RegExpStringIteratorObject* (*)(JSContext*);
6844       ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
6845                                                    StoreRegisterTo(objReg));
6846       break;
6847     }
6848     default:
6849       MOZ_CRASH("unexpected iterator type");
6850   }
6851 
6852   TemplateObject templateObject(lir->mir()->templateObject());
6853   masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap,
6854                       ool->entry());
6855 
6856   masm.bind(ool->rejoin());
6857 }
6858 
6859 void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
6860   Register objReg = ToRegister(lir->output());
6861   Register tempReg = ToRegister(lir->temp1());
6862   Register lengthReg = ToRegister(lir->temp2());
6863   LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
6864 
6865   JSObject* templateObject = lir->mir()->templateObject();
6866   gc::InitialHeap initialHeap = lir->mir()->initialHeap();
6867 
6868   TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();
6869 
6870   size_t n = ttemplate->length();
6871   MOZ_ASSERT(n <= INT32_MAX,
6872              "Template objects are only created for int32 lengths");
6873 
6874   using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
6875   OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
6876       lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
6877       StoreRegisterTo(objReg));
6878 
6879   TemplateObject templateObj(templateObject);
6880   masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
6881 
6882   masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
6883                            ttemplate, MacroAssembler::TypedArrayLength::Fixed);
6884 
6885   masm.bind(ool->rejoin());
6886 }
6887 
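// Same as visitNewTypedArray, except the length is only known at runtime
// (lengthReg), so the VM fallback receives the register value instead of a
// constant.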
void CodeGenerator::visitNewTypedArrayDynamicLength(
    LNewTypedArrayDynamicLength* lir) {
  Register lengthReg = ToRegister(lir->length());
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();

  JSObject* templateObject = lir->mir()->templateObject();
  gc::InitialHeap initialHeap = lir->mir()->initialHeap();

  TypedArrayObject* ttemplate = &templateObject->as<TypedArrayObject>();

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
      lir, ArgList(ImmGCPtr(templateObject), lengthReg),
      StoreRegisterTo(objReg));

  TemplateObject templateObj(templateObject);
  masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());

  masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(),
                           ttemplate,
                           MacroAssembler::TypedArrayLength::Dynamic);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
  pushArg(ToRegister(lir->array()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
  callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
}

void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
    LNewTypedArrayFromArrayBuffer* lir) {
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::LengthIndex));
  pushArg(ToValue(lir, LNewTypedArrayFromArrayBuffer::ByteOffsetIndex));
  pushArg(ToRegister(lir->arrayBuffer()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
}

// Out-of-line object allocation for JSOp::NewObject.
class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> {
  LNewObject* lir_;

 public:
  explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNewObject(this);
  }

  LNewObject* lir() const { return lir_; }
};

void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());

  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  // If we're making a new object with a class prototype (that is, an object
  // that derives its class from its prototype instead of being
  // PlainObject::class_'d) from self-hosted code, we need a different init
  // function.
  switch (lir->mir()->mode()) {
    case MNewObject::ObjectLiteral:
      if (templateObject) {
        pushArg(ImmGCPtr(templateObject));

        using Fn = JSObject* (*)(JSContext*, HandleObject);
        callVM<Fn, NewObjectOperationWithTemplate>(lir);
      } else {
        pushArg(Imm32(GenericObject));
        pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
        pushArg(ImmGCPtr(lir->mir()->block()->info().script()));

        using Fn = JSObject* (*)(JSContext*, HandleScript, jsbytecode * pc,
                                 NewObjectKind);
        callVM<Fn, NewObjectOperation>(lir);
      }
      break;
    case MNewObject::ObjectCreate:
      pushArg(ImmGCPtr(templateObject));

      using Fn = PlainObject* (*)(JSContext*, HandlePlainObject);
      callVM<Fn, ObjectCreateWithTemplate>(lir);
      break;
  }

  if (ReturnReg != objReg) {
    masm.movePtr(ReturnReg, objReg);
  }

  restoreLive(lir);
}

static bool ShouldInitFixedSlots(LInstruction* lir, const TemplateObject& obj) {
  if (!obj.isNativeObject()) {
    return true;
  }
  const TemplateNativeObject& templateObj = obj.asTemplateNativeObject();

  // Look for StoreFixedSlot instructions following an object allocation
  // that write to this object before a GC is triggered or this object is
  // passed to a VM call. If all fixed slots will be initialized, the
  // allocation code doesn't need to set the slots to |undefined|.

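  // For example, for |var o = {x: 1, y: 2}| the allocation is typically
  // followed immediately by StoreFixedSlot instructions for both slots, so
  // every used fixed slot is written before anything can observe the object
  // and the |undefined| pre-fill can be skipped.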
  uint32_t nfixed = templateObj.numUsedFixedSlots();
  if (nfixed == 0) {
    return false;
  }

  // Only optimize if all fixed slots are initially |undefined|, so that we
  // can assume incremental pre-barriers are not necessary. See also the
  // comment below.
  for (uint32_t slot = 0; slot < nfixed; slot++) {
    if (!templateObj.getSlot(slot).isUndefined()) {
      return true;
    }
  }

  // Keep track of the fixed slots that are initialized. initializedSlots is
  // a bit mask with a bit for each slot.
  MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
  static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
                "Slot bits must fit in 32 bits");
  uint32_t initializedSlots = 0;
  uint32_t numInitialized = 0;

  MInstruction* allocMir = lir->mirRaw()->toInstruction();
  MBasicBlock* block = allocMir->block();

  // Skip the allocation instruction.
  MInstructionIterator iter = block->begin(allocMir);
  MOZ_ASSERT(*iter == allocMir);
  iter++;

  while (true) {
    for (; iter != block->end(); iter++) {
      if (iter->isNop() || iter->isConstant() || iter->isPostWriteBarrier()) {
        // These instructions won't trigger a GC or read object slots.
        continue;
      }

      if (iter->isStoreFixedSlot()) {
        MStoreFixedSlot* store = iter->toStoreFixedSlot();
        if (store->object() != allocMir) {
          return true;
        }

        // We may not initialize this object slot on allocation, so the
        // pre-barrier could read uninitialized memory. Simply disable
        // the barrier for this store: the object was just initialized
        // so the barrier is not necessary.
        store->setNeedsBarrier(false);

        uint32_t slot = store->slot();
        MOZ_ASSERT(slot < nfixed);
        if ((initializedSlots & (1 << slot)) == 0) {
          numInitialized++;
          initializedSlots |= (1 << slot);

          if (numInitialized == nfixed) {
            // All fixed slots will be initialized.
            MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
            return false;
          }
        }
        continue;
      }

      if (iter->isGoto()) {
        block = iter->toGoto()->target();
        if (block->numPredecessors() != 1) {
          return true;
        }
        break;
      }

      // Unhandled instruction, assume it bails or reads object slots.
      return true;
    }
    iter = block->begin();
  }

  MOZ_CRASH("Shouldn't get here");
}

void CodeGenerator::visitNewObject(LNewObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());

  if (lir->mir()->isVMCall()) {
    visitNewObjectVMCall(lir);
    return;
  }

  OutOfLineNewObject* ool = new (alloc()) OutOfLineNewObject(lir);
  addOutOfLineCode(ool, lir->mir());

  TemplateObject templateObject(lir->mir()->templateObject());

  bool initContents = ShouldInitFixedSlots(lir, templateObject);
  masm.createGCObject(objReg, tempReg, templateObject,
                      lir->mir()->initialHeap(), ool->entry(), initContents);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) {
  visitNewObjectVMCall(ool->lir());
  masm.jump(ool->rejoin());
}

void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register temp1Reg = ToRegister(lir->temp1());
  Register shapeReg = ToRegister(lir->temp2());

  auto* mir = lir->mir();
  const Shape* shape = mir->shape();
  gc::InitialHeap initialHeap = mir->initialHeap();
  gc::AllocKind allocKind = mir->allocKind();

  using Fn =
      JSObject* (*)(JSContext*, HandleShape, gc::AllocKind, gc::InitialHeap);
  OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
      lir,
      ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)), Imm32(initialHeap)),
      StoreRegisterTo(objReg));

  masm.movePtr(ImmGCPtr(shape), shapeReg);
  masm.createPlainGCObject(objReg, shapeReg, temp0Reg, temp1Reg,
                           mir->numFixedSlots(), mir->numDynamicSlots(),
                           allocKind, initialHeap, ool->entry(),
                           AllocSiteInput(gc::CatchAllAllocSite::Optimized));

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register shapeReg = ToRegister(lir->temp1());

  auto* mir = lir->mir();
  uint32_t arrayLength = mir->length();

  gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
  MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
  allocKind = ForegroundToBackgroundAllocKind(allocKind);

  uint32_t slotCount = GetGCKindSlots(allocKind);
  MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
  uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;

  const Shape* shape = mir->shape();

  NewObjectKind objectKind =
      mir->initialHeap() == gc::TenuredHeap ? TenuredObject : GenericObject;

  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
      lir,
      ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
      StoreRegisterTo(objReg));

  masm.movePtr(ImmPtr(shape), shapeReg);
  masm.createArrayWithFixedElements(
      objReg, shapeReg, temp0Reg, arrayLength, arrayCapacity, allocKind,
      mir->initialHeap(), ool->entry(),
      AllocSiteInput(gc::CatchAllAllocSite::Optimized));
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());
  const CompileInfo& info = lir->mir()->block()->info();

  // If we have a template object, we can inline call object creation.
  using Fn =
      js::NamedLambdaObject* (*)(JSContext*, HandleFunction, gc::InitialHeap);
  OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createTemplateObject>(
      lir, ArgList(ImmGCPtr(info.funMaybeLazy()), Imm32(gc::DefaultHeap)),
      StoreRegisterTo(objReg));

  TemplateObject templateObject(lir->mir()->templateObj());

  bool initContents = ShouldInitFixedSlots(lir, templateObject);
  masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap,
                      ool->entry(), initContents);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());

  CallObject* templateObj = lir->mir()->templateObject();

  using Fn = JSObject* (*)(JSContext*, HandleShape);
  OutOfLineCode* ool = oolCallVM<Fn, NewCallObject>(
      lir, ArgList(ImmGCPtr(templateObj->shape())), StoreRegisterTo(objReg));

  // Inline call object creation, using the OOL path only for tricky cases.
  TemplateObject templateObject(templateObj);
  bool initContents = ShouldInitFixedSlots(lir, templateObject);
  masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap,
                      ool->entry(), initContents);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp());

  StringObject* templateObj = lir->mir()->templateObj();

  using Fn = JSObject* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
                                                      StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::DefaultHeap,
                      ool->entry());

  masm.loadStringLength(input, temp);

  masm.storeValue(JSVAL_TYPE_STRING, input,
                  Address(output, StringObject::offsetOfPrimitiveValue()));
  masm.storeValue(JSVAL_TYPE_INT32, temp,
                  Address(output, StringObject::offsetOfLength()));

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
  Register obj = ToRegister(lir->object());
  Register value = ToRegister(lir->value());

  pushArg(value);
  pushArg(ToValue(lir, LInitElemGetterSetter::IdIndex));
  pushArg(obj);
  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
                      HandleObject);
  callVM<Fn, InitElemGetterSetterOperation>(lir);
}

void CodeGenerator::visitMutateProto(LMutateProto* lir) {
  Register objReg = ToRegister(lir->getObject());

  pushArg(ToValue(lir, LMutateProto::ValueIndex));
  pushArg(objReg);

  using Fn = bool (*)(JSContext * cx, HandlePlainObject obj, HandleValue value);
  callVM<Fn, MutatePrototype>(lir);
}

void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
  Register obj = ToRegister(lir->object());
  Register value = ToRegister(lir->value());

  pushArg(value);
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(obj);
  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandlePropertyName,
                      HandleObject);
  callVM<Fn, InitPropGetterSetterOperation>(lir);
}

void CodeGenerator::visitCreateThis(LCreateThis* lir) {
  const LAllocation* callee = lir->getCallee();
  const LAllocation* newTarget = lir->getNewTarget();

  if (newTarget->isConstant()) {
    pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
  } else {
    pushArg(ToRegister(newTarget));
  }

  if (callee->isConstant()) {
    pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
  } else {
    pushArg(ToRegister(callee));
  }

  using Fn = bool (*)(JSContext * cx, HandleObject callee,
                      HandleObject newTarget, MutableHandleValue rval);
  callVM<Fn, jit::CreateThisFromIon>(lir);
}

void CodeGenerator::visitCreateThisWithTemplate(LCreateThisWithTemplate* lir) {
  JSObject* templateObject = lir->mir()->templateObject();
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, CreateThisWithTemplate>(
      lir, ArgList(ImmGCPtr(templateObject)), StoreRegisterTo(objReg));

  // Allocate. If the FreeList is empty, call to VM, which may GC.
  TemplateObject templateObj(templateObject);
  bool initContents =
      !templateObj.isPlainObject() || ShouldInitFixedSlots(lir, templateObj);
  masm.createGCObject(objReg, tempReg, templateObj, lir->mir()->initialHeap(),
                      ool->entry(), initContents);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
  // This should be getting constructed in the first block only, and not any OSR
  // entry blocks.
  MOZ_ASSERT(lir->mir()->block()->id() == 0);

  Register callObj = ToRegister(lir->getCallObject());
  Register temp = ToRegister(lir->temp0());
  Label done;

  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    Register objTemp = ToRegister(lir->temp1());
    Register cxTemp = ToRegister(lir->temp2());

    masm.Push(callObj);

    // Try to allocate an arguments object. This will leave the reserved
    // slots uninitialized, so it's important we don't GC until we
    // initialize these slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(objTemp, temp, templateObject, gc::DefaultHeap,
                        &failure,
                        /* initContents = */ false);

    masm.moveStackPtrTo(temp);
    masm.addPtr(Imm32(masm.framePushed()), temp);

    using Fn =
        ArgumentsObject* (*)(JSContext * cx, jit::JitFrameLayout * frame,
                             JSObject * scopeChain, ArgumentsObject * obj);
    masm.setupUnalignedABICall(cxTemp);
    masm.loadJSContext(cxTemp);
    masm.passABIArg(cxTemp);
    masm.passABIArg(temp);
    masm.passABIArg(callObj);
    masm.passABIArg(objTemp);

    masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj on the stack.
    masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.Pop(callObj);
  }

  masm.moveStackPtrTo(temp);
  masm.addPtr(Imm32(frameSize()), temp);

  pushArg(callObj);
  pushArg(temp);

  using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
  callVM<Fn, ArgumentsObject::createForIon>(lir);

  masm.bind(&done);
}

void CodeGenerator::visitCreateInlinedArgumentsObject(
    LCreateInlinedArgumentsObject* lir) {
  Register callObj = ToRegister(lir->getCallObject());
  Register callee = ToRegister(lir->getCallee());
  Register argsAddress = ToRegister(lir->temp());

  // TODO: Do we have to worry about alignment here?

  // Create a contiguous array of values for ArgumentsObject::create
  // by pushing the arguments onto the stack in reverse order.
  uint32_t argc = lir->mir()->numActuals();
  for (uint32_t i = 0; i < argc; i++) {
    uint32_t argNum = argc - i - 1;
    uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
    ConstantOrRegister arg =
        toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
    masm.Push(arg);
  }
  masm.moveStackPtrTo(argsAddress);

  pushArg(Imm32(argc));
  pushArg(callObj);
  pushArg(callee);
  pushArg(argsAddress);

  using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
                                  HandleObject, uint32_t);
  callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);

  // Discard the array of values.
  masm.freeStack(argc * sizeof(Value));
}

void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
  Register index = ToRegister(lir->getIndex());
  ValueOperand output = ToOutValue(lir);

  uint32_t numActuals = lir->mir()->numActuals();
  MOZ_ASSERT(numActuals > 0 && numActuals <= ArgumentsObject::MaxInlinedArgs);

  // Check the first n-1 possible indices.
  Label done;
  for (uint32_t i = 0; i < numActuals - 1; i++) {
    Label skip;
    ConstantOrRegister arg = toConstantOrRegister(
        lir, LGetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
    masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
    masm.moveValue(arg, output);

    masm.jump(&done);
    masm.bind(&skip);
  }

#ifdef DEBUG
  Label skip;
  masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
  masm.assumeUnreachable("LGetInlinedArgument: invalid index");
  masm.bind(&skip);
#endif

  // The index has already been bounds-checked, so load the last argument.
  uint32_t lastIdx = numActuals - 1;
  ConstantOrRegister arg =
      toConstantOrRegister(lir, LGetInlinedArgument::ArgIndex(lastIdx),
                           lir->mir()->getArg(lastIdx)->type());
  masm.moveValue(arg, output);
  masm.bind(&done);
}

void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->getTemp(0));
  Register argsObj = ToRegister(lir->getArgsObject());
  ValueOperand out = ToOutValue(lir);

  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  masm.loadValue(argAddr, out);
#ifdef DEBUG
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, out, &success);
  masm.assumeUnreachable(
      "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
}

void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->getTemp(0));
  Register argsObj = ToRegister(lir->getArgsObject());
  ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);

  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  emitPreBarrier(argAddr);
#ifdef DEBUG
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
  masm.assumeUnreachable(
      "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
  masm.storeValue(value, argAddr);
}

void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp());
  Register argsObj = ToRegister(lir->getArgsObject());
  Register index = ToRegister(lir->index());
  ValueOperand out = ToOutValue(lir);

  Label bail;
  masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}

void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
  Register argsObj = ToRegister(lir->getArgsObject());
  Register out = ToRegister(lir->output());

  Label bail;
  masm.loadArgumentsObjectLength(argsObj, out, &bail);
  bailoutFrom(&bail, lir->snapshot());
}

void CodeGenerator::visitGuardArgumentsObjectFlags(
    LGuardArgumentsObjectFlags* lir) {
  Register argsObj = ToRegister(lir->getArgsObject());
  Register temp = ToRegister(lir->temp());

  Label bail;
  masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
                                      Assembler::NonZero, &bail);
  bailoutFrom(&bail, lir->snapshot());
}

void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
  ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
  Register obj = ToRegister(lir->getObject());
  Register output = ToRegister(lir->output());

  Label valueIsObject, end;

  masm.branchTestObject(Assembler::Equal, value, &valueIsObject);

  // Value is not an object. Return that other object.
  masm.movePtr(obj, output);
  masm.jump(&end);

  // Value is an object. Return unbox(Value).
  masm.bind(&valueIsObject);
  Register payload = masm.extractObject(value, output);
  if (payload != output) {
    masm.movePtr(payload, output);
  }

  masm.bind(&end);
}

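// Box a non-strict |this| value: objects are simply unboxed inline. In the
// out-of-line path, null and undefined are replaced with the global |this|
// value, and any other primitive is boxed via the BoxNonStrictThis VM call.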
class OutOfLineBoxNonStrictThis : public OutOfLineCodeBase<CodeGenerator> {
  LBoxNonStrictThis* ins_;

 public:
  explicit OutOfLineBoxNonStrictThis(LBoxNonStrictThis* ins) : ins_(ins) {}
  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineBoxNonStrictThis(this);
  }
  LBoxNonStrictThis* ins() const { return ins_; }
};

void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
  ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) OutOfLineBoxNonStrictThis(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitOutOfLineBoxNonStrictThis(
    OutOfLineBoxNonStrictThis* ool) {
  LBoxNonStrictThis* lir = ool->ins();

  ValueOperand value = ToValue(lir, LBoxNonStrictThis::ValueIndex);
  Register output = ToRegister(lir->output());

  Label notNullOrUndefined;
  {
    Label isNullOrUndefined;
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);
    masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
    masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
    masm.bind(&isNullOrUndefined);
    masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&notNullOrUndefined);

  saveLive(lir);

  pushArg(value);
  using Fn = JSObject* (*)(JSContext*, HandleValue);
  callVM<Fn, BoxNonStrictThis>(lir);

  StoreRegisterTo(output).generate(this);
  restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());

  masm.jump(ool->rejoin());
}

void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));
  pushArg(ToRegister(lir->env()));

  using Fn = bool (*)(JSContext*, HandleObject, HandlePropertyName,
                      MutableHandleValue);
  callVM<Fn, ImplicitThisOperation>(lir);
}

void CodeGenerator::visitArrowNewTarget(LArrowNewTarget* lir) {
  Register callee = ToRegister(lir->callee());
  ValueOperand output = ToOutValue(lir);
  masm.loadValue(
      Address(callee, FunctionExtended::offsetOfArrowNewTargetSlot()), output);
}

void CodeGenerator::visitArrayLength(LArrayLength* lir) {
  Register elements = ToRegister(lir->elements());
  Register output = ToRegister(lir->output());

  Address length(elements, ObjectElements::offsetOfLength());
  masm.load32(length, output);

  // Bail out if the length doesn't fit in int32.
  Label bail;
  masm.branchTest32(Assembler::Signed, output, output, &bail);
  bailoutFrom(&bail, lir->snapshot());
}

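// Store |index + 1| as the new elements length. For the register case the
// increment is undone afterwards so the index register keeps its original
// value.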
static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
                               const Address& length) {
  if (index->isConstant()) {
    masm.store32(Imm32(ToInt32(index) + 1), length);
  } else {
    Register newLength = ToRegister(index);
    masm.add32(Imm32(1), newLength);
    masm.store32(newLength, length);
    masm.sub32(Imm32(1), newLength);
  }
}

void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
  Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
  SetLengthFromIndex(masm, lir->index(), length);
}

void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  // Get the JSFunction flags.
  masm.load16ZeroExtend(Address(function, JSFunction::offsetOfFlags()), output);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, output,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      &bail);

  masm.loadFunctionLength(function, output, output, &bail);

  bailoutFrom(&bail, lir->snapshot());
}

void CodeGenerator::visitFunctionName(LFunctionName* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  const JSAtomState& names = gen->runtime->names();
  masm.loadFunctionName(function, output, ImmGCPtr(names.empty), &bail);

  bailoutFrom(&bail, lir->snapshot());
}

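// The helpers below mirror OrderedHashTable::Range::front() and popFront()
// for the jitted Map/Set iterator fast path: |front| is the address of the
// current entry, |data + i * sizeof(Data)|, and popFront() advances |i| past
// entries whose key has been cleared (removed entries).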
template <class OrderedHashTable>
static void RangeFront(MacroAssembler&, Register, Register, Register);

template <>
void RangeFront<ValueMap>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);

  MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
  masm.mulBy3(i, i);
  masm.lshiftPtr(Imm32(3), i);
  masm.addPtr(i, front);
}

template <>
void RangeFront<ValueSet>(MacroAssembler& masm, Register range, Register i,
                          Register front) {
  masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
  masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);

  MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
  masm.lshiftPtr(Imm32(4), i);
  masm.addPtr(i, front);
}

template <class OrderedHashTable>
static void RangePopFront(MacroAssembler& masm, Register range, Register front,
                          Register dataLength, Register temp) {
  Register i = temp;

  masm.add32(Imm32(1),
             Address(range, OrderedHashTable::Range::offsetOfCount()));

  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);

  Label done, seek;
  masm.bind(&seek);
  masm.add32(Imm32(1), i);
  masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);

  // We can add sizeof(Data) to |front| to select the next element, because
  // |front| and |range.ht.data[i]| point to the same location.
  MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0,
             "offsetof(Data, element) is 0");
  masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);

  masm.branchTestMagic(Assembler::Equal,
                       Address(front, OrderedHashTable::offsetOfEntryKey()),
                       JS_HASH_KEY_EMPTY, &seek);

  masm.bind(&done);
  masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
}

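// Unlink the Range from the hash table's list of active ranges and free it.
// The free is skipped when the iterator object is nursery-allocated; in that
// case the range memory is assumed to be managed by the nursery instead.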
template <class OrderedHashTable>
static inline void RangeDestruct(MacroAssembler& masm, Register iter,
                                 Register range, Register temp0,
                                 Register temp1) {
  Register next = temp0;
  Register prevp = temp1;

  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
  masm.storePtr(next, Address(prevp, 0));

  Label hasNoNext;
  masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);

  masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));

  masm.bind(&hasNoNext);

  Label nurseryAllocated;
  masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0,
                               &nurseryAllocated);

  masm.callFreeStub(range);

  masm.bind(&nurseryAllocated);
}

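// Copy the current Map entry's key and value into the first two fixed
// elements of the dedicated result array, emitting pre-barriers for the
// overwritten elements and a post-write barrier if either value is a
// nursery cell.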
template <>
void CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueMap::Entry::offsetOfKey());
  Address valueAddress(front, ValueMap::Entry::offsetOfValue());
  Address keyElemAddress(result, elementsOffset);
  Address valueElemAddress(result, elementsOffset + sizeof(Value));
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);
  masm.storeValue(valueAddress, valueElemAddress, temp);

  Label emitBarrier, skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
                                &emitBarrier);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
                                &skipBarrier);
  {
    masm.bind(&emitBarrier);
    saveVolatile(temp);
    emitPostWriteBarrier(result);
    restoreVolatile(temp);
  }
  masm.bind(&skipBarrier);
}

template <>
void CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  Address keyAddress(front, ValueSet::offsetOfEntryKey());
  Address keyElemAddress(result, elementsOffset);
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);

  Label skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
                                &skipBarrier);
  {
    saveVolatile(temp);
    emitPostWriteBarrier(result);
    restoreVolatile(temp);
  }
  masm.bind(&skipBarrier);
}

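// Shared fast path for MapIteratorObject/SetIteratorObject next(): load the
// private Range, return 1 in |output| when iteration is complete (clearing
// the range slot), otherwise copy the front entry into |result|, advance the
// range, and return 0.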
template <class IteratorObject, class OrderedHashTable>
void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
  Register iter = ToRegister(lir->iter());
  Register result = ToRegister(lir->result());
  Register temp = ToRegister(lir->temp0());
  Register dataLength = ToRegister(lir->temp1());
  Register range = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
  // only called with the correct iterator class. Assert here all self-
  // hosted callers of GetNextEntryForIterator perform this class check.
  // No Spectre mitigations are needed because this is DEBUG-only code.
  Label success;
  masm.branchTestObjClassNoSpectreMitigations(
      Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
  masm.assumeUnreachable("Iterator object should have the correct class.");
  masm.bind(&success);
#endif

  masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(
                                     IteratorObject::RangeSlot)),
                   range);

  Label iterAlreadyDone, iterDone, done;
  masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);

  masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
  masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()),
               dataLength);
  masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()),
              dataLength);
  masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
  {
    masm.push(iter);

    Register front = iter;
    RangeFront<OrderedHashTable>(masm, range, temp, front);

    emitLoadIteratorValues<OrderedHashTable>(result, temp, front);

    RangePopFront<OrderedHashTable>(masm, range, front, dataLength, temp);

    masm.pop(iter);
    masm.move32(Imm32(0), output);
  }
  masm.jump(&done);
  {
    masm.bind(&iterDone);

    RangeDestruct<OrderedHashTable>(masm, iter, range, temp, dataLength);

    masm.storeValue(PrivateValue(nullptr),
                    Address(iter, NativeObject::getFixedSlotOffset(
                                      IteratorObject::RangeSlot)));

    masm.bind(&iterAlreadyDone);

    masm.move32(Imm32(1), output);
  }
  masm.bind(&done);
}

void CodeGenerator::visitGetNextEntryForIterator(
    LGetNextEntryForIterator* lir) {
  if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
    emitGetNextEntryForIterator<MapIteratorObject, ValueMap>(lir);
  } else {
    MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
    emitGetNextEntryForIterator<SetIteratorObject, ValueSet>(lir);
  }
}

// The point of these is to inform Ion of where these values already are; they
// don't normally generate code.  Still, visitWasmRegisterResult is
// per-platform.
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}

void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
  LAllocation* output = lir->getDef(0)->output();
  MOZ_ASSERT(output->isStackArea());
  bool tempInit = false;
  for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
    // Zero out ref stack results.
    if (iter.isGcPointer()) {
      Register temp = ToRegister(lir->temp());
      if (!tempInit) {
        masm.xorPtr(temp, temp);
        tempInit = true;
      }
      masm.storePtr(temp, ToAddress(iter.alloc()));
    }
  }
}

void CodeGenerator::visitWasmCall(LWasmCall* lir) {
  MWasmCall* mir = lir->mir();
  bool needsBoundsCheck = lir->needsBoundsCheck();

  MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
             0);
  static_assert(
      WasmStackAlignment >= ABIStackAlignment &&
          WasmStackAlignment % ABIStackAlignment == 0,
      "The wasm stack alignment should subsume the ABI-required alignment");

#ifdef DEBUG
  Label ok;
  masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif

  // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
  // TLS and pinned regs. The only case where we don't have to reload
  // the TLS and pinned regs is when the callee preserves them.
  bool reloadRegs = true;
  bool switchRealm = true;

  const wasm::CallSiteDesc& desc = mir->desc();
  const wasm::CalleeDesc& callee = mir->callee();
  CodeOffset retOffset;
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
      retOffset = masm.call(desc, callee.funcIndex());
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::Import:
      retOffset = masm.wasmCallImport(desc, callee);
      break;
    case wasm::CalleeDesc::AsmJSTable:
    case wasm::CalleeDesc::WasmTable:
      retOffset = masm.wasmCallIndirect(desc, callee, needsBoundsCheck);
      reloadRegs = switchRealm = callee.which() == wasm::CalleeDesc::WasmTable;
      break;
    case wasm::CalleeDesc::Builtin:
      retOffset = masm.call(desc, callee.builtin());
      reloadRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::BuiltinInstanceMethod:
      retOffset = masm.wasmCallBuiltinInstanceMethod(
          desc, mir->instanceArg(), callee.builtin(),
          mir->builtinMethodFailureMode());
      switchRealm = false;
      break;
  }

  // Note the assembler offset for the associated LSafePoint.
  markSafepointAt(retOffset.offset(), lir);

  // Now that all the outbound in-memory args are on the stack, note the
  // required lower boundary point of the associated StackMap.
  lir->safepoint()->setFramePushedAtStackMapBase(
      masm.framePushed() - mir->stackArgAreaSizeUnaligned());
  MOZ_ASSERT(!lir->safepoint()->isWasmTrap());

  if (reloadRegs) {
    masm.loadPtr(Address(masm.getStackPointer(), WasmCallerTLSOffsetBeforeCall),
                 WasmTlsReg);
    masm.loadWasmPinnedRegsFromTls();
    if (switchRealm) {
      masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
    }
  } else {
    MOZ_ASSERT(!switchRealm);
  }
}

void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
  MIRType type = ins->type();
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  AnyRegister dst = ToAnyRegister(ins->output());

  switch (type) {
    case MIRType::Int32:
      masm.load32(addr, dst.gpr());
      break;
    case MIRType::Float32:
      masm.loadFloat32(addr, dst.fpu());
      break;
    case MIRType::Double:
      masm.loadDouble(addr, dst.fpu());
      break;
    case MIRType::Pointer:
    case MIRType::RefOrNull:
      masm.loadPtr(addr, dst.gpr());
      break;
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      masm.loadUnalignedSimd128(addr, dst.fpu());
      break;
#endif
    default:
      MOZ_CRASH("unexpected type in LoadPrimitiveValue");
  }
}

void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
  MIRType type = ins->type();
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  AnyRegister src = ToAnyRegister(ins->value());

  switch (type) {
    case MIRType::Int32:
      masm.store32(src.gpr(), addr);
      break;
    case MIRType::Float32:
      masm.storeFloat32(src.fpu(), addr);
      break;
    case MIRType::Double:
      masm.storeDouble(src.fpu(), addr);
      break;
    case MIRType::Pointer:
      // This could be correct, but it would be a new usage, so check carefully.
      MOZ_CRASH("Unexpected type in visitWasmStoreSlot.");
    case MIRType::RefOrNull:
      MOZ_CRASH("Bad type in visitWasmStoreSlot. Use LWasmStoreRef.");
#ifdef ENABLE_WASM_SIMD
    case MIRType::Simd128:
      masm.storeUnalignedSimd128(src.fpu(), addr);
      break;
#endif
    default:
      MOZ_CRASH("unexpected type in StorePrimitiveValue");
  }
}

void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
  masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
  masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
}

void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
  Register tls = ToRegister(ins->tls());
  Register valueAddr = ToRegister(ins->valueAddr());
  Register value = ToRegister(ins->value());
  Register temp = ToRegister(ins->temp());

  Label skipPreBarrier;
  wasm::EmitWasmPreBarrierGuard(masm, tls, temp, valueAddr, &skipPreBarrier);
  wasm::EmitWasmPreBarrierCall(masm, tls, temp, valueAddr);
  masm.bind(&skipPreBarrier);

  masm.storePtr(value, Address(valueAddr, 0));
  // The postbarrier is handled separately.
}

void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 output = ToOutRegister64(ins);
  masm.load64(addr, output);
}

void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 value = ToRegister64(ins->value());
  masm.store64(value, addr);
}

void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
  Register obj = ToRegister(lir->object());
  Register out = ToRegister(lir->output());
  masm.loadArrayBufferByteLengthIntPtr(obj, out);
}

void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
  Register obj = ToRegister(lir->object());
  Register out = ToRegister(lir->output());
  masm.loadArrayBufferViewLengthIntPtr(obj, out);
}

void CodeGenerator::visitArrayBufferViewByteOffset(
    LArrayBufferViewByteOffset* lir) {
  Register obj = ToRegister(lir->object());
  Register out = ToRegister(lir->output());
  masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
}

void CodeGenerator::visitArrayBufferViewElements(
    LArrayBufferViewElements* lir) {
  Register obj = ToRegister(lir->object());
  Register out = ToRegister(lir->output());
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
}

void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
  Register obj = ToRegister(lir->object());
  Register out = ToRegister(lir->output());

  masm.typedArrayElementSize(obj, out);
}

void CodeGenerator::visitGuardHasAttachedArrayBuffer(
    LGuardHasAttachedArrayBuffer* lir) {
  Register obj = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());

  Label bail;
  masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}

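// When the MIR node supports out-of-bounds indices, an index that can't be
// converted to an IntPtr is replaced (out of line) with -1, a value that is
// guaranteed to fail any subsequent bounds check, instead of bailing out.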
8122 class OutOfLineGuardNumberToIntPtrIndex
8123     : public OutOfLineCodeBase<CodeGenerator> {
8124   LGuardNumberToIntPtrIndex* lir_;
8125 
8126  public:
OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex * lir)8127   explicit OutOfLineGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex* lir)
8128       : lir_(lir) {}
8129 
accept(CodeGenerator * codegen)8130   void accept(CodeGenerator* codegen) override {
8131     codegen->visitOutOfLineGuardNumberToIntPtrIndex(this);
8132   }
lir() const8133   LGuardNumberToIntPtrIndex* lir() const { return lir_; }
8134 };
8135 
visitGuardNumberToIntPtrIndex(LGuardNumberToIntPtrIndex * lir)8136 void CodeGenerator::visitGuardNumberToIntPtrIndex(
8137     LGuardNumberToIntPtrIndex* lir) {
8138   FloatRegister input = ToFloatRegister(lir->input());
8139   Register output = ToRegister(lir->output());
8140 
8141   if (!lir->mir()->supportOOB()) {
8142     Label bail;
8143     masm.convertDoubleToPtr(input, output, &bail, false);
8144     bailoutFrom(&bail, lir->snapshot());
8145     return;
8146   }
8147 
8148   auto* ool = new (alloc()) OutOfLineGuardNumberToIntPtrIndex(lir);
8149   addOutOfLineCode(ool, lir->mir());
8150 
8151   masm.convertDoubleToPtr(input, output, ool->entry(), false);
8152   masm.bind(ool->rejoin());
8153 }
8154 
visitOutOfLineGuardNumberToIntPtrIndex(OutOfLineGuardNumberToIntPtrIndex * ool)8155 void CodeGenerator::visitOutOfLineGuardNumberToIntPtrIndex(
8156     OutOfLineGuardNumberToIntPtrIndex* ool) {
8157   // Substitute the invalid index with an arbitrary out-of-bounds index.
8158   masm.movePtr(ImmWord(-1), ToRegister(ool->lir()->output()));
8159   masm.jump(ool->rejoin());
8160 }
8161 
visitStringLength(LStringLength * lir)8162 void CodeGenerator::visitStringLength(LStringLength* lir) {
8163   Register input = ToRegister(lir->string());
8164   Register output = ToRegister(lir->output());
8165 
8166   masm.loadStringLength(input, output);
8167 }
8168 
visitMinMaxI(LMinMaxI * ins)8169 void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
8170   Register first = ToRegister(ins->first());
8171   Register output = ToRegister(ins->output());
8172 
8173   MOZ_ASSERT(first == output);
8174 
8175   Assembler::Condition cond =
8176       ins->mir()->isMax() ? Assembler::GreaterThan : Assembler::LessThan;
8177 
8178   if (ins->second()->isConstant()) {
8179     Label done;
8180     masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done);
8181     masm.move32(Imm32(ToInt32(ins->second())), output);
8182     masm.bind(&done);
8183   } else {
8184     Register second = ToRegister(ins->second());
8185     masm.cmp32Move32(cond, second, first, second, output);
8186   }
8187 }
8188 
visitMinMaxArrayI(LMinMaxArrayI * ins)8189 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
8190   Register array = ToRegister(ins->array());
8191   Register output = ToRegister(ins->output());
8192   Register temp1 = ToRegister(ins->temp1());
8193   Register temp2 = ToRegister(ins->temp2());
8194   Register temp3 = ToRegister(ins->temp3());
8195   bool isMax = ins->isMax();
8196 
8197   Label bail;
8198   masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
8199   bailoutFrom(&bail, ins->snapshot());
8200 }
8201 
visitMinMaxArrayD(LMinMaxArrayD * ins)8202 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
8203   Register array = ToRegister(ins->array());
8204   FloatRegister output = ToFloatRegister(ins->output());
8205   Register temp1 = ToRegister(ins->temp1());
8206   Register temp2 = ToRegister(ins->temp2());
8207   FloatRegister floatTemp = ToFloatRegister(ins->floatTemp());
8208   bool isMax = ins->isMax();
8209 
8210   Label bail;
8211   masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
8212   bailoutFrom(&bail, ins->snapshot());
8213 }
8214 
8215 // For Abs*, lowering will have tied input to output on platforms where that is
8216 // sensible, and otherwise left them untied.
8217 
visitAbsI(LAbsI * ins)8218 void CodeGenerator::visitAbsI(LAbsI* ins) {
8219   Register input = ToRegister(ins->input());
8220   Register output = ToRegister(ins->output());
8221 
8222   if (ins->mir()->fallible()) {
8223     Label positive;
8224     if (input != output) {
8225       masm.move32(input, output);
8226     }
8227     masm.branchTest32(Assembler::NotSigned, output, output, &positive);
8228     Label bail;
8229     masm.branchNeg32(Assembler::Overflow, output, &bail);
8230     bailoutFrom(&bail, ins->snapshot());
8231     masm.bind(&positive);
8232   } else {
8233     masm.abs32(input, output);
8234   }
8235 }
8236 
visitAbsD(LAbsD * ins)8237 void CodeGenerator::visitAbsD(LAbsD* ins) {
8238   masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
8239 }
8240 
void CodeGenerator::visitAbsF(LAbsF* ins) {
8242   masm.absFloat32(ToFloatRegister(ins->input()),
8243                   ToFloatRegister(ins->output()));
8244 }
8245 
void CodeGenerator::visitPowII(LPowII* ins) {
8247   Register value = ToRegister(ins->value());
8248   Register power = ToRegister(ins->power());
8249   Register output = ToRegister(ins->output());
8250   Register temp1 = ToRegister(ins->temp1());
8251   Register temp2 = ToRegister(ins->temp2());
8252 
8253   Label bailout;
8254   masm.pow32(value, power, output, temp1, temp2, &bailout);
8255   bailoutFrom(&bailout, ins->snapshot());
8256 }
8257 
void CodeGenerator::visitPowI(LPowI* ins) {
8259   FloatRegister value = ToFloatRegister(ins->value());
8260   Register power = ToRegister(ins->power());
8261   Register temp = ToRegister(ins->temp());
8262 
8263   MOZ_ASSERT(power != temp);
8264 
8265   using Fn = double (*)(double x, int32_t y);
8266   masm.setupUnalignedABICall(temp);
8267   masm.passABIArg(value, MoveOp::DOUBLE);
8268   masm.passABIArg(power);
8269 
8270   masm.callWithABI<Fn, js::powi>(MoveOp::DOUBLE);
8271   MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
8272 }
8273 
void CodeGenerator::visitPowD(LPowD* ins) {
8275   FloatRegister value = ToFloatRegister(ins->value());
8276   FloatRegister power = ToFloatRegister(ins->power());
8277   Register temp = ToRegister(ins->temp());
8278 
8279   using Fn = double (*)(double x, double y);
8280   masm.setupUnalignedABICall(temp);
8281   masm.passABIArg(value, MoveOp::DOUBLE);
8282   masm.passABIArg(power, MoveOp::DOUBLE);
8283   masm.callWithABI<Fn, ecmaPow>(MoveOp::DOUBLE);
8284 
8285   MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
8286 }
8287 
void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
8289   Register power = ToRegister(ins->power());
8290   Register output = ToRegister(ins->output());
8291 
8292   uint32_t base = ins->base();
8293   MOZ_ASSERT(mozilla::IsPowerOfTwo(base));
8294 
8295   uint32_t n = mozilla::FloorLog2(base);
8296   MOZ_ASSERT(n != 0);
8297 
8298   // Hacker's Delight, 2nd edition, theorem D2.
8299   auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };
8300 
  // Bail out if |power| is negative or greater than or equal to
  // |log_{2^n}(2^31)|: |2^(n*y) < 2^31| must hold, hence |n*y < 31|, i.e.
  // |y < 31/n|.
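  //
  // E.g. for |base == 8| (n == 3) we bail out for |power >= 11|
  // (== ceil(31/3)), because 8**11 == 2**33 no longer fits into an int32,
  // whereas 8**10 == 2**30 still does.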
8303   //
8304   // Note: it's important for this condition to match the code in CacheIR.cpp
8305   // (CanAttachInt32Pow) to prevent failure loops.
8306   bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
8307                ins->snapshot());
8308 
  // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
  // |power| and perform a single shift, but the MacroAssembler lacks the
  // necessary functionality, e.g. multiplying a register by an immediate, so
  // instead we restrict the number of generated shift instructions when
  // lowering this operation.
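  //
  // E.g. for |base == 8| (n == 3) this emits three shifts and computes
  // |((1 << y) << y) << y| == |2 ** (3 * y)| == |8 ** y|.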
8314   masm.move32(Imm32(1), output);
8315   do {
8316     masm.lshift32(power, output);
8317     n--;
8318   } while (n > 0);
8319 }
8320 
void CodeGenerator::visitSqrtD(LSqrtD* ins) {
8322   FloatRegister input = ToFloatRegister(ins->input());
8323   FloatRegister output = ToFloatRegister(ins->output());
8324   masm.sqrtDouble(input, output);
8325 }
8326 
void CodeGenerator::visitSqrtF(LSqrtF* ins) {
8328   FloatRegister input = ToFloatRegister(ins->input());
8329   FloatRegister output = ToFloatRegister(ins->output());
8330   masm.sqrtFloat32(input, output);
8331 }
8332 
void CodeGenerator::visitSignI(LSignI* ins) {
8334   Register input = ToRegister(ins->input());
8335   Register output = ToRegister(ins->output());
8336   masm.signInt32(input, output);
8337 }
8338 
void CodeGenerator::visitSignD(LSignD* ins) {
8340   FloatRegister input = ToFloatRegister(ins->input());
8341   FloatRegister output = ToFloatRegister(ins->output());
8342   masm.signDouble(input, output);
8343 }
8344 
void CodeGenerator::visitSignDI(LSignDI* ins) {
8346   FloatRegister input = ToFloatRegister(ins->input());
8347   FloatRegister temp = ToFloatRegister(ins->temp());
8348   Register output = ToRegister(ins->output());
8349 
8350   Label bail;
8351   masm.signDoubleToInt32(input, output, temp, &bail);
8352   bailoutFrom(&bail, ins->snapshot());
8353 }
8354 
void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
8356   Register temp = ToRegister(ins->temp());
8357   FloatRegister input = ToFloatRegister(ins->input());
8358   MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
8359 
8360   UnaryMathFunction fun = ins->mir()->function();
8361   UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
8362 
8363   masm.setupUnalignedABICall(temp);
8364 
8365   masm.passABIArg(input, MoveOp::DOUBLE);
8366   masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
8367                    MoveOp::DOUBLE);
8368 }
8369 
void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
8371   Register temp = ToRegister(ins->temp());
8372   FloatRegister input = ToFloatRegister(ins->input());
8373   MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);
8374 
8375   masm.setupUnalignedABICall(temp);
8376   masm.passABIArg(input, MoveOp::FLOAT32);
8377 
8378   using Fn = float (*)(float x);
8379   Fn funptr = nullptr;
8380   CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
8381   switch (ins->mir()->function()) {
8382     case UnaryMathFunction::Floor:
8383       funptr = floorf;
8384       check = CheckUnsafeCallWithABI::DontCheckOther;
8385       break;
8386     case UnaryMathFunction::Round:
8387       funptr = math_roundf_impl;
8388       break;
8389     case UnaryMathFunction::Trunc:
8390       funptr = math_truncf_impl;
8391       break;
8392     case UnaryMathFunction::Ceil:
8393       funptr = ceilf;
8394       check = CheckUnsafeCallWithABI::DontCheckOther;
8395       break;
8396     default:
8397       MOZ_CRASH("Unknown or unsupported float32 math function");
8398   }
8399 
8400   masm.callWithABI(DynamicFunction<Fn>(funptr), MoveOp::FLOAT32, check);
8401 }
8402 
void CodeGenerator::visitModD(LModD* ins) {
8404   MOZ_ASSERT(!gen->compilingWasm());
8405 
8406   FloatRegister lhs = ToFloatRegister(ins->lhs());
8407   FloatRegister rhs = ToFloatRegister(ins->rhs());
8408 
8409   MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
8410   MOZ_ASSERT(!ins->temp()->isBogusTemp());
8411 
8412   using Fn = double (*)(double a, double b);
8413   masm.setupUnalignedABICall(ToRegister(ins->temp()));
8414   masm.passABIArg(lhs, MoveOp::DOUBLE);
8415   masm.passABIArg(rhs, MoveOp::DOUBLE);
8416   masm.callWithABI<Fn, NumberMod>(MoveOp::DOUBLE);
8417 }
8418 
void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
8420   FloatRegister lhs = ToFloatRegister(ins->lhs());
8421   uint32_t divisor = ins->divisor();
8422   MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));
8423 
8424   FloatRegister output = ToFloatRegister(ins->output());
8425 
8426   // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
8427   //
8428   // This doesn't work if |d| isn't a power of two, because we may lose too much
8429   // precision. For example |Number.MAX_VALUE % 3 == 2|, but
8430   // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.
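  //
  // E.g. |5.5 % 2| == |copysign(5.5 - 2 * trunc(5.5 / 2), 5.5)| == 1.5 and
  // |-5.5 % 2| == -1.5. The |copysign| is needed to return -0 for inputs like
  // |-4 % 2|.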
8431 
8432   Label done;
8433   {
8434     ScratchDoubleScope scratch(masm);
8435 
8436     // Subnormals can lead to performance degradation, which can make calling
8437     // |fmod| faster than this inline implementation. Work around this issue by
8438     // directly returning the input for any value in the interval ]-1, +1[.
8439     Label notSubnormal;
8440     masm.loadConstantDouble(1.0, scratch);
8441     masm.loadConstantDouble(-1.0, output);
8442     masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
8443                       &notSubnormal);
8444     masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
8445                       &notSubnormal);
8446 
8447     masm.moveDouble(lhs, output);
8448     masm.jump(&done);
8449 
8450     masm.bind(&notSubnormal);
8451 
8452     if (divisor == 1) {
8453       // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
8454       // the multiplication by one in this case.
8455       masm.moveDouble(lhs, output);
8456       masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
8457       masm.subDouble(scratch, output);
8458     } else {
8459       masm.loadConstantDouble(1.0 / double(divisor), scratch);
8460       masm.loadConstantDouble(double(divisor), output);
8461 
8462       masm.mulDouble(lhs, scratch);
8463       masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
8464       masm.mulDouble(output, scratch);
8465 
8466       masm.moveDouble(lhs, output);
8467       masm.subDouble(scratch, output);
8468     }
8469   }
8470 
8471   masm.copySignDouble(output, lhs, output);
8472   masm.bind(&done);
8473 }
8474 
void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
8476   masm.Push(WasmTlsReg);
8477   int32_t framePushedAfterTls = masm.framePushed();
8478 
8479   FloatRegister lhs = ToFloatRegister(ins->lhs());
8480   FloatRegister rhs = ToFloatRegister(ins->rhs());
8481 
8482   MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
8483 
8484   masm.setupWasmABICall();
8485   masm.passABIArg(lhs, MoveOp::DOUBLE);
8486   masm.passABIArg(rhs, MoveOp::DOUBLE);
8487 
8488   int32_t tlsOffset = masm.framePushed() - framePushedAfterTls;
8489   masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
8490                    mozilla::Some(tlsOffset), MoveOp::DOUBLE);
8491 
8492   masm.Pop(WasmTlsReg);
8493 }
8494 
void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
8496   Register lhs = ToRegister(ins->lhs());
8497   Register rhs = ToRegister(ins->rhs());
8498   Register temp1 = ToRegister(ins->temp1());
8499   Register temp2 = ToRegister(ins->temp2());
8500   Register output = ToRegister(ins->output());
8501 
8502   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8503   auto* ool = oolCallVM<Fn, BigInt::add>(ins, ArgList(lhs, rhs),
8504                                          StoreRegisterTo(output));
8505 
8506   // 0n + x == x
8507   Label lhsNonZero;
8508   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8509   masm.movePtr(rhs, output);
8510   masm.jump(ool->rejoin());
8511   masm.bind(&lhsNonZero);
8512 
8513   // x + 0n == x
8514   Label rhsNonZero;
8515   masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
8516   masm.movePtr(lhs, output);
8517   masm.jump(ool->rejoin());
8518   masm.bind(&rhsNonZero);
8519 
8520   // Call into the VM when either operand can't be loaded into a pointer-sized
8521   // register.
8522   masm.loadBigIntNonZero(lhs, temp1, ool->entry());
8523   masm.loadBigIntNonZero(rhs, temp2, ool->entry());
8524 
8525   masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
8526 
8527   // Create and return the result.
8528   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
8529   masm.initializeBigInt(output, temp1);
8530 
8531   masm.bind(ool->rejoin());
8532 }
8533 
void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
8535   Register lhs = ToRegister(ins->lhs());
8536   Register rhs = ToRegister(ins->rhs());
8537   Register temp1 = ToRegister(ins->temp1());
8538   Register temp2 = ToRegister(ins->temp2());
8539   Register output = ToRegister(ins->output());
8540 
8541   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8542   auto* ool = oolCallVM<Fn, BigInt::sub>(ins, ArgList(lhs, rhs),
8543                                          StoreRegisterTo(output));
8544 
8545   // x - 0n == x
8546   Label rhsNonZero;
8547   masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
8548   masm.movePtr(lhs, output);
8549   masm.jump(ool->rejoin());
8550   masm.bind(&rhsNonZero);
8551 
8552   // Call into the VM when either operand can't be loaded into a pointer-sized
8553   // register.
8554   masm.loadBigInt(lhs, temp1, ool->entry());
8555   masm.loadBigIntNonZero(rhs, temp2, ool->entry());
8556 
8557   masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
8558 
8559   // Create and return the result.
8560   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
8561   masm.initializeBigInt(output, temp1);
8562 
8563   masm.bind(ool->rejoin());
8564 }
8565 
void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
8567   Register lhs = ToRegister(ins->lhs());
8568   Register rhs = ToRegister(ins->rhs());
8569   Register temp1 = ToRegister(ins->temp1());
8570   Register temp2 = ToRegister(ins->temp2());
8571   Register output = ToRegister(ins->output());
8572 
8573   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8574   auto* ool = oolCallVM<Fn, BigInt::mul>(ins, ArgList(lhs, rhs),
8575                                          StoreRegisterTo(output));
8576 
8577   // 0n * x == 0n
8578   Label lhsNonZero;
8579   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8580   masm.movePtr(lhs, output);
8581   masm.jump(ool->rejoin());
8582   masm.bind(&lhsNonZero);
8583 
8584   // x * 0n == 0n
8585   Label rhsNonZero;
8586   masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
8587   masm.movePtr(rhs, output);
8588   masm.jump(ool->rejoin());
8589   masm.bind(&rhsNonZero);
8590 
8591   // Call into the VM when either operand can't be loaded into a pointer-sized
8592   // register.
8593   masm.loadBigIntNonZero(lhs, temp1, ool->entry());
8594   masm.loadBigIntNonZero(rhs, temp2, ool->entry());
8595 
8596   masm.branchMulPtr(Assembler::Overflow, temp2, temp1, ool->entry());
8597 
8598   // Create and return the result.
8599   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
8600   masm.initializeBigInt(output, temp1);
8601 
8602   masm.bind(ool->rejoin());
8603 }
8604 
void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
8606   Register lhs = ToRegister(ins->lhs());
8607   Register rhs = ToRegister(ins->rhs());
8608   Register temp1 = ToRegister(ins->temp1());
8609   Register temp2 = ToRegister(ins->temp2());
8610   Register output = ToRegister(ins->output());
8611 
8612   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8613   auto* ool = oolCallVM<Fn, BigInt::div>(ins, ArgList(lhs, rhs),
8614                                          StoreRegisterTo(output));
8615 
8616   // x / 0 throws an error.
8617   if (ins->mir()->canBeDivideByZero()) {
8618     masm.branchIfBigIntIsZero(rhs, ool->entry());
8619   }
8620 
8621   // 0n / x == 0n
8622   Label lhsNonZero;
8623   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8624   masm.movePtr(lhs, output);
8625   masm.jump(ool->rejoin());
8626   masm.bind(&lhsNonZero);
8627 
8628   // Call into the VM when either operand can't be loaded into a pointer-sized
8629   // register.
8630   masm.loadBigIntNonZero(lhs, temp1, ool->entry());
8631   masm.loadBigIntNonZero(rhs, temp2, ool->entry());
8632 
  // |BigInt::div()| returns |lhs| for |lhs / 1n|, which means there's no
  // allocation that might trigger a minor GC to free up nursery space. We have
  // to apply the same optimization here, otherwise we'd always end up entering
  // the OOL call, because the nursery is never evicted.
8637   Label notOne;
8638   masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(1), &notOne);
8639   masm.movePtr(lhs, output);
8640   masm.jump(ool->rejoin());
8641   masm.bind(&notOne);
8642 
8643   static constexpr auto DigitMin = std::numeric_limits<
8644       mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
8645 
8646   // Handle an integer overflow from INT{32,64}_MIN / -1.
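  // (E.g. on 64-bit, |-(2n ** 63n) / -1n| == 2n ** 63n, which no longer fits
  // into a signed 64-bit digit, so that case is left to the VM call.)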
8647   Label notOverflow;
8648   masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
8649   masm.branchPtr(Assembler::Equal, temp2, ImmWord(-1), ool->entry());
8650   masm.bind(&notOverflow);
8651 
8652   emitBigIntDiv(ins, temp1, temp2, output, ool->entry());
8653 
8654   masm.bind(ool->rejoin());
8655 }
8656 
void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
8658   Register lhs = ToRegister(ins->lhs());
8659   Register rhs = ToRegister(ins->rhs());
8660   Register temp1 = ToRegister(ins->temp1());
8661   Register temp2 = ToRegister(ins->temp2());
8662   Register output = ToRegister(ins->output());
8663 
8664   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8665   auto* ool = oolCallVM<Fn, BigInt::mod>(ins, ArgList(lhs, rhs),
8666                                          StoreRegisterTo(output));
8667 
8668   // x % 0 throws an error.
8669   if (ins->mir()->canBeDivideByZero()) {
8670     masm.branchIfBigIntIsZero(rhs, ool->entry());
8671   }
8672 
8673   // 0n % x == 0n
8674   Label lhsNonZero;
8675   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8676   masm.movePtr(lhs, output);
8677   masm.jump(ool->rejoin());
8678   masm.bind(&lhsNonZero);
8679 
8680   // Call into the VM when either operand can't be loaded into a pointer-sized
8681   // register.
8682   masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
8683   masm.loadBigIntAbsolute(rhs, temp2, ool->entry());
8684 
8685   // Similar to the case for BigInt division, we must apply the same allocation
8686   // optimizations as performed in |BigInt::mod()|.
8687   Label notBelow;
8688   masm.branchPtr(Assembler::AboveOrEqual, temp1, temp2, &notBelow);
8689   masm.movePtr(lhs, output);
8690   masm.jump(ool->rejoin());
8691   masm.bind(&notBelow);
8692 
8693   // Convert both digits to signed pointer-sized values.
8694   masm.bigIntDigitToSignedPtr(lhs, temp1, ool->entry());
8695   masm.bigIntDigitToSignedPtr(rhs, temp2, ool->entry());
8696 
8697   static constexpr auto DigitMin = std::numeric_limits<
8698       mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
8699 
8700   // Handle an integer overflow from INT{32,64}_MIN / -1.
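  // |INT{32,64}_MIN % -1| == 0, but the quotient |INT{32,64}_MIN / -1|
  // overflows, so substitute a zero dividend, which yields the same remainder.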
8701   Label notOverflow;
8702   masm.branchPtr(Assembler::NotEqual, temp1, ImmWord(DigitMin), &notOverflow);
8703   masm.branchPtr(Assembler::NotEqual, temp2, ImmWord(-1), &notOverflow);
8704   masm.movePtr(ImmWord(0), temp1);
8705   masm.bind(&notOverflow);
8706 
8707   emitBigIntMod(ins, temp1, temp2, output, ool->entry());
8708 
8709   masm.bind(ool->rejoin());
8710 }
8711 
void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
8713   Register lhs = ToRegister(ins->lhs());
8714   Register rhs = ToRegister(ins->rhs());
8715   Register temp1 = ToRegister(ins->temp1());
8716   Register temp2 = ToRegister(ins->temp2());
8717   Register output = ToRegister(ins->output());
8718 
8719   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8720   auto* ool = oolCallVM<Fn, BigInt::pow>(ins, ArgList(lhs, rhs),
8721                                          StoreRegisterTo(output));
8722 
8723   // x ** -y throws an error.
8724   if (ins->mir()->canBeNegativeExponent()) {
8725     masm.branchIfBigIntIsNegative(rhs, ool->entry());
8726   }
8727 
8728   Register dest = temp1;
8729   Register base = temp2;
8730   Register exponent = output;
8731 
8732   Label done;
8733   masm.movePtr(ImmWord(1), dest);  // p = 1
8734 
8735   // 1n ** y == 1n
8736   // -1n ** y == 1n when y is even
8737   // -1n ** y == -1n when y is odd
8738   Label lhsNotOne;
8739   masm.branch32(Assembler::Above, Address(lhs, BigInt::offsetOfLength()),
8740                 Imm32(1), &lhsNotOne);
8741   masm.loadFirstBigIntDigitOrZero(lhs, base);
8742   masm.branchPtr(Assembler::NotEqual, base, Imm32(1), &lhsNotOne);
8743   {
8744     masm.loadFirstBigIntDigitOrZero(rhs, exponent);
8745 
8746     Label lhsNonNegative;
8747     masm.branchIfBigIntIsNonNegative(lhs, &lhsNonNegative);
8748     masm.branchTestPtr(Assembler::Zero, exponent, Imm32(1), &done);
8749     masm.bind(&lhsNonNegative);
8750     masm.movePtr(lhs, output);
8751     masm.jump(ool->rejoin());
8752   }
8753   masm.bind(&lhsNotOne);
8754 
8755   // x ** 0n == 1n
8756   masm.branchIfBigIntIsZero(rhs, &done);
8757 
8758   // 0n ** y == 0n with y != 0n
8759   Label lhsNonZero;
8760   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8761   {
8762     masm.movePtr(lhs, output);
8763     masm.jump(ool->rejoin());
8764   }
8765   masm.bind(&lhsNonZero);
8766 
8767   // Call into the VM when the exponent can't be loaded into a pointer-sized
8768   // register.
8769   masm.loadBigIntAbsolute(rhs, exponent, ool->entry());
8770 
8771   // x ** y with x > 1 and y >= DigitBits can't be pointer-sized.
8772   masm.branchPtr(Assembler::AboveOrEqual, exponent, Imm32(BigInt::DigitBits),
8773                  ool->entry());
8774 
8775   // x ** 1n == x
8776   Label rhsNotOne;
8777   masm.branch32(Assembler::NotEqual, exponent, Imm32(1), &rhsNotOne);
8778   {
8779     masm.movePtr(lhs, output);
8780     masm.jump(ool->rejoin());
8781   }
8782   masm.bind(&rhsNotOne);
8783 
8784   // Call into the VM when the base operand can't be loaded into a pointer-sized
8785   // register.
8786   masm.loadBigIntNonZero(lhs, base, ool->entry());
8787 
8788   // MacroAssembler::pow32() adjusted to work on pointer-sized registers.
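  // This is the usual square-and-multiply (binary exponentiation) loop, which
  // consumes the exponent bit by bit, e.g. |m ** 5| == |m ** 4 * m| because
  // 5 == 0b101.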
8789   {
8790     // m = base
8791     // n = exponent
8792 
8793     Label start, loop;
8794     masm.jump(&start);
8795     masm.bind(&loop);
8796 
8797     // m *= m
8798     masm.branchMulPtr(Assembler::Overflow, base, base, ool->entry());
8799 
8800     masm.bind(&start);
8801 
8802     // if ((n & 1) != 0) p *= m
8803     Label even;
8804     masm.branchTest32(Assembler::Zero, exponent, Imm32(1), &even);
8805     masm.branchMulPtr(Assembler::Overflow, base, dest, ool->entry());
8806     masm.bind(&even);
8807 
8808     // n >>= 1
8809     // if (n == 0) return p
8810     masm.branchRshift32(Assembler::NonZero, Imm32(1), exponent, &loop);
8811   }
8812 
8813   MOZ_ASSERT(temp1 == dest);
8814 
8815   // Create and return the result.
8816   masm.bind(&done);
8817   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
8818   masm.initializeBigInt(output, temp1);
8819 
8820   masm.bind(ool->rejoin());
8821 }
8822 
void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
8824   Register lhs = ToRegister(ins->lhs());
8825   Register rhs = ToRegister(ins->rhs());
8826   Register temp1 = ToRegister(ins->temp1());
8827   Register temp2 = ToRegister(ins->temp2());
8828   Register output = ToRegister(ins->output());
8829 
8830   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8831   auto* ool = oolCallVM<Fn, BigInt::bitAnd>(ins, ArgList(lhs, rhs),
8832                                             StoreRegisterTo(output));
8833 
8834   // 0n & x == 0n
8835   Label lhsNonZero;
8836   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8837   masm.movePtr(lhs, output);
8838   masm.jump(ool->rejoin());
8839   masm.bind(&lhsNonZero);
8840 
8841   // x & 0n == 0n
8842   Label rhsNonZero;
8843   masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
8844   masm.movePtr(rhs, output);
8845   masm.jump(ool->rejoin());
8846   masm.bind(&rhsNonZero);
8847 
8848   // Call into the VM when either operand can't be loaded into a pointer-sized
8849   // register.
8850   masm.loadBigIntNonZero(lhs, temp1, ool->entry());
8851   masm.loadBigIntNonZero(rhs, temp2, ool->entry());
8852 
8853   masm.andPtr(temp2, temp1);
8854 
8855   // Create and return the result.
8856   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
8857   masm.initializeBigInt(output, temp1);
8858 
8859   masm.bind(ool->rejoin());
8860 }
8861 
void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
8863   Register lhs = ToRegister(ins->lhs());
8864   Register rhs = ToRegister(ins->rhs());
8865   Register temp1 = ToRegister(ins->temp1());
8866   Register temp2 = ToRegister(ins->temp2());
8867   Register output = ToRegister(ins->output());
8868 
8869   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8870   auto* ool = oolCallVM<Fn, BigInt::bitOr>(ins, ArgList(lhs, rhs),
8871                                            StoreRegisterTo(output));
8872 
8873   // 0n | x == x
8874   Label lhsNonZero;
8875   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8876   masm.movePtr(rhs, output);
8877   masm.jump(ool->rejoin());
8878   masm.bind(&lhsNonZero);
8879 
8880   // x | 0n == x
8881   Label rhsNonZero;
8882   masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
8883   masm.movePtr(lhs, output);
8884   masm.jump(ool->rejoin());
8885   masm.bind(&rhsNonZero);
8886 
8887   // Call into the VM when either operand can't be loaded into a pointer-sized
8888   // register.
8889   masm.loadBigIntNonZero(lhs, temp1, ool->entry());
8890   masm.loadBigIntNonZero(rhs, temp2, ool->entry());
8891 
8892   masm.orPtr(temp2, temp1);
8893 
8894   // Create and return the result.
8895   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
8896   masm.initializeBigInt(output, temp1);
8897 
8898   masm.bind(ool->rejoin());
8899 }
8900 
void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
8902   Register lhs = ToRegister(ins->lhs());
8903   Register rhs = ToRegister(ins->rhs());
8904   Register temp1 = ToRegister(ins->temp1());
8905   Register temp2 = ToRegister(ins->temp2());
8906   Register output = ToRegister(ins->output());
8907 
8908   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8909   auto* ool = oolCallVM<Fn, BigInt::bitXor>(ins, ArgList(lhs, rhs),
8910                                             StoreRegisterTo(output));
8911 
8912   // 0n ^ x == x
8913   Label lhsNonZero;
8914   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8915   masm.movePtr(rhs, output);
8916   masm.jump(ool->rejoin());
8917   masm.bind(&lhsNonZero);
8918 
8919   // x ^ 0n == x
8920   Label rhsNonZero;
8921   masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
8922   masm.movePtr(lhs, output);
8923   masm.jump(ool->rejoin());
8924   masm.bind(&rhsNonZero);
8925 
8926   // Call into the VM when either operand can't be loaded into a pointer-sized
8927   // register.
8928   masm.loadBigIntNonZero(lhs, temp1, ool->entry());
8929   masm.loadBigIntNonZero(rhs, temp2, ool->entry());
8930 
8931   masm.xorPtr(temp2, temp1);
8932 
8933   // Create and return the result.
8934   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
8935   masm.initializeBigInt(output, temp1);
8936 
8937   masm.bind(ool->rejoin());
8938 }
8939 
void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
8941   Register lhs = ToRegister(ins->lhs());
8942   Register rhs = ToRegister(ins->rhs());
8943   Register temp1 = ToRegister(ins->temp1());
8944   Register temp2 = ToRegister(ins->temp2());
8945   Register temp3 = ToRegister(ins->temp3());
8946   Register output = ToRegister(ins->output());
8947 
8948   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
8949   auto* ool = oolCallVM<Fn, BigInt::lsh>(ins, ArgList(lhs, rhs),
8950                                          StoreRegisterTo(output));
8951 
8952   // 0n << x == 0n
8953   Label lhsNonZero;
8954   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
8955   masm.movePtr(lhs, output);
8956   masm.jump(ool->rejoin());
8957   masm.bind(&lhsNonZero);
8958 
8959   // x << 0n == x
8960   Label rhsNonZero;
8961   masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
8962   masm.movePtr(lhs, output);
8963   masm.jump(ool->rejoin());
8964   masm.bind(&rhsNonZero);
8965 
8966   // Inline |BigInt::lsh| for the case when |lhs| contains a single digit.
8967 
8968   Label rhsTooLarge;
8969   masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
8970 
8971   // Call into the VM when the left-hand side operand can't be loaded into a
8972   // pointer-sized register.
8973   masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
8974 
8975   // Handle shifts exceeding |BigInt::DigitBits| first.
8976   Label shift, create;
8977   masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
8978   {
8979     masm.bind(&rhsTooLarge);
8980 
8981     // x << DigitBits with x != 0n always exceeds pointer-sized storage.
8982     masm.branchIfBigIntIsNonNegative(rhs, ool->entry());
8983 
8984     // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
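    // (E.g. on 64-bit that is |x >> 64n|, which is 0n for non-negative x and
    // -1n for negative x, because the shift rounds towards negative infinity.)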
8985     masm.move32(Imm32(0), temp1);
8986     masm.branchIfBigIntIsNonNegative(lhs, &create);
8987     masm.move32(Imm32(1), temp1);
8988     masm.jump(&create);
8989   }
8990   masm.bind(&shift);
8991 
8992   Label nonNegative;
8993   masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
8994   {
8995     masm.movePtr(temp1, temp3);
8996 
8997     // |x << -y| is computed as |x >> y|.
8998     masm.rshiftPtr(temp2, temp1);
8999 
9000     // For negative numbers, round down if any bit was shifted out.
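    // E.g. |-7n << -1n| == -4n: |7 >> 1| == 3 and a bit was shifted out, so
    // the absolute value is rounded up to 4 before the sign bit is set below.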
9001     masm.branchIfBigIntIsNonNegative(lhs, &create);
9002 
9003     // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
9004     masm.movePtr(ImmWord(-1), output);
9005     masm.lshiftPtr(temp2, output);
9006     masm.notPtr(output);
9007 
    // Add one when |(lhs.digit(0) & mask) != 0|.
9009     masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
9010     masm.addPtr(ImmWord(1), temp1);
9011     masm.jump(&create);
9012   }
9013   masm.bind(&nonNegative);
9014   {
9015     masm.movePtr(temp2, temp3);
9016 
9017     // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
9018     masm.negPtr(temp2);
9019     masm.addPtr(Imm32(BigInt::DigitBits), temp2);
9020     masm.movePtr(temp1, output);
9021     masm.rshiftPtr(temp2, output);
9022 
9023     // Call into the VM when any bit will be shifted out.
9024     masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
9025 
9026     masm.movePtr(temp3, temp2);
9027     masm.lshiftPtr(temp2, temp1);
9028   }
9029   masm.bind(&create);
9030 
9031   // Create and return the result.
9032   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
9033   masm.initializeBigIntAbsolute(output, temp1);
9034 
9035   // Set the sign bit when the left-hand side is negative.
9036   masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
9037   masm.or32(Imm32(BigInt::signBitMask()),
9038             Address(output, BigInt::offsetOfFlags()));
9039 
9040   masm.bind(ool->rejoin());
9041 }
9042 
void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
9044   Register lhs = ToRegister(ins->lhs());
9045   Register rhs = ToRegister(ins->rhs());
9046   Register temp1 = ToRegister(ins->temp1());
9047   Register temp2 = ToRegister(ins->temp2());
9048   Register temp3 = ToRegister(ins->temp3());
9049   Register output = ToRegister(ins->output());
9050 
9051   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
9052   auto* ool = oolCallVM<Fn, BigInt::rsh>(ins, ArgList(lhs, rhs),
9053                                          StoreRegisterTo(output));
9054 
9055   // 0n >> x == 0n
9056   Label lhsNonZero;
9057   masm.branchIfBigIntIsNonZero(lhs, &lhsNonZero);
9058   masm.movePtr(lhs, output);
9059   masm.jump(ool->rejoin());
9060   masm.bind(&lhsNonZero);
9061 
9062   // x >> 0n == x
9063   Label rhsNonZero;
9064   masm.branchIfBigIntIsNonZero(rhs, &rhsNonZero);
9065   masm.movePtr(lhs, output);
9066   masm.jump(ool->rejoin());
9067   masm.bind(&rhsNonZero);
9068 
9069   // Inline |BigInt::rsh| for the case when |lhs| contains a single digit.
9070 
9071   Label rhsTooLarge;
9072   masm.loadBigIntAbsolute(rhs, temp2, &rhsTooLarge);
9073 
9074   // Call into the VM when the left-hand side operand can't be loaded into a
9075   // pointer-sized register.
9076   masm.loadBigIntAbsolute(lhs, temp1, ool->entry());
9077 
9078   // Handle shifts exceeding |BigInt::DigitBits| first.
9079   Label shift, create;
9080   masm.branchPtr(Assembler::Below, temp2, Imm32(BigInt::DigitBits), &shift);
9081   {
9082     masm.bind(&rhsTooLarge);
9083 
9084     // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
9085     masm.branchIfBigIntIsNegative(rhs, ool->entry());
9086 
9087     // x >> DigitBits is either 0n or -1n.
9088     masm.move32(Imm32(0), temp1);
9089     masm.branchIfBigIntIsNonNegative(lhs, &create);
9090     masm.move32(Imm32(1), temp1);
9091     masm.jump(&create);
9092   }
9093   masm.bind(&shift);
9094 
9095   Label nonNegative;
9096   masm.branchIfBigIntIsNonNegative(rhs, &nonNegative);
9097   {
9098     masm.movePtr(temp2, temp3);
9099 
9100     // Compute |grow = lhs.digit(0) >> (DigitBits - shift)|.
9101     masm.negPtr(temp2);
9102     masm.addPtr(Imm32(BigInt::DigitBits), temp2);
9103     masm.movePtr(temp1, output);
9104     masm.rshiftPtr(temp2, output);
9105 
9106     // Call into the VM when any bit will be shifted out.
9107     masm.branchTestPtr(Assembler::NonZero, output, output, ool->entry());
9108 
9109     // |x >> -y| is computed as |x << y|.
9110     masm.movePtr(temp3, temp2);
9111     masm.lshiftPtr(temp2, temp1);
9112     masm.jump(&create);
9113   }
9114   masm.bind(&nonNegative);
9115   {
9116     masm.movePtr(temp1, temp3);
9117 
9118     masm.rshiftPtr(temp2, temp1);
9119 
9120     // For negative numbers, round down if any bit was shifted out.
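    // E.g. |-7n >> 1n| == -4n: |7 >> 1| == 3 and a bit was shifted out, so
    // the absolute value is rounded up to 4 before the sign bit is set below.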
9121     masm.branchIfBigIntIsNonNegative(lhs, &create);
9122 
9123     // Compute |mask = (static_cast<Digit>(1) << shift) - 1|.
9124     masm.movePtr(ImmWord(-1), output);
9125     masm.lshiftPtr(temp2, output);
9126     masm.notPtr(output);
9127 
    // Add one when |(lhs.digit(0) & mask) != 0|.
9129     masm.branchTestPtr(Assembler::Zero, output, temp3, &create);
9130     masm.addPtr(ImmWord(1), temp1);
9131   }
9132   masm.bind(&create);
9133 
9134   // Create and return the result.
9135   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
9136   masm.initializeBigIntAbsolute(output, temp1);
9137 
9138   // Set the sign bit when the left-hand side is negative.
9139   masm.branchIfBigIntIsNonNegative(lhs, ool->rejoin());
9140   masm.or32(Imm32(BigInt::signBitMask()),
9141             Address(output, BigInt::offsetOfFlags()));
9142 
9143   masm.bind(ool->rejoin());
9144 }
9145 
void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
9147   Register input = ToRegister(ins->input());
9148   Register temp1 = ToRegister(ins->temp1());
9149   Register temp2 = ToRegister(ins->temp2());
9150   Register output = ToRegister(ins->output());
9151 
9152   using Fn = BigInt* (*)(JSContext*, HandleBigInt);
9153   auto* ool =
9154       oolCallVM<Fn, BigInt::inc>(ins, ArgList(input), StoreRegisterTo(output));
9155 
9156   // Call into the VM when the input can't be loaded into a pointer-sized
9157   // register.
9158   masm.loadBigInt(input, temp1, ool->entry());
9159   masm.movePtr(ImmWord(1), temp2);
9160 
9161   masm.branchAddPtr(Assembler::Overflow, temp2, temp1, ool->entry());
9162 
9163   // Create and return the result.
9164   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
9165   masm.initializeBigInt(output, temp1);
9166 
9167   masm.bind(ool->rejoin());
9168 }
9169 
void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
9171   Register input = ToRegister(ins->input());
9172   Register temp1 = ToRegister(ins->temp1());
9173   Register temp2 = ToRegister(ins->temp2());
9174   Register output = ToRegister(ins->output());
9175 
9176   using Fn = BigInt* (*)(JSContext*, HandleBigInt);
9177   auto* ool =
9178       oolCallVM<Fn, BigInt::dec>(ins, ArgList(input), StoreRegisterTo(output));
9179 
9180   // Call into the VM when the input can't be loaded into a pointer-sized
9181   // register.
9182   masm.loadBigInt(input, temp1, ool->entry());
9183   masm.movePtr(ImmWord(1), temp2);
9184 
9185   masm.branchSubPtr(Assembler::Overflow, temp2, temp1, ool->entry());
9186 
9187   // Create and return the result.
9188   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
9189   masm.initializeBigInt(output, temp1);
9190 
9191   masm.bind(ool->rejoin());
9192 }
9193 
void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
9195   Register input = ToRegister(ins->input());
9196   Register temp = ToRegister(ins->temp());
9197   Register output = ToRegister(ins->output());
9198 
9199   using Fn = BigInt* (*)(JSContext*, HandleBigInt);
9200   auto* ool =
9201       oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));
9202 
9203   // -0n == 0n
9204   Label lhsNonZero;
9205   masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
9206   masm.movePtr(input, output);
9207   masm.jump(ool->rejoin());
9208   masm.bind(&lhsNonZero);
9209 
9210   // Call into the VM when the input uses heap digits.
9211   masm.copyBigIntWithInlineDigits(input, output, temp, ool->entry(),
9212                                   bigIntsCanBeInNursery());
9213 
9214   // Flip the sign bit.
9215   masm.xor32(Imm32(BigInt::signBitMask()),
9216              Address(output, BigInt::offsetOfFlags()));
9217 
9218   masm.bind(ool->rejoin());
9219 }
9220 
void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
9222   Register input = ToRegister(ins->input());
9223   Register temp1 = ToRegister(ins->temp1());
9224   Register temp2 = ToRegister(ins->temp2());
9225   Register output = ToRegister(ins->output());
9226 
9227   using Fn = BigInt* (*)(JSContext*, HandleBigInt);
9228   auto* ool = oolCallVM<Fn, BigInt::bitNot>(ins, ArgList(input),
9229                                             StoreRegisterTo(output));
9230 
9231   masm.loadBigIntAbsolute(input, temp1, ool->entry());
9232 
  // This follows the C++ implementation because it lets us support the full
  // range [-2^64, 2^64 - 1] on 64-bit and [-2^32, 2^32 - 1] on 32-bit.
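  //
  // E.g. ~5n == -6n and ~(-5n) == 4n, so only the absolute value of the
  // result is computed here; the sign bit is fixed up below.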
9235   Label nonNegative, done;
9236   masm.branchIfBigIntIsNonNegative(input, &nonNegative);
9237   {
9238     // ~(-x) == ~(~(x-1)) == x-1
9239     masm.subPtr(Imm32(1), temp1);
9240     masm.jump(&done);
9241   }
9242   masm.bind(&nonNegative);
9243   {
9244     // ~x == -x-1 == -(x+1)
9245     masm.movePtr(ImmWord(1), temp2);
9246     masm.branchAddPtr(Assembler::CarrySet, temp2, temp1, ool->entry());
9247   }
9248   masm.bind(&done);
9249 
9250   // Create and return the result.
9251   masm.newGCBigInt(output, temp2, ool->entry(), bigIntsCanBeInNursery());
9252   masm.initializeBigIntAbsolute(output, temp1);
9253 
9254   // Set the sign bit when the input is positive.
9255   masm.branchIfBigIntIsNegative(input, ool->rejoin());
9256   masm.or32(Imm32(BigInt::signBitMask()),
9257             Address(output, BigInt::offsetOfFlags()));
9258 
9259   masm.bind(ool->rejoin());
9260 }
9261 
void CodeGenerator::visitFloor(LFloor* lir) {
9263   FloatRegister input = ToFloatRegister(lir->input());
9264   Register output = ToRegister(lir->output());
9265 
9266   Label bail;
9267   masm.floorDoubleToInt32(input, output, &bail);
9268   bailoutFrom(&bail, lir->snapshot());
9269 }
9270 
void CodeGenerator::visitFloorF(LFloorF* lir) {
9272   FloatRegister input = ToFloatRegister(lir->input());
9273   Register output = ToRegister(lir->output());
9274 
9275   Label bail;
9276   masm.floorFloat32ToInt32(input, output, &bail);
9277   bailoutFrom(&bail, lir->snapshot());
9278 }
9279 
void CodeGenerator::visitCeil(LCeil* lir) {
9281   FloatRegister input = ToFloatRegister(lir->input());
9282   Register output = ToRegister(lir->output());
9283 
9284   Label bail;
9285   masm.ceilDoubleToInt32(input, output, &bail);
9286   bailoutFrom(&bail, lir->snapshot());
9287 }
9288 
void CodeGenerator::visitCeilF(LCeilF* lir) {
9290   FloatRegister input = ToFloatRegister(lir->input());
9291   Register output = ToRegister(lir->output());
9292 
9293   Label bail;
9294   masm.ceilFloat32ToInt32(input, output, &bail);
9295   bailoutFrom(&bail, lir->snapshot());
9296 }
9297 
void CodeGenerator::visitRound(LRound* lir) {
9299   FloatRegister input = ToFloatRegister(lir->input());
9300   FloatRegister temp = ToFloatRegister(lir->temp());
9301   Register output = ToRegister(lir->output());
9302 
9303   Label bail;
9304   masm.roundDoubleToInt32(input, output, temp, &bail);
9305   bailoutFrom(&bail, lir->snapshot());
9306 }
9307 
void CodeGenerator::visitRoundF(LRoundF* lir) {
9309   FloatRegister input = ToFloatRegister(lir->input());
9310   FloatRegister temp = ToFloatRegister(lir->temp());
9311   Register output = ToRegister(lir->output());
9312 
9313   Label bail;
9314   masm.roundFloat32ToInt32(input, output, temp, &bail);
9315   bailoutFrom(&bail, lir->snapshot());
9316 }
9317 
void CodeGenerator::visitTrunc(LTrunc* lir) {
9319   FloatRegister input = ToFloatRegister(lir->input());
9320   Register output = ToRegister(lir->output());
9321 
9322   Label bail;
9323   masm.truncDoubleToInt32(input, output, &bail);
9324   bailoutFrom(&bail, lir->snapshot());
9325 }
9326 
void CodeGenerator::visitTruncF(LTruncF* lir) {
9328   FloatRegister input = ToFloatRegister(lir->input());
9329   Register output = ToRegister(lir->output());
9330 
9331   Label bail;
9332   masm.truncFloat32ToInt32(input, output, &bail);
9333   bailoutFrom(&bail, lir->snapshot());
9334 }
9335 
void CodeGenerator::visitCompareS(LCompareS* lir) {
9337   JSOp op = lir->mir()->jsop();
9338   Register left = ToRegister(lir->left());
9339   Register right = ToRegister(lir->right());
9340   Register output = ToRegister(lir->output());
9341 
9342   OutOfLineCode* ool = nullptr;
9343 
9344   using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
9345   if (op == JSOp::Eq || op == JSOp::StrictEq) {
9346     ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
9347         lir, ArgList(left, right), StoreRegisterTo(output));
9348   } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
9349     ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
9350         lir, ArgList(left, right), StoreRegisterTo(output));
9351   } else if (op == JSOp::Lt) {
9352     ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
9353         lir, ArgList(left, right), StoreRegisterTo(output));
9354   } else if (op == JSOp::Le) {
9355     // Push the operands in reverse order for JSOp::Le:
9356     // - |left <= right| is implemented as |right >= left|.
9357     ool =
9358         oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
9359             lir, ArgList(right, left), StoreRegisterTo(output));
9360   } else if (op == JSOp::Gt) {
9361     // Push the operands in reverse order for JSOp::Gt:
9362     // - |left > right| is implemented as |right < left|.
9363     ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
9364         lir, ArgList(right, left), StoreRegisterTo(output));
9365   } else {
9366     MOZ_ASSERT(op == JSOp::Ge);
9367     ool =
9368         oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
9369             lir, ArgList(left, right), StoreRegisterTo(output));
9370   }
9371 
9372   masm.compareStrings(op, left, right, output, ool->entry());
9373 
9374   masm.bind(ool->rejoin());
9375 }
9376 
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
9378   JSOp op = lir->mir()->jsop();
9379   Register left = ToRegister(lir->left());
9380   Register right = ToRegister(lir->right());
9381   Register temp1 = ToRegister(lir->temp1());
9382   Register temp2 = ToRegister(lir->temp2());
9383   Register temp3 = ToRegister(lir->temp3());
9384   Register output = ToRegister(lir->output());
9385 
9386   Label notSame;
9387   Label compareSign;
9388   Label compareLength;
9389   Label compareDigit;
9390 
9391   Label* notSameSign;
9392   Label* notSameLength;
9393   Label* notSameDigit;
9394   if (IsEqualityOp(op)) {
9395     notSameSign = &notSame;
9396     notSameLength = &notSame;
9397     notSameDigit = &notSame;
9398   } else {
9399     notSameSign = &compareSign;
9400     notSameLength = &compareLength;
9401     notSameDigit = &compareDigit;
9402   }
9403 
  // Jump to |notSameSign| when the signs aren't the same.
9405   masm.load32(Address(left, BigInt::offsetOfFlags()), temp1);
9406   masm.xor32(Address(right, BigInt::offsetOfFlags()), temp1);
9407   masm.branchTest32(Assembler::NonZero, temp1, Imm32(BigInt::signBitMask()),
9408                     notSameSign);
9409 
  // Jump to |notSameLength| when the number of digits is different.
9411   masm.load32(Address(right, BigInt::offsetOfLength()), temp1);
9412   masm.branch32(Assembler::NotEqual, Address(left, BigInt::offsetOfLength()),
9413                 temp1, notSameLength);
9414 
9415   // Both BigInts have the same sign and the same number of digits. Loop over
9416   // each digit, starting with the left-most one, and break from the loop when
  // the first non-matching digit is found.
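  //
  // |temp2| and |temp3| are set up below to point just past the digit arrays,
  // and the loop then walks backwards, so the most significant digits are
  // compared first.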
9418 
9419   masm.loadBigIntDigits(left, temp2);
9420   masm.loadBigIntDigits(right, temp3);
9421 
9422   static_assert(sizeof(BigInt::Digit) == sizeof(void*),
9423                 "BigInt::Digit is pointer sized");
9424 
9425   masm.computeEffectiveAddress(BaseIndex(temp2, temp1, ScalePointer), temp2);
9426   masm.computeEffectiveAddress(BaseIndex(temp3, temp1, ScalePointer), temp3);
9427 
9428   Label start, loop;
9429   masm.jump(&start);
9430   masm.bind(&loop);
9431 
9432   masm.subPtr(Imm32(sizeof(BigInt::Digit)), temp2);
9433   masm.subPtr(Imm32(sizeof(BigInt::Digit)), temp3);
9434 
9435   masm.loadPtr(Address(temp3, 0), output);
9436   masm.branchPtr(Assembler::NotEqual, Address(temp2, 0), output, notSameDigit);
9437 
9438   masm.bind(&start);
9439   masm.branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);
9440 
  // No different digits were found, so both BigInts are equal to each other.
9442 
9443   Label done;
9444   masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
9445                     op == JSOp::Ge),
9446               output);
9447   masm.jump(&done);
9448 
9449   if (IsEqualityOp(op)) {
9450     masm.bind(&notSame);
9451     masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
9452   } else {
9453     Label invertWhenNegative;
9454 
9455     // There are two cases when sign(left) != sign(right):
9456     // 1. sign(left) = positive and sign(right) = negative,
9457     // 2. or the dual case with reversed signs.
9458     //
9459     // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
9460     // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
9461     // with |invertWhenNegative|.
9462     masm.bind(&compareSign);
9463     masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
9464     masm.jump(&invertWhenNegative);
9465 
9466     // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
9467     // we have to consider the two cases:
9468     // 1. len(digits(left)) < len(digits(right))
9469     // 2. len(digits(left)) > len(digits(right))
9470     //
9471     // For |left| <cmp> |right| with cmp=Lt:
9472     // Assume both BigInts are positive, then |left < right| is true for case 1
9473     // and false for case 2. When both are negative, the result is reversed.
9474     //
9475     // The other comparison operators can be handled similarly.
9476     //
9477     // |temp1| holds the digits length of the right-hand side operand.
9478     masm.bind(&compareLength);
9479     masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
9480                   Address(left, BigInt::offsetOfLength()), temp1, output);
9481     masm.jump(&invertWhenNegative);
9482 
9483     // Similar to the case above, compare the current digit to determine the
9484     // overall comparison result.
9485     //
9486     // |temp2| points to the current digit of the left-hand side operand.
9487     // |output| holds the current digit of the right-hand side operand.
9488     masm.bind(&compareDigit);
9489     masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
9490                    Address(temp2, 0), output, output);
9491 
9492     Label nonNegative;
9493     masm.bind(&invertWhenNegative);
9494     masm.branchIfBigIntIsNonNegative(left, &nonNegative);
9495     masm.xor32(Imm32(1), output);
9496     masm.bind(&nonNegative);
9497   }
9498 
9499   masm.bind(&done);
9500 }
9501 
void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
9503   JSOp op = lir->mir()->jsop();
9504   Register left = ToRegister(lir->left());
9505   Register right = ToRegister(lir->right());
9506   Register temp1 = ToRegister(lir->temp1());
9507   Register temp2 = ToRegister(lir->temp2());
9508   Register output = ToRegister(lir->output());
9509 
9510   Label ifTrue, ifFalse;
9511   masm.compareBigIntAndInt32(op, left, right, temp1, temp2, &ifTrue, &ifFalse);
9512 
9513   Label done;
9514   masm.bind(&ifFalse);
9515   masm.move32(Imm32(0), output);
9516   masm.jump(&done);
9517   masm.bind(&ifTrue);
9518   masm.move32(Imm32(1), output);
9519   masm.bind(&done);
9520 }
9521 
void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
9523   JSOp op = lir->mir()->jsop();
9524   Register left = ToRegister(lir->left());
9525   FloatRegister right = ToFloatRegister(lir->right());
9526   Register temp = ToRegister(lir->temp());
9527   Register output = ToRegister(lir->output());
9528 
9529   masm.setupUnalignedABICall(temp);
9530 
9531   // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
9532   // - |left <= right| is implemented as |right >= left|.
9533   // - |left > right| is implemented as |right < left|.
9534   if (op == JSOp::Le || op == JSOp::Gt) {
9535     masm.passABIArg(right, MoveOp::DOUBLE);
9536     masm.passABIArg(left);
9537   } else {
9538     masm.passABIArg(left);
9539     masm.passABIArg(right, MoveOp::DOUBLE);
9540   }
9541 
9542   using FnBigIntNumber = bool (*)(BigInt*, double);
9543   using FnNumberBigInt = bool (*)(double, BigInt*);
9544   switch (op) {
9545     case JSOp::Eq: {
9546       masm.callWithABI<FnBigIntNumber,
9547                        jit::BigIntNumberEqual<EqualityKind::Equal>>();
9548       break;
9549     }
9550     case JSOp::Ne: {
9551       masm.callWithABI<FnBigIntNumber,
9552                        jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
9553       break;
9554     }
9555     case JSOp::Lt: {
9556       masm.callWithABI<FnBigIntNumber,
9557                        jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
9558       break;
9559     }
9560     case JSOp::Gt: {
9561       masm.callWithABI<FnNumberBigInt,
9562                        jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
9563       break;
9564     }
9565     case JSOp::Le: {
9566       masm.callWithABI<
9567           FnNumberBigInt,
9568           jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
9569       break;
9570     }
9571     case JSOp::Ge: {
9572       masm.callWithABI<
9573           FnBigIntNumber,
9574           jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
9575       break;
9576     }
9577     default:
9578       MOZ_CRASH("unhandled op");
9579   }
9580 
9581   masm.storeCallBoolResult(output);
9582 }
9583 
void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
9585   JSOp op = lir->mir()->jsop();
9586   Register left = ToRegister(lir->left());
9587   Register right = ToRegister(lir->right());
9588 
9589   // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
9590   // - |left <= right| is implemented as |right >= left|.
9591   // - |left > right| is implemented as |right < left|.
9592   if (op == JSOp::Le || op == JSOp::Gt) {
9593     pushArg(left);
9594     pushArg(right);
9595   } else {
9596     pushArg(right);
9597     pushArg(left);
9598   }
9599 
9600   using FnBigIntString =
9601       bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
9602   using FnStringBigInt =
9603       bool (*)(JSContext*, HandleString, HandleBigInt, bool*);
9604 
9605   switch (op) {
9606     case JSOp::Eq: {
9607       constexpr auto Equal = EqualityKind::Equal;
9608       callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
9609       break;
9610     }
9611     case JSOp::Ne: {
9612       constexpr auto NotEqual = EqualityKind::NotEqual;
9613       callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
9614       break;
9615     }
9616     case JSOp::Lt: {
9617       constexpr auto LessThan = ComparisonKind::LessThan;
9618       callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
9619       break;
9620     }
9621     case JSOp::Gt: {
9622       constexpr auto LessThan = ComparisonKind::LessThan;
9623       callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
9624       break;
9625     }
9626     case JSOp::Le: {
9627       constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
9628       callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
9629       break;
9630     }
9631     case JSOp::Ge: {
9632       constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
9633       callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
9634       break;
9635     }
9636     default:
9637       MOZ_CRASH("Unexpected compare op");
9638   }
9639 }
9640 
void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
9642   JSOp op = lir->mir()->jsop();
9643   MCompare::CompareType compareType = lir->mir()->compareType();
9644   MOZ_ASSERT(compareType == MCompare::Compare_Undefined ||
9645              compareType == MCompare::Compare_Null);
9646 
9647   const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::Value);
9648   Register output = ToRegister(lir->output());
9649 
9650   if (op == JSOp::Eq || op == JSOp::Ne) {
9651     auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
9652     addOutOfLineCode(ool, lir->mir());
9653 
9654     Label* nullOrLikeUndefined = ool->label1();
9655     Label* notNullOrLikeUndefined = ool->label2();
9656 
9657     {
9658       ScratchTagScope tag(masm, value);
9659       masm.splitTagForTest(value, tag);
9660 
9661       masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
9662       masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);
9663 
9664       // Check whether it's a truthy object or a falsy object that emulates
9665       // undefined.
9666       masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
9667     }
9668 
9669     Register objreg =
9670         masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
9671     branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
9672                                       notNullOrLikeUndefined,
9673                                       ToRegister(lir->temp()), ool);
9674     // fall through
9675 
9676     Label done;
9677 
9678     // It's not null or undefined, and if it's an object it doesn't
9679     // emulate undefined, so it's not like undefined.
9680     masm.move32(Imm32(op == JSOp::Ne), output);
9681     masm.jump(&done);
9682 
9683     masm.bind(nullOrLikeUndefined);
9684     masm.move32(Imm32(op == JSOp::Eq), output);
9685 
9686     // Both branches meet here.
9687     masm.bind(&done);
9688     return;
9689   }
9690 
9691   MOZ_ASSERT(op == JSOp::StrictEq || op == JSOp::StrictNe);
9692 
9693   Assembler::Condition cond = JSOpToCondition(compareType, op);
9694   if (compareType == MCompare::Compare_Null) {
9695     masm.testNullSet(cond, value, output);
9696   } else {
9697     masm.testUndefinedSet(cond, value, output);
9698   }
9699 }
9700 
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
9702     LIsNullOrLikeUndefinedAndBranchV* lir) {
9703   JSOp op = lir->cmpMir()->jsop();
9704   MCompare::CompareType compareType = lir->cmpMir()->compareType();
9705   MOZ_ASSERT(compareType == MCompare::Compare_Undefined ||
9706              compareType == MCompare::Compare_Null);
9707 
9708   const ValueOperand value =
9709       ToValue(lir, LIsNullOrLikeUndefinedAndBranchV::Value);
9710 
9711   if (op == JSOp::Eq || op == JSOp::Ne) {
9712     MBasicBlock* ifTrue;
9713     MBasicBlock* ifFalse;
9714 
9715     if (op == JSOp::Eq) {
9716       ifTrue = lir->ifTrue();
9717       ifFalse = lir->ifFalse();
9718     } else {
9719       // Swap branches.
9720       ifTrue = lir->ifFalse();
9721       ifFalse = lir->ifTrue();
9722       op = JSOp::Eq;
9723     }
9724 
9725     auto* ool = new (alloc()) OutOfLineTestObject();
9726     addOutOfLineCode(ool, lir->cmpMir());
9727 
9728     Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
9729     Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
9730 
9731     {
9732       ScratchTagScope tag(masm, value);
9733       masm.splitTagForTest(value, tag);
9734 
9735       masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
9736       masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);
9737 
9738       masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
9739     }
9740 
9741     // Objects that emulate undefined are loosely equal to null/undefined.
9742     Register objreg =
9743         masm.extractObject(value, ToTempUnboxRegister(lir->tempToUnbox()));
9744     Register scratch = ToRegister(lir->temp());
9745     testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch,
9746                                 ool);
9747     return;
9748   }
9749 
9750   MOZ_ASSERT(op == JSOp::StrictEq || op == JSOp::StrictNe);
9751 
9752   Assembler::Condition cond = JSOpToCondition(compareType, op);
9753   if (compareType == MCompare::Compare_Null) {
9754     testNullEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
9755   } else {
9756     testUndefinedEmitBranch(cond, value, lir->ifTrue(), lir->ifFalse());
9757   }
9758 }
9759 
9760 void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
9761   MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
9762              lir->mir()->compareType() == MCompare::Compare_Null);
9763   MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);
9764 
9765   JSOp op = lir->mir()->jsop();
9766   MOZ_ASSERT(op == JSOp::Eq || op == JSOp::Ne,
9767              "Strict equality should have been folded");
9768 
9769   Register objreg = ToRegister(lir->input());
9770   Register output = ToRegister(lir->output());
9771 
9772   auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
9773   addOutOfLineCode(ool, lir->mir());
9774 
9775   Label* emulatesUndefined = ool->label1();
9776   Label* doesntEmulateUndefined = ool->label2();
9777 
9778   branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
9779                                     doesntEmulateUndefined, output, ool);
9780 
9781   Label done;
9782 
9783   masm.move32(Imm32(op == JSOp::Ne), output);
9784   masm.jump(&done);
9785 
9786   masm.bind(emulatesUndefined);
9787   masm.move32(Imm32(op == JSOp::Eq), output);
9788   masm.bind(&done);
9789 }
9790 
9791 void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
9792     LIsNullOrLikeUndefinedAndBranchT* lir) {
9793   DebugOnly<MCompare::CompareType> compareType = lir->cmpMir()->compareType();
9794   MOZ_ASSERT(compareType == MCompare::Compare_Undefined ||
9795              compareType == MCompare::Compare_Null);
9796   MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);
9797 
9798   JSOp op = lir->cmpMir()->jsop();
9799   MOZ_ASSERT(op == JSOp::Eq || op == JSOp::Ne,
9800              "Strict equality should have been folded");
9801 
9802   MBasicBlock* ifTrue;
9803   MBasicBlock* ifFalse;
9804 
9805   if (op == JSOp::Eq) {
9806     ifTrue = lir->ifTrue();
9807     ifFalse = lir->ifFalse();
9808   } else {
9809     // Swap branches.
9810     ifTrue = lir->ifFalse();
9811     ifFalse = lir->ifTrue();
9812   }
9813 
9814   Register input = ToRegister(lir->getOperand(0));
9815 
9816   auto* ool = new (alloc()) OutOfLineTestObject();
9817   addOutOfLineCode(ool, lir->cmpMir());
9818 
9819   Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
9820   Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);
9821 
9822   // Objects that emulate undefined are loosely equal to null/undefined.
9823   Register scratch = ToRegister(lir->temp());
9824   testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
9825 }
9826 
9827 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
9828   FloatRegister left = ToFloatRegister(lir->left());
9829   FloatRegister right = ToFloatRegister(lir->right());
9830   FloatRegister temp = ToFloatRegister(lir->tempFloat());
9831   Register output = ToRegister(lir->output());
9832 
9833   masm.sameValueDouble(left, right, temp, output);
9834 }
9835 
9836 void CodeGenerator::visitSameValue(LSameValue* lir) {
9837   ValueOperand lhs = ToValue(lir, LSameValue::LhsIndex);
9838   ValueOperand rhs = ToValue(lir, LSameValue::RhsIndex);
9839   Register output = ToRegister(lir->output());
9840 
9841   Label call, done;
9842 
9843   using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
9844   OutOfLineCode* ool =
9845       oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
9846 
9847   // First check to see if the values have identical bits.
9848   // This is correct for SameValue because SameValue(NaN,NaN) is true,
9849   // and SameValue(0,-0) is false.
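  // Non-identical bit patterns (e.g. two equal strings stored at different
  // addresses, or doubles with differing NaN payloads) fall back to the VM
  // call below, which runs the full SameValue algorithm.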
9850   masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
9851                 ool->entry());
9852   masm.move32(Imm32(1), output);
9853 
9854   // If this fails, call SameValue.
9855   masm.bind(ool->rejoin());
9856 }
9857 
9858 void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
9859                                Register output) {
9860   using Fn = JSString* (*)(JSContext*, HandleString, HandleString,
9861                            js::gc::InitialHeap);
9862   OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
9863       lir, ArgList(lhs, rhs, Imm32(int32_t(gc::DefaultHeap))),
9864       StoreRegisterTo(output));
9865 
9866   const JitRealm* jitRealm = gen->realm->jitRealm();
9867   JitCode* stringConcatStub =
9868       jitRealm->stringConcatStubNoBarrier(&realmStubsToReadBarrier_);
9869   masm.call(stringConcatStub);
9870   masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
9871 
9872   masm.bind(ool->rejoin());
9873 }
9874 
9875 void CodeGenerator::visitConcat(LConcat* lir) {
9876   Register lhs = ToRegister(lir->lhs());
9877   Register rhs = ToRegister(lir->rhs());
9878 
9879   Register output = ToRegister(lir->output());
9880 
9881   MOZ_ASSERT(lhs == CallTempReg0);
9882   MOZ_ASSERT(rhs == CallTempReg1);
9883   MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg0);
9884   MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg1);
9885   MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg2);
9886   MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg3);
9887   MOZ_ASSERT(ToRegister(lir->temp5()) == CallTempReg4);
9888   MOZ_ASSERT(output == CallTempReg5);
9889 
9890   emitConcat(lir, lhs, rhs, output);
9891 }
9892 
9893 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
9894                             Register len, Register byteOpScratch,
9895                             CharEncoding fromEncoding,
9896                             CharEncoding toEncoding) {
9897   // Copy |len| characters from |from| to |to|, converting between the given
9898   // encodings as needed. Assumes len > 0 (checked below in debug builds), and
9899   // when done |to| points to the next available char.
9900 
9901 #ifdef DEBUG
9902   Label ok;
9903   masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
9904   masm.assumeUnreachable("Length should be greater than 0.");
9905   masm.bind(&ok);
9906 #endif
9907 
9908   MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
9909                 fromEncoding == CharEncoding::Latin1);
9910 
9911   size_t fromWidth =
9912       fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
9913   size_t toWidth =
9914       toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
9915 
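  // A rough C sketch of the loop emitted below (each load/store moves 1 or 2
  // bytes depending on the encoding):
  //
  //   do {
  //     *to = *from;  // zero-extended when inflating Latin-1 to two-byte
  //     from += fromWidth;
  //     to += toWidth;
  //   } while (--len != 0);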
9916   Label start;
9917   masm.bind(&start);
9918   masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
9919   masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
9920   masm.addPtr(Imm32(fromWidth), from);
9921   masm.addPtr(Imm32(toWidth), to);
9922   masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
9923 }
9924 
9925 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
9926                             Register len, Register byteOpScratch,
9927                             CharEncoding encoding) {
9928   CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding);
9929 }
9930 
9931 static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
9932                                         Register destChars, Register temp1,
9933                                         Register temp2) {
9934   // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
9935   // have to inflate.
9936 
9937   Label isLatin1, done;
9938   masm.loadStringLength(input, temp1);
9939   masm.branchLatin1String(input, &isLatin1);
9940   {
9941     masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
9942     masm.movePtr(temp2, input);
9943     CopyStringChars(masm, destChars, input, temp1, temp2,
9944                     CharEncoding::TwoByte);
9945     masm.jump(&done);
9946   }
9947   masm.bind(&isLatin1);
9948   {
9949     masm.loadStringChars(input, temp2, CharEncoding::Latin1);
9950     masm.movePtr(temp2, input);
9951     CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
9952                     CharEncoding::TwoByte);
9953   }
9954   masm.bind(&done);
9955 }
9956 
9957 static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
9958                                Register output, Register temp1, Register temp2,
9959                                Register temp3, bool stringsCanBeInNursery,
9960                                Label* failure, CharEncoding encoding) {
9961   JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
9962           (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
9963 
9964   // State: result length in temp2.
9965 
9966   // Ensure both strings are linear.
9967   masm.branchIfRope(lhs, failure);
9968   masm.branchIfRope(rhs, failure);
9969 
9970   // Allocate a JSThinInlineString or JSFatInlineString.
9971   size_t maxThinInlineLength;
9972   if (encoding == CharEncoding::Latin1) {
9973     maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
9974   } else {
9975     maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
9976   }
9977 
9978   Label isFat, allocDone;
9979   masm.branch32(Assembler::Above, temp2, Imm32(maxThinInlineLength), &isFat);
9980   {
9981     uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
9982     if (encoding == CharEncoding::Latin1) {
9983       flags |= JSString::LATIN1_CHARS_BIT;
9984     }
9985     masm.newGCString(output, temp1, failure, stringsCanBeInNursery);
9986     masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
9987     masm.jump(&allocDone);
9988   }
9989   masm.bind(&isFat);
9990   {
9991     uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
9992     if (encoding == CharEncoding::Latin1) {
9993       flags |= JSString::LATIN1_CHARS_BIT;
9994     }
9995     masm.newGCFatInlineString(output, temp1, failure, stringsCanBeInNursery);
9996     masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
9997   }
9998   masm.bind(&allocDone);
9999 
10000   // Store length.
10001   masm.store32(temp2, Address(output, JSString::offsetOfLength()));
10002 
10003   // Load chars pointer in temp2.
10004   masm.loadInlineStringCharsForStore(output, temp2);
10005 
10006   auto copyChars = [&](Register src) {
10007     if (encoding == CharEncoding::TwoByte) {
10008       CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
10009     } else {
10010       masm.loadStringLength(src, temp3);
10011       masm.loadStringChars(src, temp1, CharEncoding::Latin1);
10012       masm.movePtr(temp1, src);
10013       CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
10014     }
10015   };
10016 
10017   // Copy lhs chars. Note that this advances temp2 to point to the next
10018   // char. This also clobbers the lhs register.
10019   copyChars(lhs);
10020 
10021   // Copy rhs chars. Clobbers the rhs register.
10022   copyChars(rhs);
10023 
10024   masm.ret();
10025 }
10026 
10027 void CodeGenerator::visitSubstr(LSubstr* lir) {
10028   Register string = ToRegister(lir->string());
10029   Register begin = ToRegister(lir->begin());
10030   Register length = ToRegister(lir->length());
10031   Register output = ToRegister(lir->output());
10032   Register temp = ToRegister(lir->temp());
10033   Register temp3 = ToRegister(lir->temp3());
10034 
10035   // On x86 there are not enough registers. In that case reuse the string
10036   // register as temporary.
10037   Register temp2 =
10038       lir->temp2()->isBogusTemp() ? string : ToRegister(lir->temp2());
10039 
10040   Address stringFlags(string, JSString::offsetOfFlags());
10041 
10042   Label isLatin1, notInline, nonZero, isInlinedLatin1;
10043 
10044   // For every edge case, use the C++ variant.
10045   // Note: we also use this upon allocation failure in newGCString and
10046   // newGCFatInlineString. To squeeze out even more performance, those failures
10047   // could be handled by allocating in OOL code and returning to JIT code to
10048   // fill in all data.
10049   using Fn = JSString* (*)(JSContext * cx, HandleString str, int32_t begin,
10050                            int32_t len);
10051   OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
10052       lir, ArgList(string, begin, length), StoreRegisterTo(output));
10053   Label* slowPath = ool->entry();
10054   Label* done = ool->rejoin();
10055 
10056   // Zero length: return the empty string.
10057   masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
10058   const JSAtomState& names = gen->runtime->names();
10059   masm.movePtr(ImmGCPtr(names.empty), output);
10060   masm.jump(done);
10061 
10062   // Use slow path for ropes.
10063   masm.bind(&nonZero);
10064   masm.branchIfRope(string, slowPath);
10065 
10066   // Handle inlined strings by creating a FatInlineString.
10067   masm.branchTest32(Assembler::Zero, stringFlags,
10068                     Imm32(JSString::INLINE_CHARS_BIT), &notInline);
10069   masm.newGCFatInlineString(output, temp, slowPath, stringsCanBeInNursery());
10070   masm.store32(length, Address(output, JSString::offsetOfLength()));
10071 
10072   auto initializeFatInlineString = [&](CharEncoding encoding) {
10073     uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
10074     if (encoding == CharEncoding::Latin1) {
10075       flags |= JSString::LATIN1_CHARS_BIT;
10076     }
10077 
10078     masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
10079     masm.loadInlineStringChars(string, temp, encoding);
10080     masm.addToCharPtr(temp, begin, encoding);
10081     if (temp2 == string) {
10082       masm.push(string);
10083     }
10084     masm.loadInlineStringCharsForStore(output, temp2);
10085     CopyStringChars(masm, temp2, temp, length, temp3, encoding);
10086     masm.loadStringLength(output, length);
10087     if (temp2 == string) {
10088       masm.pop(string);
10089     }
10090     masm.jump(done);
10091   };
10092 
10093   masm.branchLatin1String(string, &isInlinedLatin1);
10094   { initializeFatInlineString(CharEncoding::TwoByte); }
10095   masm.bind(&isInlinedLatin1);
10096   { initializeFatInlineString(CharEncoding::Latin1); }
10097 
10098   // Handle other cases with a DependentString.
10099   masm.bind(&notInline);
10100   masm.newGCString(output, temp, slowPath, gen->stringsCanBeInNursery());
10101   masm.store32(length, Address(output, JSString::offsetOfLength()));
10102   masm.storeDependentStringBase(string, output);
10103 
10104   auto initializeDependentString = [&](CharEncoding encoding) {
10105     uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
10106     if (encoding == CharEncoding::Latin1) {
10107       flags |= JSString::LATIN1_CHARS_BIT;
10108     }
10109 
10110     masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
10111     masm.loadNonInlineStringChars(string, temp, encoding);
10112     masm.addToCharPtr(temp, begin, encoding);
10113     masm.storeNonInlineStringChars(temp, output);
10114     masm.jump(done);
10115   };
10116 
10117   masm.branchLatin1String(string, &isLatin1);
10118   { initializeDependentString(CharEncoding::TwoByte); }
10119   masm.bind(&isLatin1);
10120   { initializeDependentString(CharEncoding::Latin1); }
10121 
10122   masm.bind(done);
10123 }
10124 
10125 JitCode* JitRealm::generateStringConcatStub(JSContext* cx) {
10126   JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");
10127 
10128   StackMacroAssembler masm(cx);
10129 
10130   Register lhs = CallTempReg0;
10131   Register rhs = CallTempReg1;
10132   Register temp1 = CallTempReg2;
10133   Register temp2 = CallTempReg3;
10134   Register temp3 = CallTempReg4;
10135   Register output = CallTempReg5;
10136 
10137   Label failure;
10138 #ifdef JS_USE_LINK_REGISTER
10139   masm.pushReturnAddress();
10140 #endif
10141   // If lhs is empty, return rhs.
10142   Label leftEmpty;
10143   masm.loadStringLength(lhs, temp1);
10144   masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
10145 
10146   // If rhs is empty, return lhs.
10147   Label rightEmpty;
10148   masm.loadStringLength(rhs, temp2);
10149   masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);
10150 
10151   masm.add32(temp1, temp2);
10152 
10153   // Check if we can use a JSFatInlineString. The result is a Latin1 string if
10154   // lhs and rhs are both Latin1, so we AND the flags.
10155   Label isFatInlineTwoByte, isFatInlineLatin1;
10156   masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
10157   masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);
10158 
10159   Label isLatin1, notInline;
10160   masm.branchTest32(Assembler::NonZero, temp1,
10161                     Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
10162   {
10163     masm.branch32(Assembler::BelowOrEqual, temp2,
10164                   Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
10165                   &isFatInlineTwoByte);
10166     masm.jump(&notInline);
10167   }
10168   masm.bind(&isLatin1);
10169   {
10170     masm.branch32(Assembler::BelowOrEqual, temp2,
10171                   Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
10172                   &isFatInlineLatin1);
10173   }
10174   masm.bind(&notInline);
10175 
10176   // Keep AND'ed flags in temp1.
10177 
10178   // Ensure result length <= JSString::MAX_LENGTH.
10179   masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
10180 
10181   // Allocate a new rope, guaranteed to be in the nursery if
10182   // stringsCanBeInNursery. (As a result, no post barriers are needed below.)
10183   masm.newGCString(output, temp3, &failure, stringsCanBeInNursery);
10184 
10185   // Store rope length and flags. temp1 still holds the result of AND'ing the
10186   // lhs and rhs flags, so we just have to clear the other flags to get our rope
10187   // flags (Latin1 if both lhs and rhs are Latin1).
10188   static_assert(JSString::INIT_ROPE_FLAGS == 0,
10189                 "Rope type flags must have no bits set");
10190   masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
10191   masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
10192   masm.store32(temp2, Address(output, JSString::offsetOfLength()));
10193 
10194   // Store left and right nodes.
10195   masm.storeRopeChildren(lhs, rhs, output);
10196   masm.ret();
10197 
10198   masm.bind(&leftEmpty);
10199   masm.mov(rhs, output);
10200   masm.ret();
10201 
10202   masm.bind(&rightEmpty);
10203   masm.mov(lhs, output);
10204   masm.ret();
10205 
10206   masm.bind(&isFatInlineTwoByte);
10207   ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
10208                      stringsCanBeInNursery, &failure, CharEncoding::TwoByte);
10209 
10210   masm.bind(&isFatInlineLatin1);
10211   ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
10212                      stringsCanBeInNursery, &failure, CharEncoding::Latin1);
10213 
10214   masm.pop(temp2);
10215   masm.pop(temp1);
10216 
10217   masm.bind(&failure);
10218   masm.movePtr(ImmPtr(nullptr), output);
10219   masm.ret();
10220 
10221   Linker linker(masm);
10222   JitCode* code = linker.newCode(cx, CodeKind::Other);
10223 
10224 #ifdef JS_ION_PERF
10225   writePerfSpewerJitCodeProfile(code, "StringConcatStub");
10226 #endif
10227 #ifdef MOZ_VTUNE
10228   vtune::MarkStub(code, "StringConcatStub");
10229 #endif
10230 
10231   return code;
10232 }
10233 
10234 void JitRuntime::generateFreeStub(MacroAssembler& masm) {
10235   const Register regSlots = CallTempReg0;
10236 
10237   freeStubOffset_ = startTrampolineCode(masm);
10238 
10239 #ifdef JS_USE_LINK_REGISTER
10240   masm.pushReturnAddress();
10241 #endif
10242   AllocatableRegisterSet regs(RegisterSet::Volatile());
10243   regs.takeUnchecked(regSlots);
10244   LiveRegisterSet save(regs.asLiveSet());
10245   masm.PushRegsInMask(save);
10246 
10247   const Register regTemp = regs.takeAnyGeneral();
10248   MOZ_ASSERT(regTemp != regSlots);
10249 
10250   using Fn = void (*)(void* p);
10251   masm.setupUnalignedABICall(regTemp);
10252   masm.passABIArg(regSlots);
10253   masm.callWithABI<Fn, js_free>(MoveOp::GENERAL,
10254                                 CheckUnsafeCallWithABI::DontCheckOther);
10255 
10256   masm.PopRegsInMask(save);
10257 
10258   masm.ret();
10259 }
10260 
10261 void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
10262   lazyLinkStubOffset_ = startTrampolineCode(masm);
10263 
10264 #ifdef JS_USE_LINK_REGISTER
10265   masm.pushReturnAddress();
10266 #endif
10267 
10268   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
10269   Register temp0 = regs.takeAny();
10270   Register temp1 = regs.takeAny();
10271   Register temp2 = regs.takeAny();
10272 
10273   masm.loadJSContext(temp0);
10274   masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
10275   masm.moveStackPtrTo(temp1);
10276 
10277   using Fn = uint8_t* (*)(JSContext * cx, LazyLinkExitFrameLayout * frame);
10278   masm.setupUnalignedABICall(temp2);
10279   masm.passABIArg(temp0);
10280   masm.passABIArg(temp1);
10281   masm.callWithABI<Fn, LazyLinkTopActivation>(
10282       MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
10283 
10284   masm.leaveExitFrame();
10285 
10286 #ifdef JS_USE_LINK_REGISTER
10287   // Restore the return address such that the emitPrologue function of the
10288   // CodeGenerator can push it back on the stack with pushReturnAddress.
10289   masm.popReturnAddress();
10290 #endif
10291   masm.jump(ReturnReg);
10292 }
10293 
10294 void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
10295   interpreterStubOffset_ = startTrampolineCode(masm);
10296 
10297 #ifdef JS_USE_LINK_REGISTER
10298   masm.pushReturnAddress();
10299 #endif
10300 
10301   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
10302   Register temp0 = regs.takeAny();
10303   Register temp1 = regs.takeAny();
10304   Register temp2 = regs.takeAny();
10305 
10306   masm.loadJSContext(temp0);
10307   masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
10308   masm.moveStackPtrTo(temp1);
10309 
10310   using Fn = bool (*)(JSContext * cx, InterpreterStubExitFrameLayout * frame);
10311   masm.setupUnalignedABICall(temp2);
10312   masm.passABIArg(temp0);
10313   masm.passABIArg(temp1);
10314   masm.callWithABI<Fn, InvokeFromInterpreterStub>(
10315       MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
10316 
10317   masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
10318   masm.leaveExitFrame();
10319 
10320   // InvokeFromInterpreterStub stores the return value in argv[0], where the
10321   // caller stored |this|.
10322   masm.loadValue(
10323       Address(masm.getStackPointer(), JitFrameLayout::offsetOfThis()),
10324       JSReturnOperand);
10325   masm.ret();
10326 }
10327 
10328 void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
10329   doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);
10330 
10331   Label done;
10332   masm.branchTestDouble(Assembler::NotEqual, R0, &done);
10333 
10334   masm.unboxDouble(R0, FloatReg0);
10335   masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
10336                             /* negativeZeroCheck = */ false);
10337   masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);
10338 
10339   masm.bind(&done);
10340   masm.abiret();
10341 }
10342 
10343 bool JitRuntime::generateTLEventVM(MacroAssembler& masm,
10344                                    const VMFunctionData& f, bool enter) {
10345 #ifdef JS_TRACE_LOGGING
10346   bool vmEventEnabled = TraceLogTextIdEnabled(TraceLogger_VM);
10347   bool vmSpecificEventEnabled = TraceLogTextIdEnabled(TraceLogger_VMSpecific);
10348 
10349   if (vmEventEnabled || vmSpecificEventEnabled) {
10350     AllocatableRegisterSet regs(RegisterSet::Volatile());
10351     Register loggerReg = regs.takeAnyGeneral();
10352     masm.Push(loggerReg);
10353     masm.loadTraceLogger(loggerReg);
10354 
10355     if (vmEventEnabled) {
10356       if (enter) {
10357         masm.tracelogStartId(loggerReg, TraceLogger_VM, /* force = */ true);
10358       } else {
10359         masm.tracelogStopId(loggerReg, TraceLogger_VM, /* force = */ true);
10360       }
10361     }
10362     if (vmSpecificEventEnabled) {
10363       TraceLoggerEvent event(f.name());
10364       if (!event.hasTextId()) {
10365         return false;
10366       }
10367 
10368       if (enter) {
10369         masm.tracelogStartId(loggerReg, event.textId(), /* force = */ true);
10370       } else {
10371         masm.tracelogStopId(loggerReg, event.textId(), /* force = */ true);
10372       }
10373     }
10374 
10375     masm.Pop(loggerReg);
10376   }
10377 #endif
10378 
10379   return true;
10380 }
10381 
10382 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
10383   Register str = ToRegister(lir->str());
10384   Register index = ToRegister(lir->index());
10385   Register output = ToRegister(lir->output());
10386   Register temp = ToRegister(lir->temp());
10387 
10388   using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
10389   OutOfLineCode* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
10390                                                       StoreRegisterTo(output));
10391   masm.loadStringChar(str, index, output, temp, ool->entry());
10392   masm.bind(ool->rejoin());
10393 }
10394 
10395 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
10396   Register code = ToRegister(lir->code());
10397   Register output = ToRegister(lir->output());
10398 
10399   using Fn = JSLinearString* (*)(JSContext*, int32_t);
10400   OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCharCode>(
10401       lir, ArgList(code), StoreRegisterTo(output));
10402 
10403   // OOL path if code >= UNIT_STATIC_LIMIT.
10404   masm.boundsCheck32PowerOfTwo(code, StaticStrings::UNIT_STATIC_LIMIT,
10405                                ool->entry());
10406 
10407   masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable), output);
10408   masm.loadPtr(BaseIndex(output, code, ScalePointer), output);
10409 
10410   masm.bind(ool->rejoin());
10411 }
10412 
10413 void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
10414   Register codePoint = ToRegister(lir->codePoint());
10415   Register output = ToRegister(lir->output());
10416   Register temp1 = ToRegister(lir->temp1());
10417   Register temp2 = ToRegister(lir->temp2());
10418   LSnapshot* snapshot = lir->snapshot();
10419 
10420   // The OOL path is only taken when we can't allocate the inline string.
10421   using Fn = JSString* (*)(JSContext*, int32_t);
10422   OutOfLineCode* ool = oolCallVM<Fn, jit::StringFromCodePoint>(
10423       lir, ArgList(codePoint), StoreRegisterTo(output));
10424 
10425   Label isTwoByte;
10426   Label* done = ool->rejoin();
10427 
10428   static_assert(
10429       StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
10430       "Latin-1 strings can be loaded from static strings");
10431   masm.boundsCheck32PowerOfTwo(codePoint, StaticStrings::UNIT_STATIC_LIMIT,
10432                                &isTwoByte);
10433   {
10434     masm.movePtr(ImmPtr(&gen->runtime->staticStrings().unitStaticTable),
10435                  output);
10436     masm.loadPtr(BaseIndex(output, codePoint, ScalePointer), output);
10437     masm.jump(done);
10438   }
10439   masm.bind(&isTwoByte);
10440   {
10441     // Use a bailout if the input is not a valid code point, because
10442     // MFromCodePoint is movable and it'd be observable when a moved
10443     // fromCodePoint throws an exception before its actual call site.
10444     bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
10445                  snapshot);
10446 
10447     // Allocate a JSThinInlineString.
10448     {
10449       static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
10450                     "JSThinInlineString can hold a supplementary code point");
10451 
10452       uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
10453       masm.newGCString(output, temp1, ool->entry(),
10454                        gen->stringsCanBeInNursery());
10455       masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
10456     }
10457 
10458     Label isSupplementary;
10459     masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
10460                   &isSupplementary);
10461     {
10462       // Store length.
10463       masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));
10464 
10465       // Load chars pointer in temp1.
10466       masm.loadInlineStringCharsForStore(output, temp1);
10467 
10468       masm.store16(codePoint, Address(temp1, 0));
10469 
10470       masm.jump(done);
10471     }
10472     masm.bind(&isSupplementary);
10473     {
10474       // Store length.
10475       masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));
10476 
10477       // Load chars pointer in temp1.
10478       masm.loadInlineStringCharsForStore(output, temp1);
10479 
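      // For a supplementary code point cp in [0x10000, 0x10FFFF], the UTF-16
      // surrogate pair is (a sketch of the two inlined computations below):
      //
      //   lead  = 0xD800 + ((cp - 0x10000) >> 10)
      //         = (cp >> 10) + (LeadSurrogateMin - (NonBMPMin >> 10))
      //   trail = 0xDC00 | (cp & 0x3FF)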
10480       // Inlined unicode::LeadSurrogate(uint32_t).
10481       masm.move32(codePoint, temp2);
10482       masm.rshift32(Imm32(10), temp2);
10483       masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
10484                  temp2);
10485 
10486       masm.store16(temp2, Address(temp1, 0));
10487 
10488       // Inlined unicode::TrailSurrogate(uint32_t).
10489       masm.move32(codePoint, temp2);
10490       masm.and32(Imm32(0x3FF), temp2);
10491       masm.or32(Imm32(unicode::TrailSurrogateMin), temp2);
10492 
10493       masm.store16(temp2, Address(temp1, sizeof(char16_t)));
10494     }
10495   }
10496 
10497   masm.bind(done);
10498 }
10499 
10500 void CodeGenerator::visitStringConvertCase(LStringConvertCase* lir) {
10501   pushArg(ToRegister(lir->string()));
10502 
10503   using Fn = JSString* (*)(JSContext*, HandleString);
10504   if (lir->mir()->mode() == MStringConvertCase::LowerCase) {
10505     callVM<Fn, js::StringToLowerCase>(lir);
10506   } else {
10507     callVM<Fn, js::StringToUpperCase>(lir);
10508   }
10509 }
10510 
10511 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
10512   pushArg(Imm32(INT32_MAX));
10513   pushArg(ToRegister(lir->separator()));
10514   pushArg(ToRegister(lir->string()));
10515 
10516   using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
10517   callVM<Fn, js::StringSplitString>(lir);
10518 }
10519 
10520 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
10521   Address initLength(ToRegister(lir->elements()),
10522                      ObjectElements::offsetOfInitializedLength());
10523   masm.load32(initLength, ToRegister(lir->output()));
10524 }
10525 
10526 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
10527   Address initLength(ToRegister(lir->elements()),
10528                      ObjectElements::offsetOfInitializedLength());
10529   SetLengthFromIndex(masm, lir->index(), initLength);
10530 }
10531 
10532 void CodeGenerator::visitNotBI(LNotBI* lir) {
10533   Register input = ToRegister(lir->input());
10534   Register output = ToRegister(lir->output());
10535 
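  // A BigInt is falsy only for 0n which, as the check below assumes, is
  // represented with a digit length of zero, so |!x| reduces to comparing the
  // length word against 0.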
10536   masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
10537                 Imm32(0), output);
10538 }
10539 
10540 void CodeGenerator::visitNotO(LNotO* lir) {
10541   auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
10542   addOutOfLineCode(ool, lir->mir());
10543 
10544   Label* ifEmulatesUndefined = ool->label1();
10545   Label* ifDoesntEmulateUndefined = ool->label2();
10546 
10547   Register objreg = ToRegister(lir->input());
10548   Register output = ToRegister(lir->output());
10549   branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
10550                                     ifDoesntEmulateUndefined, output, ool);
10551   // fall through
10552 
10553   Label join;
10554 
10555   masm.move32(Imm32(0), output);
10556   masm.jump(&join);
10557 
10558   masm.bind(ifEmulatesUndefined);
10559   masm.move32(Imm32(1), output);
10560 
10561   masm.bind(&join);
10562 }
10563 
10564 void CodeGenerator::visitNotV(LNotV* lir) {
10565   auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
10566   addOutOfLineCode(ool, lir->mir());
10567 
10568   Label* ifTruthy = ool->label1();
10569   Label* ifFalsy = ool->label2();
10570 
10571   TypeDataList observed = lir->mir()->observedTypes();
10572   testValueTruthyKernel(ToValue(lir, LNotV::Input), lir->temp1(), lir->temp2(),
10573                         ToFloatRegister(lir->tempFloat()), observed, ifTruthy,
10574                         ifFalsy, ool);
10575 
10576   Label join;
10577   Register output = ToRegister(lir->output());
10578 
10579   // Note that the testValueTruthyKernel call above may choose to fall through
10580   // to ifTruthy instead of branching there.
10581   masm.bind(ifTruthy);
10582   masm.move32(Imm32(0), output);
10583   masm.jump(&join);
10584 
10585   masm.bind(ifFalsy);
10586   masm.move32(Imm32(1), output);
10587 
10588   // Both branches meet here.
10589   masm.bind(&join);
10590 }
10591 
10592 void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
10593   const LAllocation* index = lir->index();
10594   const LAllocation* length = lir->length();
10595   LSnapshot* snapshot = lir->snapshot();
10596 
10597   MIRType type = lir->mir()->type();
10598 
10599   auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
10600     if (type == MIRType::Int32) {
10601       bailoutCmp32(cond, lhs, rhs, snapshot);
10602     } else {
10603       MOZ_ASSERT(type == MIRType::IntPtr);
10604       bailoutCmpPtr(cond, lhs, rhs, snapshot);
10605     }
10606   };
10607 
10608   auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
10609                                 int32_t rhs) {
10610     if (type == MIRType::Int32) {
10611       bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
10612     } else {
10613       MOZ_ASSERT(type == MIRType::IntPtr);
10614       bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
10615     }
10616   };
10617 
10618   if (index->isConstant()) {
10619     // Use uint32 so that the comparison is unsigned.
10620     uint32_t idx = ToInt32(index);
10621     if (length->isConstant()) {
10622       uint32_t len = ToInt32(lir->length());
10623       if (idx < len) {
10624         return;
10625       }
10626       bailout(snapshot);
10627       return;
10628     }
10629 
10630     if (length->isRegister()) {
10631       bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
10632     } else {
10633       bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
10634     }
10635     return;
10636   }
10637 
10638   Register indexReg = ToRegister(index);
10639   if (length->isConstant()) {
10640     bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
10641   } else if (length->isRegister()) {
10642     bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
10643   } else {
10644     bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
10645   }
10646 }
10647 
10648 void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
10649   int32_t min = lir->mir()->minimum();
10650   int32_t max = lir->mir()->maximum();
10651   MOZ_ASSERT(max >= min);
10652 
10653   LSnapshot* snapshot = lir->snapshot();
10654   MIRType type = lir->mir()->type();
10655 
10656   const LAllocation* length = lir->length();
10657   Register temp = ToRegister(lir->getTemp(0));
10658 
10659   auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
10660     if (type == MIRType::Int32) {
10661       bailoutCmp32(cond, lhs, rhs, snapshot);
10662     } else {
10663       MOZ_ASSERT(type == MIRType::IntPtr);
10664       bailoutCmpPtr(cond, lhs, rhs, snapshot);
10665     }
10666   };
10667 
10668   auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
10669                                 int32_t rhs) {
10670     if (type == MIRType::Int32) {
10671       bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
10672     } else {
10673       MOZ_ASSERT(type == MIRType::IntPtr);
10674       bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
10675     }
10676   };
10677 
10678   if (lir->index()->isConstant()) {
10679     int32_t nmin, nmax;
10680     int32_t index = ToInt32(lir->index());
10681     if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
10682       if (length->isRegister()) {
10683         bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
10684       } else {
10685         bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
10686       }
10687       return;
10688     }
10689     masm.mov(ImmWord(index), temp);
10690   } else {
10691     masm.mov(ToRegister(lir->index()), temp);
10692   }
10693 
10694   // If the minimum and maximum differ then do an underflow check first.
10695   // If the two are the same then doing an unsigned comparison on the
10696   // length will also catch a negative index.
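  // For example, with min = -1 and max = 1 this guards every access in
  // [index - 1, index + 1]: first check that index + min does not go below
  // zero, then check that index + max is below the length.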
10697   if (min != max) {
10698     if (min != 0) {
10699       Label bail;
10700       if (type == MIRType::Int32) {
10701         masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
10702       } else {
10703         masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
10704       }
10705       bailoutFrom(&bail, snapshot);
10706     }
10707 
10708     bailoutCmpConstant(Assembler::LessThan, temp, 0);
10709 
10710     if (min != 0) {
10711       int32_t diff;
10712       if (SafeSub(max, min, &diff)) {
10713         max = diff;
10714       } else {
10715         if (type == MIRType::Int32) {
10716           masm.sub32(Imm32(min), temp);
10717         } else {
10718           masm.subPtr(Imm32(min), temp);
10719         }
10720       }
10721     }
10722   }
10723 
10724   // Compute the maximum possible index. No overflow check is needed when
10725   // max > 0. We can only wraparound to a negative number, which will test as
10726   // larger than all nonnegative numbers in the unsigned comparison, and the
10727   // length is required to be nonnegative (else testing a negative length
10728   // would succeed on any nonnegative index).
10729   if (max != 0) {
10730     if (max < 0) {
10731       Label bail;
10732       if (type == MIRType::Int32) {
10733         masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
10734       } else {
10735         masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
10736       }
10737       bailoutFrom(&bail, snapshot);
10738     } else {
10739       if (type == MIRType::Int32) {
10740         masm.add32(Imm32(max), temp);
10741       } else {
10742         masm.addPtr(Imm32(max), temp);
10743       }
10744     }
10745   }
10746 
10747   if (length->isRegister()) {
10748     bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
10749   } else {
10750     bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
10751   }
10752 }
10753 
10754 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
10755   int32_t min = lir->mir()->minimum();
10756   bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
10757                lir->snapshot());
10758 }
10759 
10760 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
10761   MOZ_ASSERT(JitOptions.spectreIndexMasking);
10762 
10763   const LAllocation* length = lir->length();
10764   Register index = ToRegister(lir->index());
10765   Register output = ToRegister(lir->output());
10766 
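  // Sketch of the intent: on the architecturally taken path the index has
  // already been bounds checked, so the masking below only matters under
  // misspeculation, where an out-of-bounds index is replaced with 0 rather
  // than being used to read arbitrary memory.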
10767   if (lir->mir()->type() == MIRType::Int32) {
10768     if (length->isRegister()) {
10769       masm.spectreMaskIndex32(index, ToRegister(length), output);
10770     } else {
10771       masm.spectreMaskIndex32(index, ToAddress(length), output);
10772     }
10773   } else {
10774     MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
10775     if (length->isRegister()) {
10776       masm.spectreMaskIndexPtr(index, ToRegister(length), output);
10777     } else {
10778       masm.spectreMaskIndexPtr(index, ToAddress(length), output);
10779     }
10780   }
10781 }
10782 
10783 class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator> {
10784   LInstruction* ins_;
10785   Label rejoinStore_;
10786   Label callStub_;
10787   bool strict_;
10788 
10789  public:
10790   explicit OutOfLineStoreElementHole(LInstruction* ins, bool strict)
10791       : ins_(ins), strict_(strict) {
10792     MOZ_ASSERT(ins->isStoreElementHoleV() || ins->isStoreElementHoleT());
10793   }
10794 
10795   void accept(CodeGenerator* codegen) override {
10796     codegen->visitOutOfLineStoreElementHole(this);
10797   }
10798   LInstruction* ins() const { return ins_; }
10799   Label* rejoinStore() { return &rejoinStore_; }
10800   Label* callStub() { return &callStub_; }
10801   bool strict() const { return strict_; }
10802 };
10803 
10804 void CodeGenerator::emitStoreHoleCheck(Register elements,
10805                                        const LAllocation* index,
10806                                        LSnapshot* snapshot) {
10807   Label bail;
10808   if (index->isConstant()) {
10809     Address dest(elements, ToInt32(index) * sizeof(js::Value));
10810     masm.branchTestMagic(Assembler::Equal, dest, &bail);
10811   } else {
10812     BaseObjectElementIndex dest(elements, ToRegister(index));
10813     masm.branchTestMagic(Assembler::Equal, dest, &bail);
10814   }
10815   bailoutFrom(&bail, snapshot);
10816 }
10817 
10818 void CodeGenerator::emitStoreElementTyped(const LAllocation* value,
10819                                           MIRType valueType,
10820                                           MIRType elementType,
10821                                           Register elements,
10822                                           const LAllocation* index) {
10823   MOZ_ASSERT(valueType != MIRType::MagicHole);
10824   ConstantOrRegister v = ToConstantOrRegister(value, valueType);
10825   if (index->isConstant()) {
10826     Address dest(elements, ToInt32(index) * sizeof(js::Value));
10827     masm.storeUnboxedValue(v, valueType, dest, elementType);
10828   } else {
10829     BaseObjectElementIndex dest(elements, ToRegister(index));
10830     masm.storeUnboxedValue(v, valueType, dest, elementType);
10831   }
10832 }
10833 
10834 void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
10835   Register elements = ToRegister(store->elements());
10836   const LAllocation* index = store->index();
10837 
10838   if (store->mir()->needsBarrier()) {
10839     emitPreBarrier(elements, index);
10840   }
10841 
10842   if (store->mir()->needsHoleCheck()) {
10843     emitStoreHoleCheck(elements, index, store->snapshot());
10844   }
10845 
10846   emitStoreElementTyped(store->value(), store->mir()->value()->type(),
10847                         store->mir()->elementType(), elements, index);
10848 }
10849 
10850 void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
10851   const ValueOperand value = ToValue(lir, LStoreElementV::Value);
10852   Register elements = ToRegister(lir->elements());
10853   const LAllocation* index = lir->index();
10854 
10855   if (lir->mir()->needsBarrier()) {
10856     emitPreBarrier(elements, index);
10857   }
10858 
10859   if (lir->mir()->needsHoleCheck()) {
10860     emitStoreHoleCheck(elements, index, lir->snapshot());
10861   }
10862 
10863   if (lir->index()->isConstant()) {
10864     Address dest(elements, ToInt32(lir->index()) * sizeof(js::Value));
10865     masm.storeValue(value, dest);
10866   } else {
10867     BaseObjectElementIndex dest(elements, ToRegister(lir->index()));
10868     masm.storeValue(value, dest);
10869   }
10870 }
10871 
10872 void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
10873   Register elements = ToRegister(lir->elements());
10874   Register index = ToRegister(lir->index());
10875 
10876   Address elementsFlags(elements, ObjectElements::offsetOfFlags());
10877   masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
10878 
10879   BaseObjectElementIndex element(elements, index);
10880   masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
10881 }
10882 
10883 void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
10884   OutOfLineStoreElementHole* ool =
10885       new (alloc()) OutOfLineStoreElementHole(lir, current->mir()->strict());
10886   addOutOfLineCode(ool, lir->mir());
10887 
10888   Register elements = ToRegister(lir->elements());
10889   Register index = ToRegister(lir->index());
10890   Register spectreTemp = ToTempRegisterOrInvalid(lir->spectreTemp());
10891 
10892   Address initLength(elements, ObjectElements::offsetOfInitializedLength());
10893   masm.spectreBoundsCheck32(index, initLength, spectreTemp, ool->entry());
10894 
10895   if (lir->mir()->needsBarrier()) {
10896     emitPreBarrier(elements, lir->index());
10897   }
10898 
10899   masm.bind(ool->rejoinStore());
10900   emitStoreElementTyped(lir->value(), lir->mir()->value()->type(),
10901                         lir->mir()->elementType(), elements, lir->index());
10902 
10903   masm.bind(ool->rejoin());
10904 }
10905 
10906 void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
10907   OutOfLineStoreElementHole* ool =
10908       new (alloc()) OutOfLineStoreElementHole(lir, current->mir()->strict());
10909   addOutOfLineCode(ool, lir->mir());
10910 
10911   Register elements = ToRegister(lir->elements());
10912   Register index = ToRegister(lir->index());
10913   const ValueOperand value = ToValue(lir, LStoreElementHoleV::Value);
10914   Register spectreTemp = ToTempRegisterOrInvalid(lir->spectreTemp());
10915 
10916   Address initLength(elements, ObjectElements::offsetOfInitializedLength());
10917   masm.spectreBoundsCheck32(index, initLength, spectreTemp, ool->entry());
10918 
10919   if (lir->mir()->needsBarrier()) {
10920     emitPreBarrier(elements, lir->index());
10921   }
10922 
10923   masm.bind(ool->rejoinStore());
10924   masm.storeValue(value, BaseObjectElementIndex(elements, index));
10925 
10926   masm.bind(ool->rejoin());
10927 }
10928 
10929 void CodeGenerator::visitOutOfLineStoreElementHole(
10930     OutOfLineStoreElementHole* ool) {
10931   Register object, elements;
10932   LInstruction* ins = ool->ins();
10933   const LAllocation* index;
10934   MIRType valueType;
10935   mozilla::Maybe<ConstantOrRegister> value;
10936   Register spectreTemp;
10937 
10938   if (ins->isStoreElementHoleV()) {
10939     LStoreElementHoleV* store = ins->toStoreElementHoleV();
10940     object = ToRegister(store->object());
10941     elements = ToRegister(store->elements());
10942     index = store->index();
10943     valueType = store->mir()->value()->type();
10944     value.emplace(
10945         TypedOrValueRegister(ToValue(store, LStoreElementHoleV::Value)));
10946     spectreTemp = ToTempRegisterOrInvalid(store->spectreTemp());
10947   } else {
10948     LStoreElementHoleT* store = ins->toStoreElementHoleT();
10949     object = ToRegister(store->object());
10950     elements = ToRegister(store->elements());
10951     index = store->index();
10952     valueType = store->mir()->value()->type();
10953     if (store->value()->isConstant()) {
10954       value.emplace(
10955           ConstantOrRegister(store->value()->toConstant()->toJSValue()));
10956     } else {
10957       value.emplace(
10958           TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
10959     }
10960     spectreTemp = ToTempRegisterOrInvalid(store->spectreTemp());
10961   }
10962 
10963   Register indexReg = ToRegister(index);
10964 
10965   // If index == initializedLength, try to bump the initialized length inline.
10966   // If index > initializedLength, call a stub. Note that this relies on the
10967   // condition flags sticking from the incoming branch.
10968   // Also note: this branch does not need Spectre mitigations, doing that for
10969   // the capacity check below is sufficient.
10970 #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
10971   // Had to reimplement for MIPS because there are no flags.
10972   Address initLength(elements, ObjectElements::offsetOfInitializedLength());
10973   masm.branch32(Assembler::NotEqual, initLength, indexReg, ool->callStub());
10974 #else
10975   masm.j(Assembler::NotEqual, ool->callStub());
10976 #endif
10977 
10978   // Check array capacity.
10979   masm.spectreBoundsCheck32(
10980       indexReg, Address(elements, ObjectElements::offsetOfCapacity()),
10981       spectreTemp, ool->callStub());
10982 
10983   // Update initialized length. The capacity guard above ensures this won't
10984   // overflow, due to MAX_DENSE_ELEMENTS_COUNT.
10985   masm.add32(Imm32(1), indexReg);
10986   masm.store32(indexReg,
10987                Address(elements, ObjectElements::offsetOfInitializedLength()));
10988 
10989   // Update length if length < initializedLength.
10990   Label dontUpdate;
10991   masm.branch32(Assembler::AboveOrEqual,
10992                 Address(elements, ObjectElements::offsetOfLength()), indexReg,
10993                 &dontUpdate);
10994   masm.store32(indexReg, Address(elements, ObjectElements::offsetOfLength()));
10995   masm.bind(&dontUpdate);
10996 
10997   masm.sub32(Imm32(1), indexReg);
10998 
10999   if (ins->isStoreElementHoleT() && valueType != MIRType::Double) {
11000     // The inline path for StoreElementHoleT does not always store the type tag,
11001     // so we do the store on the OOL path. We use MIRType::None for the element
11002     // type so that emitStoreElementTyped will always store the type tag.
11003     emitStoreElementTyped(ins->toStoreElementHoleT()->value(), valueType,
11004                           MIRType::None, elements, index);
11005     masm.jump(ool->rejoin());
11006   } else {
11007     // Jump to the inline path where we will store the value.
11008     masm.jump(ool->rejoinStore());
11009   }
11010 
11011   masm.bind(ool->callStub());
11012   saveLive(ins);
11013 
11014   pushArg(Imm32(ool->strict()));
11015   pushArg(value.ref());
11016   if (index->isConstant()) {
11017     pushArg(Imm32(ToInt32(index)));
11018   } else {
11019     pushArg(ToRegister(index));
11020   }
11021   pushArg(object);
11022 
11023   using Fn = bool (*)(JSContext*, HandleNativeObject, int32_t, HandleValue,
11024                       bool strict);
11025   callVM<Fn, jit::SetDenseElement>(ins);
11026 
11027   restoreLive(ins);
11028   masm.jump(ool->rejoin());
11029 }
11030 
11031 void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
11032   Register obj = ToRegister(lir->object());
11033   Register temp1 = ToRegister(lir->temp0());
11034   Register temp2 = ToRegister(lir->temp1());
11035   ValueOperand out = ToOutValue(lir);
11036 
11037   Label bail;
11038   if (lir->mir()->mode() == MArrayPopShift::Pop) {
11039     masm.packedArrayPop(obj, out, temp1, temp2, &bail);
11040   } else {
11041     MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
11042     LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
11043     masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
11044   }
11045   bailoutFrom(&bail, lir->snapshot());
11046 }
11047 
11048 void CodeGenerator::visitArrayPush(LArrayPush* lir) {
11049   Register obj = ToRegister(lir->object());
11050   Register elementsTemp = ToRegister(lir->temp());
11051   Register length = ToRegister(lir->output());
11052   ValueOperand value = ToValue(lir, LArrayPush::Value);
11053   Register spectreTemp = ToTempRegisterOrInvalid(lir->spectreTemp());
11054 
11055   using Fn = bool (*)(JSContext*, HandleArrayObject, HandleValue, uint32_t*);
11056   OutOfLineCode* ool = oolCallVM<Fn, jit::ArrayPushDense>(
11057       lir, ArgList(obj, value), StoreRegisterTo(length));
11058 
11059   // Load elements and length.
11060   masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);
11061   masm.load32(Address(elementsTemp, ObjectElements::offsetOfLength()), length);
11062 
11063   // TODO(post-Warp): reuse/share the CacheIR implementation when IonBuilder and
11064   // TI are gone (bug 1654180).
11065 
11066   // Bailout if the incremented length does not fit in int32.
11067   bailoutCmp32(Assembler::AboveOrEqual, length, Imm32(INT32_MAX),
11068                lir->snapshot());
11069 
11070   // Guard length == initializedLength.
11071   Address initLength(elementsTemp, ObjectElements::offsetOfInitializedLength());
11072   masm.branch32(Assembler::NotEqual, initLength, length, ool->entry());
11073 
11074   // Guard length < capacity.
11075   Address capacity(elementsTemp, ObjectElements::offsetOfCapacity());
11076   masm.spectreBoundsCheck32(length, capacity, spectreTemp, ool->entry());
11077 
11078   // Do the store.
11079   masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));
11080 
11081   masm.add32(Imm32(1), length);
11082 
11083   // Update length and initialized length.
11084   masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
11085   masm.store32(length, Address(elementsTemp,
11086                                ObjectElements::offsetOfInitializedLength()));
11087 
11088   masm.bind(ool->rejoin());
11089 }
11090 
11091 void CodeGenerator::visitArraySlice(LArraySlice* lir) {
11092   Register object = ToRegister(lir->object());
11093   Register begin = ToRegister(lir->begin());
11094   Register end = ToRegister(lir->end());
11095   Register temp1 = ToRegister(lir->temp1());
11096   Register temp2 = ToRegister(lir->temp2());
11097 
11098   Label call, fail;
11099 
11100   Label bail;
11101   masm.branchArrayIsNotPacked(object, temp1, temp2, &bail);
11102   bailoutFrom(&bail, lir->snapshot());
11103 
11104   // Try to allocate an object.
11105   TemplateObject templateObject(lir->mir()->templateObj());
11106   masm.createGCObject(temp1, temp2, templateObject, lir->mir()->initialHeap(),
11107                       &fail);
11108 
11109   masm.jump(&call);
11110   {
11111     masm.bind(&fail);
11112     masm.movePtr(ImmPtr(nullptr), temp1);
11113   }
11114   masm.bind(&call);
11115 
11116   pushArg(temp1);
11117   pushArg(end);
11118   pushArg(begin);
11119   pushArg(object);
11120 
11121   using Fn =
11122       JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
11123   callVM<Fn, ArraySliceDense>(lir);
11124 }
11125 
11126 void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
11127   Label skipCall;
11128 
11129   Register output = ToRegister(lir->output());
11130   Register sep = ToRegister(lir->separator());
11131   Register array = ToRegister(lir->array());
11132   Register temp = ToRegister(lir->temp());
11133 
11134   // Fast path for simple length <= 1 cases.
11135   {
11136     masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
11137     Address length(temp, ObjectElements::offsetOfLength());
11138     Address initLength(temp, ObjectElements::offsetOfInitializedLength());
11139 
11140     // Check for length == 0
11141     Label notEmpty;
11142     masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
11143     const JSAtomState& names = GetJitContext()->runtime->names();
11144     masm.movePtr(ImmGCPtr(names.empty), output);
11145     masm.jump(&skipCall);
11146 
11147     masm.bind(&notEmpty);
11148     Label notSingleString;
11149     // Check for length == 1, initializedLength >= 1, arr[0].isString()
11150     masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
11151     masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);
11152 
11153     Address elem0(temp, 0);
11154     masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);
11155 
11156     // At this point, 'output' can be used as a scratch register, since we're
11157     // guaranteed to succeed.
11158     masm.unboxString(elem0, output);
11159     masm.jump(&skipCall);
11160     masm.bind(&notSingleString);
11161   }
11162 
11163   pushArg(sep);
11164   pushArg(array);
11165 
11166   using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
11167   callVM<Fn, jit::ArrayJoin>(lir);
11168   masm.bind(&skipCall);
11169 }
11170 
11171 void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
11172   LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
11173   TypedOrValueRegister val = toConstantOrRegister(lir, LGetIteratorCache::Value,
11174                                                   lir->mir()->value()->type())
11175                                  .reg();
11176   Register output = ToRegister(lir->output());
11177   Register temp1 = ToRegister(lir->temp1());
11178   Register temp2 = ToRegister(lir->temp2());
11179 
11180   IonGetIteratorIC ic(liveRegs, val, output, temp1, temp2);
11181   addIC(lir, allocateIC(ic));
11182 }
11183 
11184 void CodeGenerator::visitOptimizeSpreadCallCache(
11185     LOptimizeSpreadCallCache* lir) {
11186   LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
11187   ValueOperand val = ToValue(lir, LOptimizeSpreadCallCache::Value);
11188   Register output = ToRegister(lir->output());
11189   Register temp = ToRegister(lir->temp());
11190 
11191   IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
11192   addIC(lir, allocateIC(ic));
11193 }
11194 
11195 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
11196   const Register obj = ToRegister(lir->object());
11197   const ValueOperand output = ToOutValue(lir);
11198   const Register temp = ToRegister(lir->temp());
11199 
11200   masm.iteratorMore(obj, output, temp);
11201 }
11202 
11203 void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
11204   ValueOperand input = ToValue(lir, LIsNoIterAndBranch::Input);
11205   Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
11206   Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
11207 
11208   masm.branchTestMagic(Assembler::Equal, input, ifTrue);
11209 
11210   if (!isNextBlock(lir->ifFalse()->lir())) {
11211     masm.jump(ifFalse);
11212   }
11213 }
11214 
11215 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
11216   const Register obj = ToRegister(lir->object());
11217   const Register temp1 = ToRegister(lir->temp1());
11218   const Register temp2 = ToRegister(lir->temp2());
11219   const Register temp3 = ToRegister(lir->temp3());
11220 
11221   masm.iteratorClose(obj, temp1, temp2, temp3);
11222 }
11223 
11224 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
11225   // Read the number of actual arguments from the JS frame.
11226   Register argc = ToRegister(lir->output());
11227   Address ptr(masm.getStackPointer(),
11228               frameSize() + JitFrameLayout::offsetOfNumActualArgs());
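  // The argument count lives in the caller-constructed JitFrameLayout, which
  // sits frameSize() bytes above the current stack pointer.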
11229 
11230   masm.loadPtr(ptr, argc);
11231 }
11232 
11233 void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
11234   ValueOperand result = ToOutValue(lir);
11235   const LAllocation* index = lir->index();
11236   size_t argvOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
11237 
11238   if (index->isConstant()) {
11239     int32_t i = index->toConstant()->toInt32();
11240     Address argPtr(masm.getStackPointer(), sizeof(Value) * i + argvOffset);
11241     masm.loadValue(argPtr, result);
11242   } else {
11243     Register i = ToRegister(index);
11244     BaseValueIndex argPtr(masm.getStackPointer(), i, argvOffset);
11245     masm.loadValue(argPtr, result);
11246   }
11247 }
11248 
11249 void CodeGenerator::emitRest(LInstruction* lir, Register array,
11250                              Register numActuals, Register temp0,
11251                              Register temp1, unsigned numFormals,
11252                              Register resultreg) {
11253   // Compute actuals() + numFormals.
11254   size_t actualsOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
11255   masm.moveStackPtrTo(temp1);
11256   masm.addPtr(Imm32(sizeof(Value) * numFormals + actualsOffset), temp1);
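  // temp1 now points at the first actual argument past the formals, i.e. the
  // first value that belongs in the rest array.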
11257 
11258   // Compute numActuals - numFormals.
11259   Label emptyLength, joinLength;
11260   masm.movePtr(numActuals, temp0);
11261   masm.branch32(Assembler::LessThanOrEqual, temp0, Imm32(numFormals),
11262                 &emptyLength);
11263   masm.sub32(Imm32(numFormals), temp0);
11264   masm.jump(&joinLength);
11265   {
11266     masm.bind(&emptyLength);
11267     masm.move32(Imm32(0), temp0);
11268   }
11269   masm.bind(&joinLength);
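  // temp0 = max(numActuals - numFormals, 0), the number of rest elements.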
11270 
11271   pushArg(array);
11272   pushArg(temp1);
11273   pushArg(temp0);
11274 
11275   using Fn = JSObject* (*)(JSContext*, uint32_t, Value*, HandleObject);
11276   callVM<Fn, InitRestParameter>(lir);
11277 }
11278 
11279 void CodeGenerator::visitRest(LRest* lir) {
11280   Register numActuals = ToRegister(lir->numActuals());
11281   Register temp0 = ToRegister(lir->getTemp(0));
11282   Register temp1 = ToRegister(lir->getTemp(1));
11283   Register temp2 = ToRegister(lir->getTemp(2));
11284   unsigned numFormals = lir->mir()->numFormals();
11285 
11286   if (Shape* shape = lir->mir()->shape()) {
11287     uint32_t arrayLength = 0;
11288     uint32_t arrayCapacity = 2;
11289     gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
11290     MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &ArrayObject::class_));
11291     allocKind = ForegroundToBackgroundAllocKind(allocKind);
11292     MOZ_ASSERT(GetGCKindSlots(allocKind) ==
11293                arrayCapacity + ObjectElements::VALUES_PER_HEADER);
11294 
11295     Label joinAlloc, failAlloc;
11296     masm.movePtr(ImmGCPtr(shape), temp0);
11297     masm.createArrayWithFixedElements(temp2, temp0, temp1, arrayLength,
11298                                       arrayCapacity, allocKind, gc::DefaultHeap,
11299                                       &failAlloc);
11300     masm.jump(&joinAlloc);
11301     {
11302       masm.bind(&failAlloc);
11303       masm.movePtr(ImmPtr(nullptr), temp2);
11304     }
11305     masm.bind(&joinAlloc);
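    // temp2 now holds the new array, or nullptr if inline allocation failed;
    // in the nullptr case the InitRestParameter VM call made by emitRest is
    // expected to allocate the array itself.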
11306   } else {
11307     masm.movePtr(ImmPtr(nullptr), temp2);
11308   }
11309 
11310   emitRest(lir, temp2, numActuals, temp0, temp1, numFormals,
11311            ToRegister(lir->output()));
11312 }
11313 
11314 // Create a stackmap from the given safepoint, with the structure:
11315 //
11316 //   <reg dump area, if trap>
11317 //   |       ++ <body (general spill)>
11318 //   |               ++ <space for Frame>
11319 //   |                       ++ <inbound args>
11320 //   |                                       |
11321 //   Lowest Addr                             Highest Addr
11322 //
11323 // The caller owns the resulting stackmap.  This assumes a grow-down stack.
11324 //
11325 // For non-debug builds, if the stackmap would contain no pointers, no
11326 // stackmap is created, and nullptr is returned.  For a debug build, a
11327 // stackmap is always created and returned.
11328 static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
11329                                          const MachineState& trapExitLayout,
11330                                          size_t trapExitLayoutNumWords,
11331                                          size_t nInboundStackArgBytes,
11332                                          wasm::StackMap** result) {
11333   // Ensure this is defined on all return paths.
11334   *result = nullptr;
11335 
11336   // The size of the wasm::Frame itself.
11337   const size_t nFrameBytes = sizeof(wasm::Frame);
11338 
11339   // This is the number of bytes in the general spill area, below the Frame.
11340   const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();
11341 
11342   // This is the number of bytes in the general spill area, the Frame, and the
11343   // incoming args, but not including any trap (register dump) area.
11344   const size_t nNonTrapBytes = nBodyBytes + nFrameBytes + nInboundStackArgBytes;
11345   MOZ_ASSERT(nNonTrapBytes % sizeof(void*) == 0);
11346 
11347   // This is the total number of bytes covered by the map.
11348   const DebugOnly<size_t> nTotalBytes =
11349       nNonTrapBytes +
11350       (safepoint.isWasmTrap() ? (trapExitLayoutNumWords * sizeof(void*)) : 0);
11351 
11352   // Create the stackmap initially in this vector.  Since most frames will
11353   // contain 128 or fewer words, heap allocation is avoided in the majority of
11354   // cases.  vec[0] is for the lowest address in the map, vec[N-1] is for the
11355   // highest address in the map.
11356   wasm::StackMapBoolVector vec;
11357 
11358   // Keep track of whether we've actually seen any refs.
11359   bool hasRefs = false;
11360 
11361   // REG DUMP AREA, if any.
11362   const LiveGeneralRegisterSet gcRegs = safepoint.gcRegs();
11363   GeneralRegisterForwardIterator gcRegsIter(gcRegs);
11364   if (safepoint.isWasmTrap()) {
11365     // Deal with roots in registers.  This can only happen for safepoints
11366     // associated with a trap.  For safepoints associated with a call, we
11367     // don't expect to have any live values in registers, hence no roots in
11368     // registers.
11369     if (!vec.appendN(false, trapExitLayoutNumWords)) {
11370       return false;
11371     }
11372     for (; gcRegsIter.more(); ++gcRegsIter) {
11373       Register reg = *gcRegsIter;
11374       size_t offsetFromTop =
11375           reinterpret_cast<size_t>(trapExitLayout.address(reg));
11376 
11377       // If this doesn't hold, the associated register wasn't saved by
11378       // the trap exit stub.  Better to crash now than much later, in
11379       // some obscure place, and possibly with security consequences.
11380       MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
11381 
11382       // offsetFromTop is an offset in words down from the highest
11383       // address in the exit stub save area.  Switch it around to be an
11384       // offset up from the bottom of the (integer register) save area.
11385       size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
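      // For example, with trapExitLayoutNumWords == 4, a register saved at
      // offsetFromTop == 1 is recorded at word index 2, counting up from the
      // lowest address of the save area.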
11386 
11387       vec[offsetFromBottom] = true;
11388       hasRefs = true;
11389     }
11390   } else {
11391     // This map is associated with a call instruction.  We expect there to be
11392     // no live ref-carrying registers, and if there are we're in deep trouble.
11393     MOZ_RELEASE_ASSERT(!gcRegsIter.more());
11394   }
11395 
11396   // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
11397   // Deal with roots on the stack.
11398   size_t wordsSoFar = vec.length();
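  // wordsSoFar is the size of the register dump area appended above (empty
  // for call safepoints); stack slots are recorded relative to this point.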
11399   if (!vec.appendN(false, nNonTrapBytes / sizeof(void*))) {
11400     return false;
11401   }
11402   const LSafepoint::SlotList& gcSlots = safepoint.gcSlots();
11403   for (SafepointSlotEntry gcSlot : gcSlots) {
11404     // The following needs to correspond with JitFrameLayout::slotRef
11405     // gcSlot.stack == 0 means the slot is in the args area
11406     if (gcSlot.stack) {
11407       // It's a slot in the body allocation, so .slot is interpreted
11408       // as an index downwards from the Frame*
11409       MOZ_ASSERT(gcSlot.slot <= nBodyBytes);
11410       uint32_t offsetInBytes = nBodyBytes - gcSlot.slot;
11411       MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
11412       vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
11413     } else {
11414       // It's an argument slot
11415       MOZ_ASSERT(gcSlot.slot < nInboundStackArgBytes);
11416       uint32_t offsetInBytes = nBodyBytes + nFrameBytes + gcSlot.slot;
11417       MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
11418       vec[wordsSoFar + offsetInBytes / sizeof(void*)] = true;
11419     }
11420     hasRefs = true;
11421   }
11422 
11423 #ifndef DEBUG
11424   // We saw no references, and this is a non-debug build, so don't bother
11425   // building the stackmap.
11426   if (!hasRefs) {
11427     return true;
11428   }
11429 #endif
11430 
11431   // Convert vec into a wasm::StackMap.
11432   MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
11433   wasm::StackMap* stackMap =
11434       wasm::ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
11435   if (!stackMap) {
11436     return false;
11437   }
11438   if (safepoint.isWasmTrap()) {
11439     stackMap->setExitStubWords(trapExitLayoutNumWords);
11440   }
11441 
11442   // Record in the map how far down from the highest address the Frame* is.
11443   // Take the opportunity to check that we haven't marked any part of the
11444   // Frame itself as a pointer.
11445   stackMap->setFrameOffsetFromTop((nInboundStackArgBytes + nFrameBytes) /
11446                                   sizeof(void*));
11447 #ifdef DEBUG
11448   for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
11449     MOZ_ASSERT(stackMap->getBit(stackMap->numMappedWords -
11450                                 stackMap->frameOffsetFromTop + i) == 0);
11451   }
11452 #endif
11453 
11454   *result = stackMap;
11455   return true;
11456 }
11457 
11458 bool CodeGenerator::generateWasm(wasm::TypeIdDesc funcTypeId,
11459                                  wasm::BytecodeOffset trapOffset,
11460                                  const wasm::ArgTypeVector& argTypes,
11461                                  const MachineState& trapExitLayout,
11462                                  size_t trapExitLayoutNumWords,
11463                                  wasm::FuncOffsets* offsets,
11464                                  wasm::StackMaps* stackMaps) {
11465   JitSpew(JitSpew_Codegen, "# Emitting wasm code");
11466   setUseWasmStackArgumentAbi();
11467 
11468   size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(argTypes);
11469 
11470   wasm::GenerateFunctionPrologue(masm, funcTypeId, mozilla::Nothing(), offsets);
11471 
11472   MOZ_ASSERT(masm.framePushed() == 0);
11473 
11474   if (omitOverRecursedCheck()) {
11475     masm.reserveStack(frameSize());
11476   } else {
11477     std::pair<CodeOffset, uint32_t> pair =
11478         masm.wasmReserveStackChecked(frameSize(), trapOffset);
11479     CodeOffset trapInsnOffset = pair.first;
11480     size_t nBytesReservedBeforeTrap = pair.second;
11481 
11482     wasm::StackMap* functionEntryStackMap = nullptr;
11483     if (!CreateStackMapForFunctionEntryTrap(
11484             argTypes, trapExitLayout, trapExitLayoutNumWords,
11485             nBytesReservedBeforeTrap, nInboundStackArgBytes,
11486             &functionEntryStackMap)) {
11487       return false;
11488     }
11489 
11490     // In debug builds, we'll always have a stack map, even if there are no
11491     // refs to track.
11492     MOZ_ASSERT(functionEntryStackMap);
11493 
11494     if (functionEntryStackMap &&
11495         !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
11496                         functionEntryStackMap)) {
11497       functionEntryStackMap->destroy();
11498       return false;
11499     }
11500   }
11501 
11502   MOZ_ASSERT(masm.framePushed() == frameSize());
11503 
11504   if (!generateBody()) {
11505     return false;
11506   }
11507 
11508   masm.bind(&returnLabel_);
11509   wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
11510 
11511 #if defined(JS_ION_PERF)
11512   // Note the end of the inline code and start of the OOL code.
11513   gen->perfSpewer().noteEndInlineCode(masm);
11514 #endif
11515 
11516   if (!generateOutOfLineCode()) {
11517     return false;
11518   }
11519 
11520   masm.flush();
11521   if (masm.oom()) {
11522     return false;
11523   }
11524 
11525   offsets->end = masm.currentOffset();
11526 
11527   MOZ_ASSERT(!masm.failureLabel()->used());
11528   MOZ_ASSERT(snapshots_.listSize() == 0);
11529   MOZ_ASSERT(snapshots_.RVATableSize() == 0);
11530   MOZ_ASSERT(recovers_.size() == 0);
11531   MOZ_ASSERT(bailouts_.empty());
11532   MOZ_ASSERT(graph.numConstants() == 0);
11533   MOZ_ASSERT(osiIndices_.empty());
11534   MOZ_ASSERT(icList_.empty());
11535   MOZ_ASSERT(safepoints_.size() == 0);
11536   MOZ_ASSERT(!scriptCounts_);
11537 
11538   // Convert the safepoints to stackmaps and add them to our running
11539   // collection thereof.
11540   for (CodegenSafepointIndex& index : safepointIndices_) {
11541     wasm::StackMap* stackMap = nullptr;
11542     if (!CreateStackMapFromLSafepoint(*index.safepoint(), trapExitLayout,
11543                                       trapExitLayoutNumWords,
11544                                       nInboundStackArgBytes, &stackMap)) {
11545       return false;
11546     }
11547 
11548     // In debug builds, we'll always have a stack map.
11549     MOZ_ASSERT(stackMap);
11550     if (!stackMap) {
11551       continue;
11552     }
11553 
11554     if (!stackMaps->add((uint8_t*)(uintptr_t)index.displacement(), stackMap)) {
11555       stackMap->destroy();
11556       return false;
11557     }
11558   }
11559 
11560   return true;
11561 }
11562 
11563 bool CodeGenerator::generate() {
11564   JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
11565           gen->outerInfo().script()->filename(),
11566           gen->outerInfo().script()->lineno(),
11567           gen->outerInfo().script()->column());
11568 
11569   // Initialize native code table with an entry to the start of
11570   // top-level script.
11571   InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
11572   jsbytecode* startPC = tree->script()->code();
11573   BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
11574   if (!addNativeToBytecodeEntry(startSite)) {
11575     return false;
11576   }
11577 
11578   if (!safepoints_.init(gen->alloc())) {
11579     return false;
11580   }
11581 
11582   if (!generatePrologue()) {
11583     return false;
11584   }
11585 
11586   if (frameClass_ != FrameSizeClass::None()) {
11587     deoptTable_.emplace(gen->jitRuntime()->getBailoutTable(frameClass_));
11588   }
11589 
11590   // Reset native => bytecode map table with top-level script and startPc.
11591   if (!addNativeToBytecodeEntry(startSite)) {
11592     return false;
11593   }
11594 
11595   if (!generateBody()) {
11596     return false;
11597   }
11598 
11599   // Reset native => bytecode map table with top-level script and startPc.
11600   if (!addNativeToBytecodeEntry(startSite)) {
11601     return false;
11602   }
11603 
11604   if (!generateEpilogue()) {
11605     return false;
11606   }
11607 
11608   // Reset native => bytecode map table with top-level script and startPc.
11609   if (!addNativeToBytecodeEntry(startSite)) {
11610     return false;
11611   }
11612 
11613   generateInvalidateEpilogue();
11614 #if defined(JS_ION_PERF)
11615   // Note the end of the inline code and start of the OOL code.
11616   perfSpewer_.noteEndInlineCode(masm);
11617 #endif
11618 
11619   // native => bytecode entries for OOL code will be added
11620   // by CodeGeneratorShared::generateOutOfLineCode
11621   if (!generateOutOfLineCode()) {
11622     return false;
11623   }
11624 
11625   // Add terminal entry.
11626   if (!addNativeToBytecodeEntry(startSite)) {
11627     return false;
11628   }
11629 
11630   // Dump Native to bytecode entries to spew.
11631   dumpNativeToBytecodeEntries();
11632 
11633   return !masm.oom();
11634 }
11635 
11636 static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
11637                                    IonCompilationId compilationId,
11638                                    const WarpSnapshot* snapshot,
11639                                    bool* isValid) {
11640   MOZ_ASSERT(!*isValid);
11641   RecompileInfo recompileInfo(script, compilationId);
11642 
11643   JitZone* jitZone = cx->zone()->jitZone();
11644 
11645   for (const auto* scriptSnapshot : snapshot->scripts()) {
11646     JSScript* inlinedScript = scriptSnapshot->script();
11647     if (inlinedScript == script) {
11648       continue;
11649     }
11650 
11651     // TODO(post-Warp): This matches FinishCompilation and is necessary to
11652   // ensure in-progress compilations are canceled when an inlined function
11653     // becomes a debuggee. See the breakpoint-14.js jit-test.
11654     // When TI is gone, try to clean this up by moving AddInlinedCompilations to
11655     // WarpOracle so that we can handle this as part of addPendingRecompile
11656     // instead of requiring this separate check.
11657     if (inlinedScript->isDebuggee()) {
11658       *isValid = false;
11659       return true;
11660     }
11661 
11662     if (!jitZone->addInlinedCompilation(recompileInfo, inlinedScript)) {
11663       return false;
11664     }
11665   }
11666 
11667   *isValid = true;
11668   return true;
11669 }
11670 
11671 bool CodeGenerator::link(JSContext* cx, const WarpSnapshot* snapshot) {
11672   // We cancel off-thread Ion compilations in a few places during GC, but if
11673   // this compilation was performed off-thread it will already have been
11674   // removed from the relevant lists by this point. Don't allow GC here.
11675   JS::AutoAssertNoGC nogc(cx);
11676 
11677   RootedScript script(cx, gen->outerInfo().script());
11678   MOZ_ASSERT(!script->hasIonScript());
11679 
11680   // Perform any read barriers which were skipped while compiling the
11681   // script, which may have happened off-thread.
11682   const JitRealm* jr = gen->realm->jitRealm();
11683   jr->performStubReadBarriers(realmStubsToReadBarrier_);
11684 
11685   if (scriptCounts_ && !script->hasScriptCounts() &&
11686       !script->initScriptCounts(cx)) {
11687     return false;
11688   }
11689 
11690   // Check to make sure we didn't have a mid-build invalidation. If so, we
11691   // will trickle to jit::Compile() and return Method_Skipped.
11692   uint32_t warmUpCount = script->getWarmUpCount();
11693 
11694   IonCompilationId compilationId =
11695       cx->runtime()->jitRuntime()->nextCompilationId();
11696   JitZone* jitZone = cx->zone()->jitZone();
11697   jitZone->currentCompilationIdRef().emplace(compilationId);
11698   auto resetCurrentId = mozilla::MakeScopeExit(
11699       [jitZone] { jitZone->currentCompilationIdRef().reset(); });
11700 
11701   // Record constraints. If an error occurred, return false and potentially
11702   // prevent future compilations. Otherwise, if an invalidation occurred,
11703   // skip the current compilation.
11704   bool isValid = false;
11705 
11706   // If an inlined script is invalidated (for example, by attaching
11707   // a debugger), we must also invalidate the parent IonScript.
11708   if (!AddInlinedCompilations(cx, script, compilationId, snapshot, &isValid)) {
11709     return false;
11710   }
11711   if (!isValid) {
11712     return true;
11713   }
11714 
11715   // IonMonkey could have inferred better type information during
11716   // compilation. Since adding the new information to the actual type
11717   // information can reset the usecount, increase it back to what it was
11718   // before.
11719   if (warmUpCount > script->getWarmUpCount()) {
11720     script->incWarmUpCounter(warmUpCount - script->getWarmUpCount());
11721   }
11722 
11723   uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);
11724   uint32_t scriptFrameSize =
11725       frameClass_ == FrameSizeClass::None()
11726           ? frameDepth_
11727           : FrameSizeClass::FromDepth(frameDepth_).frameSize();
11728 
11729   // We encode safepoints after the OSI-point offsets have been determined.
11730   if (!encodeSafepoints()) {
11731     return false;
11732   }
11733 
11734   size_t numNurseryObjects = snapshot->nurseryObjects().length();
11735 
11736   IonScript* ionScript = IonScript::New(
11737       cx, compilationId, graph.totalSlotCount(), argumentSlots, scriptFrameSize,
11738       snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
11739       bailouts_.length(), graph.numConstants(), numNurseryObjects,
11740       safepointIndices_.length(), osiIndices_.length(), icList_.length(),
11741       runtimeData_.length(), safepoints_.size());
11742   if (!ionScript) {
11743     return false;
11744   }
11745 #ifdef DEBUG
11746   ionScript->setICHash(snapshot->icHash());
11747 #endif
11748 
11749   auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
11750     // Use js_free instead of IonScript::Destroy: the cache list is still
11751     // uninitialized.
11752     js_free(ionScript);
11753   });
11754 
11755   Linker linker(masm);
11756   JitCode* code = linker.newCode(cx, CodeKind::Ion);
11757   if (!code) {
11758     return false;
11759   }
11760 
11761   // Encode native to bytecode map if profiling is enabled.
11762   if (isProfilerInstrumentationEnabled()) {
11763     // Generate native-to-bytecode main table.
11764     if (!generateCompactNativeToBytecodeMap(cx, code)) {
11765       return false;
11766     }
11767 
11768     uint8_t* ionTableAddr =
11769         ((uint8_t*)nativeToBytecodeMap_) + nativeToBytecodeTableOffset_;
11770     JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;
11771 
11772     // Construct the IonEntry that will go into the global table.
11773     JitcodeGlobalEntry::IonEntry entry;
11774     if (!ionTable->makeIonEntry(cx, code, nativeToBytecodeScriptListLength_,
11775                                 nativeToBytecodeScriptList_, entry)) {
11776       js_free(nativeToBytecodeScriptList_);
11777       js_free(nativeToBytecodeMap_);
11778       return false;
11779     }
11780 
11781     // nativeToBytecodeScriptList_ is no longer needed.
11782     js_free(nativeToBytecodeScriptList_);
11783 
11784     // Add entry to the global table.
11785     JitcodeGlobalTable* globalTable =
11786         cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
11787     if (!globalTable->addEntry(entry)) {
11788       // Memory may have been allocated for the entry.
11789       entry.destroy();
11790       return false;
11791     }
11792 
11793     // Mark the jitcode as having a bytecode map.
11794     code->setHasBytecodeMap();
11795   } else {
11796     // Add a dummy JitcodeGlobalTable entry.
11797     JitcodeGlobalEntry::DummyEntry entry;
11798     entry.init(code, code->raw(), code->rawEnd());
11799 
11800     // Add entry to the global table.
11801     JitcodeGlobalTable* globalTable =
11802         cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
11803     if (!globalTable->addEntry(entry)) {
11804       // Memory may have been allocated for the entry.
11805       entry.destroy();
11806       return false;
11807     }
11808 
11809     // Mark the jitcode as having a bytecode map.
11810     code->setHasBytecodeMap();
11811   }
11812 
11813   ionScript->setMethod(code);
11814 
11815   // If the Gecko Profiler is enabled, mark IonScript as having been
11816   // instrumented accordingly.
11817   if (isProfilerInstrumentationEnabled()) {
11818     ionScript->setHasProfilingInstrumentation();
11819   }
11820 
11821   Assembler::PatchDataWithValueCheck(
11822       CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
11823       ImmPtr((void*)-1));
11824 
11825   for (CodeOffset offset : ionScriptLabels_) {
11826     Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
11827                                        ImmPtr(ionScript), ImmPtr((void*)-1));
11828   }
11829 
11830   for (NurseryObjectLabel label : ionNurseryObjectLabels_) {
11831     void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
11832     Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
11833                                        ImmPtr(entry), ImmPtr((void*)-1));
11834   }
11835 
11836 #ifdef JS_TRACE_LOGGING
11837   bool TLFailed = false;
11838 
11839   MOZ_ASSERT_IF(!JS::TraceLoggerSupported(), patchableTLEvents_.length() == 0);
11840   for (uint32_t i = 0; i < patchableTLEvents_.length(); i++) {
11841     TraceLoggerEvent event(patchableTLEvents_[i].event);
11842     if (!event.hasTextId() || !ionScript->addTraceLoggerEvent(event)) {
11843       TLFailed = true;
11844       break;
11845     }
11846     Assembler::PatchDataWithValueCheck(
11847         CodeLocationLabel(code, patchableTLEvents_[i].offset),
11848         ImmPtr((void*)uintptr_t(event.textId())), ImmPtr((void*)0));
11849   }
11850 
11851   if (!TLFailed && patchableTLScripts_.length() > 0) {
11852     MOZ_ASSERT(TraceLogTextIdEnabled(TraceLogger_Scripts));
11853     TraceLoggerEvent event(TraceLogger_Scripts, script);
11854     if (!event.hasTextId() || !ionScript->addTraceLoggerEvent(event)) {
11855       TLFailed = true;
11856     }
11857     if (!TLFailed) {
11858       uint32_t textId = event.textId();
11859       for (uint32_t i = 0; i < patchableTLScripts_.length(); i++) {
11860         Assembler::PatchDataWithValueCheck(
11861             CodeLocationLabel(code, patchableTLScripts_[i]),
11862             ImmPtr((void*)uintptr_t(textId)), ImmPtr((void*)0));
11863       }
11864     }
11865   }
11866 #endif
11867 
11868   // for generating inline caches during execution.
11869   if (runtimeData_.length()) {
11870     ionScript->copyRuntimeData(&runtimeData_[0]);
11871   }
11872   if (icList_.length()) {
11873     ionScript->copyICEntries(&icList_[0]);
11874   }
11875 
11876   for (size_t i = 0; i < icInfo_.length(); i++) {
11877     IonIC& ic = ionScript->getICFromIndex(i);
11878     Assembler::PatchDataWithValueCheck(
11879         CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
11880         ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
11881     Assembler::PatchDataWithValueCheck(
11882         CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
11883         ImmPtr((void*)-1));
11884   }
11885 
11886   JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
11887           (void*)code->raw());
11888 
11889   ionScript->setInvalidationEpilogueDataOffset(
11890       invalidateEpilogueData_.offset());
11891   if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
11892     ionScript->setOsrPc(osrPc);
11893     ionScript->setOsrEntryOffset(getOsrEntryOffset());
11894   }
11895   ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
11896 
11897 #if defined(JS_ION_PERF)
11898   if (PerfEnabled()) {
11899     perfSpewer_.writeProfile(script, code, masm);
11900   }
11901 #endif
11902 
11903 #ifdef MOZ_VTUNE
11904   vtune::MarkScript(code, script, "ion");
11905 #endif
11906 
11907   // for marking during GC.
11908   if (safepointIndices_.length()) {
11909     ionScript->copySafepointIndices(&safepointIndices_[0]);
11910   }
11911   if (safepoints_.size()) {
11912     ionScript->copySafepoints(&safepoints_);
11913   }
11914 
11915   // for recovering from an Ion Frame.
11916   if (bailouts_.length()) {
11917     ionScript->copyBailoutTable(&bailouts_[0]);
11918   }
11919   if (osiIndices_.length()) {
11920     ionScript->copyOsiIndices(&osiIndices_[0]);
11921   }
11922   if (snapshots_.listSize()) {
11923     ionScript->copySnapshots(&snapshots_);
11924   }
11925   MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
11926   if (recovers_.size()) {
11927     ionScript->copyRecovers(&recovers_);
11928   }
11929   if (graph.numConstants()) {
11930     const Value* vp = graph.constantPool();
11931     ionScript->copyConstants(vp);
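    // If any constant is a nursery-allocated GC thing, register |script| in
    // the whole-cell store buffer; one entry covers every constant, hence the
    // break below.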
11932     for (size_t i = 0; i < graph.numConstants(); i++) {
11933       const Value& v = vp[i];
11934       if (v.isGCThing()) {
11935         if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
11936           sb->putWholeCell(script);
11937           break;
11938         }
11939       }
11940     }
11941   }
11942 
11943   // Attach any generated script counts to the script.
11944   if (IonScriptCounts* counts = extractScriptCounts()) {
11945     script->addIonCounts(counts);
11946   }
11947 
11948   // WARNING: Code after this point must be infallible!
11949 
11950   // Copy the list of nursery objects. Note that the store buffer can add
11951   // HeapPtr edges that must be cleared in IonScript::Destroy. See the
11952   // infallibility warning above.
11953   const auto& nurseryObjects = snapshot->nurseryObjects();
11954   for (size_t i = 0; i < nurseryObjects.length(); i++) {
11955     ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
11956   }
11957 
11958   // Transfer ownership of the IonScript to the JitScript. At this point enough
11959   // of the IonScript must be initialized for IonScript::Destroy to work.
11960   freeIonScript.release();
11961   script->jitScript()->setIonScript(script, ionScript);
11962 
11963   return true;
11964 }
11965 
11966 // An out-of-line path to convert a boxed int32 to either a float or double.
11967 class OutOfLineUnboxFloatingPoint : public OutOfLineCodeBase<CodeGenerator> {
11968   LUnboxFloatingPoint* unboxFloatingPoint_;
11969 
11970  public:
11971   explicit OutOfLineUnboxFloatingPoint(LUnboxFloatingPoint* unboxFloatingPoint)
11972       : unboxFloatingPoint_(unboxFloatingPoint) {}
11973 
11974   void accept(CodeGenerator* codegen) override {
11975     codegen->visitOutOfLineUnboxFloatingPoint(this);
11976   }
11977 
11978   LUnboxFloatingPoint* unboxFloatingPoint() const {
11979     return unboxFloatingPoint_;
11980   }
11981 };
11982 
11983 void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
11984   const ValueOperand box = ToValue(lir, LUnboxFloatingPoint::Input);
11985   const LDefinition* result = lir->output();
11986 
11987   // Out-of-line path to convert int32 to double or bailout
11988   // if this instruction is fallible.
11989   OutOfLineUnboxFloatingPoint* ool =
11990       new (alloc()) OutOfLineUnboxFloatingPoint(lir);
11991   addOutOfLineCode(ool, lir->mir());
11992 
11993   FloatRegister resultReg = ToFloatRegister(result);
11994   masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
11995   masm.unboxDouble(box, resultReg);
11996   if (lir->type() == MIRType::Float32) {
11997     masm.convertDoubleToFloat32(resultReg, resultReg);
11998   }
11999   masm.bind(ool->rejoin());
12000 }
12001 
12002 void CodeGenerator::visitOutOfLineUnboxFloatingPoint(
12003     OutOfLineUnboxFloatingPoint* ool) {
12004   LUnboxFloatingPoint* ins = ool->unboxFloatingPoint();
12005   const ValueOperand value = ToValue(ins, LUnboxFloatingPoint::Input);
12006 
12007   if (ins->mir()->fallible()) {
12008     Label bail;
12009     masm.branchTestInt32(Assembler::NotEqual, value, &bail);
12010     bailoutFrom(&bail, ins->snapshot());
12011   }
12012   masm.int32ValueToFloatingPoint(value, ToFloatRegister(ins->output()),
12013                                  ins->type());
12014   masm.jump(ool->rejoin());
12015 }
12016 
12017 void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
12018   pushArg(ToRegister(lir->environmentChain()));
12019 
12020   using Fn = JSObject* (*)(JSContext*, JSObject*);
12021   callVM<Fn, BindVarOperation>(lir);
12022 }
12023 
12024 void CodeGenerator::visitCallSetElement(LCallSetElement* lir) {
12025   Register obj = ToRegister(lir->getOperand(0));
12026   pushArg(Imm32(lir->mir()->strict()));
12027   pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
12028   pushArg(ToValue(lir, LCallSetElement::Value));
12029   pushArg(ToValue(lir, LCallSetElement::Index));
12030   pushArg(obj);
12031 
12032   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
12033                       HandleValue, bool);
12034   callVM<Fn, js::SetObjectElementWithReceiver>(lir);
12035 }
12036 
12037 void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
12038   const Register obj = ToRegister(ins->getOperand(0));
12039   size_t slot = ins->mir()->slot();
12040   ValueOperand result = ToOutValue(ins);
12041 
12042   masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
12043 }
12044 
12045 void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
12046   const Register obj = ToRegister(ins->getOperand(0));
12047   size_t slot = ins->mir()->slot();
12048   AnyRegister result = ToAnyRegister(ins->getDef(0));
12049   MIRType type = ins->mir()->type();
12050 
12051   masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
12052                         type, result);
12053 }
12054 
12055 template <typename T>
12056 static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
12057                              bool fallible, AnyRegister dest, Label* fail) {
12058   if (type == MIRType::Double) {
12059     MOZ_ASSERT(dest.isFloat());
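    // ensureDouble accepts both Int32 and Double boxed values (converting
    // Int32 to double) and jumps to |fail| for anything else, so no separate
    // fallible check is needed for doubles.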
12060     masm.ensureDouble(src, dest.fpu(), fail);
12061     return;
12062   }
12063   if (fallible) {
12064     switch (type) {
12065       case MIRType::Int32:
12066         masm.fallibleUnboxInt32(src, dest.gpr(), fail);
12067         break;
12068       case MIRType::Boolean:
12069         masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
12070         break;
12071       case MIRType::Object:
12072         masm.fallibleUnboxObject(src, dest.gpr(), fail);
12073         break;
12074       case MIRType::String:
12075         masm.fallibleUnboxString(src, dest.gpr(), fail);
12076         break;
12077       case MIRType::Symbol:
12078         masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
12079         break;
12080       case MIRType::BigInt:
12081         masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
12082         break;
12083       default:
12084         MOZ_CRASH("Unexpected MIRType");
12085     }
12086     return;
12087   }
12088   masm.loadUnboxedValue(src, type, dest);
12089 }
12090 
12091 void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
12092   const MLoadFixedSlotAndUnbox* mir = ins->mir();
12093   MIRType type = mir->type();
12094   Register input = ToRegister(ins->object());
12095   AnyRegister result = ToAnyRegister(ins->output());
12096   size_t slot = mir->slot();
12097 
12098   Address address(input, NativeObject::getFixedSlotOffset(slot));
12099 
12100   Label bail;
12101   EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
12102   if (mir->fallible()) {
12103     bailoutFrom(&bail, ins->snapshot());
12104   }
12105 }
12106 
12107 void CodeGenerator::visitLoadDynamicSlotAndUnbox(
12108     LLoadDynamicSlotAndUnbox* ins) {
12109   const MLoadDynamicSlotAndUnbox* mir = ins->mir();
12110   MIRType type = mir->type();
12111   Register input = ToRegister(ins->slots());
12112   AnyRegister result = ToAnyRegister(ins->output());
12113   size_t slot = mir->slot();
12114 
12115   Address address(input, slot * sizeof(JS::Value));
12116 
12117   Label bail;
12118   EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
12119   if (mir->fallible()) {
12120     bailoutFrom(&bail, ins->snapshot());
12121   }
12122 }
12123 
12124 void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
12125   const MLoadElementAndUnbox* mir = ins->mir();
12126   MIRType type = mir->type();
12127   Register elements = ToRegister(ins->elements());
12128   AnyRegister result = ToAnyRegister(ins->output());
12129 
12130   Label bail;
12131   if (ins->index()->isConstant()) {
12132     NativeObject::elementsSizeMustNotOverflow();
12133     int32_t offset = ToInt32(ins->index()) * sizeof(Value);
12134     Address address(elements, offset);
12135     EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
12136   } else {
12137     BaseObjectElementIndex address(elements, ToRegister(ins->index()));
12138     EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, &bail);
12139   }
12140 
12141   if (mir->fallible()) {
12142     bailoutFrom(&bail, ins->snapshot());
12143   }
12144 }
12145 
12146 void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
12147   const Register obj = ToRegister(ins->getOperand(0));
12148   const ValueOperand value = ToValue(ins, LAddAndStoreSlot::Value);
12149   const Register maybeTemp = ToTempRegisterOrInvalid(ins->getTemp(0));
12150 
12151   Shape* shape = ins->mir()->shape();
12152   masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
12153     EmitPreBarrier(masm, addr, MIRType::Shape);
12154   });
12155 
12156   // Perform the store. No pre-barrier required since this is a new
12157   // initialization.
12158 
12159   uint32_t offset = ins->mir()->slotOffset();
12160   if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
12161     Address slot(obj, offset);
12162     masm.storeValue(value, slot);
12163   } else {
12164     masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
12165     Address slot(maybeTemp, offset);
12166     masm.storeValue(value, slot);
12167   }
12168 }
12169 
12170 void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
12171   const Register obj = ToRegister(ins->getOperand(0));
12172   const ValueOperand value = ToValue(ins, LAllocateAndStoreSlot::Value);
12173   const Register temp1 = ToRegister(ins->getTemp(0));
12174   const Register temp2 = ToRegister(ins->getTemp(1));
12175 
12176   masm.push(obj);
12177   masm.pushValue(value);
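  // Preserve |obj| and |value| on the stack: the ABI call below may clobber
  // the registers holding them.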
12178 
12179   using Fn = bool (*)(JSContext * cx, NativeObject * obj, uint32_t newCount);
12180   masm.setupUnalignedABICall(temp1);
12181   masm.loadJSContext(temp1);
12182   masm.passABIArg(temp1);
12183   masm.passABIArg(obj);
12184   masm.move32(Imm32(ins->mir()->numNewSlots()), temp2);
12185   masm.passABIArg(temp2);
12186   masm.callWithABI<Fn, NativeObject::growSlotsPure>();
12187   masm.storeCallBoolResult(temp1);
12188 
12189   masm.popValue(value);
12190   masm.pop(obj);
12191 
12192   Label bail;
12193   masm.branchIfFalseBool(temp1, &bail);
12194   bailoutFrom(&bail, ins->snapshot());
12195 
12196   masm.storeObjShape(ins->mir()->shape(), obj,
12197                      [](MacroAssembler& masm, const Address& addr) {
12198                        EmitPreBarrier(masm, addr, MIRType::Shape);
12199                      });
12200 
12201   // Perform the store. No pre-barrier required since this is a new
12202   // initialization.
12203   masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp1);
12204   Address slot(temp1, ins->mir()->slotOffset());
12205   masm.storeValue(value, slot);
12206 }
12207 
12208 void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
12209   const Register obj = ToRegister(ins->getOperand(0));
12210   size_t slot = ins->mir()->slot();
12211 
12212   const ValueOperand value = ToValue(ins, LStoreFixedSlotV::Value);
12213 
12214   Address address(obj, NativeObject::getFixedSlotOffset(slot));
12215   if (ins->mir()->needsBarrier()) {
12216     emitPreBarrier(address);
12217   }
12218 
12219   masm.storeValue(value, address);
12220 }
12221 
12222 void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
12223   const Register obj = ToRegister(ins->getOperand(0));
12224   size_t slot = ins->mir()->slot();
12225 
12226   const LAllocation* value = ins->value();
12227   MIRType valueType = ins->mir()->value()->type();
12228 
12229   Address address(obj, NativeObject::getFixedSlotOffset(slot));
12230   if (ins->mir()->needsBarrier()) {
12231     emitPreBarrier(address);
12232   }
12233 
12234   ConstantOrRegister nvalue =
12235       value->isConstant()
12236           ? ConstantOrRegister(value->toConstant()->toJSValue())
12237           : TypedOrValueRegister(valueType, ToAnyRegister(value));
12238   masm.storeConstantOrRegister(nvalue, address);
12239 }
12240 
12241 void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
12242   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
12243   Register envChain = ToRegister(ins->envObj());
12244   ValueOperand output = ToOutValue(ins);
12245   Register temp = ToRegister(ins->temp());
12246 
12247   IonGetNameIC ic(liveRegs, envChain, output, temp);
12248   addIC(ins, allocateIC(ic));
12249 }
12250 
12251 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
12252                                         LiveRegisterSet liveRegs,
12253                                         TypedOrValueRegister value,
12254                                         const ConstantOrRegister& id,
12255                                         ValueOperand output) {
12256   CacheKind kind = CacheKind::GetElem;
12257   if (id.constant() && id.value().isString()) {
12258     JSString* idString = id.value().toString();
12259     if (idString->isAtom() && !idString->asAtom().isIndex()) {
12260       kind = CacheKind::GetProp;
12261     }
12262   }
12263   IonGetPropertyIC cache(kind, liveRegs, value, id, output);
12264   addIC(ins, allocateIC(cache));
12265 }
12266 
12267 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
12268                                         LiveRegisterSet liveRegs,
12269                                         Register objReg, Register temp,
12270                                         const ConstantOrRegister& id,
12271                                         const ConstantOrRegister& value,
12272                                         bool strict) {
12273   CacheKind kind = CacheKind::SetElem;
12274   if (id.constant() && id.value().isString()) {
12275     JSString* idString = id.value().toString();
12276     if (idString->isAtom() && !idString->asAtom().isIndex()) {
12277       kind = CacheKind::SetProp;
12278     }
12279   }
12280   IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
12281   addIC(ins, allocateIC(cache));
12282 }
12283 
12284 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
12285                                                        size_t n, MIRType type) {
12286   if (type == MIRType::Value) {
12287     return TypedOrValueRegister(ToValue(lir, n));
12288   }
12289 
12290   const LAllocation* value = lir->getOperand(n);
12291   if (value->isConstant()) {
12292     return ConstantOrRegister(value->toConstant()->toJSValue());
12293   }
12294 
12295   return TypedOrValueRegister(type, ToAnyRegister(value));
12296 }
12297 
12298 void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
12299   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
12300   TypedOrValueRegister value =
12301       toConstantOrRegister(ins, LGetPropertyCache::Value,
12302                            ins->mir()->value()->type())
12303           .reg();
12304   ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::Id,
12305                                                ins->mir()->idval()->type());
12306   ValueOperand output = ToOutValue(ins);
12307   addGetPropertyCache(ins, liveRegs, value, id, output);
12308 }
12309 
12310 void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
12311   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
12312   Register obj = ToRegister(ins->obj());
12313   TypedOrValueRegister receiver =
12314       toConstantOrRegister(ins, LGetPropSuperCache::Receiver,
12315                            ins->mir()->receiver()->type())
12316           .reg();
12317   ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::Id,
12318                                                ins->mir()->idval()->type());
12319   ValueOperand output = ToOutValue(ins);
12320 
12321   CacheKind kind = CacheKind::GetElemSuper;
12322   if (id.constant() && id.value().isString()) {
12323     JSString* idString = id.value().toString();
12324     if (idString->isAtom() && !idString->asAtom().isIndex()) {
12325       kind = CacheKind::GetPropSuper;
12326     }
12327   }
12328 
12329   IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
12330   addIC(ins, allocateIC(cache));
12331 }
12332 
12333 void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
12334   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
12335   Register envChain = ToRegister(ins->environmentChain());
12336   Register output = ToRegister(ins->output());
12337   Register temp = ToRegister(ins->temp());
12338 
12339   IonBindNameIC ic(liveRegs, envChain, output, temp);
12340   addIC(ins, allocateIC(ic));
12341 }
12342 
12343 void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
12344   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
12345   TypedOrValueRegister value = toConstantOrRegister(ins, LHasOwnCache::Value,
12346                                                     ins->mir()->value()->type())
12347                                    .reg();
12348   TypedOrValueRegister id =
12349       toConstantOrRegister(ins, LHasOwnCache::Id, ins->mir()->idval()->type())
12350           .reg();
12351   Register output = ToRegister(ins->output());
12352 
12353   IonHasOwnIC cache(liveRegs, value, id, output);
12354   addIC(ins, allocateIC(cache));
12355 }
12356 
12357 void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
12358   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
12359   TypedOrValueRegister value =
12360       toConstantOrRegister(ins, LCheckPrivateFieldCache::Value,
12361                            ins->mir()->value()->type())
12362           .reg();
12363   TypedOrValueRegister id =
12364       toConstantOrRegister(ins, LCheckPrivateFieldCache::Id,
12365                            ins->mir()->idval()->type())
12366           .reg();
12367   Register output = ToRegister(ins->output());
12368 
12369   IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
12370   addIC(ins, allocateIC(cache));
12371 }
12372 
12373 void CodeGenerator::visitCallDeleteProperty(LCallDeleteProperty* lir) {
12374   pushArg(ImmGCPtr(lir->mir()->name()));
12375   pushArg(ToValue(lir, LCallDeleteProperty::Value));
12376 
12377   using Fn = bool (*)(JSContext*, HandleValue, HandlePropertyName, bool*);
12378   if (lir->mir()->strict()) {
12379     callVM<Fn, DelPropOperation<true>>(lir);
12380   } else {
12381     callVM<Fn, DelPropOperation<false>>(lir);
12382   }
12383 }
12384 
12385 void CodeGenerator::visitCallDeleteElement(LCallDeleteElement* lir) {
12386   pushArg(ToValue(lir, LCallDeleteElement::Index));
12387   pushArg(ToValue(lir, LCallDeleteElement::Value));
12388 
12389   using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
12390   if (lir->mir()->strict()) {
12391     callVM<Fn, DelElemOperation<true>>(lir);
12392   } else {
12393     callVM<Fn, DelElemOperation<false>>(lir);
12394   }
12395 }
12396 
12397 void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
12398   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
12399   Register objReg = ToRegister(ins->getOperand(0));
12400   Register temp = ToRegister(ins->temp());
12401 
12402   ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::Id,
12403                                                ins->mir()->idval()->type());
12404   ConstantOrRegister value = toConstantOrRegister(ins, LSetPropertyCache::Value,
12405                                                   ins->mir()->value()->type());
12406 
12407   addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
12408                       ins->mir()->strict());
12409 }
12410 
12411 void CodeGenerator::visitThrow(LThrow* lir) {
12412   pushArg(ToValue(lir, LThrow::Value));
12413 
12414   using Fn = bool (*)(JSContext*, HandleValue);
12415   callVM<Fn, js::ThrowOperation>(lir);
12416 }
12417 
12418 class OutOfLineTypeOfV : public OutOfLineCodeBase<CodeGenerator> {
12419   LTypeOfV* ins_;
12420 
12421  public:
12422   explicit OutOfLineTypeOfV(LTypeOfV* ins) : ins_(ins) {}
12423 
12424   void accept(CodeGenerator* codegen) override {
12425     codegen->visitOutOfLineTypeOfV(this);
12426   }
12427   LTypeOfV* ins() const { return ins_; }
12428 };
12429 
12430 void CodeGenerator::emitTypeOfName(JSValueType type, Register output) {
12431   const JSAtomState& names = gen->runtime->names();
12432 
12433   switch (type) {
12434     case JSVAL_TYPE_OBJECT:
12435       masm.movePtr(ImmGCPtr(names.object), output);
12436       break;
12437     case JSVAL_TYPE_DOUBLE:
12438     case JSVAL_TYPE_INT32:
12439       masm.movePtr(ImmGCPtr(names.number), output);
12440       break;
12441     case JSVAL_TYPE_BOOLEAN:
12442       masm.movePtr(ImmGCPtr(names.boolean), output);
12443       break;
12444     case JSVAL_TYPE_UNDEFINED:
12445       masm.movePtr(ImmGCPtr(names.undefined), output);
12446       break;
12447     case JSVAL_TYPE_NULL:
12448       masm.movePtr(ImmGCPtr(names.object), output);
12449       break;
12450     case JSVAL_TYPE_STRING:
12451       masm.movePtr(ImmGCPtr(names.string), output);
12452       break;
12453     case JSVAL_TYPE_SYMBOL:
12454       masm.movePtr(ImmGCPtr(names.symbol), output);
12455       break;
12456     case JSVAL_TYPE_BIGINT:
12457       masm.movePtr(ImmGCPtr(names.bigint), output);
12458       break;
12459     default:
12460       MOZ_CRASH("Unsupported JSValueType");
12461   }
12462 }
12463 
12464 void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
12465                                     Register output, Label* done,
12466                                     Label* oolObject) {
12467   Label notMatch;
12468   switch (type) {
12469     case JSVAL_TYPE_OBJECT:
12470       // The input may be a callable object (result is "function") or
12471       // may emulate undefined (result is "undefined"). Use an OOL path.
12472       masm.branchTestObject(Assembler::Equal, tag, oolObject);
12473       return;
12474     case JSVAL_TYPE_DOUBLE:
12475     case JSVAL_TYPE_INT32:
12476       masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
12477       break;
12478     default:
12479       masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
12480       break;
12481   }
12482 
12483   emitTypeOfName(type, output);
12484   masm.jump(done);
12485   masm.bind(&notMatch);
12486 }
12487 
12488 void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
12489   const ValueOperand value = ToValue(lir, LTypeOfV::Input);
12490   Register output = ToRegister(lir->output());
12491   Register tag = masm.extractTag(value, output);
12492 
12493   Label done;
12494 
12495   auto* ool = new (alloc()) OutOfLineTypeOfV(lir);
12496   addOutOfLineCode(ool, lir->mir());
12497 
12498   const uint32_t NumTypes = 8;
12499   const auto& defaultOrder = {JSVAL_TYPE_OBJECT,    JSVAL_TYPE_DOUBLE,
12500                               JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL,
12501                               JSVAL_TYPE_BOOLEAN,   JSVAL_TYPE_STRING,
12502                               JSVAL_TYPE_SYMBOL,    JSVAL_TYPE_BIGINT};
12503   MOZ_ASSERT(defaultOrder.size() == NumTypes);
12504 
12505   Vector<JSValueType, NumTypes, SystemAllocPolicy> remaining;
12506   MOZ_ALWAYS_TRUE(remaining.reserve(defaultOrder.size()));
12507   remaining.infallibleAppend(defaultOrder.begin(), defaultOrder.end());
12508 
12509   uint32_t numRemaining = remaining.length();
12510 
12511   // Generate checks for previously observed types first.
12512   // The TypeDataList is sorted by descending frequency.
12513   for (auto& observed : lir->mir()->observedTypes()) {
12514     JSValueType type = observed.type();
12515 
12516     // Unify number types.
12517     if (type == JSVAL_TYPE_INT32) {
12518       type = JSVAL_TYPE_DOUBLE;
12519     }
12520 
12521     emitTypeOfCheck(type, tag, output, &done, ool->entry());
12522     MOZ_ASSERT(std::count(remaining.begin(), remaining.end(), type) == 1);
12523     remaining.eraseIfEqual(type);
12524     numRemaining--;
12525   }
12526 
12527   // Generate checks for remaining types.
12528   for (auto type : remaining) {
12529     if (numRemaining == 1) {
12530       // We can skip the check for the last remaining type.
12531 #ifdef DEBUG
12532       emitTypeOfCheck(type, tag, output, &done, ool->entry());
12533       masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
12534 #else
12535       emitTypeOfName(type, output);
12536 #endif
12537     } else {
12538       emitTypeOfCheck(type, tag, output, &done, ool->entry());
12539     }
12540     numRemaining--;
12541   }
12542   MOZ_ASSERT(numRemaining == 0);
12543 
12544   masm.bind(&done);
12545   masm.bind(ool->rejoin());
12546 }
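// Illustrative sketch (not part of the emitted code): for a TypeDataList that
// observed only strings, the dispatch generated above is roughly
//   if (tag is string)       output = names.string;   // observed types first
//   else if (tag is object)  goto ool;                // then the default order
//   else if (tag is number)  output = names.number;
//   ...
//   else                     output = <last remaining type>;  // no check
// with the out-of-line path distinguishing callable objects ("function") and
// objects that emulate undefined.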
12547 
12548 void CodeGenerator::emitTypeOfObject(Register obj, Register output,
12549                                      Label* done) {
12550   const JSAtomState& names = gen->runtime->names();
12551 
12552   Label slowCheck, isObject, isCallable, isUndefined;
12553   masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
12554                     &isUndefined);
12555 
12556   masm.bind(&isCallable);
12557   masm.movePtr(ImmGCPtr(names.function), output);
12558   masm.jump(done);
12559 
12560   masm.bind(&isUndefined);
12561   masm.movePtr(ImmGCPtr(names.undefined), output);
12562   masm.jump(done);
12563 
12564   masm.bind(&isObject);
12565   masm.movePtr(ImmGCPtr(names.object), output);
12566   masm.jump(done);
12567 
12568   masm.bind(&slowCheck);
12569 
12570   saveVolatile(output);
12571   using Fn = JSString* (*)(JSObject * obj, JSRuntime * rt);
12572   masm.setupUnalignedABICall(output);
12573   masm.passABIArg(obj);
12574   masm.movePtr(ImmPtr(gen->runtime), output);
12575   masm.passABIArg(output);
12576   masm.callWithABI<Fn, TypeOfObject>();
12577   masm.storeCallPointerResult(output);
12578   restoreVolatile(output);
12579 }
12580 
12581 void CodeGenerator::visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool) {
12582   LTypeOfV* ins = ool->ins();
12583 
12584   ValueOperand input = ToValue(ins, LTypeOfV::Input);
12585   Register temp = ToTempUnboxRegister(ins->tempToUnbox());
12586   Register output = ToRegister(ins->output());
12587 
12588   Register obj = masm.extractObject(input, temp);
12589   emitTypeOfObject(obj, output, ool->rejoin());
12590   masm.jump(ool->rejoin());
12591 }
12592 
12593 void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
12594   Register obj = ToRegister(lir->object());
12595   Register output = ToRegister(lir->output());
12596 
12597   Label done;
12598   emitTypeOfObject(obj, output, &done);
12599   masm.bind(&done);
12600 }
12601 
12602 void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
12603   pushArg(ToValue(lir, LToAsyncIter::NextMethodIndex));
12604   pushArg(ToRegister(lir->iterator()));
12605 
12606   using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
12607   callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
12608 }
12609 
12610 void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
12611   LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
12612   ValueOperand input = ToValue(lir, LToPropertyKeyCache::Input);
12613   ValueOperand output = ToOutValue(lir);
12614 
12615   IonToPropertyKeyIC ic(liveRegs, input, output);
12616   addIC(lir, allocateIC(ic));
12617 }
12618 
12619 void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
12620   Register elements = ToRegister(load->elements());
12621   const ValueOperand out = ToOutValue(load);
12622 
12623   if (load->index()->isConstant()) {
12624     NativeObject::elementsSizeMustNotOverflow();
12625     int32_t offset = ToInt32(load->index()) * sizeof(Value);
12626     masm.loadValue(Address(elements, offset), out);
12627   } else {
12628     masm.loadValue(BaseObjectElementIndex(elements, ToRegister(load->index())),
12629                    out);
12630   }
12631 
12632   if (load->mir()->needsHoleCheck()) {
12633     Label testMagic;
12634     masm.branchTestMagic(Assembler::Equal, out, &testMagic);
12635     bailoutFrom(&testMagic, load->snapshot());
12636   }
12637 }
12638 
12639 void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
12640   Register elements = ToRegister(lir->elements());
12641   Register index = ToRegister(lir->index());
12642   Register initLength = ToRegister(lir->initLength());
12643   const ValueOperand out = ToOutValue(lir);
12644 
12645   const MLoadElementHole* mir = lir->mir();
12646 
12647   // If the index is out of bounds, load |undefined|. Otherwise, load the
12648   // value.
12649   Label outOfBounds, done;
12650   masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);
12651 
12652   masm.loadValue(BaseObjectElementIndex(elements, index), out);
12653 
12654   // If a hole check is needed, and the value wasn't a hole, we're done.
12655   // Otherwise, we'll load undefined.
12656   if (lir->mir()->needsHoleCheck()) {
12657     masm.branchTestMagic(Assembler::NotEqual, out, &done);
12658     masm.moveValue(UndefinedValue(), out);
12659   }
12660   masm.jump(&done);
12661 
12662   masm.bind(&outOfBounds);
12663   if (mir->needsNegativeIntCheck()) {
12664     Label negative;
12665     masm.branch32(Assembler::LessThan, index, Imm32(0), &negative);
12666     bailoutFrom(&negative, lir->snapshot());
12667   }
12668   masm.moveValue(UndefinedValue(), out);
12669 
12670   masm.bind(&done);
12671 }
12672 
12673 void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
12674   Register elements = ToRegister(lir->elements());
12675   Register temp = ToTempRegisterOrInvalid(lir->temp());
12676   AnyRegister out = ToAnyRegister(lir->output());
12677 
12678   const MLoadUnboxedScalar* mir = lir->mir();
12679 
12680   Scalar::Type storageType = mir->storageType();
12681 
12682   Label fail;
12683   if (lir->index()->isConstant()) {
12684     Address source =
12685         ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
12686     masm.loadFromTypedArray(storageType, source, out, temp, &fail);
12687   } else {
12688     BaseIndex source(elements, ToRegister(lir->index()),
12689                      ScaleFromScalarType(storageType), mir->offsetAdjustment());
12690     masm.loadFromTypedArray(storageType, source, out, temp, &fail);
12691   }
12692 
12693   if (fail.used()) {
12694     bailoutFrom(&fail, lir->snapshot());
12695   }
12696 }
12697 
12698 void CodeGenerator::visitLoadUnboxedBigInt(LLoadUnboxedBigInt* lir) {
12699   Register elements = ToRegister(lir->elements());
12700   Register temp = ToRegister(lir->temp());
12701   Register64 temp64 = ToRegister64(lir->temp64());
12702   Register out = ToRegister(lir->output());
12703 
12704   const MLoadUnboxedScalar* mir = lir->mir();
12705 
12706   Scalar::Type storageType = mir->storageType();
12707 
12708   if (lir->index()->isConstant()) {
12709     Address source =
12710         ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
12711     masm.load64(source, temp64);
12712   } else {
12713     BaseIndex source(elements, ToRegister(lir->index()),
12714                      ScaleFromScalarType(storageType), mir->offsetAdjustment());
12715     masm.load64(source, temp64);
12716   }
12717 
12718   emitCreateBigInt(lir, storageType, temp64, out, temp);
12719 }
12720 
12721 void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
12722   Register elements = ToRegister(lir->elements());
12723   const LAllocation* littleEndian = lir->littleEndian();
12724   Register temp = ToTempRegisterOrInvalid(lir->temp());
12725   Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
12726   AnyRegister out = ToAnyRegister(lir->output());
12727 
12728   const MLoadDataViewElement* mir = lir->mir();
12729   Scalar::Type storageType = mir->storageType();
12730 
12731   BaseIndex source(elements, ToRegister(lir->index()), TimesOne);
12732 
12733   bool noSwap = littleEndian->isConstant() &&
12734                 ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
12735 
12736   // Directly load if no byte swap is needed and the platform supports unaligned
12737   // accesses for floating point registers.
12738   if (noSwap && MacroAssembler::SupportsFastUnalignedAccesses()) {
12739     if (!Scalar::isBigIntType(storageType)) {
12740       Label fail;
12741       masm.loadFromTypedArray(storageType, source, out, temp, &fail);
12742 
12743       if (fail.used()) {
12744         bailoutFrom(&fail, lir->snapshot());
12745       }
12746     } else {
12747       masm.load64(source, temp64);
12748 
12749       emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
12750     }
12751     return;
12752   }
12753 
12754   // Load the value into a gpr register.
12755   switch (storageType) {
12756     case Scalar::Int16:
12757       masm.load16UnalignedSignExtend(source, out.gpr());
12758       break;
12759     case Scalar::Uint16:
12760       masm.load16UnalignedZeroExtend(source, out.gpr());
12761       break;
12762     case Scalar::Int32:
12763       masm.load32Unaligned(source, out.gpr());
12764       break;
12765     case Scalar::Uint32:
12766       masm.load32Unaligned(source, out.isFloat() ? temp : out.gpr());
12767       break;
12768     case Scalar::Float32:
12769       masm.load32Unaligned(source, temp);
12770       break;
12771     case Scalar::Float64:
12772     case Scalar::BigInt64:
12773     case Scalar::BigUint64:
12774       masm.load64Unaligned(source, temp64);
12775       break;
12776     case Scalar::Int8:
12777     case Scalar::Uint8:
12778     case Scalar::Uint8Clamped:
12779     default:
12780       MOZ_CRASH("Invalid typed array type");
12781   }
12782 
12783   if (!noSwap) {
12784     // Swap the bytes in the loaded value.
12785     Label skip;
12786     if (!littleEndian->isConstant()) {
12787       masm.branch32(
12788           MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
12789           ToRegister(littleEndian), Imm32(0), &skip);
12790     }
12791 
12792     switch (storageType) {
12793       case Scalar::Int16:
12794         masm.byteSwap16SignExtend(out.gpr());
12795         break;
12796       case Scalar::Uint16:
12797         masm.byteSwap16ZeroExtend(out.gpr());
12798         break;
12799       case Scalar::Int32:
12800         masm.byteSwap32(out.gpr());
12801         break;
12802       case Scalar::Uint32:
12803         masm.byteSwap32(out.isFloat() ? temp : out.gpr());
12804         break;
12805       case Scalar::Float32:
12806         masm.byteSwap32(temp);
12807         break;
12808       case Scalar::Float64:
12809       case Scalar::BigInt64:
12810       case Scalar::BigUint64:
12811         masm.byteSwap64(temp64);
12812         break;
12813       case Scalar::Int8:
12814       case Scalar::Uint8:
12815       case Scalar::Uint8Clamped:
12816       default:
12817         MOZ_CRASH("Invalid typed array type");
12818     }
12819 
12820     if (skip.used()) {
12821       masm.bind(&skip);
12822     }
12823   }
12824 
12825   // Move the value into the output register.
12826   switch (storageType) {
12827     case Scalar::Int16:
12828     case Scalar::Uint16:
12829     case Scalar::Int32:
12830       break;
12831     case Scalar::Uint32:
12832       if (out.isFloat()) {
12833         masm.convertUInt32ToDouble(temp, out.fpu());
12834       } else {
12835         // Bail out if the value doesn't fit into a signed int32 value. This
12836         // is what allows MLoadDataViewElement to have a type() of
12837         // MIRType::Int32 for UInt32 array loads.
12838         bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
12839       }
12840       break;
12841     case Scalar::Float32:
12842       masm.moveGPRToFloat32(temp, out.fpu());
12843       masm.canonicalizeFloat(out.fpu());
12844       break;
12845     case Scalar::Float64:
12846       masm.moveGPR64ToDouble(temp64, out.fpu());
12847       masm.canonicalizeDouble(out.fpu());
12848       break;
12849     case Scalar::BigInt64:
12850     case Scalar::BigUint64:
12851       emitCreateBigInt(lir, storageType, temp64, out.gpr(), temp);
12852       break;
12853     case Scalar::Int8:
12854     case Scalar::Uint8:
12855     case Scalar::Uint8Clamped:
12856     default:
12857       MOZ_CRASH("Invalid typed array type");
12858   }
12859 }
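// Worked example (sketch): a DataView getFloat64 read with littleEndian=false
// on a little-endian target takes the slow path above: load64Unaligned into
// temp64, byteSwap64, then moveGPR64ToDouble and canonicalizeDouble into the
// output float register.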
12860 
12861 void CodeGenerator::visitLoadTypedArrayElementHole(
12862     LLoadTypedArrayElementHole* lir) {
12863   Register object = ToRegister(lir->object());
12864   const ValueOperand out = ToOutValue(lir);
12865 
12866   // Load the length.
12867   Register scratch = out.scratchReg();
12868   Register scratch2 = ToRegister(lir->temp());
12869   Register index = ToRegister(lir->index());
12870   masm.loadArrayBufferViewLengthIntPtr(object, scratch);
12871 
12872   // Load undefined if index >= length.
12873   Label outOfBounds, done;
12874   masm.spectreBoundsCheckPtr(index, scratch, scratch2, &outOfBounds);
12875 
12876   // Load the elements vector.
12877   masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
12878 
12879   Scalar::Type arrayType = lir->mir()->arrayType();
12880   Label fail;
12881   BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
12882   MacroAssembler::Uint32Mode uint32Mode =
12883       lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
12884                                 : MacroAssembler::Uint32Mode::FailOnDouble;
12885   masm.loadFromTypedArray(arrayType, source, out, uint32Mode, out.scratchReg(),
12886                           &fail);
12887   masm.jump(&done);
12888 
12889   masm.bind(&outOfBounds);
12890   masm.moveValue(UndefinedValue(), out);
12891 
12892   if (fail.used()) {
12893     bailoutFrom(&fail, lir->snapshot());
12894   }
12895 
12896   masm.bind(&done);
12897 }
12898 
12899 void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
12900     LLoadTypedArrayElementHoleBigInt* lir) {
12901   Register object = ToRegister(lir->object());
12902   const ValueOperand out = ToOutValue(lir);
12903 
12904   // On x86 there are not enough registers. In that case reuse the output's
12905   // type register as temporary.
12906 #ifdef JS_CODEGEN_X86
12907   MOZ_ASSERT(lir->temp()->isBogusTemp());
12908   Register temp = out.typeReg();
12909 #else
12910   Register temp = ToRegister(lir->temp());
12911 #endif
12912   Register64 temp64 = ToRegister64(lir->temp64());
12913 
12914   // Load the length.
12915   Register scratch = out.scratchReg();
12916   Register index = ToRegister(lir->index());
12917   masm.loadArrayBufferViewLengthIntPtr(object, scratch);
12918 
12919   // Load undefined if index >= length.
12920   Label outOfBounds, done;
12921   masm.spectreBoundsCheckPtr(index, scratch, temp, &outOfBounds);
12922 
12923   // Load the elements vector.
12924   masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), scratch);
12925 
12926   Scalar::Type arrayType = lir->mir()->arrayType();
12927   BaseIndex source(scratch, index, ScaleFromScalarType(arrayType));
12928   masm.load64(source, temp64);
12929 
12930   Register bigInt = out.scratchReg();
12931   emitCreateBigInt(lir, arrayType, temp64, bigInt, temp);
12932 
12933   masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
12934   masm.jump(&done);
12935 
12936   masm.bind(&outOfBounds);
12937   masm.moveValue(UndefinedValue(), out);
12938 
12939   masm.bind(&done);
12940 }
12941 
12942 template <SwitchTableType tableType>
12943 class OutOfLineSwitch : public OutOfLineCodeBase<CodeGenerator> {
12944   using LabelsVector = Vector<Label, 0, JitAllocPolicy>;
12945   using CodeLabelsVector = Vector<CodeLabel, 0, JitAllocPolicy>;
12946   LabelsVector labels_;
12947   CodeLabelsVector codeLabels_;
12948   CodeLabel start_;
12949   bool isOutOfLine_;
12950 
12951   void accept(CodeGenerator* codegen) override {
12952     codegen->visitOutOfLineSwitch(this);
12953   }
12954 
12955  public:
12956   explicit OutOfLineSwitch(TempAllocator& alloc)
12957       : labels_(alloc), codeLabels_(alloc), isOutOfLine_(false) {}
12958 
12959   CodeLabel* start() { return &start_; }
12960 
12961   CodeLabelsVector& codeLabels() { return codeLabels_; }
12962   LabelsVector& labels() { return labels_; }
12963 
12964   void jumpToCodeEntries(MacroAssembler& masm, Register index, Register temp) {
12965     Register base;
12966     if (tableType == SwitchTableType::Inline) {
12967 #if defined(JS_CODEGEN_ARM)
12968       base = ::js::jit::pc;
12969 #else
12970       MOZ_CRASH("NYI: SwitchTableType::Inline");
12971 #endif
12972     } else {
12973 #if defined(JS_CODEGEN_ARM)
12974       MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
12975 #else
12976       masm.mov(start(), temp);
12977       base = temp;
12978 #endif
12979     }
12980     BaseIndex jumpTarget(base, index, ScalePointer);
12981     masm.branchToComputedAddress(jumpTarget);
12982   }
12983 
12984   // Register an entry in the switch table.
12985   void addTableEntry(MacroAssembler& masm) {
12986     if ((!isOutOfLine_ && tableType == SwitchTableType::Inline) ||
12987         (isOutOfLine_ && tableType == SwitchTableType::OutOfLine)) {
12988       CodeLabel cl;
12989       masm.writeCodePointer(&cl);
12990       masm.propagateOOM(codeLabels_.append(std::move(cl)));
12991     }
12992   }
12993   // Register the code to which the table will jump.
12994   void addCodeEntry(MacroAssembler& masm) {
12995     Label entry;
12996     masm.bind(&entry);
12997     masm.propagateOOM(labels_.append(std::move(entry)));
12998   }
12999 
13000   void setOutOfLine() { isOutOfLine_ = true; }
13001 };
13002 
13003 template <SwitchTableType tableType>
13004 void CodeGenerator::visitOutOfLineSwitch(
13005     OutOfLineSwitch<tableType>* jumpTable) {
13006   jumpTable->setOutOfLine();
13007   auto& labels = jumpTable->labels();
13008 
13009   if (tableType == SwitchTableType::OutOfLine) {
13010 #if defined(JS_CODEGEN_ARM)
13011     MOZ_CRASH("NYI: SwitchTableType::OutOfLine");
13012 #elif defined(JS_CODEGEN_NONE)
13013     MOZ_CRASH();
13014 #else
13015 
13016 #  if defined(JS_CODEGEN_ARM64)
13017     AutoForbidPoolsAndNops afp(
13018         &masm,
13019         (labels.length() + 1) * (sizeof(void*) / vixl::kInstructionSize));
13020 #  endif
13021 
13022     masm.haltingAlign(sizeof(void*));
13023 
13024     // Bind the address of the jump table and reserve the space for code
13025     // pointers to jump in the newly generated code.
13026     masm.bind(jumpTable->start());
13027     masm.addCodeLabel(*jumpTable->start());
13028     for (size_t i = 0, e = labels.length(); i < e; i++) {
13029       jumpTable->addTableEntry(masm);
13030     }
13031 #endif
13032   }
13033 
13034   // Register all reserved pointers of the jump table to target labels. The
13035   // entries of the jump table need to be absolute addresses and thus must be
13036   // patched after codegen is finished.
13037   auto& codeLabels = jumpTable->codeLabels();
13038   for (size_t i = 0, e = codeLabels.length(); i < e; i++) {
13039     auto& cl = codeLabels[i];
13040     cl.target()->bind(labels[i].offset());
13041     masm.addCodeLabel(cl);
13042   }
13043 }
13044 
13045 template void CodeGenerator::visitOutOfLineSwitch(
13046     OutOfLineSwitch<SwitchTableType::Inline>* jumpTable);
13047 template void CodeGenerator::visitOutOfLineSwitch(
13048     OutOfLineSwitch<SwitchTableType::OutOfLine>* jumpTable);
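// Usage sketch (assumed, simplified): a caller allocates an OutOfLineSwitch,
// calls addCodeEntry() at the start of each case body, dispatches with
// jumpToCodeEntries(masm, index, temp), and registers the table as
// out-of-line code; the CodeLabels recorded above are then patched to the
// absolute case addresses once codegen has finished.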
13049 
13050 template <typename T>
13051 static inline void StoreToTypedArray(MacroAssembler& masm,
13052                                      Scalar::Type writeType,
13053                                      const LAllocation* value, const T& dest) {
13054   if (writeType == Scalar::Float32 || writeType == Scalar::Float64) {
13055     masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
13056   } else {
13057     if (value->isConstant()) {
13058       masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
13059     } else {
13060       masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
13061     }
13062   }
13063 }
13064 
13065 void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
13066   Register elements = ToRegister(lir->elements());
13067   const LAllocation* value = lir->value();
13068 
13069   const MStoreUnboxedScalar* mir = lir->mir();
13070 
13071   Scalar::Type writeType = mir->writeType();
13072 
13073   if (lir->index()->isConstant()) {
13074     Address dest = ToAddress(elements, lir->index(), writeType);
13075     StoreToTypedArray(masm, writeType, value, dest);
13076   } else {
13077     BaseIndex dest(elements, ToRegister(lir->index()),
13078                    ScaleFromScalarType(writeType));
13079     StoreToTypedArray(masm, writeType, value, dest);
13080   }
13081 }
13082 
13083 void CodeGenerator::visitStoreUnboxedBigInt(LStoreUnboxedBigInt* lir) {
13084   Register elements = ToRegister(lir->elements());
13085   Register value = ToRegister(lir->value());
13086   Register64 temp = ToRegister64(lir->temp());
13087 
13088   Scalar::Type writeType = lir->mir()->writeType();
13089 
13090   masm.loadBigInt64(value, temp);
13091 
13092   if (lir->index()->isConstant()) {
13093     Address dest = ToAddress(elements, lir->index(), writeType);
13094     masm.storeToTypedBigIntArray(writeType, temp, dest);
13095   } else {
13096     BaseIndex dest(elements, ToRegister(lir->index()),
13097                    ScaleFromScalarType(writeType));
13098     masm.storeToTypedBigIntArray(writeType, temp, dest);
13099   }
13100 }
13101 
13102 void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
13103   Register elements = ToRegister(lir->elements());
13104   const LAllocation* value = lir->value();
13105   const LAllocation* littleEndian = lir->littleEndian();
13106   Register temp = ToTempRegisterOrInvalid(lir->temp());
13107   Register64 temp64 = ToTempRegister64OrInvalid(lir->temp64());
13108 
13109   const MStoreDataViewElement* mir = lir->mir();
13110   Scalar::Type writeType = mir->writeType();
13111 
13112   BaseIndex dest(elements, ToRegister(lir->index()), TimesOne);
13113 
13114   bool noSwap = littleEndian->isConstant() &&
13115                 ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();
13116 
13117   // Directly store if no byte swap is needed and the platform supports
13118   // unaligned accesses for floating point registers.
13119   if (noSwap && MacroAssembler::SupportsFastUnalignedAccesses()) {
13120     if (!Scalar::isBigIntType(writeType)) {
13121       StoreToTypedArray(masm, writeType, value, dest);
13122     } else {
13123       masm.loadBigInt64(ToRegister(value), temp64);
13124       masm.storeToTypedBigIntArray(writeType, temp64, dest);
13125     }
13126     return;
13127   }
13128 
13129   // Load the value into a gpr register.
13130   switch (writeType) {
13131     case Scalar::Int16:
13132     case Scalar::Uint16:
13133     case Scalar::Int32:
13134     case Scalar::Uint32:
13135       if (value->isConstant()) {
13136         masm.move32(Imm32(ToInt32(value)), temp);
13137       } else {
13138         masm.move32(ToRegister(value), temp);
13139       }
13140       break;
13141     case Scalar::Float32: {
13142       FloatRegister fvalue = ToFloatRegister(value);
13143       masm.canonicalizeFloatIfDeterministic(fvalue);
13144       masm.moveFloat32ToGPR(fvalue, temp);
13145       break;
13146     }
13147     case Scalar::Float64: {
13148       FloatRegister fvalue = ToFloatRegister(value);
13149       masm.canonicalizeDoubleIfDeterministic(fvalue);
13150       masm.moveDoubleToGPR64(fvalue, temp64);
13151       break;
13152     }
13153     case Scalar::BigInt64:
13154     case Scalar::BigUint64:
13155       masm.loadBigInt64(ToRegister(value), temp64);
13156       break;
13157     case Scalar::Int8:
13158     case Scalar::Uint8:
13159     case Scalar::Uint8Clamped:
13160     default:
13161       MOZ_CRASH("Invalid typed array type");
13162   }
13163 
13164   if (!noSwap) {
13165     // Swap the bytes in the loaded value.
13166     Label skip;
13167     if (!littleEndian->isConstant()) {
13168       masm.branch32(
13169           MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
13170           ToRegister(littleEndian), Imm32(0), &skip);
13171     }
13172 
13173     switch (writeType) {
13174       case Scalar::Int16:
13175         masm.byteSwap16SignExtend(temp);
13176         break;
13177       case Scalar::Uint16:
13178         masm.byteSwap16ZeroExtend(temp);
13179         break;
13180       case Scalar::Int32:
13181       case Scalar::Uint32:
13182       case Scalar::Float32:
13183         masm.byteSwap32(temp);
13184         break;
13185       case Scalar::Float64:
13186       case Scalar::BigInt64:
13187       case Scalar::BigUint64:
13188         masm.byteSwap64(temp64);
13189         break;
13190       case Scalar::Int8:
13191       case Scalar::Uint8:
13192       case Scalar::Uint8Clamped:
13193       default:
13194         MOZ_CRASH("Invalid typed array type");
13195     }
13196 
13197     if (skip.used()) {
13198       masm.bind(&skip);
13199     }
13200   }
13201 
13202   // Store the value into the destination.
13203   switch (writeType) {
13204     case Scalar::Int16:
13205     case Scalar::Uint16:
13206       masm.store16Unaligned(temp, dest);
13207       break;
13208     case Scalar::Int32:
13209     case Scalar::Uint32:
13210     case Scalar::Float32:
13211       masm.store32Unaligned(temp, dest);
13212       break;
13213     case Scalar::Float64:
13214     case Scalar::BigInt64:
13215     case Scalar::BigUint64:
13216       masm.store64Unaligned(temp64, dest);
13217       break;
13218     case Scalar::Int8:
13219     case Scalar::Uint8:
13220     case Scalar::Uint8Clamped:
13221     default:
13222       MOZ_CRASH("Invalid typed array type");
13223   }
13224 }
13225 
13226 void CodeGenerator::visitStoreTypedArrayElementHole(
13227     LStoreTypedArrayElementHole* lir) {
13228   Register elements = ToRegister(lir->elements());
13229   const LAllocation* value = lir->value();
13230 
13231   Scalar::Type arrayType = lir->mir()->arrayType();
13232 
13233   Register index = ToRegister(lir->index());
13234   const LAllocation* length = lir->length();
13235   Register spectreTemp = ToTempRegisterOrInvalid(lir->spectreTemp());
13236 
13237   Label skip;
13238   if (length->isRegister()) {
13239     masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
13240   } else {
13241     masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
13242   }
13243 
13244   BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
13245   StoreToTypedArray(masm, arrayType, value, dest);
13246 
13247   masm.bind(&skip);
13248 }
13249 
13250 void CodeGenerator::visitStoreTypedArrayElementHoleBigInt(
13251     LStoreTypedArrayElementHoleBigInt* lir) {
13252   Register elements = ToRegister(lir->elements());
13253   Register value = ToRegister(lir->value());
13254   Register64 temp = ToRegister64(lir->temp());
13255 
13256   Scalar::Type arrayType = lir->mir()->arrayType();
13257 
13258   Register index = ToRegister(lir->index());
13259   const LAllocation* length = lir->length();
13260   Register spectreTemp = temp.scratchReg();
13261 
13262   Label skip;
13263   if (length->isRegister()) {
13264     masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
13265   } else {
13266     masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
13267   }
13268 
13269   masm.loadBigInt64(value, temp);
13270 
13271   BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
13272   masm.storeToTypedBigIntArray(arrayType, temp, dest);
13273 
13274   masm.bind(&skip);
13275 }
13276 
13277 void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
13278   Register value = ToRegister(lir->value());
13279   Register output = ToRegister(lir->output());
13280 
13281   masm.atomicIsLockFreeJS(value, output);
13282 }
13283 
13284 void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
13285   Register output = ToRegister(lir->output());
13286   MOZ_ASSERT(output == ToRegister(lir->input()));
13287   masm.clampIntToUint8(output);
13288 }
13289 
13290 void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
13291   FloatRegister input = ToFloatRegister(lir->input());
13292   Register output = ToRegister(lir->output());
13293   masm.clampDoubleToUint8(input, output);
13294 }
13295 
13296 void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
13297   ValueOperand operand = ToValue(lir, LClampVToUint8::Input);
13298   FloatRegister tempFloat = ToFloatRegister(lir->tempFloat());
13299   Register output = ToRegister(lir->output());
13300 
13301   using Fn = bool (*)(JSContext*, JSString*, double*);
13302   OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
13303       lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
13304   Label* stringEntry = oolString->entry();
13305   Label* stringRejoin = oolString->rejoin();
13306 
13307   Label fails;
13308   masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
13309                          output, &fails);
13310 
13311   bailoutFrom(&fails, lir->snapshot());
13312 }
13313 
13314 void CodeGenerator::visitInCache(LInCache* ins) {
13315   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
13316 
13317   ConstantOrRegister key =
13318       toConstantOrRegister(ins, LInCache::LHS, ins->mir()->key()->type());
13319   Register object = ToRegister(ins->rhs());
13320   Register output = ToRegister(ins->output());
13321   Register temp = ToRegister(ins->temp());
13322 
13323   IonInIC cache(liveRegs, key, object, output, temp);
13324   addIC(ins, allocateIC(cache));
13325 }
13326 
13327 void CodeGenerator::visitInArray(LInArray* lir) {
13328   const MInArray* mir = lir->mir();
13329   Register elements = ToRegister(lir->elements());
13330   Register initLength = ToRegister(lir->initLength());
13331   Register output = ToRegister(lir->output());
13332 
13333   Label falseBranch, done, trueBranch;
13334 
13335   if (lir->index()->isConstant()) {
13336     int32_t index = ToInt32(lir->index());
13337 
13338     if (index < 0) {
13339       MOZ_ASSERT(mir->needsNegativeIntCheck());
13340       bailout(lir->snapshot());
13341       return;
13342     }
13343 
13344     masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
13345                   &falseBranch);
13346 
13347     NativeObject::elementsSizeMustNotOverflow();
13348     Address address = Address(elements, index * sizeof(Value));
13349     masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
13350   } else {
13351     Register index = ToRegister(lir->index());
13352 
13353     Label negativeIntCheck;
13354     Label* failedInitLength = &falseBranch;
13355     if (mir->needsNegativeIntCheck()) {
13356       failedInitLength = &negativeIntCheck;
13357     }
13358 
13359     masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
13360 
13361     BaseObjectElementIndex address(elements, index);
13362     masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
13363 
13364     if (mir->needsNegativeIntCheck()) {
13365       masm.jump(&trueBranch);
13366       masm.bind(&negativeIntCheck);
13367 
13368       bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
13369 
13370       masm.jump(&falseBranch);
13371     }
13372   }
13373 
13374   masm.bind(&trueBranch);
13375   masm.move32(Imm32(1), output);
13376   masm.jump(&done);
13377 
13378   masm.bind(&falseBranch);
13379   masm.move32(Imm32(0), output);
13380   masm.bind(&done);
13381 }
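// Behavioral sketch: for |index in arr| on a dense array the code above
// computes |index < initLength && elements[index] is not a hole|, except that
// a negative constant index, or a negative dynamic index when
// needsNegativeIntCheck() is set, bails out instead of just returning false.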
13382 
13383 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
13384   Register elements = ToRegister(lir->elements());
13385   const LAllocation* index = lir->index();
13386 
13387   Label testMagic;
13388   if (index->isConstant()) {
13389     Address address(elements, ToInt32(index) * sizeof(js::Value));
13390     masm.branchTestMagic(Assembler::Equal, address, &testMagic);
13391   } else {
13392     BaseObjectElementIndex address(elements, ToRegister(index));
13393     masm.branchTestMagic(Assembler::Equal, address, &testMagic);
13394   }
13395   bailoutFrom(&testMagic, lir->snapshot());
13396 }
13397 
13398 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
13399   emitInstanceOf(ins, ins->rhs());
13400 }
13401 
13402 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
13403   emitInstanceOf(ins, ins->rhs());
13404 }
13405 
13406 void CodeGenerator::emitInstanceOf(LInstruction* ins,
13407                                    const LAllocation* prototypeObject) {
13408   // This path implements fun_hasInstance when the function's prototype is
13409   // known to be prototypeObject.
13410 
13411   Label done;
13412   Register output = ToRegister(ins->getDef(0));
13413 
13414   // If the lhs is a primitive, the result is false.
13415   Register objReg;
13416   if (ins->isInstanceOfV()) {
13417     Label isObject;
13418     ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LHS);
13419     masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
13420     masm.mov(ImmWord(0), output);
13421     masm.jump(&done);
13422     masm.bind(&isObject);
13423     objReg = masm.extractObject(lhsValue, output);
13424   } else {
13425     objReg = ToRegister(ins->toInstanceOfO()->lhs());
13426   }
13427 
13428   mozilla::Maybe<ImmGCPtr> protoPtr;
13429   mozilla::Maybe<Register> protoReg;
13430   if (prototypeObject->isConstant()) {
13431     protoPtr.emplace(&prototypeObject->toConstant()->toObject());
13432   } else {
13433     protoReg.emplace(ToRegister(prototypeObject));
13434   }
13435 
13436   // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
13437   // This follows the main loop of js::IsPrototypeOf, though it additionally
13438   // breaks out of the loop on Proxy::LazyProto.
13439 
13440   // Load the lhs's prototype.
13441   masm.loadObjProto(objReg, output);
13442 
13443   Label testLazy;
13444   {
13445     Label loopPrototypeChain;
13446     masm.bind(&loopPrototypeChain);
13447 
13448     // Test for the target prototype object.
13449     Label notPrototypeObject;
13450     if (protoPtr) {
13451       masm.branchPtr(Assembler::NotEqual, output, *protoPtr,
13452                      &notPrototypeObject);
13453     } else {
13454       masm.branchPtr(Assembler::NotEqual, output, *protoReg,
13455                      &notPrototypeObject);
13456     }
13457     masm.mov(ImmWord(1), output);
13458     masm.jump(&done);
13459     masm.bind(&notPrototypeObject);
13460 
13461     MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
13462 
13463     // Test for nullptr or Proxy::LazyProto
13464     masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);
13465 
13466     // Load the current object's prototype.
13467     masm.loadObjProto(output, output);
13468 
13469     masm.jump(&loopPrototypeChain);
13470   }
13471 
13472   // Make a VM call if an object with a lazy proto was found on the prototype
13473   // chain. This currently occurs only for cross compartment wrappers, which
13474   // we do not expect to be compared with non-wrapper functions from this
13475   // compartment. Otherwise, we stopped on a nullptr prototype and the output
13476   // register is already correct.
13477 
13478   using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
13479   OutOfLineCode* ool;
13480   if (protoPtr) {
13481     ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(*protoPtr, objReg),
13482                                        StoreRegisterTo(output));
13483   } else {
13484     ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(*protoReg, objReg),
13485                                        StoreRegisterTo(output));
13486   }
13487 
13488   // Regenerate the original lhs object for the VM call.
13489   Label regenerate, *lazyEntry;
13490   if (objReg != output) {
13491     lazyEntry = ool->entry();
13492   } else {
13493     masm.bind(&regenerate);
13494     lazyEntry = &regenerate;
13495     if (ins->isInstanceOfV()) {
13496       ValueOperand lhsValue = ToValue(ins, LInstanceOfV::LHS);
13497       objReg = masm.extractObject(lhsValue, output);
13498     } else {
13499       objReg = ToRegister(ins->toInstanceOfO()->lhs());
13500     }
13501     MOZ_ASSERT(objReg == output);
13502     masm.jump(ool->entry());
13503   }
13504 
13505   masm.bind(&testLazy);
13506   masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);
13507 
13508   masm.bind(&done);
13509   masm.bind(ool->rejoin());
13510 }
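// Pseudocode sketch of the inline prototype walk above (not emitted code):
//   obj = proto_of(lhs);
//   while (obj != nullptr) {
//     if (obj == prototypeObject) return true;
//     if (obj == TaggedProto::LazyProto)  // e.g. cross-compartment wrapper
//       return IsPrototypeOf(cx, prototypeObject, lhs);  // VM call
//     obj = proto_of(obj);
//   }
//   return false;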
13511 
13512 void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
13513   // The Lowering ensures that RHS is an object, and that LHS is a value.
13514   LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
13515   TypedOrValueRegister lhs =
13516       TypedOrValueRegister(ToValue(ins, LInstanceOfCache::LHS));
13517   Register rhs = ToRegister(ins->rhs());
13518   Register output = ToRegister(ins->output());
13519 
13520   IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
13521   addIC(ins, allocateIC(ic));
13522 }
13523 
13524 void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
13525   const Register JSContextReg = ToRegister(ins->getJSContextReg());
13526   const Register ObjectReg = ToRegister(ins->getObjectReg());
13527   const Register PrivateReg = ToRegister(ins->getPrivReg());
13528   const Register ValueReg = ToRegister(ins->getValueReg());
13529 
13530   Label haveValue;
13531   if (ins->mir()->valueMayBeInSlot()) {
13532     size_t slot = ins->mir()->domMemberSlotIndex();
13533     // It's a bit annoying to redo these slot calculations, which duplicate
13534     // LSlots and a few other things like that, but I'm not sure there's a
13535     // way to reuse those here.
13536     //
13537     // If this ever gets fixed to work with proxies (by not assuming that
13538     // reserved slot indices, which is what domMemberSlotIndex() returns,
13539     // match fixed slot indices), we can reenable MGetDOMProperty for
13540     // proxies in IonBuilder.
13541     if (slot < NativeObject::MAX_FIXED_SLOTS) {
13542       masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
13543                      JSReturnOperand);
13544     } else {
13545       // It's a dynamic slot.
13546       slot -= NativeObject::MAX_FIXED_SLOTS;
13547       // Use PrivateReg as a scratch register for the slots pointer.
13548       masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
13549                    PrivateReg);
13550       masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
13551                      JSReturnOperand);
13552     }
13553     masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
13554   }
13555 
13556   DebugOnly<uint32_t> initialStack = masm.framePushed();
13557 
13558   masm.checkStackAlignment();
13559 
13560   // Make space for the outparam.  Pre-initialize it to UndefinedValue so we
13561   // can trace it at GC time.
13562   masm.Push(UndefinedValue());
13563   // We pass the pointer to our out param as an instance of
13564   // JSJitGetterCallArgs, since on the binary level it's the same thing.
13565   static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
13566   masm.moveStackPtrTo(ValueReg);
13567 
13568   masm.Push(ObjectReg);
13569 
13570   LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
13571 
13572   // Rooting will happen at GC time.
13573   masm.moveStackPtrTo(ObjectReg);
13574 
13575   Realm* getterRealm = ins->mir()->getterRealm();
13576   if (gen->realm->realmPtr() != getterRealm) {
13577     // We use JSContextReg as scratch register here.
13578     masm.switchToRealm(getterRealm, JSContextReg);
13579   }
13580 
13581   uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
13582   masm.loadJSContext(JSContextReg);
13583   masm.enterFakeExitFrame(JSContextReg, JSContextReg,
13584                           ExitFrameType::IonDOMGetter);
13585 
13586   markSafepointAt(safepointOffset, ins);
13587 
13588   masm.setupUnalignedABICall(JSContextReg);
13589   masm.loadJSContext(JSContextReg);
13590   masm.passABIArg(JSContextReg);
13591   masm.passABIArg(ObjectReg);
13592   masm.passABIArg(PrivateReg);
13593   masm.passABIArg(ValueReg);
13594   masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
13595                    MoveOp::GENERAL,
13596                    CheckUnsafeCallWithABI::DontCheckHasExitFrame);
13597 
13598   if (ins->mir()->isInfallible()) {
13599     masm.loadValue(Address(masm.getStackPointer(),
13600                            IonDOMExitFrameLayout::offsetOfResult()),
13601                    JSReturnOperand);
13602   } else {
13603     masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
13604 
13605     masm.loadValue(Address(masm.getStackPointer(),
13606                            IonDOMExitFrameLayout::offsetOfResult()),
13607                    JSReturnOperand);
13608   }
13609 
13610   // Switch back to the current realm if needed. Note: if the getter threw an
13611   // exception, the exception handler will do this.
13612   if (gen->realm->realmPtr() != getterRealm) {
13613     static_assert(!JSReturnOperand.aliases(ReturnReg),
13614                   "Clobbering ReturnReg should not affect the return value");
13615     masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
13616   }
13617 
13618   // Until C++ code is instrumented against Spectre, prevent speculative
13619   // execution from returning any private data.
13620   if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
13621     masm.speculationBarrier();
13622   }
13623 
13624   masm.adjustStack(IonDOMExitFrameLayout::Size());
13625 
13626   masm.bind(&haveValue);
13627 
13628   MOZ_ASSERT(masm.framePushed() == initialStack);
13629 }
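// Stack sketch at the JSJitGetterOp call above, top of stack first:
//   [ fake IonDOMGetter exit frame ]
//   [ JSObject* slot ]              <- ObjectReg points here after the pushes
//   [ Value outparam (undefined) ]  <- ValueReg points here (JSJitGetterCallArgs)
// The getter is then invoked via the ABI call as
// fun(cx, ObjectReg, PrivateReg, ValueReg).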
13630 
13631 void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
13632   // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
13633   // use an LLoadFixedSlotV or some subclass of it for this case: that would
13634   // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
13635   // we'd have to duplicate a bunch of stuff we now get for free from
13636   // MGetDOMProperty.
13637   //
13638   // If this ever gets fixed to work with proxies (by not assuming that
13639   // reserved slot indices, which is what domMemberSlotIndex() returns,
13640   // match fixed slot indices), we can reenable MGetDOMMember for
13641   // proxies in IonBuilder.
13642   Register object = ToRegister(ins->object());
13643   size_t slot = ins->mir()->domMemberSlotIndex();
13644   ValueOperand result = ToOutValue(ins);
13645 
13646   masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
13647                  result);
13648 }
13649 
13650 void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
13651   // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
13652   // use an LLoadFixedSlotT or some subclass of it for this case: that would
13653   // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
13654   // we'd have to duplicate a bunch of stuff we now get for free from
13655   // MGetDOMProperty.
13656   //
13657   // If this ever gets fixed to work with proxies (by not assuming that
13658   // reserved slot indices, which is what domMemberSlotIndex() returns,
13659   // match fixed slot indices), we can reenable MGetDOMMember for
13660   // proxies in IonBuilder.
13661   Register object = ToRegister(ins->object());
13662   size_t slot = ins->mir()->domMemberSlotIndex();
13663   AnyRegister result = ToAnyRegister(ins->getDef(0));
13664   MIRType type = ins->mir()->type();
13665 
13666   masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
13667                         type, result);
13668 }
13669 
13670 void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
13671   const Register JSContextReg = ToRegister(ins->getJSContextReg());
13672   const Register ObjectReg = ToRegister(ins->getObjectReg());
13673   const Register PrivateReg = ToRegister(ins->getPrivReg());
13674   const Register ValueReg = ToRegister(ins->getValueReg());
13675 
13676   DebugOnly<uint32_t> initialStack = masm.framePushed();
13677 
13678   masm.checkStackAlignment();
13679 
13680   // Push the argument. Rooting will happen at GC time.
13681   ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
13682   masm.Push(argVal);
13683   // We pass the pointer to the pushed argument as an instance of
13684   // JSJitSetterCallArgs, since on the binary level it's the same thing.
13685   static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
13686   masm.moveStackPtrTo(ValueReg);
13687 
13688   masm.Push(ObjectReg);
13689 
13690   LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());
13691 
13692   // Rooting will happen at GC time.
13693   masm.moveStackPtrTo(ObjectReg);
13694 
13695   Realm* setterRealm = ins->mir()->setterRealm();
13696   if (gen->realm->realmPtr() != setterRealm) {
13697     // We use JSContextReg as scratch register here.
13698     masm.switchToRealm(setterRealm, JSContextReg);
13699   }
13700 
13701   uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
13702   masm.loadJSContext(JSContextReg);
13703   masm.enterFakeExitFrame(JSContextReg, JSContextReg,
13704                           ExitFrameType::IonDOMSetter);
13705 
13706   markSafepointAt(safepointOffset, ins);
13707 
13708   masm.setupUnalignedABICall(JSContextReg);
13709   masm.loadJSContext(JSContextReg);
13710   masm.passABIArg(JSContextReg);
13711   masm.passABIArg(ObjectReg);
13712   masm.passABIArg(PrivateReg);
13713   masm.passABIArg(ValueReg);
13714   masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
13715                    MoveOp::GENERAL,
13716                    CheckUnsafeCallWithABI::DontCheckHasExitFrame);
13717 
13718   masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
13719 
13720   // Switch back to the current realm if needed. Note: if the setter threw an
13721   // exception, the exception handler will do this.
13722   if (gen->realm->realmPtr() != setterRealm) {
13723     masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
13724   }
13725 
13726   masm.adjustStack(IonDOMExitFrameLayout::Size());
13727 
13728   MOZ_ASSERT(masm.framePushed() == initialStack);
13729 }
13730 
13731 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
13732   Register proxy = ToRegister(ins->proxy());
13733   ValueOperand out = ToOutValue(ins);
13734 
13735   masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
13736                out.scratchReg());
13737   masm.loadValue(Address(out.scratchReg(),
13738                          js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
13739                  out);
13740 }
13741 
13742 void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
13743     LLoadDOMExpandoValueGuardGeneration* ins) {
13744   Register proxy = ToRegister(ins->proxy());
13745   ValueOperand out = ToOutValue(ins);
13746 
13747   Label bail;
13748   masm.loadDOMExpandoValueGuardGeneration(proxy, out,
13749                                           ins->mir()->expandoAndGeneration(),
13750                                           ins->mir()->generation(), &bail);
13751   bailoutFrom(&bail, ins->snapshot());
13752 }
13753 
13754 void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
13755     LLoadDOMExpandoValueIgnoreGeneration* ins) {
13756   Register proxy = ToRegister(ins->proxy());
13757   ValueOperand out = ToOutValue(ins);
13758 
13759   masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
13760                out.scratchReg());
13761 
13762   // Load the ExpandoAndGeneration* from the PrivateValue.
13763   masm.loadPrivate(
13764       Address(out.scratchReg(),
13765               js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
13766       out.scratchReg());
13767 
13768   // Load expandoAndGeneration->expando into the output Value register.
13769   masm.loadValue(
13770       Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
13771 }
13772 
13773 void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
13774     LGuardDOMExpandoMissingOrGuardShape* ins) {
13775   Register temp = ToRegister(ins->temp());
13776   ValueOperand input = ToValue(ins, LGuardDOMExpandoMissingOrGuardShape::Input);
13777 
13778   Label done;
13779   masm.branchTestUndefined(Assembler::Equal, input, &done);
13780 
13781   masm.debugAssertIsObject(input);
13782   masm.unboxObject(input, temp);
13783   // The expando object is not used in this case, so we don't need Spectre
13784   // mitigations.
13785   Label bail;
13786   masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
13787                                               ins->mir()->shape(), &bail);
13788   bailoutFrom(&bail, ins->snapshot());
13789 
13790   masm.bind(&done);
13791 }
13792 
13793 class OutOfLineIsCallable : public OutOfLineCodeBase<CodeGenerator> {
13794   Register object_;
13795   Register output_;
13796 
13797  public:
13798   OutOfLineIsCallable(Register object, Register output)
13799       : object_(object), output_(output) {}
13800 
13801   void accept(CodeGenerator* codegen) override {
13802     codegen->visitOutOfLineIsCallable(this);
13803   }
13804   Register object() const { return object_; }
13805   Register output() const { return output_; }
13806 };
13807 
13808 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
13809   Register object = ToRegister(ins->object());
13810   Register output = ToRegister(ins->output());
13811 
13812   OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(object, output);
13813   addOutOfLineCode(ool, ins->mir());
13814 
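  // Check the object's class inline. Proxies, whose callability depends on
  // their handler, are handled by the out-of-line path.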
  masm.isCallable(object, output, ool->entry());

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
  ValueOperand val = ToValue(ins, LIsCallableV::Value);
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp());

  Label notObject;
  masm.fallibleUnboxObject(val, temp, &notObject);

  OutOfLineIsCallable* ool = new (alloc()) OutOfLineIsCallable(temp, output);
  addOutOfLineCode(ool, ins->mir());

  masm.isCallable(temp, output, ool->entry());
  masm.jump(ool->rejoin());

  masm.bind(&notObject);
  masm.move32(Imm32(0), output);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitOutOfLineIsCallable(OutOfLineIsCallable* ool) {
  Register object = ool->object();
  Register output = ool->output();

  saveVolatile(output);
  using Fn = bool (*)(JSObject * obj);
  masm.setupUnalignedABICall(output);
  masm.passABIArg(object);
  masm.callWithABI<Fn, ObjectIsCallable>();
  masm.storeCallBoolResult(output);
  restoreVolatile(output);
  masm.jump(ool->rejoin());
}

class OutOfLineIsConstructor : public OutOfLineCodeBase<CodeGenerator> {
  LIsConstructor* ins_;

 public:
  explicit OutOfLineIsConstructor(LIsConstructor* ins) : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineIsConstructor(this);
  }
  LIsConstructor* ins() const { return ins_; }
};

void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  OutOfLineIsConstructor* ool = new (alloc()) OutOfLineIsConstructor(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.isConstructor(object, output, ool->entry());

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitOutOfLineIsConstructor(OutOfLineIsConstructor* ool) {
  LIsConstructor* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  saveVolatile(output);
  using Fn = bool (*)(JSObject * obj);
  masm.setupUnalignedABICall(output);
  masm.passABIArg(object);
  masm.callWithABI<Fn, ObjectIsConstructor>();
  masm.storeCallBoolResult(output);
  restoreVolatile(output);
  masm.jump(ool->rejoin());
}

void CodeGenerator::visitIsCrossRealmArrayConstructor(
    LIsCrossRealmArrayConstructor* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  masm.setIsCrossRealmArrayConstructor(object, output);
}

static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
                              Register obj, Register output,
                              Label* notArray = nullptr) {
  masm.loadObjClassUnsafe(obj, output);

  Label isArray;
  masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
                 &isArray);

  // Branch to OOL path if it's a proxy.
  masm.branchTestClassIsProxy(true, output, ool->entry());

  if (notArray) {
    masm.bind(notArray);
  }
  masm.move32(Imm32(0), output);
  masm.jump(ool->rejoin());

  masm.bind(&isArray);
  masm.move32(Imm32(1), output);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
      lir, ArgList(object), StoreRegisterTo(output));
  EmitObjectIsArray(masm, ool, object, output);
}

void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
  ValueOperand val = ToValue(lir, LIsArrayV::Value);
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp());

  Label notArray;
  masm.fallibleUnboxObject(val, temp, &notArray);

  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
      lir, ArgList(temp), StoreRegisterTo(output));
  EmitObjectIsArray(masm, ool, temp, output, &notArray);
}

void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;
  if (lir->mir()->isPossiblyWrapped()) {
    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
        lir, ArgList(object), StoreRegisterTo(output));
  }

  Label notTypedArray;
  Label done;

  masm.loadObjClassUnsafe(object, output);
  masm.branchIfClassIsNotTypedArray(output, &notTypedArray);

  masm.move32(Imm32(1), output);
  masm.jump(&done);
  masm.bind(&notTypedArray);
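  // The object may be a cross-compartment wrapper around a typed array, so
  // when wrappers are possible let the VM inspect the unwrapped object.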
  if (ool) {
    masm.branchTestClassIsProxy(true, output, ool->entry());
  }
  masm.move32(Imm32(0), output);
  masm.bind(&done);
  if (ool) {
    masm.bind(ool->rejoin());
  }
}

void CodeGenerator::visitIsObject(LIsObject* ins) {
  Register output = ToRegister(ins->output());
  ValueOperand value = ToValue(ins, LIsObject::Input);
  masm.testObjectSet(Assembler::Equal, value, output);
}

void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
  ValueOperand value = ToValue(ins, LIsObjectAndBranch::Input);
  testObjectEmitBranch(Assembler::Equal, value, ins->ifTrue(), ins->ifFalse());
}

void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
  Register output = ToRegister(ins->output());
  ValueOperand value = ToValue(ins, LIsNullOrUndefined::Input);

  Label isNotNull, done;
  masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);

  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&isNotNull);
  masm.testUndefinedSet(Assembler::Equal, value, output);

  masm.bind(&done);
}

void CodeGenerator::visitIsNullOrUndefinedAndBranch(
    LIsNullOrUndefinedAndBranch* ins) {
  Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
  ValueOperand value = ToValue(ins, LIsNullOrUndefinedAndBranch::Input);

  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  masm.branchTestNull(Assembler::Equal, tag, ifTrue);
  masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);

  if (!isNextBlock(ins->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}

void CodeGenerator::loadOutermostJSScript(Register reg) {
  // The "outermost" JSScript means the script that we are compiling
  // basically; this is not always the script associated with the
  // current basic block, which might be an inlined script.

  MIRGraph& graph = current->mir()->graph();
  MBasicBlock* entryBlock = graph.entryBlock();
  masm.movePtr(ImmGCPtr(entryBlock->info().script()), reg);
}

void CodeGenerator::loadJSScriptForBlock(MBasicBlock* block, Register reg) {
  // The current JSScript means the script for the current
  // basic block. This may be an inlined script.

  JSScript* script = block->info().script();
  masm.movePtr(ImmGCPtr(script), reg);
}

void CodeGenerator::visitHasClass(LHasClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register output = ToRegister(ins->output());

  masm.loadObjClassUnsafe(lhs, output);
  masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
                 output);
}

void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register temp = ToRegister(ins->temp());

  // branchTestObjClass may zero the object register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = lhs;

  Label notEqual;

  masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
                          temp, spectreRegToZero, &notEqual);

  // We can't return a null result here, so bail out if the class doesn't match.
  bailoutFrom(&notEqual, ins->snapshot());
}

void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
  Register obj = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupUnalignedABICall(temp);
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();

  bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
}

void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}

void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}

void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}

void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}

void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}

void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
                                     Register input) {
  // Check the lower bound.
  if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }

  // Check the upper bound.
  if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
  // r->exponent(), there's nothing to check, because if we ended up in the
  // integer range checking code, the value is already in an integer register
  // in the integer range.
}

void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
                                     FloatRegister temp) {
  // Check the lower bound.
  if (r->hasInt32LowerBound()) {
    Label success;
    masm.loadConstantDouble(r->lower(), temp);
    if (r->canBeNaN()) {
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &success);
    masm.assumeUnreachable(
        "Double input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }
  // Check the upper bound.
  if (r->hasInt32UpperBound()) {
    Label success;
    masm.loadConstantDouble(r->upper(), temp);
    if (r->canBeNaN()) {
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
    masm.assumeUnreachable(
        "Double input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // This code does not yet check r->canHaveFractionalPart(). This would require
  // new assembler interfaces to make rounding instructions available.

  if (!r->canBeNegativeZero()) {
    Label success;

    // First, test for being equal to 0.0, which also includes -0.0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
                      &success);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
    // -Infinity instead of Infinity.
    masm.loadConstantDouble(1.0, temp);
    masm.divDouble(input, temp);
    masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);

    masm.assumeUnreachable("Input shouldn't be negative zero.");

    masm.bind(&success);
  }

  if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
      r->exponent() < FloatingPoint<double>::kExponentBias) {
    // Check the bounds implied by the maximum exponent.
    Label exponentLoOk;
    masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
                      &exponentLoOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentLoOk);

    Label exponentHiOk;
    masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &exponentHiOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentHiOk);
  } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
    // If we think the value can't be NaN, check that it isn't.
    Label notnan;
    masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
    masm.assumeUnreachable("Input shouldn't be NaN.");
    masm.bind(&notnan);

    // If we think the value also can't be an infinity, check that it isn't.
    if (!r->canBeInfiniteOrNaN()) {
      Label notposinf;
      masm.loadConstantDouble(PositiveInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
      masm.assumeUnreachable("Input shouldn't be +Inf.");
      masm.bind(&notposinf);

      Label notneginf;
      masm.loadConstantDouble(NegativeInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
      masm.assumeUnreachable("Input shouldn't be -Inf.");
      masm.bind(&notneginf);
    }
  }
}

void CodeGenerator::visitAssertClass(LAssertClass* ins) {
  Register obj = ToRegister(ins->input());
  Register temp = ToRegister(ins->getTemp(0));

  Label success;
  masm.branchTestObjClassNoSpectreMitigations(
      Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
  masm.assumeUnreachable("Wrong KnownClass during run-time");
  masm.bind(&success);
}

void CodeGenerator::visitAssertShape(LAssertShape* ins) {
  Register obj = ToRegister(ins->input());

  Label success;
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
                                              ins->mir()->shape(), &success);
  masm.assumeUnreachable("Wrong Shape during run-time");
  masm.bind(&success);
}

void CodeGenerator::visitAssertResultV(LAssertResultV* ins) {
#ifdef DEBUG
  const ValueOperand value = ToValue(ins, LAssertResultV::Input);
  emitAssertResultV(value, ins->mirRaw());
#else
  MOZ_CRASH("LAssertResultV is debug only");
#endif
}

void CodeGenerator::visitAssertResultT(LAssertResultT* ins) {
#ifdef DEBUG
  Register input = ToRegister(ins->input());
  emitAssertGCThingResult(input, ins->mirRaw());
#else
  MOZ_CRASH("LAssertResultT is debug only");
#endif
}

void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
  Register input = ToRegister(ins->input());
  const Range* r = ins->range();

  emitAssertRangeI(ins->mir()->input()->type(), r, input);
}

void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  const Range* r = ins->range();

  emitAssertRangeD(r, input, temp);
}

void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister temp = ToFloatRegister(ins->temp());
  FloatRegister temp2 = ToFloatRegister(ins->temp2());

  const Range* r = ins->range();

  masm.convertFloat32ToDouble(input, temp);
  emitAssertRangeD(r, temp, temp2);
}

void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
  const Range* r = ins->range();
  const ValueOperand value = ToValue(ins, LAssertRangeV::Input);
  Label done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    {
      Label isNotInt32;
      masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
      {
        ScratchTagScopeRelease _(&tag);
        Register unboxInt32 = ToTempUnboxRegister(ins->temp());
        Register input = masm.extractInt32(value, unboxInt32);
        emitAssertRangeI(MIRType::Int32, r, input);
        masm.jump(&done);
      }
      masm.bind(&isNotInt32);
    }

    {
      Label isNotDouble;
      masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
      {
        ScratchTagScopeRelease _(&tag);
        FloatRegister input = ToFloatRegister(ins->floatTemp1());
        FloatRegister temp = ToFloatRegister(ins->floatTemp2());
        masm.unboxDouble(value, input);
        emitAssertRangeD(r, input, temp);
        masm.jump(&done);
      }
      masm.bind(&isNotDouble);
    }
  }

  masm.assumeUnreachable("Incorrect range for Value.");
  masm.bind(&done);
}

void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());

  const void* interruptAddr = gen->runtime->addressOfInterruptBits();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
                ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  masm.wasmInterruptCheck(ToRegister(lir->tlsPtr()),
                          lir->mir()->bytecodeOffset());

  markSafepointAt(masm.currentOffset(), lir);

  // Note that masm.framePushed() doesn't include the register dump area.
  // That will be taken into account when the StackMap is created from the
  // LSafepoint.
  lir->safepoint()->setFramePushedAtStackMapBase(masm.framePushed());
  lir->safepoint()->setIsWasmTrap();
}

void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  const MWasmTrap* mir = lir->mir();

  masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
}

void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Label ok;
  Register ptr = ToRegister(ins->ptr());
  Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
  masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
  masm.bind(&ok);
}

void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Label ok;
#ifdef JS_64BIT
  Register64 ptr = ToRegister64(ins->ptr());
  Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
  masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
#else
  // 64-bit bounds checks are used only on 64-bit systems.
  MOZ_CRASH("Should not happen");
#endif
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
  masm.bind(&ok);
}

void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  Label ok;
  masm.branchTest32(Assembler::Zero, ptr, Imm32(mir->byteSize() - 1), &ok);
  masm.wasmTrap(wasm::Trap::UnalignedAccess, mir->bytecodeOffset());
  masm.bind(&ok);
}

void CodeGenerator::visitWasmLoadTls(LWasmLoadTls* ins) {
  switch (ins->mir()->type()) {
    case MIRType::Pointer:
      masm.loadPtr(Address(ToRegister(ins->tlsPtr()), ins->mir()->offset()),
                   ToRegister(ins->output()));
      break;
    case MIRType::Int32:
      masm.load32(Address(ToRegister(ins->tlsPtr()), ins->mir()->offset()),
                  ToRegister(ins->output()));
      break;
#ifdef JS_64BIT
    case MIRType::Int64:
      masm.load64(Address(ToRegister(ins->tlsPtr()), ins->mir()->offset()),
                  ToOutRegister64(ins));
      break;
#endif
    default:
      MOZ_CRASH("MIRType not supported in WasmLoadTls");
  }
}

void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
                                           JSScript* script, Register tmp) {
  // The code depends on the JitScript* not being discarded without also
  // invalidating Ion code. Assert this.
#ifdef DEBUG
  Label ok;
  masm.movePtr(ImmGCPtr(script), tmp);
  masm.loadJitScript(tmp, tmp);
  masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
  masm.assumeUnreachable("Didn't find JitScript?");
  masm.bind(&ok);
#endif

  masm.load32(warmUpCount, tmp);
  masm.add32(Imm32(1), tmp);
  masm.store32(tmp, warmUpCount);
}

void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
  Register tmp = ToRegister(ins->scratch());

  AbsoluteAddress warmUpCount =
      AbsoluteAddress(ins->mir()->script()->jitScript())
          .offset(JitScript::offsetOfWarmUpCount());
  incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
}

void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
  ValueOperand inputValue = ToValue(ins, LLexicalCheck::Input);
  Label bail;
  masm.branchTestMagicValue(Assembler::Equal, inputValue,
                            JS_UNINITIALIZED_LEXICAL, &bail);
  bailoutFrom(&bail, ins->snapshot());
}

void CodeGenerator::visitThrowRuntimeLexicalError(
    LThrowRuntimeLexicalError* ins) {
  pushArg(Imm32(ins->mir()->errorNumber()));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
}

void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
  pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, js::ThrowMsgOperation>(ins);
}

void CodeGenerator::visitGlobalDeclInstantiation(
    LGlobalDeclInstantiation* ins) {
  pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
  pushArg(ImmGCPtr(ins->mir()->block()->info().script()));

  using Fn = bool (*)(JSContext*, HandleScript, jsbytecode*);
  callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
}

void CodeGenerator::visitDebugger(LDebugger* ins) {
  Register cx = ToRegister(ins->getTemp(0));
  Register temp = ToRegister(ins->getTemp(1));

  masm.loadJSContext(cx);
  using Fn = bool (*)(JSContext * cx);
  masm.setupUnalignedABICall(temp);
  masm.passABIArg(cx);
  masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();

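  // If a Debugger with a live onDebuggerStatement hook is observing this
  // global, bail out so the debugger statement is handled outside of Ion code.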
  Label bail;
  masm.branchIfTrueBool(ReturnReg, &bail);
  bailoutFrom(&bail, ins->snapshot());
}

void CodeGenerator::visitNewTarget(LNewTarget* ins) {
  ValueOperand output = ToOutValue(ins);

  // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
  Label notConstructing, done;
  Address calleeToken(masm.getStackPointer(),
                      frameSize() + JitFrameLayout::offsetOfCalleeToken());
  masm.branchTestPtr(Assembler::Zero, calleeToken,
                     Imm32(CalleeToken_FunctionConstructing), &notConstructing);

  Register argvLen = output.scratchReg();

  Address actualArgsPtr(masm.getStackPointer(),
                        frameSize() + JitFrameLayout::offsetOfNumActualArgs());
  masm.loadPtr(actualArgsPtr, argvLen);

  Label useNFormals;

  size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
  masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);

  size_t argsOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
  {
    BaseValueIndex newTarget(masm.getStackPointer(), argvLen, argsOffset);
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  masm.bind(&useNFormals);

  {
    Address newTarget(masm.getStackPointer(),
                      argsOffset + (numFormalArgs * sizeof(Value)));
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  // else output = undefined
  masm.bind(&notConstructing);
  masm.moveValue(UndefinedValue(), output);
  masm.bind(&done);
}

void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
  ValueOperand returnValue = ToValue(ins, LCheckReturn::ReturnValue);
  ValueOperand thisValue = ToValue(ins, LCheckReturn::ThisValue);
  ValueOperand output = ToOutValue(ins);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
      ins, ArgList(returnValue), StoreNothing());

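  // A derived-class constructor may return an object, or |undefined| provided
  // |this| has been initialized; any other return value throws.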
  Label noChecks;
  masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
  masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.moveValue(thisValue, output);
  masm.jump(ool->rejoin());
  masm.bind(&noChecks);
  masm.moveValue(returnValue, output);
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
  ValueOperand value = ToValue(ins, LCheckIsObj::ValueIndex);
  Register output = ToRegister(ins->output());

  using Fn = bool (*)(JSContext*, CheckIsObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
      ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
  ValueOperand checkValue = ToValue(ins, LCheckObjCoercible::CheckValue);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
      ins, ArgList(checkValue), StoreNothing());
  masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
  masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
  ValueOperand heritage = ToValue(ins, LCheckClassHeritage::Heritage);
  Register temp1 = ToRegister(ins->temp1());
  Register temp2 = ToRegister(ins->temp2());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
      ins, ArgList(heritage), StoreNothing());

  masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
  masm.fallibleUnboxObject(heritage, temp1, ool->entry());

  masm.isConstructor(temp1, temp2, ool->entry());
  masm.branchTest32(Assembler::Zero, temp2, temp2, ool->entry());

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitCheckThis(LCheckThis* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThis::ThisValue);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
  ValueOperand thisValue = ToValue(ins, LCheckThisReinit::ThisValue);

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitGenerator(LGenerator* lir) {
  Register callee = ToRegister(lir->callee());
  Register environmentChain = ToRegister(lir->environmentChain());
  Register argsObject = ToRegister(lir->argsObject());

  pushArg(argsObject);
  pushArg(environmentChain);
  pushArg(ImmGCPtr(current->mir()->info().script()));
  pushArg(callee);

  using Fn = JSObject* (*)(JSContext * cx, HandleFunction, HandleScript,
                           HandleObject, HandleObject);
  callVM<Fn, CreateGenerator>(lir);
}

void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand valueOrReason = ToValue(lir, LAsyncResolve::ValueOrReasonInput);
  AsyncFunctionResolveKind resolveKind = lir->mir()->resolveKind();

  pushArg(Imm32(static_cast<int32_t>(resolveKind)));
  pushArg(valueOrReason);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue, AsyncFunctionResolveKind);
  callVM<Fn, js::AsyncFunctionResolve>(lir);
}

void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
  ValueOperand value = ToValue(lir, LAsyncAwait::ValueInput);
  Register generator = ToRegister(lir->generator());

  pushArg(value);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext * cx,
                           Handle<AsyncFunctionGeneratorObject*> genObj,
                           HandleValue value);
  callVM<Fn, js::AsyncFunctionAwait>(lir);
}

void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
  ValueOperand value = ToValue(lir, LCanSkipAwait::ValueInput);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
  callVM<Fn, js::CanSkipAwait>(lir);
}

void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
  ValueOperand value = ToValue(lir, LMaybeExtractAwaitValue::ValueInput);
  ValueOperand output = ToOutValue(lir);
  Register canSkip = ToRegister(lir->canSkip());

  Label cantExtract, finished;
  masm.branchIfFalseBool(canSkip, &cantExtract);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
  callVM<Fn, js::ExtractAwaitValue>(lir);
  masm.jump(&finished);
  masm.bind(&cantExtract);

  masm.moveValue(value, output);

  masm.bind(&finished);
}

void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
  ValueOperand checkValue = ToValue(ins, LDebugCheckSelfHosted::CheckValue);
  pushArg(checkValue);
  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::Debug_CheckSelfHosted>(ins);
}

void CodeGenerator::visitRandom(LRandom* ins) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  FloatRegister output = ToFloatRegister(ins->output());
  Register rngReg = ToRegister(ins->temp0());

  Register64 temp1 = ToRegister64(ins->temp1());
  Register64 temp2 = ToRegister64(ins->temp2());

  const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
  masm.movePtr(ImmPtr(rng), rngReg);

  masm.randomDouble(rngReg, output, temp1, temp2);
}

void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  switch (ins->mode()) {
    case MSignExtendInt32::Byte:
      masm.move8SignExtend(input, output);
      break;
    case MSignExtendInt32::Half:
      masm.move16SignExtend(input, output);
      break;
  }
}

void CodeGenerator::visitRotate(LRotate* ins) {
  MRotate* mir = ins->mir();
  Register input = ToRegister(ins->input());
  Register dest = ToRegister(ins->output());

  const LAllocation* count = ins->count();
  if (count->isConstant()) {
    int32_t c = ToInt32(count) & 0x1F;
    if (mir->isLeftRotate()) {
      masm.rotateLeft(Imm32(c), input, dest);
    } else {
      masm.rotateRight(Imm32(c), input, dest);
    }
  } else {
    Register creg = ToRegister(count);
    if (mir->isLeftRotate()) {
      masm.rotateLeft(creg, input, dest);
    } else {
      masm.rotateRight(creg, input, dest);
    }
  }
}

class OutOfLineNaNToZero : public OutOfLineCodeBase<CodeGenerator> {
  LNaNToZero* lir_;

 public:
  explicit OutOfLineNaNToZero(LNaNToZero* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineNaNToZero(this);
  }
  LNaNToZero* lir() const { return lir_; }
};

void CodeGenerator::visitOutOfLineNaNToZero(OutOfLineNaNToZero* ool) {
  FloatRegister output = ToFloatRegister(ool->lir()->output());
  masm.loadConstantDouble(0.0, output);
  masm.jump(ool->rejoin());
}

void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
  FloatRegister input = ToFloatRegister(lir->input());

  OutOfLineNaNToZero* ool = new (alloc()) OutOfLineNaNToZero(lir);
  addOutOfLineCode(ool, lir->mir());

  if (lir->mir()->operandIsNeverNegativeZero()) {
    masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
  } else {
    FloatRegister scratch = ToFloatRegister(lir->tempDouble());
    masm.loadConstantDouble(0.0, scratch);
    masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
                      ool->entry());
  }
  masm.bind(ool->rejoin());
}

static void BoundFunctionLength(MacroAssembler& masm, Register target,
                                Register targetFlags, Register argCount,
                                Register output, Label* slowPath) {
  masm.loadFunctionLength(target, targetFlags, output, slowPath);

  // Compute the bound function length: Max(0, target.length - argCount).
  Label nonNegative;
  masm.sub32(argCount, output);
  masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &nonNegative);
  masm.move32(Imm32(0), output);
  masm.bind(&nonNegative);
}

static void BoundFunctionName(MacroAssembler& masm, Register target,
                              Register targetFlags, Register output,
                              const JSAtomState& names, Label* slowPath) {
  Label notBoundTarget, loadName;
  masm.branchTest32(Assembler::Zero, targetFlags,
                    Imm32(FunctionFlags::BOUND_FUN), &notBoundTarget);
  {
    // Call into the VM if the target's name atom contains the bound
    // function prefix.
    masm.branchTest32(Assembler::NonZero, targetFlags,
                      Imm32(FunctionFlags::HAS_BOUND_FUNCTION_NAME_PREFIX),
                      slowPath);

    // Bound functions reuse HAS_GUESSED_ATOM for
    // HAS_BOUND_FUNCTION_NAME_PREFIX, so skip the guessed atom check below.
    static_assert(
        FunctionFlags::HAS_BOUND_FUNCTION_NAME_PREFIX ==
            FunctionFlags::HAS_GUESSED_ATOM,
        "HAS_BOUND_FUNCTION_NAME_PREFIX is shared with HAS_GUESSED_ATOM");
    masm.jump(&loadName);
  }
  masm.bind(&notBoundTarget);

  Label guessed, hasName;
  masm.branchTest32(Assembler::NonZero, targetFlags,
                    Imm32(FunctionFlags::HAS_GUESSED_ATOM), &guessed);
  masm.bind(&loadName);
  masm.loadPtr(Address(target, JSFunction::offsetOfAtom()), output);
  masm.branchTestPtr(Assembler::NonZero, output, output, &hasName);
  {
    masm.bind(&guessed);

    // An absent name property defaults to the empty string.
    masm.movePtr(ImmGCPtr(names.empty), output);
  }
  masm.bind(&hasName);
}

static void BoundFunctionFlags(MacroAssembler& masm, Register targetFlags,
                               Register bound, Register output) {
  // Set the BOUND_FN flag and, if the target is a constructor, the
  // CONSTRUCTOR flag.
  Label isConstructor, boundFlagsComputed;
  masm.load16ZeroExtend(Address(bound, JSFunction::offsetOfFlags()), output);
  masm.branchTest32(Assembler::NonZero, targetFlags,
                    Imm32(FunctionFlags::CONSTRUCTOR), &isConstructor);
  {
    masm.or32(Imm32(FunctionFlags::BOUND_FUN), output);
    masm.jump(&boundFlagsComputed);
  }
  masm.bind(&isConstructor);
  {
    masm.or32(Imm32(FunctionFlags::BOUND_FUN | FunctionFlags::CONSTRUCTOR),
              output);
  }
  masm.bind(&boundFlagsComputed);
}

void CodeGenerator::visitFinishBoundFunctionInit(
    LFinishBoundFunctionInit* lir) {
  Register bound = ToRegister(lir->bound());
  Register target = ToRegister(lir->target());
  Register argCount = ToRegister(lir->argCount());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  using Fn = bool (*)(JSContext * cx, HandleFunction bound, HandleObject target,
                      int32_t argCount);
  OutOfLineCode* ool = oolCallVM<Fn, JSFunction::finishBoundFunctionInit>(
      lir, ArgList(bound, target, argCount), StoreNothing());
  Label* slowPath = ool->entry();

  const size_t boundLengthOffset =
      FunctionExtended::offsetOfBoundFunctionLengthSlot();

  // Take the slow path if the target is not a JSFunction.
  masm.branchTestObjClass(Assembler::NotEqual, target, &JSFunction::class_,
                          temp1, target, slowPath);

  // Take the slow path if we'd need to adjust the [[Prototype]].
  masm.loadObjProto(bound, temp1);
  masm.loadObjProto(target, temp2);
  masm.branchPtr(Assembler::NotEqual, temp1, temp2, slowPath);

  // Get the function flags.
  masm.load16ZeroExtend(Address(target, JSFunction::offsetOfFlags()), temp1);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length or name property is
  // resolved, it might be shadowed.
  masm.branchTest32(
      Assembler::NonZero, temp1,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_NAME |
            FunctionFlags::RESOLVED_LENGTH),
      slowPath);

  // Store the bound function's length into the extended slot.
  BoundFunctionLength(masm, target, temp1, argCount, temp2, slowPath);
  masm.storeValue(JSVAL_TYPE_INT32, temp2, Address(bound, boundLengthOffset));

  // Store the target's name atom in the bound function as is.
  BoundFunctionName(masm, target, temp1, temp2, gen->runtime->names(),
                    slowPath);
  masm.storePtr(temp2, Address(bound, JSFunction::offsetOfAtom()));

  // Update the bound function's flags.
  BoundFunctionFlags(masm, temp1, bound, temp2);
  masm.store16(temp2, Address(bound, JSFunction::offsetOfFlags()));

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp());

  masm.setIsPackedArray(obj, output, temp);
}

void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
  Register array = ToRegister(lir->array());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  Label bail;
  masm.branchArrayIsNotPacked(array, temp1, temp2, &bail);
  bailoutFrom(&bail, lir->snapshot());
}

void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
  Register target = ToRegister(lir->target());
  ValueOperand out = ToOutValue(lir);
  Register scratch = out.scratchReg();

  using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
                                                          StoreValueTo(out));

  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

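  // A proto word of 0 encodes a null prototype and 1 encodes a lazy proto;
  // anything larger is a pointer to the actual prototype object.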
  masm.loadObjProto(target, scratch);

  Label hasProto;
  masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);

  // Call into the VM for lazy prototypes.
  masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());

  masm.moveValue(NullValue(), out);
  masm.jump(ool->rejoin());

  masm.bind(&hasProto);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
  pushArg(ToValue(lir, LObjectWithProto::PrototypeValue));

  using Fn = PlainObject* (*)(JSContext*, HandleValue);
  callVM<Fn, js::ObjectWithProtoOperation>(lir);
}

void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
  Register obj = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  masm.loadObjProto(obj, output);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label done;
  masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
  masm.bind(&done);
#endif
}

void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
  pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));

  using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
  callVM<Fn, js::BuiltinObjectOperation>(lir);
}

void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
  Register callee = ToRegister(lir->callee());
  ValueOperand out = ToOutValue(lir);
  Register temp = ToRegister(lir->temp());

#ifdef DEBUG
  Label classCheckDone;
  masm.branchTestObjClass(Assembler::Equal, callee, &JSFunction::class_, temp,
                          callee, &classCheckDone);
  masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
  masm.bind(&classCheckDone);
#endif

  // Load prototype of callee
  masm.loadObjProto(callee, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because |callee| is guaranteed to be a
  // JSFunction and only proxy objects can have a lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
  masm.bind(&proxyCheckDone);
#endif

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), out);

  masm.bind(&done);
}

void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
  Register func = ToRegister(lir->function());
  ValueOperand homeObject = ToValue(lir, LInitHomeObject::HomeObjectValue);

  Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  emitPreBarrier(addr);
  masm.storeValue(homeObject, addr);
}

void CodeGenerator::visitIsTypedArrayConstructor(
    LIsTypedArrayConstructor* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.setIsDefinitelyTypedArrayConstructor(object, output);
}

void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
  ValueOperand value = ToValue(lir, LLoadValueTag::Value);
  Register output = ToRegister(lir->output());

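  // extractTag may return a register other than |output| (for example the
  // type register on 32-bit nunboxing platforms), so only move when needed.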
  Register tag = masm.extractTag(value, output);
  if (tag != output) {
    masm.mov(tag, output);
  }
}

void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());

  // If both lhs and rhs are numbers, can't use tag comparison to do inequality
  // comparison
  Label done;
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  bailout(lir->snapshot());

  masm.bind(&done);
}

void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);
  masm.unboxObject(
      Address(output, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      output);
}

void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
  Register object = ToRegister(lir->object());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());

  masm.movePropertyKey(lir->mir()->propId(), temp2);
  masm.movePtr(ImmGCPtr(lir->mir()->getterSetter()), temp3);

  using Fn = bool (*)(JSContext * cx, JSObject * obj, jsid id,
                      GetterSetter * getterSetter);
  masm.setupUnalignedABICall(temp1);
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(object);
  masm.passABIArg(temp2);
  masm.passABIArg(temp3);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();

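  // The pure helper returns false both when the expected GetterSetter isn't
  // found and when the lookup can't be answered without calling into the VM,
  // so bail out on any false result.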
  bailoutIfFalseBool(ReturnReg, lir->snapshot());
}

void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());

  Label bail;
  masm.branchIfObjectNotExtensible(object, temp, &bail);
  bailoutFrom(&bail, lir->snapshot());
}

void CodeGenerator::visitGuardInt32IsNonNegative(
    LGuardInt32IsNonNegative* lir) {
  Register index = ToRegister(lir->index());

  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
}

void CodeGenerator::visitGuardIndexGreaterThanDenseInitLength(
    LGuardIndexGreaterThanDenseInitLength* lir) {
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->spectreTemp());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  // Ensure index >= initLength.
  Label outOfBounds;
  Address capacity(temp, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreTemp, &outOfBounds);
15143   bailout(lir->snapshot());
15144   masm.bind(&outOfBounds);
15145 }
15146 
visitGuardIndexIsValidUpdateOrAdd(LGuardIndexIsValidUpdateOrAdd * lir)15147 void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
15148     LGuardIndexIsValidUpdateOrAdd* lir) {
15149   Register object = ToRegister(lir->object());
15150   Register index = ToRegister(lir->index());
15151   Register temp = ToRegister(lir->temp());
15152   Register spectreTemp = ToTempRegisterOrInvalid(lir->spectreTemp());
15153 
15154   // Load obj->elements.
15155   masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
15156 
15157   Label success;
15158 
15159   // If length is writable, branch to &success.  All indices are writable.
15160   Address flags(temp, ObjectElements::offsetOfFlags());
15161   masm.branchTest32(Assembler::Zero, flags,
15162                     Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
15163                     &success);
15164 
15165   // Otherwise, ensure index is in bounds.
15166   Label bail;
15167   Address length(temp, ObjectElements::offsetOfLength());
15168   masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
15169   masm.bind(&success);
15170 
15171   bailoutFrom(&bail, lir->snapshot());
15172 }
15173 
visitCallAddOrUpdateSparseElement(LCallAddOrUpdateSparseElement * lir)15174 void CodeGenerator::visitCallAddOrUpdateSparseElement(
15175     LCallAddOrUpdateSparseElement* lir) {
15176   Register object = ToRegister(lir->object());
15177   Register index = ToRegister(lir->index());
15178   ValueOperand value = ToValue(lir, LCallAddOrUpdateSparseElement::ValueIndex);
15179 
15180   pushArg(Imm32(lir->mir()->strict()));
15181   pushArg(value);
15182   pushArg(index);
15183   pushArg(object);
15184 
15185   using Fn =
15186       bool (*)(JSContext*, HandleArrayObject, int32_t, HandleValue, bool);
15187   callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
15188 }
15189 
visitCallGetSparseElement(LCallGetSparseElement * lir)15190 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
15191   Register object = ToRegister(lir->object());
15192   Register index = ToRegister(lir->index());
15193 
15194   pushArg(index);
15195   pushArg(object);
15196 
15197   using Fn =
15198       bool (*)(JSContext*, HandleArrayObject, int32_t, MutableHandleValue);
15199   callVM<Fn, js::GetSparseElementHelper>(lir);
15200 }
15201 
visitCallNativeGetElement(LCallNativeGetElement * lir)15202 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
15203   Register object = ToRegister(lir->object());
15204   Register index = ToRegister(lir->index());
15205 
15206   pushArg(index);
15207   pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
15208   pushArg(object);
15209 
15210   using Fn = bool (*)(JSContext*, HandleNativeObject, HandleValue, int32_t,
15211                       MutableHandleValue);
15212   callVM<Fn, js::NativeGetElement>(lir);
15213 }
15214 
visitCallObjectHasSparseElement(LCallObjectHasSparseElement * lir)15215 void CodeGenerator::visitCallObjectHasSparseElement(
15216     LCallObjectHasSparseElement* lir) {
15217   Register object = ToRegister(lir->object());
15218   Register index = ToRegister(lir->index());
15219   Register temp1 = ToRegister(lir->temp1());
15220   Register temp2 = ToRegister(lir->temp2());
15221   Register output = ToRegister(lir->output());
15222 
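  // Reserve a Value on the stack as an out-param for HasNativeElementPure and
  // pass its address via |temp2|; the result is unboxed from it afterwards.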
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(temp2);

  using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
  masm.setupUnalignedABICall(temp1);
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(object);
  masm.passABIArg(index);
  masm.passABIArg(temp2);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.mov(ReturnReg, temp1);

  Label bail, ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(temp1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  bailoutFrom(&bail, lir->snapshot());
}

void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
  Register bits = ToRegister(ins->bits());
  Register input = ToRegister(ins->input());

  pushArg(bits);
  pushArg(input);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callVM<Fn, jit::BigIntAsIntN>(ins);
}

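// Specialization of BigInt.asIntN for a 64-bit width: reuse the input BigInt
// when its value already fits in an int64, otherwise allocate a new BigInt
// from the low 64 bits of the value.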
void CodeGenerator::visitBigIntAsIntN64(LBigIntAsIntN64* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  masm.movePtr(input, output);

  // Load the BigInt value as an int64.
  masm.loadBigInt64(input, temp64);

  // Create a new BigInt when the input exceeds the int64 range.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(64 / BigInt::DigitBits), &create);

  // And create a new BigInt when the value and the BigInt have different signs.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &create);
  masm.jump(&done);

  masm.bind(&nonNegative);
  masm.branchTest64(Assembler::NotSigned, temp64, temp64, temp, &done);

  masm.bind(&create);
  emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);

  masm.bind(&done);
}

void CodeGenerator::visitBigIntAsIntN32(LBigIntAsIntN32* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  masm.movePtr(input, output);

  // Load the absolute value of the first digit.
  masm.loadFirstBigIntDigitOrZero(input, temp);

  // If the absolute value exceeds the int32 range, create a new BigInt.
  masm.branchPtr(Assembler::Above, temp, Imm32(INT32_MAX), &create);

  // Also create a new BigInt if we have more than one digit.
  masm.branch32(Assembler::BelowOrEqual,
                Address(input, BigInt::offsetOfLength()), Imm32(1), &done);

  masm.bind(&create);

  // |temp| stores the absolute value, negate it when the sign flag is set.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.negPtr(temp);
  masm.bind(&nonNegative);

  masm.move32To64SignExtend(temp, temp64);
  emitCreateBigInt(ins, Scalar::BigInt64, temp64, output, temp);

  masm.bind(&done);
}

void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
  Register bits = ToRegister(ins->bits());
  Register input = ToRegister(ins->input());

  pushArg(bits);
  pushArg(input);

  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  callVM<Fn, jit::BigIntAsUintN>(ins);
}

void CodeGenerator::visitBigIntAsUintN64(LBigIntAsUintN64* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  masm.movePtr(input, output);

  // Load the BigInt value as an uint64.
  masm.loadBigInt64(input, temp64);

  // Create a new BigInt when the input exceeds the uint64 range.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(64 / BigInt::DigitBits), &create);

  // And create a new BigInt when the input has the sign flag set.
  masm.branchIfBigIntIsNonNegative(input, &done);

  masm.bind(&create);
  emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);

  masm.bind(&done);
}

void CodeGenerator::visitBigIntAsUintN32(LBigIntAsUintN32* ins) {
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp());
  Register64 temp64 = ToRegister64(ins->temp64());
  Register output = ToRegister(ins->output());

  Label done, create;

  masm.movePtr(input, output);

  // Load the absolute value of the first digit.
  masm.loadFirstBigIntDigitOrZero(input, temp);

  // If the absolute value exceeds the uint32 range, create a new BigInt.
#if JS_PUNBOX64
  masm.branchPtr(Assembler::Above, temp, ImmWord(UINT32_MAX), &create);
#endif

  // Also create a new BigInt if we have more than one digit.
  masm.branch32(Assembler::Above, Address(input, BigInt::offsetOfLength()),
                Imm32(1), &create);

  // And create a new BigInt when the input has the sign flag set.
  masm.branchIfBigIntIsNonNegative(input, &done);

  masm.bind(&create);

  // |temp| stores the absolute value, negate it when the sign flag is set.
  Label nonNegative;
  masm.branchIfBigIntIsNonNegative(input, &nonNegative);
  masm.negPtr(temp);
  masm.bind(&nonNegative);

  masm.move32To64ZeroExtend(temp, temp64);
  emitCreateBigInt(ins, Scalar::BigUint64, temp64, output, temp);

  masm.bind(&done);
}

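// Shared code for LIonToWasmCall{,V,I64}: collect the stack arguments required
// by the wasm ABI (register arguments are already in place), assert that the
// single result (if any) is expected in the right return register, and emit a
// direct call with a safepoint.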
template <size_t NumDefs>
void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
  wasm::JitCallStackArgVector stackArgs;
  masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
  if (masm.oom()) {
    return;
  }

  const wasm::FuncExport& funcExport = lir->mir()->funcExport();
  const wasm::FuncType& sig = funcExport.funcType();

  WasmABIArgGenerator abi;
  for (size_t i = 0; i < lir->numOperands(); i++) {
    MIRType argMir;
    switch (sig.args()[i].kind()) {
      case wasm::ValType::I32:
      case wasm::ValType::I64:
      case wasm::ValType::F32:
      case wasm::ValType::F64:
        argMir = ToMIRType(sig.args()[i]);
        break;
      case wasm::ValType::Rtt:
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected argument type when calling from ion to wasm");
      case wasm::ValType::Ref:
        switch (sig.args()[i].refTypeKind()) {
          case wasm::RefType::Extern:
            // AnyRef is boxed on the JS side, so passed as a pointer here.
            argMir = ToMIRType(sig.args()[i]);
            break;
          case wasm::RefType::Func:
          case wasm::RefType::Eq:
          case wasm::RefType::TypeIndex:
            MOZ_CRASH("unexpected argument type when calling from ion to wasm");
        }
        break;
    }

    ABIArg arg = abi.next(argMir);
    switch (arg.kind()) {
      case ABIArg::GPR:
      case ABIArg::FPU: {
        MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
        stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
        break;
      }
      case ABIArg::Stack: {
        const LAllocation* larg = lir->getOperand(i);
        if (larg->isConstant()) {
          stackArgs.infallibleEmplaceBack(ToInt32(larg));
        } else if (larg->isGeneralReg()) {
          stackArgs.infallibleEmplaceBack(ToRegister(larg));
        } else if (larg->isFloatReg()) {
          stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
        } else {
          stackArgs.infallibleEmplaceBack(ToAddress(larg));
        }
        break;
      }
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR: {
        MOZ_CRASH(
            "no way to pass i64, and wasm uses hardfp for function calls");
      }
#endif
      case ABIArg::Uninitialized: {
        MOZ_CRASH("Uninitialized ABIArg kind");
      }
    }
  }

  const wasm::ValTypeVector& results = sig.results();
  if (results.length() == 0) {
    MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
  } else {
    MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
    switch (results[0].kind()) {
      case wasm::ValType::I32:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
        MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
        break;
      case wasm::ValType::I64:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
        MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
        break;
      case wasm::ValType::F32:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
        break;
      case wasm::ValType::F64:
        MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
        break;
      case wasm::ValType::Rtt:
      case wasm::ValType::V128:
        MOZ_CRASH("unexpected return type when calling from ion to wasm");
      case wasm::ValType::Ref:
        switch (results[0].refTypeKind()) {
          case wasm::RefType::Func:
          case wasm::RefType::Extern:
          case wasm::RefType::Eq:
            // The wasm stubs layer unboxes anything that needs to be unboxed
            // and leaves it in a Value.  A FuncRef/EqRef could in principle be
            // left as a raw object pointer, but for now that would complicate
            // the API.
            MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
            break;
          case wasm::RefType::TypeIndex:
            MOZ_CRASH("unexpected return type when calling from ion to wasm");
        }
        break;
    }
  }

  bool profilingEnabled = isProfilerInstrumentationEnabled();
  WasmInstanceObject* instObj = lir->mir()->instanceObject();

  Register scratch = ToRegister(lir->temp());

  uint32_t callOffset;
  GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
                            profilingEnabled, scratch, &callOffset);

  // Add the instance object to the constant pool, so it is transferred to
  // the owning IonScript and so that it gets traced as long as the IonScript
  // lives.

  uint32_t unused;
  masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));

  markSafepointAt(callOffset, lir);
}

void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
  emitIonToWasmCallBase(lir);
}
void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
  emitIonToWasmCallBase(lir);
}
void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
  emitIonToWasmCallBase(lir);
}

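// A null wasm reference is represented as the null pointer, so clearing the
// output register produces it.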
void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
  masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
}

// Simple codegen for platforms where some values may be stack allocated,
// compare types and instruction result are limited to i32, and the "true"
// input is reused for the output.  See ditto code in Lowering-shared.cpp.
void CodeGenerator::emitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
                    ins->compareType() == MCompare::Compare_UInt32;
  bool selIs32bit = ins->mir()->type() == MIRType::Int32;

  if (cmpIs32bit && selIs32bit) {
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == out,
               "true expr input is reused for output");

    Assembler::Condition cond = Assembler::InvertCondition(
        JSOpToCondition(ins->compareType(), ins->jsop()));
    const LAllocation* rhs = ins->rightExpr();
    const LAllocation* falseExpr = ins->ifFalseExpr();
    Register lhs = ToRegister(ins->leftExpr());

    if (rhs->isRegister()) {
      if (falseExpr->isRegister()) {
        // On arm32, this is the only one of the four cases that can actually
        // happen, since |rhs| and |falseExpr| are marked useAny() by
        // LIRGenerator::visitWasmSelect, and useAny() means "register only"
        // on arm32.
        masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
                         out);
      } else {
        masm.cmp32Load32(cond, lhs, ToRegister(rhs), ToAddress(falseExpr), out);
      }
    } else {
      if (falseExpr->isRegister()) {
        masm.cmp32Move32(cond, lhs, ToAddress(rhs), ToRegister(falseExpr), out);
      } else {
        masm.cmp32Load32(cond, lhs, ToAddress(rhs), ToAddress(falseExpr), out);
      }
    }
    return;
  }

  MOZ_CRASH("in CodeGenerator::visitWasmCompareAndSelect: unexpected types");
}

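// A wasm fence orders all preceding and following memory accesses; emit a
// full memory barrier.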
void CodeGenerator::visitWasmFence(LWasmFence* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  masm.memoryBarrier(MembarFull);
}

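// Box a JS Value into a wasm anyref: null becomes the null pointer, an object
// is used as its raw pointer, and any other value is boxed out-of-line by
// wasm::BoxBoxableValue.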
void CodeGenerator::visitWasmBoxValue(LWasmBoxValue* lir) {
  ValueOperand input = ToValue(lir, LWasmBoxValue::Input);
  Register output = ToRegister(lir->output());

  Label nullValue, objectValue, done;
  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);
    masm.branchTestObject(Assembler::Equal, tag, &objectValue);
    masm.branchTestNull(Assembler::Equal, tag, &nullValue);
  }

  using Fn = JSObject* (*)(JSContext*, HandleValue);
  OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::BoxBoxableValue>(
      lir, ArgList(input), StoreRegisterTo(output));

  masm.jump(oolBoxValue->entry());

  masm.bind(&nullValue);
  // See the definition of AnyRef for a discussion of pointer representation.
  masm.xorPtr(output, output);
  masm.jump(&done);

  masm.bind(&objectValue);
  // See the definition of AnyRef for a discussion of pointer representation.
  masm.unboxObject(input, output);

  masm.bind(&done);
  masm.bind(oolBoxValue->rejoin());
}

void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
  Register input = ToRegister(lir->getOperand(LWasmAnyRefFromJSObject::Input));
  Register output = ToRegister(lir->output());
  // See the definition of AnyRef for a discussion of pointer representation.
  if (input != output) {
    masm.movePtr(input, output);
  }
}

static_assert(!std::is_polymorphic_v<CodeGenerator>,
              "CodeGenerator should not have any virtual methods");

}  // namespace jit
}  // namespace js