1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "jit/CacheIRCompiler.h"
8 
9 #include "mozilla/ArrayUtils.h"
10 #include "mozilla/FunctionTypeTraits.h"
11 #include "mozilla/MaybeOneOf.h"
12 #include "mozilla/ScopeExit.h"
13 
14 #include <type_traits>
15 #include <utility>
16 
17 #include "jslibmath.h"
18 #include "jsmath.h"
19 
20 #include "builtin/DataViewObject.h"
21 #include "builtin/MapObject.h"
22 #include "builtin/Object.h"
23 #include "gc/Allocator.h"
24 #include "jit/BaselineCacheIRCompiler.h"
25 #include "jit/IonCacheIRCompiler.h"
26 #include "jit/IonIC.h"
27 #include "jit/JitFrames.h"
28 #include "jit/JitRuntime.h"
29 #include "jit/JitZone.h"
30 #include "jit/SharedICHelpers.h"
31 #include "jit/SharedICRegisters.h"
32 #include "jit/TemplateObject.h"
33 #include "jit/VMFunctions.h"
34 #include "js/friend/DOMProxy.h"     // JS::ExpandoAndGeneration
35 #include "js/friend/XrayJitInfo.h"  // js::jit::GetXrayJitInfo
36 #include "js/ScalarType.h"          // js::Scalar::Type
37 #include "proxy/DOMProxy.h"
38 #include "proxy/Proxy.h"
39 #include "vm/ArgumentsObject.h"
40 #include "vm/ArrayBufferObject.h"
41 #include "vm/ArrayBufferViewObject.h"
42 #include "vm/BigIntType.h"
43 #include "vm/FunctionFlags.h"  // js::FunctionFlags
44 #include "vm/GeneratorObject.h"
45 #include "vm/GetterSetter.h"
46 #include "vm/Uint8Clamped.h"
47 
48 #include "builtin/Boolean-inl.h"
49 #include "gc/ObjectKind-inl.h"
50 #include "jit/MacroAssembler-inl.h"
51 #include "jit/SharedICHelpers-inl.h"
52 #include "jit/VMFunctionList-inl.h"
53 #include "vm/BytecodeUtil-inl.h"
54 #include "vm/Realm-inl.h"
55 
56 using namespace js;
57 using namespace js::jit;
58 
59 using mozilla::BitwiseCast;
60 using mozilla::Maybe;
61 
62 using JS::ExpandoAndGeneration;
63 
// Load the boxed Value for |op| into a value register, materializing it from
// wherever it currently lives (register, native stack, baseline frame slot,
// constant, unboxed payload, or double register). The returned register is
// reserved for the current instruction via currentOpRegs_ and the operand's
// location is updated to point at it.
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      // Already boxed in a register: reserve and reuse it.
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      // Boxed on the native stack: pop/load into a fresh value register.
      // popValue also updates |loc| to point at the new register.
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      // Stored in a baseline frame slot: load into a fresh value register.
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      // Materialize the constant Value into a fresh register.
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      // The payload register is no longer needed now that the boxed form
      // exists; release it back to the free pool.
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      // Unboxed payload on the stack: load it, then re-box (tag) it.
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      // Box the double; boxDouble needs a scratch FP register.
      ValueOperand reg = allocateValueRegister(masm);
      {
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(loc.doubleReg(), reg, fpscratch);
      }
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
130 
// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val.
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
                                                  NumberOperandId op,
                                                  FloatRegister dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[op.id()];

  // |failure| is only reachable from the boxed-Value cases below (via
  // masm.ensureDouble); the caller's isNumber guard makes it unreachable in
  // practice, which we assert at the end.
  Label failure, done;
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      // Boxed in a register: int32 is converted, double is unboxed.
      masm.ensureDouble(loc.valueReg(), dest, &failure);
      break;
    }

    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::DoubleReg: {
      // Already an unboxed double: a plain move, no failure path needed.
      masm.moveDouble(loc.doubleReg(), dest);
      return;
    }

    case OperandLocation::Constant: {
      MOZ_ASSERT(loc.constant().isNumber(),
                 "Caller must ensure the operand is a number value");
      masm.loadConstantDouble(loc.constant().toNumber(), dest);
      return;
    }

    case OperandLocation::PayloadReg: {
      // Doubles can't be stored in payload registers, so this must be an int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      masm.convertInt32ToDouble(loc.payloadReg(), dest);
      return;
    }

    case OperandLocation::PayloadStack: {
      // Doubles can't be stored in payload registers, so this must be an int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.convertInt32ToDouble(addr, dest);
      return;
    }

    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
      return;
  }
  // Only the boxed cases (which used |break|) reach here; make the failure
  // path assert rather than silently fall through.
  masm.jump(&done);
  masm.bind(&failure);
  masm.assumeUnreachable(
      "Missing guard allowed non-number to hit ensureDoubleRegister");
  masm.bind(&done);
}
204 
copyToScratchRegister(MacroAssembler & masm,TypedOperandId typedId,Register dest) const205 void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
206                                                    TypedOperandId typedId,
207                                                    Register dest) const {
208   // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
209   // any stack slot offsets below.
210   int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;
211 
212   const OperandLocation& loc = operandLocations_[typedId.id()];
213 
214   Label failure, done;
215   switch (loc.kind()) {
216     case OperandLocation::ValueReg: {
217       masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
218       break;
219     }
220     case OperandLocation::ValueStack: {
221       Address addr = valueAddress(masm, &loc);
222       addr.offset += stackOffset;
223       masm.unboxNonDouble(addr, dest, typedId.type());
224       break;
225     }
226     case OperandLocation::BaselineFrame: {
227       Address addr = addressOf(masm, loc.baselineFrameSlot());
228       addr.offset += stackOffset;
229       masm.unboxNonDouble(addr, dest, typedId.type());
230       break;
231     }
232     case OperandLocation::PayloadReg: {
233       MOZ_ASSERT(loc.payloadType() == typedId.type());
234       masm.mov(loc.payloadReg(), dest);
235       return;
236     }
237     case OperandLocation::PayloadStack: {
238       MOZ_ASSERT(loc.payloadType() == typedId.type());
239       MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
240       Address addr = payloadAddress(masm, &loc);
241       addr.offset += stackOffset;
242       masm.loadPtr(addr, dest);
243       return;
244     }
245     case OperandLocation::DoubleReg:
246     case OperandLocation::Constant:
247     case OperandLocation::Uninitialized:
248       MOZ_CRASH("Unhandled operand location");
249   }
250 }
251 
// Copy the boxed Value for |valId| into |dest| without changing the operand's
// recorded location (const: no allocation, no operand-state updates).
void CacheRegisterAllocator::copyToScratchValueRegister(
    MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  const OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), dest);
      break;
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), dest);
      break;
    case OperandLocation::PayloadReg:
      // Re-box the payload into |dest|; the original payload register is
      // left untouched.
      masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
      break;
    case OperandLocation::PayloadStack: {
      // Load the payload via dest's scratch half, then tag it in place.
      Address addr = payloadAddress(masm, &loc);
      masm.loadPtr(addr, dest.scratchReg());
      masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
      break;
    }
    case OperandLocation::DoubleReg: {
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(loc.doubleReg(), dest, fpscratch);
      break;
    }
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }
}
293 
// Load the unboxed payload of |typedId| into a general-purpose register,
// unboxing or materializing it as needed. The returned register is reserved
// for the current instruction via currentOpRegs_ and the operand's location
// is updated to PayloadReg.
Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      // Already unboxed in a register: reserve and reuse it.
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      // Return the whole boxed pair to the pool, then take back just the
      // scratch half to hold the payload (on 32-bit this frees the type
      // register).
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      // popPayload also updates |loc| to point at the new register.
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      // Unbox straight out of the baseline frame slot.
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    };

    case OperandLocation::Constant: {
      // Only GC-pointer constants (string/symbol/BigInt) can be moved into a
      // payload register directly.
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString()) {
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      } else if (v.isSymbol()) {
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      } else if (v.isBigInt()) {
        masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
      } else {
        MOZ_CRASH("Unexpected Value");
      }
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
375 
useConstantOrRegister(MacroAssembler & masm,ValOperandId val)376 ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
377     MacroAssembler& masm, ValOperandId val) {
378   MOZ_ASSERT(!addedFailurePath_);
379   MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
380 
381   OperandLocation& loc = operandLocations_[val.id()];
382   switch (loc.kind()) {
383     case OperandLocation::Constant:
384       return loc.constant();
385 
386     case OperandLocation::PayloadReg:
387     case OperandLocation::PayloadStack: {
388       JSValueType payloadType = loc.payloadType();
389       Register reg = useRegister(masm, TypedOperandId(val, payloadType));
390       return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
391                                   AnyRegister(reg));
392     }
393 
394     case OperandLocation::ValueReg:
395     case OperandLocation::ValueStack:
396     case OperandLocation::BaselineFrame:
397       return TypedOrValueRegister(useValueRegister(masm, val));
398 
399     case OperandLocation::DoubleReg:
400       return TypedOrValueRegister(MIRType::Double,
401                                   AnyRegister(loc.doubleReg()));
402 
403     case OperandLocation::Uninitialized:
404       break;
405   }
406 
407   MOZ_CRASH();
408 }
409 
defineRegister(MacroAssembler & masm,TypedOperandId typedId)410 Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
411                                                 TypedOperandId typedId) {
412   MOZ_ASSERT(!addedFailurePath_);
413   MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
414 
415   OperandLocation& loc = operandLocations_[typedId.id()];
416   MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
417 
418   Register reg = allocateRegister(masm);
419   loc.setPayloadReg(reg, typedId.type());
420   return reg;
421 }
422 
defineValueRegister(MacroAssembler & masm,ValOperandId val)423 ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
424                                                          ValOperandId val) {
425   MOZ_ASSERT(!addedFailurePath_);
426   MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
427 
428   OperandLocation& loc = operandLocations_[val.id()];
429   MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
430 
431   ValueOperand reg = allocateValueRegister(masm);
432   loc.setValueReg(reg);
433   return reg;
434 }
435 
freeDeadOperandLocations(MacroAssembler & masm)436 void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
437   // See if any operands are dead so we can reuse their registers. Note that
438   // we skip the input operands, as those are also used by failure paths, and
439   // we currently don't track those uses.
440   for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
441        i++) {
442     if (!writer_.operandIsDead(i, currentInstruction_)) {
443       continue;
444     }
445 
446     OperandLocation& loc = operandLocations_[i];
447     switch (loc.kind()) {
448       case OperandLocation::PayloadReg:
449         availableRegs_.add(loc.payloadReg());
450         break;
451       case OperandLocation::ValueReg:
452         availableRegs_.add(loc.valueReg());
453         break;
454       case OperandLocation::PayloadStack:
455         masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
456         break;
457       case OperandLocation::ValueStack:
458         masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
459         break;
460       case OperandLocation::Uninitialized:
461       case OperandLocation::BaselineFrame:
462       case OperandLocation::Constant:
463       case OperandLocation::DoubleReg:
464         break;
465     }
466     loc.setUninitialized();
467   }
468 }
469 
discardStack(MacroAssembler & masm)470 void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
471   // This should only be called when we are no longer using the operands,
472   // as we're discarding everything from the native stack. Set all operand
473   // locations to Uninitialized to catch bugs.
474   for (size_t i = 0; i < operandLocations_.length(); i++) {
475     operandLocations_[i].setUninitialized();
476   }
477 
478   if (stackPushed_ > 0) {
479     masm.addToStackPtr(Imm32(stackPushed_));
480     stackPushed_ = 0;
481   }
482   freePayloadSlots_.clear();
483   freeValueSlots_.clear();
484 }
485 
// Allocate any general-purpose register for the current instruction. Tries,
// in order: the free pool, freeing dead operands, spilling an operand not
// used by the current instruction, and finally pushing a register from
// availableRegsAfterSpill_.
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  if (availableRegs_.empty()) {
    freeDeadOperandLocations(masm);
  }

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) {
          // In use by the current instruction; can't spill.
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    // Save a caller-visible register on the stack; it is recorded in
    // spilledRegs_ so it can be restored later.
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}
539 
// Reserve the specific register |reg| for the current instruction, evicting
// whatever currently occupies it (free pool, spill pool, or an operand) as
// needed.
void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // Register may be available only after spilling contents.
  if (availableRegsAfterSpill_.has(reg)) {
    availableRegsAfterSpill_.take(reg);
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) {
        continue;
      }

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) {
        continue;
      }

      // After the spill the operand's old register (pair) is free: return it
      // all to the pool, then take just |reg| back for the caller.
      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}
597 
// Reserve a specific ValueOperand for the current instruction. On nunbox32
// platforms a Value occupies two registers (payload + type), so both halves
// must be reserved.
void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}
607 
#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
// Allocate a register (or a register pair on 32-bit nunbox platforms)
// suitable for holding a boxed Value.
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}
623 
init()624 bool CacheRegisterAllocator::init() {
625   if (!origInputLocations_.resize(writer_.numInputOperands())) {
626     return false;
627   }
628   if (!operandLocations_.resize(writer_.numOperandIds())) {
629     return false;
630   }
631   return true;
632 }
633 
initAvailableRegsAfterSpill()634 void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
635   // Registers not in availableRegs_ and not used by input operands are
636   // available after being spilled.
637   availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
638       GeneralRegisterSet::Not(availableRegs_.set()),
639       GeneralRegisterSet::Not(inputRegisterSet()));
640 }
641 
void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the allocator.
  //
  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  // Pairwise scan: compare each register-held input i against every earlier
  // input j.
  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) {
        continue;
      }

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        // Keep scanning: other earlier inputs may also alias loc1.
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }

#ifdef DEBUG
  assertValidState();
#endif
}
683 
inputRegisterSet() const684 GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
685   MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());
686 
687   AllocatableGeneralRegisterSet result;
688   for (size_t i = 0; i < writer_.numInputOperands(); i++) {
689     const OperandLocation& loc = operandLocations_[i];
690     MOZ_ASSERT(loc == origInputLocations_[i]);
691 
692     switch (loc.kind()) {
693       case OperandLocation::PayloadReg:
694         result.addUnchecked(loc.payloadReg());
695         continue;
696       case OperandLocation::ValueReg:
697         result.addUnchecked(loc.valueReg());
698         continue;
699       case OperandLocation::PayloadStack:
700       case OperandLocation::ValueStack:
701       case OperandLocation::BaselineFrame:
702       case OperandLocation::Constant:
703       case OperandLocation::DoubleReg:
704         continue;
705       case OperandLocation::Uninitialized:
706         break;
707     }
708     MOZ_CRASH("Invalid kind");
709   }
710 
711   return result.set();
712 }
713 
knownType(ValOperandId val) const714 JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
715   const OperandLocation& loc = operandLocations_[val.id()];
716 
717   switch (loc.kind()) {
718     case OperandLocation::ValueReg:
719     case OperandLocation::ValueStack:
720     case OperandLocation::BaselineFrame:
721       return JSVAL_TYPE_UNKNOWN;
722 
723     case OperandLocation::PayloadStack:
724     case OperandLocation::PayloadReg:
725       return loc.payloadType();
726 
727     case OperandLocation::Constant:
728       return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
729                                        : loc.constant().extractNonDoubleType();
730 
731     case OperandLocation::DoubleReg:
732       return JSVAL_TYPE_DOUBLE;
733 
734     case OperandLocation::Uninitialized:
735       break;
736   }
737 
738   MOZ_CRASH("Invalid kind");
739 }
740 
initInputLocation(size_t i,const TypedOrValueRegister & reg)741 void CacheRegisterAllocator::initInputLocation(
742     size_t i, const TypedOrValueRegister& reg) {
743   if (reg.hasValue()) {
744     initInputLocation(i, reg.valueReg());
745   } else if (reg.typedReg().isFloat()) {
746     MOZ_ASSERT(reg.type() == MIRType::Double);
747     initInputLocation(i, reg.typedReg().fpu());
748   } else {
749     initInputLocation(i, reg.typedReg().gpr(),
750                       ValueTypeFromMIRType(reg.type()));
751   }
752 }
753 
initInputLocation(size_t i,const ConstantOrRegister & value)754 void CacheRegisterAllocator::initInputLocation(
755     size_t i, const ConstantOrRegister& value) {
756   if (value.constant()) {
757     initInputLocation(i, value.value());
758   } else {
759     initInputLocation(i, value.reg());
760   }
761 }
762 
// Spill a register-held operand to the native stack, reusing a previously
// freed slot when possible. Slot positions are recorded as the value of
// stackPushed_ at push time, so the current stack-pointer offset of a slot
// is stackPushed_ - recordedPosition.
void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      // Reuse a free Value-sized slot instead of growing the stack.
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    // Reuse a free pointer-sized slot.
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}
796 
spillOperandToStackOrRegister(MacroAssembler & masm,OperandLocation * loc)797 void CacheRegisterAllocator::spillOperandToStackOrRegister(
798     MacroAssembler& masm, OperandLocation* loc) {
799   MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
800 
801   // If enough registers are available, use them.
802   if (loc->kind() == OperandLocation::ValueReg) {
803     static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
804     if (availableRegs_.set().size() >= BoxPieces) {
805       ValueOperand reg = availableRegs_.takeAnyValue();
806       masm.moveValue(loc->valueReg(), reg);
807       loc->setValueReg(reg);
808       return;
809     }
810   } else {
811     MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
812     if (!availableRegs_.empty()) {
813       Register reg = availableRegs_.takeAny();
814       masm.movePtr(loc->payloadReg(), reg);
815       loc->setPayloadReg(reg, loc->payloadType());
816       return;
817     }
818   }
819 
820   // Not enough registers available, spill to the stack.
821   spillOperandToStack(masm, loc);
822 }
823 
// Move a stack-held payload into |dest| and update |loc| accordingly. If the
// payload is on top of the stack it is popped (shrinking the stack);
// otherwise it is loaded in place and its slot is remembered for reuse.
void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(payloadAddress(masm, loc), dest);
    // The slot's contents were copied out; let a future spill reuse it.
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}
842 
valueAddress(MacroAssembler & masm,const OperandLocation * loc) const843 Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
844                                              const OperandLocation* loc) const {
845   MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
846   return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
847 }
848 
payloadAddress(MacroAssembler & masm,const OperandLocation * loc) const849 Address CacheRegisterAllocator::payloadAddress(
850     MacroAssembler& masm, const OperandLocation* loc) const {
851   MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
852   return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
853 }
854 
// Move a spilled Value from the stack into |dest| and update |loc| to
// record the new register location. Non-top slots are loaded in place and
// remembered in |freeValueSlots_| for reuse.
void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    // The append below can OOM; route the failure through the masm.
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}
875 
#ifdef DEBUG
void CacheRegisterAllocator::assertValidState() const {
  // Assert different operands don't have aliasing storage. We depend on this
  // when spilling registers, for instance.

  if (!JitOptions.fullDebugChecks) {
    return;
  }

  // Check every unordered pair of initialized operand locations. Starting
  // the outer loop at 1 is equivalent to starting at 0: index 0 pairs with
  // nothing before it.
  size_t numLocs = operandLocations_.length();
  for (size_t i = 1; i < numLocs; i++) {
    const OperandLocation& a = operandLocations_[i];
    if (a.isUninitialized()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      const OperandLocation& b = operandLocations_[j];
      if (!b.isUninitialized()) {
        MOZ_ASSERT(!a.aliasesReg(b));
      }
    }
  }
}
#endif
901 
// Return true if this location uses a register that overlaps |other|'s
// register. Only register-backed kinds (PayloadReg, ValueReg) can alias;
// stack slots, frame slots, constants and double registers never alias a
// GPR or ValueOperand.
bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  switch (other.kind_) {
    case PayloadReg:
      return aliasesReg(other.payloadReg());
    case ValueReg:
      return aliasesReg(other.valueReg());
    case PayloadStack:
    case ValueStack:
    case BaselineFrame:
    case Constant:
    case DoubleReg:
      return false;
    case Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}
922 
// Emit code to move every input operand back to the location it occupied
// when the stub was entered (|origInputLocations_|), restore any spilled
// registers, and optionally pop the stub's stack allocations. Used on
// failure paths so the next stub sees the expected register state.
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
                                               bool shouldDiscardStack) {
  size_t numInputOperands = origInputLocations_.length();
  MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);

  for (size_t j = 0; j < numInputOperands; j++) {
    const OperandLocation& dest = origInputLocations_[j];
    OperandLocation& cur = operandLocations_[j];
    if (dest == cur) {
      continue;
    }

    // Whatever path we take below, the operand ends up in |dest|.
    auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });

    // We have a cycle if a destination register will be used later
    // as source register. If that happens, just push the current value
    // on the stack and later get it from there.
    for (size_t k = j + 1; k < numInputOperands; k++) {
      OperandLocation& laterSource = operandLocations_[k];
      if (dest.aliasesReg(laterSource)) {
        spillOperandToStack(masm, &laterSource);
      }
    }

    if (dest.kind() == OperandLocation::ValueReg) {
      // We have to restore a Value register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          masm.moveValue(cur.valueReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadReg:
          masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadStack: {
          // Use the destination's scratch register to hold the payload
          // while re-tagging it into the full Value register.
          Register scratch = dest.valueReg().scratchReg();
          popPayload(masm, &cur, scratch);
          masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
          continue;
        }
        case OperandLocation::ValueStack:
          popValue(masm, &cur, dest.valueReg());
          continue;
        case OperandLocation::DoubleReg:
          masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::PayloadReg) {
      // We have to restore a payload register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
                              dest.payloadType());
          continue;
        case OperandLocation::PayloadReg:
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          masm.mov(cur.payloadReg(), dest.payloadReg());
          continue;
        case OperandLocation::PayloadStack: {
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          popPayload(masm, &cur, dest.payloadReg());
          continue;
        }
        case OperandLocation::ValueStack:
          // Unbox directly from the stack slot without popping it.
          MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
          MOZ_ASSERT(cur.valueStack() <= stackPushed_);
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(
              Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
              dest.payloadReg(), dest.payloadType());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::Constant ||
               dest.kind() == OperandLocation::BaselineFrame ||
               dest.kind() == OperandLocation::DoubleReg) {
      // Nothing to do.
      continue;
    }

    MOZ_CRASH("Invalid kind");
  }

  // Reload registers that were spilled during stub code generation.
  for (const SpilledRegister& spill : spilledRegs_) {
    MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

    if (spill.stackPushed == stackPushed_) {
      masm.pop(spill.reg);
      stackPushed_ -= sizeof(uintptr_t);
    } else {
      MOZ_ASSERT(spill.stackPushed < stackPushed_);
      masm.loadPtr(
          Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
          spill.reg);
    }
  }

  if (shouldDiscardStack) {
    discardStack(masm);
  }
}
1032 
stubDataSize() const1033 size_t CacheIRStubInfo::stubDataSize() const {
1034   size_t field = 0;
1035   size_t size = 0;
1036   while (true) {
1037     StubField::Type type = fieldType(field++);
1038     if (type == StubField::Type::Limit) {
1039       return size;
1040     }
1041     size += StubField::sizeInBytes(type);
1042   }
1043 }
1044 
// Reinterpret a word-aligned slot in the stub data as a GCPtr<T> so the
// GC barrier machinery can be applied to it.
template <typename T>
static GCPtr<T>* AsGCPtr(uintptr_t* ptr) {
  return reinterpret_cast<GCPtr<T>*>(ptr);
}
1049 
// Read a raw machine word from the stub data at |offset| (must be
// word-aligned).
uintptr_t CacheIRStubInfo::getStubRawWord(const uint8_t* stubData,
                                          uint32_t offset) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
  return *reinterpret_cast<const uintptr_t*>(stubData + offset);
}
1055 
// Convenience overload: locate the stub data inside |stub| via
// |stubDataOffset_| and read a raw word from it.
uintptr_t CacheIRStubInfo::getStubRawWord(ICCacheIRStub* stub,
                                          uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  return getStubRawWord(stubData, offset);
}
1061 
// Read a raw 64-bit value from the stub data at |offset| (must be
// 8-byte-aligned).
int64_t CacheIRStubInfo::getStubRawInt64(const uint8_t* stubData,
                                         uint32_t offset) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(int64_t) == 0);
  return *reinterpret_cast<const int64_t*>(stubData + offset);
}
1067 
// Convenience overload: locate the stub data inside |stub| via
// |stubDataOffset_| and read a raw 64-bit value from it.
int64_t CacheIRStubInfo::getStubRawInt64(ICCacheIRStub* stub,
                                         uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  return getStubRawInt64(stubData, offset);
}
1073 
replaceStubRawWord(uint8_t * stubData,uint32_t offset,uintptr_t oldWord,uintptr_t newWord) const1074 void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
1075                                          uintptr_t oldWord,
1076                                          uintptr_t newWord) const {
1077   MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
1078   uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
1079   MOZ_ASSERT(*addr == oldWord);
1080   *addr = newWord;
1081 }
1082 
// Return a reference to a GC-thing field stored in the stub data, typed as
// GCPtr<T> so reads/writes go through the proper GC barriers.
template <class Stub, class T>
GCPtr<T>& CacheIRStubInfo::getStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  return *AsGCPtr<T>((uintptr_t*)(stubData + offset));
}
1090 
// Explicit instantiations of getStubField for every GC-thing type stored
// in stub data fields.
template GCPtr<Shape*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<GetterSetter*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<JSObject*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<JSString*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<JSFunction*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<JS::Symbol*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<JS::Value>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<jsid>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<JSClass*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
template GCPtr<ArrayObject*>& CacheIRStubInfo::getStubField<ICCacheIRStub>(
    ICCacheIRStub* stub, uint32_t offset) const;
1111 
// Return a raw (non-GC-thing) pointer stored in the stub data. Unlike
// getStubField, no GC barriers apply.
template <class Stub, class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  return *reinterpret_cast<T**>(stubData + offset);
}

template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
                                                         uint32_t offset) const;
1122 
// Initialize a GCPtr<T> slot in freshly-allocated stub data from the raw
// bits |val|, using GCPtr::init (no pre-barrier; the slot is new).
template <typename T, typename V>
static void InitGCPtr(uintptr_t* ptr, V val) {
  AsGCPtr<T>(ptr)->init(mozilla::BitwiseCast<T>(val));
}
1127 
// Copy all stub fields recorded by the writer into the stub data area at
// |dest|. Raw fields are stored as plain words/int64s; GC-thing fields are
// initialized through GCPtr so barriers are set up correctly.
void CacheIRWriter::copyStubData(uint8_t* dest) const {
  MOZ_ASSERT(!failed());

  uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);

  for (const StubField& field : stubFields_) {
    MOZ_ASSERT((uintptr_t(destWords) % field.sizeInBytes()) == 0,
               "Unaligned stub field");

    switch (field.type()) {
      case StubField::Type::RawInt32:
      case StubField::Type::RawPointer:
      case StubField::Type::AllocSite:
        // Non-GC-thing data: store the raw word directly.
        *destWords = field.asWord();
        break;
      case StubField::Type::Shape:
        InitGCPtr<Shape*>(destWords, field.asWord());
        break;
      case StubField::Type::GetterSetter:
        InitGCPtr<GetterSetter*>(destWords, field.asWord());
        break;
      case StubField::Type::JSObject:
        InitGCPtr<JSObject*>(destWords, field.asWord());
        break;
      case StubField::Type::Symbol:
        InitGCPtr<JS::Symbol*>(destWords, field.asWord());
        break;
      case StubField::Type::String:
        InitGCPtr<JSString*>(destWords, field.asWord());
        break;
      case StubField::Type::BaseScript:
        InitGCPtr<BaseScript*>(destWords, field.asWord());
        break;
      case StubField::Type::Id:
        AsGCPtr<jsid>(destWords)->init(jsid::fromRawBits(field.asWord()));
        break;
      case StubField::Type::RawInt64:
        *reinterpret_cast<uint64_t*>(destWords) = field.asInt64();
        break;
      case StubField::Type::Value:
        AsGCPtr<Value>(destWords)->init(
            Value::fromRawBits(uint64_t(field.asInt64())));
        break;
      case StubField::Type::Limit:
        MOZ_CRASH("Invalid type");
    }
    // Advance by the field's size (1 or 2 words depending on platform).
    destWords += StubField::sizeInBytes(field.type()) / sizeof(uintptr_t);
  }
}
1177 
// Trace all GC-thing fields of a CacheIR stub. Walks the field-type list
// from |stubInfo| (terminated by StubField::Type::Limit), tracing each
// GC-thing field and skipping raw data fields. Instantiated for both
// Baseline (ICCacheIRStub) and Ion (IonICStub) stubs below.
template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
                           const CacheIRStubInfo* stubInfo) {
  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    StubField::Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case StubField::Type::RawInt32:
      case StubField::Type::RawPointer:
      case StubField::Type::RawInt64:
        // Not a GC thing; nothing to trace.
        break;
      case StubField::Type::Shape: {
        // For CCW IC stubs, we can store same-zone but cross-compartment
        // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
        // GC. Note: CacheIRWriter::writeShapeField asserts we never store
        // cross-zone shapes.
        GCPtrShape& shapeField =
            stubInfo->getStubField<T, Shape*>(stub, offset);
        TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
        break;
      }
      case StubField::Type::GetterSetter:
        TraceEdge(trc, &stubInfo->getStubField<T, GetterSetter*>(stub, offset),
                  "cacheir-getter-setter");
        break;
      case StubField::Type::JSObject:
        TraceEdge(trc, &stubInfo->getStubField<T, JSObject*>(stub, offset),
                  "cacheir-object");
        break;
      case StubField::Type::Symbol:
        TraceEdge(trc, &stubInfo->getStubField<T, JS::Symbol*>(stub, offset),
                  "cacheir-symbol");
        break;
      case StubField::Type::String:
        TraceEdge(trc, &stubInfo->getStubField<T, JSString*>(stub, offset),
                  "cacheir-string");
        break;
      case StubField::Type::BaseScript:
        TraceEdge(trc, &stubInfo->getStubField<T, BaseScript*>(stub, offset),
                  "cacheir-script");
        break;
      case StubField::Type::Id:
        TraceEdge(trc, &stubInfo->getStubField<T, jsid>(stub, offset),
                  "cacheir-id");
        break;
      case StubField::Type::Value:
        TraceEdge(trc, &stubInfo->getStubField<T, JS::Value>(stub, offset),
                  "cacheir-value");
        break;
      case StubField::Type::AllocSite: {
        // AllocSite is a raw pointer with its own trace method, not a
        // GCPtr field.
        gc::AllocSite* site =
            stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
        site->trace(trc);
        break;
      }
      case StubField::Type::Limit:
        return;  // Done.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
                                    const CacheIRStubInfo* stubInfo);
1247 
stubDataEquals(const uint8_t * stubData) const1248 bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
1249   MOZ_ASSERT(!failed());
1250 
1251   const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);
1252 
1253   for (const StubField& field : stubFields_) {
1254     if (field.sizeIsWord()) {
1255       if (field.asWord() != *stubDataWords) {
1256         return false;
1257       }
1258       stubDataWords++;
1259       continue;
1260     }
1261 
1262     if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
1263       return false;
1264     }
1265     stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
1266   }
1267 
1268   return true;
1269 }
1270 
hash(const CacheIRStubKey::Lookup & l)1271 HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
1272   HashNumber hash = mozilla::HashBytes(l.code, l.length);
1273   hash = mozilla::AddToHash(hash, uint32_t(l.kind));
1274   hash = mozilla::AddToHash(hash, uint32_t(l.engine));
1275   return hash;
1276 }
1277 
match(const CacheIRStubKey & entry,const CacheIRStubKey::Lookup & l)1278 bool CacheIRStubKey::match(const CacheIRStubKey& entry,
1279                            const CacheIRStubKey::Lookup& l) {
1280   if (entry.stubInfo->kind() != l.kind) {
1281     return false;
1282   }
1283 
1284   if (entry.stubInfo->engine() != l.engine) {
1285     return false;
1286   }
1287 
1288   if (entry.stubInfo->codeLength() != l.length) {
1289     return false;
1290   }
1291 
1292   if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
1293     return false;
1294   }
1295 
1296   return true;
1297 }
1298 
// Construct a reader over the CacheIR bytecode stored in |stubInfo|.
CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
    : CacheIRReader(stubInfo->code(),
                    stubInfo->code() + stubInfo->codeLength()) {}
1302 
// Allocate and initialize a CacheIRStubInfo in a single malloc'd block laid
// out as: [CacheIRStubInfo][CacheIR code][field types + Limit terminator].
// Returns nullptr on OOM.
CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
                                      bool makesGCCalls,
                                      uint32_t stubDataOffset,
                                      const CacheIRWriter& writer) {
  size_t numStubFields = writer.numStubFields();
  // Field types are one byte each (see static_assert below).
  size_t bytesNeeded =
      sizeof(CacheIRStubInfo) + writer.codeLength() +
      (numStubFields + 1);  // +1 for the GCType::Limit terminator.
  uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
  if (!p) {
    return nullptr;
  }

  // Copy the CacheIR code.
  uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
  mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

  static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                "StubField::Type must fit in uint8_t");

  // Copy the stub field types.
  uint8_t* fieldTypes = codeStart + writer.codeLength();
  for (size_t i = 0; i < numStubFields; i++) {
    fieldTypes[i] = uint8_t(writer.stubFieldType(i));
  }
  fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

  // Construct the header in place at the start of the allocation.
  return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
                                 codeStart, writer.codeLength(), fieldTypes);
}
1333 
// Two locations are equal when they have the same kind and the same
// kind-specific payload (register, stack slot, frame slot, or constant).
bool OperandLocation::operator==(const OperandLocation& other) const {
  if (kind_ != other.kind_) {
    return false;
  }

  switch (kind()) {
    case Uninitialized:
      return true;
    case PayloadReg:
      return payloadReg() == other.payloadReg() &&
             payloadType() == other.payloadType();
    case ValueReg:
      return valueReg() == other.valueReg();
    case PayloadStack:
      return payloadStack() == other.payloadStack() &&
             payloadType() == other.payloadType();
    case ValueStack:
      return valueStack() == other.valueStack();
    case BaselineFrame:
      return baselineFrameSlot() == other.baselineFrameSlot();
    case Constant:
      return constant() == other.constant();
    case DoubleReg:
      return doubleReg() == other.doubleReg();
  }

  MOZ_CRASH("Invalid OperandLocation kind");
}
1362 
// Reserve the IC's output register(s) for the duration of this scope so the
// allocator doesn't hand them out. Float outputs are not allocated here.
AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
  if (output_.hasValue()) {
    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
  }
}
1371 
// Release the output register(s) reserved by the constructor.
AutoOutputRegister::~AutoOutputRegister() {
  if (output_.hasValue()) {
    alloc_.releaseValueRegister(output_.valueReg());
  } else if (!output_.typedReg().isFloat()) {
    alloc_.releaseRegister(output_.typedReg().gpr());
  }
}
1379 
canShareFailurePath(const FailurePath & other) const1380 bool FailurePath::canShareFailurePath(const FailurePath& other) const {
1381   if (stackPushed_ != other.stackPushed_) {
1382     return false;
1383   }
1384 
1385   if (spilledRegs_.length() != other.spilledRegs_.length()) {
1386     return false;
1387   }
1388 
1389   for (size_t i = 0; i < spilledRegs_.length(); i++) {
1390     if (spilledRegs_[i] != other.spilledRegs_[i]) {
1391       return false;
1392     }
1393   }
1394 
1395   MOZ_ASSERT(inputs_.length() == other.inputs_.length());
1396 
1397   for (size_t i = 0; i < inputs_.length(); i++) {
1398     if (inputs_[i] != other.inputs_[i]) {
1399       return false;
1400     }
1401   }
1402   return true;
1403 }
1404 
// Record a failure path capturing the current allocator state (input
// locations, spilled registers, stack depth) so the bailout code can
// restore it. Reuses the previous path when the state is identical.
// Returns false on OOM.
bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
  allocator.setAddedFailurePath();
#endif
  // NOTE(review): failure paths don't restore an active
  // AutoScratchFloatRegister spill, hence this assert — confirm against
  // AutoScratchFloatRegister's implementation.
  MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());

  FailurePath newFailure;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    if (!newFailure.appendInput(allocator.operandLocation(i))) {
      return false;
    }
  }
  if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
    return false;
  }
  newFailure.setStackPushed(allocator.stackPushed());

  // Reuse the previous failure path if the current one is the same, to
  // avoid emitting duplicate code.
  if (failurePaths.length() > 0 &&
      failurePaths.back().canShareFailurePath(newFailure)) {
    *failure = &failurePaths.back();
    return true;
  }

  if (!failurePaths.append(std::move(newFailure))) {
    return false;
  }

  *failure = &failurePaths.back();
  return true;
}
1437 
// Emit the code for failure path |index|: restore the allocator to the
// state captured by addFailurePath, bind the path's label, and emit the
// input-restoring moves. Returns false on OOM.
bool CacheIRCompiler::emitFailurePath(size_t index) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  FailurePath& failure = failurePaths[index];

  // Rewind the allocator's bookkeeping to the snapshot taken at the guard.
  allocator.setStackPushed(failure.stackPushed());

  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    allocator.setOperandLocation(i, failure.input(i));
  }

  if (!allocator.setSpilledRegs(failure.spilledRegs())) {
    return false;
  }

  masm.bind(failure.label());
  allocator.restoreInputState(masm);
  return true;
}
1456 
// Guard that |inputId| holds a number (int32 or double), bailing to the
// failure path otherwise. Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);

  // Doubles and ints are numbers!
  if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
  return true;
}
1475 
// Guard that |inputId| holds an object, bailing to the failure path
// otherwise. Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestObject(Assembler::NotEqual, input, failure->label());
  return true;
}
1490 
// Guard that |inputId| is null or undefined, bailing to the failure path
// otherwise. Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Accept null; otherwise require undefined.
  Label success;
  masm.branchTestNull(Assembler::Equal, input, &success);
  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());

  masm.bind(&success);
  return true;
}
1511 
emitGuardIsNull(ValOperandId inputId)1512 bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
1513   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
1514   JSValueType knownType = allocator.knownType(inputId);
1515   if (knownType == JSVAL_TYPE_NULL) {
1516     return true;
1517   }
1518 
1519   ValueOperand input = allocator.useValueRegister(masm, inputId);
1520   FailurePath* failure;
1521   if (!addFailurePath(&failure)) {
1522     return false;
1523   }
1524 
1525   Label success;
1526   masm.branchTestNull(Assembler::NotEqual, input, failure->label());
1527   return true;
1528 }
1529 
// Guard that |inputId| is undefined, bailing to the failure path otherwise.
// Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
  return true;
}
1546 
// Guard that |inputId| is a boolean and write it as an int32 (0 or 1) into
// the register defined for |resultId|. If the input is statically known to
// be boolean, a plain move suffices; otherwise a fallible unbox is emitted.
bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    Register input =
        allocator.useRegister(masm, BooleanOperandId(inputId.id()));
    masm.move32(input, output);
    return true;
  }
  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.fallibleUnboxBoolean(input, output, failure->label());
  return true;
}
1568 
// Guard that |inputId| holds a string, bailing to the failure path
// otherwise. Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestString(Assembler::NotEqual, input, failure->label());
  return true;
}
1583 
// Guard that |inputId| holds a symbol, bailing to the failure path
// otherwise. Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
  return true;
}
1598 
// Guard that |inputId| holds a BigInt, bailing to the failure path
// otherwise. Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
  return true;
}
1613 
// Guard that |inputId| holds a boolean, bailing to the failure path
// otherwise. Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }
  masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
  return true;
}
1629 
// Guard that |inputId| holds an int32, bailing to the failure path
// otherwise. Emits no code if the type is statically known.
bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
  return true;
}
1647 
1648 // Infallible |emitDouble| emitters can use this implementation to avoid
1649 // generating extra clean-up instructions to restore the scratch float register.
1650 // To select this function simply omit the |Label* fail| parameter for the
1651 // emitter lambda function.
// Overload selected when |emitDouble| takes only the float register
// (arity 1): the emitter is infallible, so the scratch float register
// needs no failure-path cleanup.
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
                        void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
                ValueOperand input, FailurePath* failure,
                EmitDouble emitDouble) {
  AutoScratchFloatRegister floatReg(compiler);

  masm.unboxDouble(input, floatReg);
  emitDouble(floatReg.get());
}
1663 
// Fallible overload: selected when the |emitDouble| lambda takes a second
// |Label* fail| parameter. The scratch float register is registered with the
// failure path so it is restored correctly on bailout.
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
                        void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
                ValueOperand input, FailurePath* failure,
                EmitDouble emitDouble) {
  AutoScratchFloatRegister floatReg(compiler, failure);

  masm.unboxDouble(input, floatReg);
  // floatReg.failure() is the label the lambda must jump to on failure; it
  // routes through the scratch register's clean-up code before bailing.
  emitDouble(floatReg.get(), floatReg.failure());
}
1675 
// Dispatch on the value's tag: if it's an int32, unbox it into |output| and
// run |emitInt32|; if it's a double, unbox it into a scratch float register
// and run |emitDouble|; any other tag jumps to |failure|.
template <typename EmitInt32, typename EmitDouble>
static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
                                   MacroAssembler& masm, ValueOperand input,
                                   Register output, FailurePath* failure,
                                   EmitInt32 emitInt32, EmitDouble emitDouble) {
  Label done;

  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      // Release the tag scratch while emitting the int32 path so the
      // register is available to the emitter.
      ScratchTagScopeRelease _(&tag);

      masm.unboxInt32(input, output);
      emitInt32();

      masm.jump(&done);
    }
    masm.bind(&notInt32);

    // Not an int32: only a double is acceptable from here on.
    masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
    {
      ScratchTagScopeRelease _(&tag);

      EmitGuardDouble(compiler, masm, input, failure, emitDouble);
    }
  }

  masm.bind(&done);
}
1709 
// Convert a number value to an int32 index in |resultId|. Int32 inputs are
// passed through; double inputs are truncated if they represent an exact
// int32 (with -0.0 truncated to 0), otherwise we bail out.
bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
                                            Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  // Fast path: input is statically known to be int32, just copy it.
  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
    masm.move32(input, output);
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      []() {
        // No-op if the value is already an int32.
      },
      [&](FloatRegister floatReg, Label* fail) {
        // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
        masm.convertDoubleToInt32(floatReg, output, fail, false);
      });

  return true;
}
1740 
// Widen an int32 operand to a pointer-sized integer (sign-extended on 64-bit
// targets). Infallible.
bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
                                        IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register input = allocator.useRegister(masm, inputId);
  Register output = allocator.defineRegister(masm, resultId);

  masm.move32SignExtendToPtr(input, output);
  return true;
}
1751 
// Convert a number operand to an intptr index. With |supportOOB| set,
// non-convertible doubles are replaced by -1 (an always-out-of-bounds index)
// instead of bailing out; without it, they jump to the failure path.
bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
                                                   bool supportOOB,
                                                   IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register output = allocator.defineRegister(masm, resultId);

  // A failure path is only needed when an invalid index must bail out.
  FailurePath* failure = nullptr;
  if (!supportOOB) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  // |failure| may be null here; the scratch register then has no bailout
  // clean-up path, which is fine because the supportOOB path never bails.
  AutoScratchFloatRegister floatReg(this, failure);
  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
  if (supportOOB) {
    Label done, fail;
    masm.convertDoubleToPtr(floatReg, output, &fail, false);
    masm.jump(&done);

    // Substitute the invalid index with an arbitrary out-of-bounds index.
    masm.bind(&fail);
    masm.movePtr(ImmWord(-1), output);

    masm.bind(&done);
  } else {
    masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
  }

  return true;
}
1786 
// Convert a number value to an int32 using a modulo-2^32 truncation (the
// ToUint32/ToInt32 wrap-around semantics) into |resultId|.
bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
                                                Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  // Fast path for statically-known int32 inputs: copy the constant or
  // register value directly, no truncation needed.
  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
    if (input.constant()) {
      masm.move32(Imm32(input.value().toInt32()), output);
    } else {
      MOZ_ASSERT(input.reg().type() == MIRType::Int32);
      masm.move32(input.reg().typedReg().gpr(), output);
    }
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      []() {
        // No-op if the value is already an int32.
      },
      [&](FloatRegister floatReg, Label* fail) {
        // Truncate the double modulo 2^32; bails out for values the
        // macro-assembler cannot truncate inline.
        masm.branchTruncateDoubleMaybeModUint32(floatReg, output, fail);
      });

  return true;
}
1821 
// Convert a number value to a uint8 with clamping semantics (as used by
// Uint8ClampedArray stores) into |resultId|.
bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
                                              Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register output = allocator.defineRegister(masm, resultId);

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
    if (input.constant()) {
      // Clamp the constant at compile time.
      masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
    } else {
      MOZ_ASSERT(input.reg().type() == MIRType::Int32);
      masm.move32(input.reg().typedReg().gpr(), output);
      masm.clampIntToUint8(output);
    }
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitGuardInt32OrDouble(
      this, masm, input, output, failure,
      [&]() {
        // |output| holds the unboxed int32 value.
        masm.clampIntToUint8(output);
      },
      [&](FloatRegister floatReg) {
        // Infallible double path (arity-1 lambda): clamping always succeeds.
        masm.clampDoubleToUint8(floatReg, output);
      });

  return true;
}
1858 
// Guard that the value has the given (non-double) type tag. Double, Magic,
// PrivateGCThing and Object are not valid here and crash in debug builds.
bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
                                             ValueType type) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Skip the guard entirely if the operand's type is statically known.
  if (allocator.knownType(inputId) == JSValueType(type)) {
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  switch (type) {
    case ValueType::String:
      masm.branchTestString(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Symbol:
      masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::BigInt:
      masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Int32:
      masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Boolean:
      masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Undefined:
      masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Null:
      masm.branchTestNull(Assembler::NotEqual, input, failure->label());
      break;
    case ValueType::Double:
    case ValueType::Magic:
    case ValueType::PrivateGCThing:
    case ValueType::Object:
      MOZ_CRASH("unexpected type");
  }

  return true;
}
1905 
// Guard that |objId| has the JSClass selected by |kind|, bailing out on
// mismatch. Uses a Spectre-hardened class check when the operand needs it.
bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Map the guard kind to the concrete class pointer to compare against.
  const JSClass* clasp = nullptr;
  switch (kind) {
    case GuardClassKind::Array:
      clasp = &ArrayObject::class_;
      break;
    case GuardClassKind::ArrayBuffer:
      clasp = &ArrayBufferObject::class_;
      break;
    case GuardClassKind::SharedArrayBuffer:
      clasp = &SharedArrayBufferObject::class_;
      break;
    case GuardClassKind::DataView:
      clasp = &DataViewObject::class_;
      break;
    case GuardClassKind::MappedArguments:
      clasp = &MappedArgumentsObject::class_;
      break;
    case GuardClassKind::UnmappedArguments:
      clasp = &UnmappedArgumentsObject::class_;
      break;
    case GuardClassKind::WindowProxy:
      // The window proxy class is runtime-dependent.
      clasp = cx_->runtime()->maybeWindowProxyClass();
      break;
    case GuardClassKind::JSFunction:
      clasp = &JSFunction::class_;
      break;
  }
  MOZ_ASSERT(clasp);

  if (objectGuardNeedsSpectreMitigations(objId)) {
    // Spectre-safe variant: also zeroes |obj| on mispredicted paths.
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}
1955 
// Guard that the object's prototype is null; bail out if it has any proto.
bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjProto(obj, scratch);
  // A non-zero proto pointer means the proto isn't null -> bail.
  masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
  return true;
}
1970 
// Guard that the object is extensible (not sealed/frozen/preventExtensions'd).
bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
  return true;
}
1984 
// Guard that the dynamic slot at the stub-field slot index holds exactly the
// object in |expectedId|. Bails out if the slot isn't an object or differs.
bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
    ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register expectedObject = allocator.useRegister(masm, expectedId);

  // Allocate registers before the failure path to make sure they're registered
  // by addFailurePath.
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Guard on the expected object.
  StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  // scratch2 <- slot index (baked into the stub data).
  emitLoadStubField(slot, scratch2);
  BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
  // Unbox the slot's object, bailing out if the slot isn't an object value.
  masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
  masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
                 failure->label());

  return true;
}
2012 
// Guard that the fixed slot at the stub-field byte offset holds exactly the
// Value stored in the stub at |valOffset|.
bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchValueRegister scratchVal(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // scratch <- byte offset of the fixed slot within the object.
  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch);

  // scratchVal <- expected Value from the stub data.
  StubFieldOffset val(valOffset, StubField::Type::Value);
  emitLoadValueStubField(val, scratchVal);

  // TimesOne: the stub field already stores a byte offset, not a slot index.
  BaseIndex slotVal(obj, scratch, TimesOne);
  masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                       failure->label());
  return true;
}
2039 
// Guard that the dynamic slot at the stub-field byte offset holds exactly the
// Value stored in the stub at |valOffset|. Same as emitGuardFixedSlotValue
// but indexes off the object's out-of-line slots pointer.
bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
                                                uint32_t offsetOffset,
                                                uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchValueRegister scratchVal(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // scratch1 <- the object's dynamic slots array.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

  // scratch2 <- byte offset of the slot within the slots array.
  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch2);

  // scratchVal <- expected Value from the stub data.
  StubFieldOffset val(valOffset, StubField::Type::Value);
  emitLoadValueStubField(val, scratchVal);

  BaseIndex slotVal(scratch1, scratch2, TimesOne);
  masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                       failure->label());
  return true;
}
2069 
// Guard that the object is a native object (not a proxy/typed object etc.).
bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNonNativeObj(obj, scratch, failure->label());
  return true;
}
2084 
// Guard that the object IS a proxy; bail out otherwise.
bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // First arg |false|: branch (to failure) when the object is NOT a proxy.
  masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
  return true;
}
2099 
// Guard that the object is NOT a proxy; bail out if it is one.
bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // First arg |true|: branch (to failure) when the object IS a proxy.
  masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
  return true;
}
2114 
// Guard that the object is neither an ArrayBuffer nor a SharedArrayBuffer.
bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Compare the class pointer against both buffer classes; either match bails.
  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchPtr(Assembler::Equal, scratch, ImmPtr(&ArrayBufferObject::class_),
                 failure->label());
  masm.branchPtr(Assembler::Equal, scratch,
                 ImmPtr(&SharedArrayBufferObject::class_), failure->label());
  return true;
}
2133 
// Guard that the object's class is one of the TypedArray classes.
bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotTypedArray(scratch, failure->label());
  return true;
}
2149 
// Guard that the (proxy) object's handler is not in the DOM proxy handler
// family; bail out if it is a DOM proxy.
bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
                                    GetDOMProxyHandlerFamily(),
                                    failure->label());
  return true;
}
2165 
// Guard that the native object has no initialized dense elements.
bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Make sure there are no dense elements.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
  return true;
}
2184 
// Convert a string operand to an int32 in |resultId|, bailing out if the
// string does not represent an int32. May call out to C++, so all volatile
// registers are saved around the conversion.
bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.guardStringToInt32(str, output, scratch, volatileRegs, failure->label());
  return true;
}
2202 
// Convert a string operand to a boxed number Value in |resultId|. Strings
// with a cached index value take a fast inline path; everything else calls
// StringToNumberPure with a manually-managed stack slot for the result.
bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
                                              NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    // Reserve stack for holding the result value of the call.
    masm.reserveStack(sizeof(double));
    masm.moveStackPtrTo(output.payloadOrValueReg());

    // We cannot use callVM, as callVM expects to be able to clobber all
    // operands, however, since this op is not the last in the generated IC, we
    // want to be able to reference other live values.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext * cx, JSString * str, double* result);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(str);
    masm.passABIArg(output.payloadOrValueReg());
    masm.callWithABI<Fn, js::StringToNumberPure>();
    // Preserve the boolean return value across the register restore.
    masm.mov(ReturnReg, scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    Label ok;
    masm.branchIfTrueBool(scratch, &ok);
    {
      // OOM path, recovered by StringToNumberPure.
      //
      // Use addToStackPtr instead of freeStack as freeStack tracks stack height
      // flow-insensitively, and using it twice would confuse the stack height
      // tracking.
      masm.addToStackPtr(Imm32(sizeof(double)));
      masm.jump(failure->label());
    }
    masm.bind(&ok);

    {
      // Box the double result that the callee wrote to the reserved slot.
      ScratchDoubleScope fpscratch(masm);
      masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
      masm.boxDouble(fpscratch, output, fpscratch);
    }
    masm.freeStack(sizeof(double));
  }
  masm.bind(&done);
  return true;
}
2270 
// Convert a boolean operand (0 or 1 in a GPR) to a number Value by tagging it
// as an int32. Infallible.
bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
                                          NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, booleanId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
  return true;
}
2279 
// Convert a string operand to an array index in |resultId|. Fast path reads
// the string's cached index value; slow path calls GetIndexFromString and
// bails out on a negative (failure) result.
bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);
    // Save volatile registers across the ABI call; |output| carries the
    // result and is excluded from the restore below.
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = int32_t (*)(JSString * str);
    masm.setupUnalignedABICall(output);
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    LiveRegisterSet ignore;
    ignore.add(output);
    masm.PopRegsInMaskIgnore(save, ignore);

    // GetIndexFromString returns a negative value on failure.
    masm.branchTest32(Assembler::Signed, output, output, failure->label());
  }

  masm.bind(&done);
  return true;
}
2318 
// Load the object's prototype into |resultId|. The caller must have
// established that the proto is an actual object (not null or lazy).
bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.loadObjProto(obj, reg);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  // Null proto is 0 and lazy proto is 1, so any pointer > 1 is a real proto.
  Label done;
  masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
  masm.bind(&done);
#endif
  return true;
}
2336 
// Load the enclosing environment of an environment object into |resultId|.
// Infallible: the enclosing-environment slot is unboxed as an object.
bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
                                                   ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.unboxObject(
      Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
  return true;
}
2346 
// Load a wrapper's target object (the proxy's private slot) into |resultId|.
bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
                                            ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);

  // reg <- proxy reserved slots, then unbox the private slot as an object.
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
  masm.unboxObject(
      Address(reg, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
  return true;
}
2358 
// Extract the type tag of a Value into |resultId| for later tag comparisons.
bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
                                       ValueTagOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, valId);
  Register res = allocator.defineRegister(masm, resultId);

  // extractTag may return a different register than the one we asked for
  // (platform-dependent); copy if needed so the result lands in |res|.
  Register tag = masm.extractTag(val, res);
  if (tag != res) {
    masm.mov(tag, res);
  }
  return true;
}
2371 
// Load a DOM proxy's expando value (its private reserved slot) into
// |resultId| as a raw Value.
bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
                                              ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.defineValueRegister(masm, resultId);

  // Use the output's scratch register to hold the reserved-slots pointer.
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               val.scratchReg());
  masm.loadValue(Address(val.scratchReg(),
                         js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                 val);
  return true;
}
2385 
// Load the expando Value from a DOM proxy whose private slot holds an
// ExpandoAndGeneration*, without checking the generation counter.
bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
    ObjOperandId objId, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  // Determine the expando's Address.
  Register scratch = output.scratchReg();
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address expandoAddr(scratch,
                      js::detail::ProxyReservedSlots::offsetOfPrivateSlot());

#ifdef DEBUG
  // Private values are stored as doubles, so assert we have a double.
  Label ok;
  masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
  masm.assumeUnreachable("DOM expando is not a PrivateValue!");
  masm.bind(&ok);
#endif

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(expandoAddr, scratch);

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
                 output);
  return true;
}
2414 
// Store |undefined| in the IC's output register.
bool CacheIRCompiler::emitLoadUndefinedResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
2421 
EmitStoreBoolean(MacroAssembler & masm,bool b,const AutoOutputRegister & output)2422 static void EmitStoreBoolean(MacroAssembler& masm, bool b,
2423                              const AutoOutputRegister& output) {
2424   if (output.hasValue()) {
2425     Value val = BooleanValue(b);
2426     masm.moveValue(val, output.valueReg());
2427   } else {
2428     MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
2429     masm.movePtr(ImmWord(b), output.typedReg().gpr());
2430   }
2431 }
2432 
// Store the constant boolean |val| in the IC's output register.
bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  EmitStoreBoolean(masm, val, output);
  return true;
}
2439 
// Copy the value operand into the IC's output register unchanged.
bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  masm.moveValue(input, output.valueReg());
  return true;
}
2447 
EmitStoreResult(MacroAssembler & masm,Register reg,JSValueType type,const AutoOutputRegister & output)2448 static void EmitStoreResult(MacroAssembler& masm, Register reg,
2449                             JSValueType type,
2450                             const AutoOutputRegister& output) {
2451   if (output.hasValue()) {
2452     masm.tagValue(type, reg, output.valueReg());
2453     return;
2454   }
2455   if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
2456     masm.convertInt32ToDouble(reg, output.typedReg().fpu());
2457     return;
2458   }
2459   if (type == output.type()) {
2460     masm.mov(reg, output.typedReg().gpr());
2461     return;
2462   }
2463   masm.assumeUnreachable("Should have monitored result");
2464 }
2465 
emitLoadInt32ArrayLengthResult(ObjOperandId objId)2466 bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
2467   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2468   AutoOutputRegister output(*this);
2469   Register obj = allocator.useRegister(masm, objId);
2470   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2471 
2472   FailurePath* failure;
2473   if (!addFailurePath(&failure)) {
2474     return false;
2475   }
2476 
2477   masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
2478   masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
2479 
2480   // Guard length fits in an int32.
2481   masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
2482   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2483   return true;
2484 }
2485 
emitLoadInt32ArrayLength(ObjOperandId objId,Int32OperandId resultId)2486 bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
2487                                                Int32OperandId resultId) {
2488   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2489   Register obj = allocator.useRegister(masm, objId);
2490   Register res = allocator.defineRegister(masm, resultId);
2491 
2492   FailurePath* failure;
2493   if (!addFailurePath(&failure)) {
2494     return false;
2495   }
2496 
2497   masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
2498   masm.load32(Address(res, ObjectElements::offsetOfLength()), res);
2499 
2500   // Guard length fits in an int32.
2501   masm.branchTest32(Assembler::Signed, res, res, failure->label());
2502   return true;
2503 }
2504 
emitDoubleAddResult(NumberOperandId lhsId,NumberOperandId rhsId)2505 bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
2506                                           NumberOperandId rhsId) {
2507   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2508   AutoOutputRegister output(*this);
2509 
2510   // Float register must be preserved. The BinaryArith ICs use
2511   // the fact that baseline has them available, as well as fixed temps on
2512   // LBinaryCache.
2513   AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
2514   AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
2515 
2516   allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
2517   allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
2518 
2519   masm.addDouble(floatScratch1, floatScratch0);
2520   masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
2521 
2522   return true;
2523 }
emitDoubleSubResult(NumberOperandId lhsId,NumberOperandId rhsId)2524 bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
2525                                           NumberOperandId rhsId) {
2526   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2527   AutoOutputRegister output(*this);
2528 
2529   AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
2530   AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
2531 
2532   allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
2533   allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
2534 
2535   masm.subDouble(floatScratch1, floatScratch0);
2536   masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
2537 
2538   return true;
2539 }
emitDoubleMulResult(NumberOperandId lhsId,NumberOperandId rhsId)2540 bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
2541                                           NumberOperandId rhsId) {
2542   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2543   AutoOutputRegister output(*this);
2544 
2545   AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
2546   AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
2547 
2548   allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
2549   allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
2550 
2551   masm.mulDouble(floatScratch1, floatScratch0);
2552   masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
2553 
2554   return true;
2555 }
emitDoubleDivResult(NumberOperandId lhsId,NumberOperandId rhsId)2556 bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
2557                                           NumberOperandId rhsId) {
2558   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2559   AutoOutputRegister output(*this);
2560 
2561   AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
2562   AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
2563 
2564   allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
2565   allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
2566 
2567   masm.divDouble(floatScratch1, floatScratch0);
2568   masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
2569 
2570   return true;
2571 }
// Computes |lhs % rhs| on doubles by calling js::NumberMod through the ABI
// and boxing the double result into the IC output.
bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // FloatReg0/FloatReg1 may be used freely here: the BinaryArith ICs rely on
  // baseline preserving them (see the comment in emitDoubleAddResult).
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  // Save all volatile registers around the ABI call.
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double a, double b);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::NumberMod>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  // Restore everything except floatScratch0, which now holds the result.
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
// Computes |lhs ** rhs| on doubles by calling js::ecmaPow through the ABI
// and boxing the double result into the IC output.
bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // FloatReg0/FloatReg1 may be used freely here: the BinaryArith ICs rely on
  // baseline preserving them (see the comment in emitDoubleAddResult).
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  // Save all volatile registers around the ABI call.
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::ecmaPow>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  // Restore everything except floatScratch0, which now holds the result.
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
2632 
emitInt32AddResult(Int32OperandId lhsId,Int32OperandId rhsId)2633 bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
2634                                          Int32OperandId rhsId) {
2635   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2636   AutoOutputRegister output(*this);
2637   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2638 
2639   Register lhs = allocator.useRegister(masm, lhsId);
2640   Register rhs = allocator.useRegister(masm, rhsId);
2641 
2642   FailurePath* failure;
2643   if (!addFailurePath(&failure)) {
2644     return false;
2645   }
2646 
2647   masm.mov(rhs, scratch);
2648   masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
2649   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2650 
2651   return true;
2652 }
emitInt32SubResult(Int32OperandId lhsId,Int32OperandId rhsId)2653 bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
2654                                          Int32OperandId rhsId) {
2655   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2656   AutoOutputRegister output(*this);
2657   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2658   Register lhs = allocator.useRegister(masm, lhsId);
2659   Register rhs = allocator.useRegister(masm, rhsId);
2660 
2661   FailurePath* failure;
2662   if (!addFailurePath(&failure)) {
2663     return false;
2664   }
2665 
2666   masm.mov(lhs, scratch);
2667   masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
2668   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2669 
2670   return true;
2671 }
2672 
// Emits int32 multiplication. Bails out on overflow and on a negative-zero
// result, because both require a double result.
bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label maybeNegZero, done;
  masm.mov(lhs, scratch);
  masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
  // A zero product may actually be -0; take the slow check below.
  masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
  masm.jump(&done);

  masm.bind(&maybeNegZero);
  masm.mov(lhs, scratch2);
  // Result is -0 if exactly one of lhs or rhs is negative.
  masm.or32(rhs, scratch2);
  masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
2703 
// Emits int32 division. Bails out for division by zero, INT32_MIN / -1
// (overflow), a negative-zero result, and any non-zero remainder, since all
// of those require a double result.
bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister rem(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Prevent division by 0.
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 / -1.
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  // Prevent negative 0 (0 / negative-rhs would produce -0).
  Label notZero;
  masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
  masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
  masm.bind(&notZero);

  masm.mov(lhs, scratch);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleDivMod32(rhs, scratch, rem, false, volatileRegs);

  // A remainder implies a double result.
  masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
2743 
// Emits int32 modulo. Bails out for rhs == 0 (NaN result), INT32_MIN % -1,
// and a negative-zero result, since those cannot be represented as int32.
bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // x % 0 results in NaN
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 % -1.
  //
  // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
  // called).
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  masm.mov(lhs, scratch);
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleRemainder32(rhs, scratch, false, volatileRegs);

  // Modulo takes the sign of the dividend; we can't return negative zero here.
  Label notZero;
  masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
  masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
  masm.bind(&notZero);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
2784 
emitInt32PowResult(Int32OperandId lhsId,Int32OperandId rhsId)2785 bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
2786                                          Int32OperandId rhsId) {
2787   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2788   AutoOutputRegister output(*this);
2789   Register base = allocator.useRegister(masm, lhsId);
2790   Register power = allocator.useRegister(masm, rhsId);
2791   AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
2792   AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
2793   AutoScratchRegister scratch3(allocator, masm);
2794 
2795   FailurePath* failure;
2796   if (!addFailurePath(&failure)) {
2797     return false;
2798   }
2799 
2800   masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());
2801 
2802   masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
2803   return true;
2804 }
2805 
emitInt32BitOrResult(Int32OperandId lhsId,Int32OperandId rhsId)2806 bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
2807                                            Int32OperandId rhsId) {
2808   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2809   AutoOutputRegister output(*this);
2810   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2811 
2812   Register lhs = allocator.useRegister(masm, lhsId);
2813   Register rhs = allocator.useRegister(masm, rhsId);
2814 
2815   masm.mov(rhs, scratch);
2816   masm.or32(lhs, scratch);
2817   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2818 
2819   return true;
2820 }
emitInt32BitXorResult(Int32OperandId lhsId,Int32OperandId rhsId)2821 bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
2822                                             Int32OperandId rhsId) {
2823   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2824   AutoOutputRegister output(*this);
2825   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2826 
2827   Register lhs = allocator.useRegister(masm, lhsId);
2828   Register rhs = allocator.useRegister(masm, rhsId);
2829 
2830   masm.mov(rhs, scratch);
2831   masm.xor32(lhs, scratch);
2832   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2833 
2834   return true;
2835 }
emitInt32BitAndResult(Int32OperandId lhsId,Int32OperandId rhsId)2836 bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
2837                                             Int32OperandId rhsId) {
2838   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2839   AutoOutputRegister output(*this);
2840   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2841 
2842   Register lhs = allocator.useRegister(masm, lhsId);
2843   Register rhs = allocator.useRegister(masm, rhsId);
2844 
2845   masm.mov(rhs, scratch);
2846   masm.and32(lhs, scratch);
2847   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2848 
2849   return true;
2850 }
emitInt32LeftShiftResult(Int32OperandId lhsId,Int32OperandId rhsId)2851 bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
2852                                                Int32OperandId rhsId) {
2853   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2854   AutoOutputRegister output(*this);
2855   Register lhs = allocator.useRegister(masm, lhsId);
2856   Register rhs = allocator.useRegister(masm, rhsId);
2857   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2858 
2859   masm.mov(lhs, scratch);
2860   masm.flexibleLshift32(rhs, scratch);
2861   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2862 
2863   return true;
2864 }
2865 
emitInt32RightShiftResult(Int32OperandId lhsId,Int32OperandId rhsId)2866 bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
2867                                                 Int32OperandId rhsId) {
2868   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2869   AutoOutputRegister output(*this);
2870   Register lhs = allocator.useRegister(masm, lhsId);
2871   Register rhs = allocator.useRegister(masm, rhsId);
2872   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2873 
2874   masm.mov(lhs, scratch);
2875   masm.flexibleRshift32Arithmetic(rhs, scratch);
2876   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2877 
2878   return true;
2879 }
2880 
// Emits |lhs >>> rhs|. The result is unsigned, so a value with the sign bit
// set cannot be tagged as an int32: either box it as a double (forceDouble)
// or bail out to the failure path.
bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
                                                 Int32OperandId rhsId,
                                                 bool forceDouble) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(lhs, scratch);
  masm.flexibleRshift32(rhs, scratch);
  if (forceDouble) {
    // Always box the (unsigned) result as a double.
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  } else {
    // The sign bit set means the unsigned result exceeds INT32_MAX; bail.
    masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  }
  return true;
}
2908 
emitInt32NegationResult(Int32OperandId inputId)2909 bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
2910   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2911   AutoOutputRegister output(*this);
2912   Register val = allocator.useRegister(masm, inputId);
2913   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2914 
2915   FailurePath* failure;
2916   if (!addFailurePath(&failure)) {
2917     return false;
2918   }
2919 
2920   // Guard against 0 and MIN_INT by checking if low 31-bits are all zero.
2921   // Both of these result in a double.
2922   masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
2923   masm.mov(val, scratch);
2924   masm.neg32(scratch);
2925   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2926   return true;
2927 }
2928 
emitInt32IncResult(Int32OperandId inputId)2929 bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
2930   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2931   AutoOutputRegister output(*this);
2932   Register input = allocator.useRegister(masm, inputId);
2933   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2934 
2935   FailurePath* failure;
2936   if (!addFailurePath(&failure)) {
2937     return false;
2938   }
2939 
2940   masm.mov(input, scratch);
2941   masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
2942   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2943 
2944   return true;
2945 }
2946 
emitInt32DecResult(Int32OperandId inputId)2947 bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
2948   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2949   AutoOutputRegister output(*this);
2950   Register input = allocator.useRegister(masm, inputId);
2951   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2952 
2953   FailurePath* failure;
2954   if (!addFailurePath(&failure)) {
2955     return false;
2956   }
2957 
2958   masm.mov(input, scratch);
2959   masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
2960   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2961 
2962   return true;
2963 }
2964 
emitInt32NotResult(Int32OperandId inputId)2965 bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
2966   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2967   AutoOutputRegister output(*this);
2968   Register val = allocator.useRegister(masm, inputId);
2969   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
2970 
2971   masm.mov(val, scratch);
2972   masm.not32(scratch);
2973   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
2974   return true;
2975 }
2976 
emitDoubleNegationResult(NumberOperandId inputId)2977 bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
2978   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
2979   AutoOutputRegister output(*this);
2980   ValueOperand val = allocator.useValueRegister(masm, inputId);
2981 
2982   FailurePath* failure;
2983   if (!addFailurePath(&failure)) {
2984     return false;
2985   }
2986 
2987   AutoScratchFloatRegister floatReg(this, failure);
2988 
2989   masm.ensureDouble(val, floatReg, floatReg.failure());
2990   masm.negateDouble(floatReg);
2991   masm.boxDouble(floatReg, output.valueReg(), floatReg);
2992 
2993   return true;
2994 }
2995 
// Shared implementation for double ++/--: adds or subtracts 1.0 from the
// unboxed input. No JitSpew here; the emitDoubleInc/DecResult callers spew.
bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
                                             NumberOperandId inputId) {
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  AutoScratchFloatRegister floatReg(this, failure);

  // Unbox the number (int32 or double) into floatReg; jumps to the scratch
  // register's failure label otherwise.
  masm.ensureDouble(val, floatReg, floatReg.failure());
  {
    // Scope the second scratch double so it's released before boxing.
    ScratchDoubleScope fpscratch(masm);
    masm.loadConstantDouble(1.0, fpscratch);
    if (isInc) {
      masm.addDouble(fpscratch, floatReg);
    } else {
      masm.subDouble(fpscratch, floatReg);
    }
  }
  masm.boxDouble(floatReg, output.valueReg(), floatReg);

  return true;
}
3022 
emitDoubleIncResult(NumberOperandId inputId)3023 bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
3024   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3025   return emitDoubleIncDecResult(true, inputId);
3026 }
3027 
emitDoubleDecResult(NumberOperandId inputId)3028 bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
3029   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3030   return emitDoubleIncDecResult(false, inputId);
3031 }
3032 
// Shared VM-call path for all binary BigInt ops: pushes both operands and
// calls |fn| through AutoCallVM, which takes care of storing the VM call's
// result into the IC output.
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
                                                      BigIntOperandId rhsId) {
  AutoCallVM callvm(masm, this, allocator);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  // Arguments are pushed in reverse order.
  masm.Push(rhs);
  masm.Push(lhs);

  callvm.call<Fn, fn>();
  return true;
}
3048 
emitBigIntAddResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3049 bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
3050                                           BigIntOperandId rhsId) {
3051   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3052   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3053   return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
3054 }
3055 
emitBigIntSubResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3056 bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
3057                                           BigIntOperandId rhsId) {
3058   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3059   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3060   return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
3061 }
3062 
emitBigIntMulResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3063 bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
3064                                           BigIntOperandId rhsId) {
3065   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3066   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3067   return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
3068 }
3069 
emitBigIntDivResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3070 bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
3071                                           BigIntOperandId rhsId) {
3072   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3073   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3074   return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
3075 }
3076 
emitBigIntModResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3077 bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
3078                                           BigIntOperandId rhsId) {
3079   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3080   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3081   return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
3082 }
3083 
emitBigIntPowResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3084 bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
3085                                           BigIntOperandId rhsId) {
3086   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3087   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3088   return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
3089 }
3090 
emitBigIntBitAndResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3091 bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
3092                                              BigIntOperandId rhsId) {
3093   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3094   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3095   return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
3096 }
3097 
emitBigIntBitOrResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3098 bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
3099                                             BigIntOperandId rhsId) {
3100   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3101   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3102   return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
3103 }
3104 
emitBigIntBitXorResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3105 bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
3106                                              BigIntOperandId rhsId) {
3107   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3108   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3109   return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
3110 }
3111 
emitBigIntLeftShiftResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3112 bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
3113                                                 BigIntOperandId rhsId) {
3114   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3115   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3116   return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
3117 }
3118 
emitBigIntRightShiftResult(BigIntOperandId lhsId,BigIntOperandId rhsId)3119 bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
3120                                                  BigIntOperandId rhsId) {
3121   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3122   using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
3123   return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
3124 }
3125 
// Shared VM-call path for all unary BigInt ops: pushes the operand and calls
// |fn| through AutoCallVM, which takes care of storing the VM call's result
// into the IC output.
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
  AutoCallVM callvm(masm, this, allocator);
  Register val = allocator.useRegister(masm, inputId);

  callvm.prepare();

  masm.Push(val);

  callvm.call<Fn, fn>();
  return true;
}
3138 
emitBigIntNotResult(BigIntOperandId inputId)3139 bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
3140   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3141   using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3142   return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
3143 }
3144 
emitBigIntNegationResult(BigIntOperandId inputId)3145 bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
3146   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3147   using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3148   return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
3149 }
3150 
emitBigIntIncResult(BigIntOperandId inputId)3151 bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
3152   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3153   using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3154   return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
3155 }
3156 
emitBigIntDecResult(BigIntOperandId inputId)3157 bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
3158   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3159   using Fn = BigInt* (*)(JSContext*, HandleBigInt);
3160   return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
3161 }
3162 
// Truncates the number |inputId| to an int32 (modulo 2^32) in |res|, using
// the inline truncation instruction when it succeeds and falling back to an
// ABI call to JS::ToInt32 otherwise.
bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
                                                 Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register res = allocator.defineRegister(masm, resultId);

  AutoScratchFloatRegister floatReg(this);

  allocator.ensureDoubleRegister(masm, inputId, floatReg);

  Label done, truncateABICall;

  // Fast path: inline truncation; jumps to the slow path when it can't
  // produce the result directly.
  masm.branchTruncateDoubleMaybeModUint32(floatReg, res, &truncateABICall);
  masm.jump(&done);

  masm.bind(&truncateABICall);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  // The input double isn't needed after the call; keep it (and its
  // single-precision alias, see bug below) out of the save set.
  save.takeUnchecked(floatReg);
  // Bug 1451976
  save.takeUnchecked(floatReg.get().asSingle());
  masm.PushRegsInMask(save);

  using Fn = int32_t (*)(double);
  masm.setupUnalignedABICall(res);
  masm.passABIArg(floatReg, MoveOp::DOUBLE);
  masm.callWithABI<Fn, JS::ToInt32>(MoveOp::GENERAL,
                                    CheckUnsafeCallWithABI::DontCheckOther);
  masm.storeCallInt32Result(res);

  // Restore everything except |res|, which now holds the result.
  LiveRegisterSet ignore;
  ignore.add(res);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.bind(&done);
  return true;
}
3198 
// Load the .length of an arguments object as an int32 result. Bails to the
// failure path if the length can't be loaded directly (handled inside
// loadArgumentsObjectLength).
bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectLength(obj, scratch, failure->label());

  // Box the raw length as an int32 Value in the output.
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
3215 
// Load an ArrayBuffer's byteLength as an int32 result. The byte length is an
// intptr internally; bail out if it doesn't fit in a non-negative int32.
bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
  // Guard the intptr length fits an int32 (and is non-negative).
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
3233 
// Load an ArrayBuffer's byteLength as a double result. Unlike the Int32
// variant this cannot fail: any intptr length is representable as a double.
bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}
3247 
// Load an ArrayBufferView's length as an int32 result. The length is an
// intptr internally; bail out if it doesn't fit in a non-negative int32.
bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  // Guard the intptr length fits an int32 (and is non-negative).
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
3265 
// Load an ArrayBufferView's length as a double result. Infallible: any
// intptr length is representable as a double.
bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ScratchDoubleScope fpscratch(masm);
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.convertIntPtrToDouble(scratch, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}
3279 
// Load a JSFunction's .length as an int32 result. Bails out for functions
// whose length isn't cheaply available (self-hosted lazy scripts) or whose
// resolved length property may shadow the default behavior.
bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Get the JSFunction flags.
  masm.load16ZeroExtend(Address(obj, JSFunction::offsetOfFlags()), scratch);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, scratch,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      failure->label());

  // Note: |scratch| is reused as both the flags input and the length output.
  masm.loadFunctionLength(obj, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
3306 
// Load a JSFunction's .name as a string result. The empty atom is supplied
// as the fallback for nameless functions; bails out if the name can't be
// loaded directly (handled inside loadFunctionName).
bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty),
                        failure->label());

  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  return true;
}
3324 
emitLoadStringLengthResult(StringOperandId strId)3325 bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
3326   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3327   AutoOutputRegister output(*this);
3328   Register str = allocator.useRegister(masm, strId);
3329   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3330 
3331   masm.loadStringLength(str, scratch);
3332   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
3333   return true;
3334 }
3335 
// Load the char code at |indexId| of string |strId| as an int32 result.
// Bails out on out-of-bounds indices and when the character can't be loaded
// inline (e.g. rope strings — handled inside loadStringChar).
bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
                                                   Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check, load string char. The Spectre-safe check clamps the index
  // so a mispredicted branch can't read out of bounds.
  masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                            scratch1, failure->label());
  masm.loadStringChar(str, index, scratch1, scratch2, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
3358 
emitNewStringObjectResult(uint32_t templateObjectOffset,StringOperandId strId)3359 bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
3360                                                 StringOperandId strId) {
3361   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3362 
3363   AutoCallVM callvm(masm, this, allocator);
3364 
3365   Register str = allocator.useRegister(masm, strId);
3366 
3367   callvm.prepare();
3368   masm.Push(str);
3369 
3370   using Fn = JSObject* (*)(JSContext*, HandleString);
3371   callvm.call<Fn, NewStringObject>();
3372   return true;
3373 }
3374 
emitStringToLowerCaseResult(StringOperandId strId)3375 bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
3376   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3377 
3378   AutoCallVM callvm(masm, this, allocator);
3379 
3380   Register str = allocator.useRegister(masm, strId);
3381 
3382   callvm.prepare();
3383   masm.Push(str);
3384 
3385   using Fn = JSString* (*)(JSContext*, HandleString);
3386   callvm.call<Fn, js::StringToLowerCase>();
3387   return true;
3388 }
3389 
emitStringToUpperCaseResult(StringOperandId strId)3390 bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
3391   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3392 
3393   AutoCallVM callvm(masm, this, allocator);
3394 
3395   Register str = allocator.useRegister(masm, strId);
3396 
3397   callvm.prepare();
3398   masm.Push(str);
3399 
3400   using Fn = JSString* (*)(JSContext*, HandleString);
3401   callvm.call<Fn, js::StringToUpperCase>();
3402   return true;
3403 }
3404 
// Load arguments-object element |indexId| as the result value. Bails out on
// out-of-bounds or otherwise unsuitable accesses (handled inside
// loadArgumentsObjectElement).
bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
                                  failure->label());
  return true;
}
3422 
// Load dense element |indexId| from a native object. Bails out when the
// index is out of the initialized length or when the slot contains the hole
// magic value.
bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
                                                 Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Bounds check (Spectre-safe: the index is clamped on misprediction).
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch1, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());
  masm.loadTypedOrValue(element, output);
  return true;
}
3450 
emitGuardInt32IsNonNegative(Int32OperandId indexId)3451 bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
3452   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3453   Register index = allocator.useRegister(masm, indexId);
3454 
3455   FailurePath* failure;
3456   if (!addFailurePath(&failure)) {
3457     return false;
3458   }
3459 
3460   masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
3461   return true;
3462 }
3463 
// Guard that |indexId| is >= the object's dense initialized length, i.e. the
// access would NOT hit an already-initialized dense element. Note the
// inverted use of the bounds check: landing "out of bounds" is the success
// case here, while an in-bounds index jumps to the failure path.
bool CacheIRCompiler::emitGuardIndexGreaterThanDenseInitLength(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Ensure index >= initLength.
  Label outOfBounds;
  Address capacity(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreScratch, &outOfBounds);
  // In bounds: this guard fails.
  masm.jump(failure->label());
  masm.bind(&outOfBounds);

  return true;
}
3489 
// Guard that writing dense element |indexId| is a valid update or add:
// either the array length is writable (any index is fine), or the index is
// below the current length so the write cannot grow a frozen-length array.
bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  Label success;

  // If length is writable, branch to &success.  All indices are writable.
  Address flags(scratch, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Address length(scratch, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreScratch,
                            /* failure = */ failure->label());
  masm.bind(&success);
  return true;
}
3521 
// Guard that two value tags are not "equal" for strict-inequality purposes.
// Fails when the tags are identical, and also when both are number tags
// (int32 vs. double tags differ yet the values may still compare equal).
bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
                                           ValueTagOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branch32(Assembler::Equal, lhs, rhs, failure->label());

  // If both lhs and rhs are numbers, can't use tag comparison to do inequality
  // comparison
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  masm.jump(failure->label());

  masm.bind(&done);
  return true;
}
3545 
// Guard that an Xray wrapper's expando object has the expected shape and the
// default (undefined) proto. Walks: proxy reserved slots -> xray holder ->
// holder's expando slot -> unwrapped expando, then checks the expando's
// shape against the shape stored in the stub's shape wrapper and verifies
// the expando's proto slot is undefined.
bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
    ObjOperandId objId, uint32_t shapeWrapperOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  // Fail if the holder or the expando slot doesn't hold an object.
  masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
  masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());

  // Unwrap the expando before checking its shape.
  masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
  masm.unboxObject(
      Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      scratch);

  emitLoadStubField(shapeWrapper, scratch2);
  LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
  masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
                          scratch, failure->label());

  // The reserved slots on the expando should all be in fixed slots.
  Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
                                    GetXrayJitInfo()->expandoProtoSlot));
  masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());

  return true;
}
3589 
// Guard that an Xray wrapper has no expando object. Succeeds immediately if
// there is no xray holder at all; otherwise fails if the holder's expando
// slot contains an object.
bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  // No holder object means no expando either.
  Label done;
  masm.fallibleUnboxObject(holderAddress, scratch, &done);
  masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
  masm.bind(&done);

  return true;
}
3614 
// Guard that the realm has no allocation-metadata builder installed. The
// stub field holds the address of the builder pointer; fail if the pointer
// it points at is non-null.
bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
    uint32_t builderAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
  emitLoadStubField(builderField, scratch);
  // Dereference the stored address and require a null builder pointer.
  masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
                 failure->label());

  return true;
}
3632 
emitGuardFunctionHasJitEntry(ObjOperandId funId,bool constructing)3633 bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId,
3634                                                    bool constructing) {
3635   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3636   Register fun = allocator.useRegister(masm, funId);
3637 
3638   FailurePath* failure;
3639   if (!addFailurePath(&failure)) {
3640     return false;
3641   }
3642 
3643   masm.branchIfFunctionHasNoJitEntry(fun, constructing, failure->label());
3644   return true;
3645 }
3646 
// Guard that a function has no JIT entry (for a non-constructing call);
// bail to the failure path if it has one.
bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, funId);
  // NOTE(review): |scratch| is not referenced below — it only reserves a
  // register in the allocator. Confirm whether it is still needed.
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfFunctionHasJitEntry(obj, /*isConstructing =*/false,
                                   failure->label());
  return true;
}
3661 
// Guard that a function is a constructor and not a built-in one; bail to
// the failure path otherwise (checked inside
// branchIfNotFunctionIsNonBuiltinCtor).
bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
  return true;
}
3676 
// Guard that a function has the CONSTRUCTOR flag set; bail to the failure
// path otherwise.
bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register funcReg = allocator.useRegister(masm, funId);
  // NOTE(review): |scratch| is not passed to the branch below — it only
  // reserves a register in the allocator. Confirm whether it is still needed.
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Ensure obj is a constructor
  masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
                               Assembler::Zero, failure->label());
  return true;
}
3692 
// Guard that a function is not a class constructor (class constructors must
// be invoked with |new|); bail to the failure path if it is one.
bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                          fun, scratch, failure->label());
  return true;
}
3707 
emitGuardArrayIsPacked(ObjOperandId arrayId)3708 bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
3709   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3710   Register array = allocator.useRegister(masm, arrayId);
3711   AutoScratchRegister scratch(allocator, masm);
3712   AutoScratchRegister scratch2(allocator, masm);
3713 
3714   FailurePath* failure;
3715   if (!addFailurePath(&failure)) {
3716     return false;
3717   }
3718 
3719   masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
3720   return true;
3721 }
3722 
// Guard that none of the given arguments-object |flags| bits are set on the
// object; bail to the failure path if any of them is.
bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
                                                    uint8_t flags) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
                                      failure->label());
  return true;
}
3738 
// Load dense element |indexId|, producing |undefined| for out-of-bounds or
// hole slots instead of bailing (the "hole" flavor of the dense-element
// load). Only negative indices take the failure path.
bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Guard on the initialized length: out-of-bounds goes to the hole path.
  Label hole;
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);

  // Load the value. A magic (hole) value falls through into the hole path.
  Label done;
  masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
  masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);

  // Load undefined for the hole.
  masm.bind(&hole);
  masm.moveValue(UndefinedValue(), output.valueReg());

  masm.bind(&done);
  return true;
}
3776 
// Produce a boolean result: does index |indexId| exist in the typed array
// (i.e. is it below the view length)? Infallible — no failure path needed.
bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
    ObjOperandId objId, IntPtrOperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label outOfBounds, done;

  // Bounds check. Unsigned compare: length <= index means out of bounds.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}
3799 
// Produce |true| if dense element |indexId| exists (in bounds and not a
// hole). Out-of-bounds or hole accesses bail to the next stub rather than
// producing |false|.
bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unsigned compare sends negative indices to next IC.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  EmitStoreBoolean(masm, true, output);
  return true;
}
3827 
// Produce a boolean result: does dense element |indexId| exist? Unlike the
// non-hole variant, out-of-bounds and hole slots produce |false| instead of
// bailing; only negative indices take the failure path.
bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Guard on the initialized length: out of bounds counts as a hole.
  Label hole;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);

  // Load value and replace with true.
  Label done;
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &hole);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  // Load false for the hole.
  masm.bind(&hole);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}
3866 
emitPackedArrayPopResult(ObjOperandId arrayId)3867 bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
3868   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3869 
3870   AutoOutputRegister output(*this);
3871   Register array = allocator.useRegister(masm, arrayId);
3872   AutoScratchRegister scratch1(allocator, masm);
3873   AutoScratchRegister scratch2(allocator, masm);
3874 
3875   FailurePath* failure;
3876   if (!addFailurePath(&failure)) {
3877     return false;
3878   }
3879 
3880   masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
3881                       failure->label());
3882   return true;
3883 }
3884 
// Emit Array.prototype.shift for a packed array. The helper takes the live
// volatile register set because shifting the remaining elements may require
// a (register-clobbering) call; unsupported cases bail to the failure path.
bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
                        volatileRegs, failure->label());
  return true;
}
3904 
emitIsObjectResult(ValOperandId inputId)3905 bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
3906   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
3907 
3908   AutoOutputRegister output(*this);
3909   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
3910 
3911   ValueOperand val = allocator.useValueRegister(masm, inputId);
3912 
3913   masm.testObjectSet(Assembler::Equal, val, scratch);
3914 
3915   masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
3916   return true;
3917 }
3918 
// Produce a boolean result: is the object a packed array? Uses the output
// value register's own scratch for the raw boolean before tagging.
bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  Register outputScratch = output.valueReg().scratchReg();
  masm.setIsPackedArray(obj, outputScratch, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
  return true;
}
3931 
// Produce a boolean result: is the input value callable? Non-objects are
// never callable; ordinary objects are answered inline; proxies need an ABI
// call to ObjectIsCallable with all volatile registers saved.
bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  Label isObject, done;
  masm.branchTestObject(Assembler::Equal, val, &isObject);
  // Primitives are never callable.
  masm.move32(Imm32(0), scratch2);
  masm.jump(&done);

  masm.bind(&isObject);
  masm.unboxObject(val, scratch1);

  // Inline answer for ordinary objects; proxies branch to the slow path.
  Label isProxy;
  masm.isCallable(scratch1, scratch2, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    // Slow path: ABI call, preserving volatile registers except the result.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject * obj);
    masm.setupUnalignedABICall(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, ObjectIsCallable>();
    masm.storeCallBoolResult(scratch2);

    LiveRegisterSet ignore;
    ignore.add(scratch2);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
3975 
// Produce a boolean result: is the object a constructor? Ordinary objects
// are answered inline; proxies need an ABI call to ObjectIsConstructor with
// all volatile registers saved.
bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register obj = allocator.useRegister(masm, objId);

  // Inline answer for ordinary objects; proxies branch to the slow path.
  Label isProxy, done;
  masm.isConstructor(obj, scratch, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    // Slow path: ABI call, preserving volatile registers except the result.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject * obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, ObjectIsConstructor>();
    masm.storeCallBoolResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
4009 
emitIsCrossRealmArrayConstructorResult(ObjOperandId objId)4010 bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
4011     ObjOperandId objId) {
4012   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4013 
4014   AutoOutputRegister output(*this);
4015   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4016   Register obj = allocator.useRegister(masm, objId);
4017 
4018   masm.setIsCrossRealmArrayConstructor(obj, scratch);
4019   masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4020   return true;
4021 }
4022 
// Implements ArrayBufferViewByteOffsetInt32Result: loads the view's byte
// offset as an Int32 Value, bailing out when it doesn't fit in an int32.
bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
  // The byte offset is an intptr; jump to the failure path if it is
  // negative or exceeds INT32_MAX.
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
4041 
emitArrayBufferViewByteOffsetDoubleResult(ObjOperandId objId)4042 bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
4043     ObjOperandId objId) {
4044   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4045 
4046   AutoOutputRegister output(*this);
4047   Register obj = allocator.useRegister(masm, objId);
4048   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4049 
4050   ScratchDoubleScope fpscratch(masm);
4051   masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
4052   masm.convertIntPtrToDouble(scratch, fpscratch);
4053   masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
4054   return true;
4055 }
4056 
// Implements TypedArrayByteLengthInt32Result: computes byteLength = length *
// elementSize as an Int32 Value, bailing out on int32 overflow.
bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  // Guard that the intptr length fits in int32 before doing 32-bit math.
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
  masm.typedArrayElementSize(obj, scratch2);

  // length * elementSize can still overflow int32; fail if it does.
  masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
                   failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
4080 
emitTypedArrayByteLengthDoubleResult(ObjOperandId objId)4081 bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
4082   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4083 
4084   AutoOutputRegister output(*this);
4085   AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
4086   AutoScratchRegister scratch2(allocator, masm);
4087   Register obj = allocator.useRegister(masm, objId);
4088 
4089   masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
4090   masm.typedArrayElementSize(obj, scratch2);
4091   masm.mulPtr(scratch2, scratch1);
4092 
4093   ScratchDoubleScope fpscratch(masm);
4094   masm.convertIntPtrToDouble(scratch1, fpscratch);
4095   masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
4096   return true;
4097 }
4098 
emitTypedArrayElementSizeResult(ObjOperandId objId)4099 bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
4100   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4101 
4102   AutoOutputRegister output(*this);
4103   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4104   Register obj = allocator.useRegister(masm, objId);
4105 
4106   masm.typedArrayElementSize(obj, scratch);
4107   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
4108   return true;
4109 }
4110 
// Implements GuardHasAttachedArrayBuffer: fails the stub when the view's
// underlying ArrayBuffer has been detached. Produces no result Value.
bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfHasDetachedArrayBuffer(obj, scratch, failure->label());
  return true;
}
4125 
emitIsTypedArrayConstructorResult(ObjOperandId objId)4126 bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
4127   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4128 
4129   AutoOutputRegister output(*this);
4130   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4131   Register obj = allocator.useRegister(masm, objId);
4132 
4133   masm.setIsDefinitelyTypedArrayConstructor(obj, scratch);
4134   masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
4135   return true;
4136 }
4137 
// Implements GetNextMapSetEntryForIteratorResult: advances a Map or Set
// iterator (selected by |isMap|) via an ABI call that writes the entry into
// |resultArrId|; the callee's boolean result becomes the output Value.
bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
    ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register iter = allocator.useRegister(masm, iterId);
  Register resultArr = allocator.useRegister(masm, resultArrId);

  // Save volatile registers, minus the output/scratch registers, which are
  // about to be clobbered with the result anyway.
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(output.valueReg());
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(iter);
  masm.passABIArg(resultArr);
  if (isMap) {
    using Fn = bool (*)(MapIteratorObject * iter, ArrayObject * resultPairObj);
    masm.callWithABI<Fn, MapIteratorObject::next>();
  } else {
    using Fn = bool (*)(SetIteratorObject * iter, ArrayObject * resultObj);
    masm.callWithABI<Fn, SetIteratorObject::next>();
  }
  masm.storeCallBoolResult(scratch);

  masm.PopRegsInMask(save);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
4169 
emitFinishBoundFunctionInitResult(ObjOperandId boundId,ObjOperandId targetId,Int32OperandId argCountId)4170 bool CacheIRCompiler::emitFinishBoundFunctionInitResult(
4171     ObjOperandId boundId, ObjOperandId targetId, Int32OperandId argCountId) {
4172   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4173 
4174   AutoCallVM callvm(masm, this, allocator);
4175 
4176   Register bound = allocator.useRegister(masm, boundId);
4177   Register target = allocator.useRegister(masm, targetId);
4178   Register argCount = allocator.useRegister(masm, argCountId);
4179 
4180   callvm.prepare();
4181 
4182   masm.Push(argCount);
4183   masm.Push(target);
4184   masm.Push(bound);
4185 
4186   using Fn = bool (*)(JSContext * cx, HandleFunction bound, HandleObject target,
4187                       int32_t argCount);
4188   callvm.callNoResult<Fn, JSFunction::finishBoundFunctionInit>();
4189 
4190   masm.moveValue(UndefinedValue(), callvm.outputValueReg());
4191   return true;
4192 }
4193 
emitNewArrayIteratorResult(uint32_t templateObjectOffset)4194 bool CacheIRCompiler::emitNewArrayIteratorResult(
4195     uint32_t templateObjectOffset) {
4196   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4197 
4198   AutoCallVM callvm(masm, this, allocator);
4199 
4200   callvm.prepare();
4201 
4202   using Fn = ArrayIteratorObject* (*)(JSContext*);
4203   callvm.call<Fn, NewArrayIterator>();
4204   return true;
4205 }
4206 
emitNewStringIteratorResult(uint32_t templateObjectOffset)4207 bool CacheIRCompiler::emitNewStringIteratorResult(
4208     uint32_t templateObjectOffset) {
4209   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4210 
4211   AutoCallVM callvm(masm, this, allocator);
4212 
4213   callvm.prepare();
4214 
4215   using Fn = StringIteratorObject* (*)(JSContext*);
4216   callvm.call<Fn, NewStringIterator>();
4217   return true;
4218 }
4219 
emitNewRegExpStringIteratorResult(uint32_t templateObjectOffset)4220 bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
4221     uint32_t templateObjectOffset) {
4222   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4223 
4224   AutoCallVM callvm(masm, this, allocator);
4225 
4226   callvm.prepare();
4227 
4228   using Fn = RegExpStringIteratorObject* (*)(JSContext*);
4229   callvm.call<Fn, NewRegExpStringIterator>();
4230   return true;
4231 }
4232 
emitObjectCreateResult(uint32_t templateObjectOffset)4233 bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
4234   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4235 
4236   AutoCallVM callvm(masm, this, allocator);
4237   AutoScratchRegister scratch(allocator, masm);
4238 
4239   StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
4240   emitLoadStubField(objectField, scratch);
4241 
4242   callvm.prepare();
4243   masm.Push(scratch);
4244 
4245   using Fn = PlainObject* (*)(JSContext*, HandlePlainObject);
4246   callvm.call<Fn, ObjectCreateWithTemplate>();
4247   return true;
4248 }
4249 
emitNewArrayFromLengthResult(uint32_t templateObjectOffset,Int32OperandId lengthId)4250 bool CacheIRCompiler::emitNewArrayFromLengthResult(
4251     uint32_t templateObjectOffset, Int32OperandId lengthId) {
4252   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4253 
4254   AutoCallVM callvm(masm, this, allocator);
4255   AutoScratchRegister scratch(allocator, masm);
4256   Register length = allocator.useRegister(masm, lengthId);
4257 
4258   StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
4259   emitLoadStubField(objectField, scratch);
4260 
4261   callvm.prepare();
4262   masm.Push(length);
4263   masm.Push(scratch);
4264 
4265   using Fn = ArrayObject* (*)(JSContext*, HandleArrayObject, int32_t length);
4266   callvm.call<Fn, ArrayConstructorOneArg>();
4267   return true;
4268 }
4269 
emitNewTypedArrayFromLengthResult(uint32_t templateObjectOffset,Int32OperandId lengthId)4270 bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
4271     uint32_t templateObjectOffset, Int32OperandId lengthId) {
4272   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4273 
4274   AutoCallVM callvm(masm, this, allocator);
4275   AutoScratchRegister scratch(allocator, masm);
4276   Register length = allocator.useRegister(masm, lengthId);
4277 
4278   StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
4279   emitLoadStubField(objectField, scratch);
4280 
4281   callvm.prepare();
4282   masm.Push(length);
4283   masm.Push(scratch);
4284 
4285   using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
4286   callvm.call<Fn, NewTypedArrayWithTemplateAndLength>();
4287   return true;
4288 }
4289 
emitNewTypedArrayFromArrayBufferResult(uint32_t templateObjectOffset,ObjOperandId bufferId,ValOperandId byteOffsetId,ValOperandId lengthId)4290 bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
4291     uint32_t templateObjectOffset, ObjOperandId bufferId,
4292     ValOperandId byteOffsetId, ValOperandId lengthId) {
4293   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4294 
4295 #ifdef JS_CODEGEN_X86
4296   MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
4297 #endif
4298 
4299   AutoCallVM callvm(masm, this, allocator);
4300   AutoScratchRegister scratch(allocator, masm);
4301   Register buffer = allocator.useRegister(masm, bufferId);
4302   ValueOperand byteOffset = allocator.useValueRegister(masm, byteOffsetId);
4303   ValueOperand length = allocator.useValueRegister(masm, lengthId);
4304 
4305   StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
4306   emitLoadStubField(objectField, scratch);
4307 
4308   callvm.prepare();
4309   masm.Push(length);
4310   masm.Push(byteOffset);
4311   masm.Push(buffer);
4312   masm.Push(scratch);
4313 
4314   using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
4315                                    HandleValue, HandleValue);
4316   callvm.call<Fn, NewTypedArrayWithTemplateAndBuffer>();
4317   return true;
4318 }
4319 
emitNewTypedArrayFromArrayResult(uint32_t templateObjectOffset,ObjOperandId arrayId)4320 bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
4321     uint32_t templateObjectOffset, ObjOperandId arrayId) {
4322   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4323 
4324   AutoCallVM callvm(masm, this, allocator);
4325   AutoScratchRegister scratch(allocator, masm);
4326   Register array = allocator.useRegister(masm, arrayId);
4327 
4328   StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
4329   emitLoadStubField(objectField, scratch);
4330 
4331   callvm.prepare();
4332   masm.Push(array);
4333   masm.Push(scratch);
4334 
4335   using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
4336   callvm.call<Fn, NewTypedArrayWithTemplateAndArray>();
4337   return true;
4338 }
4339 
// Implements MathAbsInt32Result: Math.abs on an int32, failing for INT_MIN
// whose absolute value is not representable as an int32.
bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register input = allocator.useRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  // Don't negate already positive values.
  Label positive;
  masm.branchTest32(Assembler::NotSigned, scratch, scratch, &positive);
  // neg32 might overflow for INT_MIN.
  masm.branchNeg32(Assembler::Overflow, scratch, failure->label());
  masm.bind(&positive);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
4364 
emitMathAbsNumberResult(NumberOperandId inputId)4365 bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId) {
4366   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4367 
4368   AutoOutputRegister output(*this);
4369   AutoAvailableFloatRegister scratch(*this, FloatReg0);
4370 
4371   allocator.ensureDoubleRegister(masm, inputId, scratch);
4372 
4373   masm.absDouble(scratch, scratch);
4374   masm.boxDouble(scratch, output.valueReg(), scratch);
4375   return true;
4376 }
4377 
emitMathClz32Result(Int32OperandId inputId)4378 bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId) {
4379   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4380 
4381   AutoOutputRegister output(*this);
4382   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4383   Register input = allocator.useRegister(masm, inputId);
4384 
4385   masm.clz32(input, scratch, /* knownNotZero = */ false);
4386   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
4387   return true;
4388 }
4389 
emitMathSignInt32Result(Int32OperandId inputId)4390 bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId) {
4391   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4392 
4393   AutoOutputRegister output(*this);
4394   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4395   Register input = allocator.useRegister(masm, inputId);
4396 
4397   masm.signInt32(input, scratch);
4398   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
4399   return true;
4400 }
4401 
emitMathSignNumberResult(NumberOperandId inputId)4402 bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId) {
4403   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4404 
4405   AutoOutputRegister output(*this);
4406   AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
4407   AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
4408 
4409   allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
4410 
4411   masm.signDouble(floatScratch1, floatScratch2);
4412   masm.boxDouble(floatScratch2, output.valueReg(), floatScratch2);
4413   return true;
4414 }
4415 
// Implements MathSignNumberToInt32Result: Math.sign on a double input with
// an Int32 result. signDoubleToInt32 branches to the failure path for inputs
// it can't convert (presumably NaN; see MacroAssembler — TODO confirm).
bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, floatScratch1);

  masm.signDoubleToInt32(floatScratch1, scratch, floatScratch2,
                         failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
4436 
emitMathImulResult(Int32OperandId lhsId,Int32OperandId rhsId)4437 bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId,
4438                                          Int32OperandId rhsId) {
4439   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4440 
4441   AutoOutputRegister output(*this);
4442   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
4443   Register lhs = allocator.useRegister(masm, lhsId);
4444   Register rhs = allocator.useRegister(masm, rhsId);
4445 
4446   masm.mov(lhs, scratch);
4447   masm.mul32(rhs, scratch);
4448   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
4449   return true;
4450 }
4451 
emitMathSqrtNumberResult(NumberOperandId inputId)4452 bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
4453   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4454 
4455   AutoOutputRegister output(*this);
4456   AutoAvailableFloatRegister scratch(*this, FloatReg0);
4457 
4458   allocator.ensureDoubleRegister(masm, inputId, scratch);
4459 
4460   masm.sqrtDouble(scratch, scratch);
4461   masm.boxDouble(scratch, output.valueReg(), scratch);
4462   return true;
4463 }
4464 
// Implements MathFloorNumberResult: Math.floor with a double result. Uses a
// native round-down instruction when the target has one, else falls back to
// the shared VM math-function path.
bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Down)) {
    masm.nearbyIntDouble(RoundingMode::Down, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor, scratch,
                                            output.valueReg());
}
4482 
// Implements MathCeilNumberResult: Math.ceil with a double result. Uses a
// native round-up instruction when the target has one, else falls back to
// the shared VM math-function path.
bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
    masm.nearbyIntDouble(RoundingMode::Up, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil, scratch,
                                            output.valueReg());
}
4500 
// Implements MathTruncNumberResult: Math.trunc with a double result. Uses a
// native round-toward-zero instruction when the target has one, else falls
// back to the shared VM math-function path.
bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
    masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc, scratch,
                                            output.valueReg());
}
4518 
emitMathFRoundNumberResult(NumberOperandId inputId)4519 bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId) {
4520   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4521 
4522   AutoOutputRegister output(*this);
4523   AutoAvailableFloatRegister scratch(*this, FloatReg0);
4524   FloatRegister scratchFloat32 = scratch.get().asSingle();
4525 
4526   allocator.ensureDoubleRegister(masm, inputId, scratch);
4527 
4528   masm.convertDoubleToFloat32(scratch, scratchFloat32);
4529   masm.convertFloat32ToDouble(scratchFloat32, scratch);
4530 
4531   masm.boxDouble(scratch, output.valueReg(), scratch);
4532   return true;
4533 }
4534 
// Implements MathHypot2NumberResult: two-argument Math.hypot via an ABI call
// to ecmaHypot.
bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first,
                                                 NumberOperandId second) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);

  // Preserve volatile registers around the ABI call; floatScratch0 is
  // excluded from restoration below because it holds the result.
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);

  masm.callWithABI<Fn, ecmaHypot>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}
4565 
// Implements MathHypot3NumberResult: three-argument Math.hypot via an ABI
// call to hypot3.
bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);

  // Preserve volatile registers around the ABI call; floatScratch0 is
  // excluded from restoration below because it holds the result.
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch2, MoveOp::DOUBLE);

  masm.callWithABI<Fn, hypot3>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}
4600 
// Implements MathHypot4NumberResult: four-argument Math.hypot via an ABI
// call to hypot4.
bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third,
                                                 NumberOperandId fourth) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
  AutoAvailableFloatRegister floatScratch3(*this, FloatReg3);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);
  allocator.ensureDoubleRegister(masm, fourth, floatScratch3);

  // Preserve volatile registers around the ABI call; floatScratch0 is
  // excluded from restoration below because it holds the result.
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z, double w);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch2, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch3, MoveOp::DOUBLE);

  masm.callWithABI<Fn, hypot4>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}
4639 
// Implements MathAtan2NumberResult: Math.atan2(y, x) via an ABI call to
// js::ecmaAtan2. Note that |yId| is the first argument, matching the
// Math.atan2(y, x) parameter order.
bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId,
                                                NumberOperandId xId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, yId, floatScratch0);
  allocator.ensureDoubleRegister(masm, xId, floatScratch1);

  // Preserve volatile registers around the ABI call; floatScratch0 is
  // excluded from restoration below because it holds the result.
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.passABIArg(floatScratch1, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::ecmaAtan2>(MoveOp::DOUBLE);
  masm.storeCallFloatResult(floatScratch0);

  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
4670 
// Implements MathFloorToInt32Result: Math.floor with an Int32 result; jumps
// to the failure path when the result is not representable as an int32.
bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.floorDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
4691 
// Implements MathCeilToInt32Result: Math.ceil with an Int32 result; jumps to
// the failure path when the result is not representable as an int32.
bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.ceilDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
4712 
// Implements MathTruncToInt32Result: Math.trunc with an Int32 result; jumps
// to the failure path when the result is not representable as an int32.
bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.truncDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
4733 
// Implements MathRoundToInt32Result: Math.round with an Int32 result; jumps
// to the failure path when the result is not representable as an int32.
// roundDoubleToInt32 needs an extra float scratch for its computation.
bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat0(*this, FloatReg0);
  AutoAvailableFloatRegister scratchFloat1(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat0);

  masm.roundDoubleToInt32(scratchFloat0, scratch, scratchFloat1,
                          failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
4756 
emitInt32MinMax(bool isMax,Int32OperandId firstId,Int32OperandId secondId,Int32OperandId resultId)4757 bool CacheIRCompiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
4758                                       Int32OperandId secondId,
4759                                       Int32OperandId resultId) {
4760   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4761 
4762   Register first = allocator.useRegister(masm, firstId);
4763   Register second = allocator.useRegister(masm, secondId);
4764   Register result = allocator.defineRegister(masm, resultId);
4765 
4766   Assembler::Condition cond =
4767       isMax ? Assembler::GreaterThan : Assembler::LessThan;
4768   masm.move32(first, result);
4769   masm.cmp32Move32(cond, second, first, second, result);
4770   return true;
4771 }
4772 
emitNumberMinMax(bool isMax,NumberOperandId firstId,NumberOperandId secondId,NumberOperandId resultId)4773 bool CacheIRCompiler::emitNumberMinMax(bool isMax, NumberOperandId firstId,
4774                                        NumberOperandId secondId,
4775                                        NumberOperandId resultId) {
4776   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4777 
4778   ValueOperand output = allocator.defineValueRegister(masm, resultId);
4779 
4780   AutoAvailableFloatRegister scratch1(*this, FloatReg0);
4781   AutoAvailableFloatRegister scratch2(*this, FloatReg1);
4782 
4783   allocator.ensureDoubleRegister(masm, firstId, scratch1);
4784   allocator.ensureDoubleRegister(masm, secondId, scratch2);
4785 
4786   if (isMax) {
4787     masm.maxDouble(scratch2, scratch1, /* handleNaN = */ true);
4788   } else {
4789     masm.minDouble(scratch2, scratch1, /* handleNaN = */ true);
4790   }
4791 
4792   masm.boxDouble(scratch1, output, scratch1);
4793   return true;
4794 }
4795 
// Compute Math.min/Math.max over a packed int32 array in a single inline
// loop (emitted by minMaxArrayInt32). Jumps to the failure path when the
// array or an element doesn't satisfy the helper's guard conditions.
bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
                                                 bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
  AutoScratchRegisterMaybeOutput result(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayInt32(array, result, scratch, scratch2, scratch3, isMax,
                        failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, result, output.valueReg());
  return true;
}
4818 
// Compute Math.min/Math.max over an array of doubles inline (emitted by
// minMaxArrayNumber). Jumps to the failure path when the array contents
// don't satisfy the helper's guard conditions. The result double is boxed
// into the output value register.
bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
                                                  bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoAvailableFloatRegister result(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch(*this, FloatReg1);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayNumber(array, result, floatScratch, scratch1, scratch2, isMax,
                         failure->label());
  masm.boxDouble(result, output.valueReg(), result);
  return true;
}
4842 
// Shared tail for unary Math functions (sin, cos, log, ...): performs an
// ABI call to the C++ implementation of |fun| with the double argument in
// |inputScratch|, then boxes the double result into |output|.
bool CacheIRCompiler::emitMathFunctionNumberResultShared(
    UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output) {
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);

  // Save all volatile registers except |inputScratch|, which carries the
  // argument into the call and receives the result afterwards.
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  save.takeUnchecked(inputScratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(output.scratchReg());
  masm.passABIArg(inputScratch, MoveOp::DOUBLE);
  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
                   MoveOp::DOUBLE);
  masm.storeCallFloatResult(inputScratch);

  masm.PopRegsInMask(save);

  masm.boxDouble(inputScratch, output, inputScratch);
  return true;
}
4862 
emitMathFunctionNumberResult(NumberOperandId inputId,UnaryMathFunction fun)4863 bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId,
4864                                                    UnaryMathFunction fun) {
4865   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4866 
4867   AutoOutputRegister output(*this);
4868   AutoAvailableFloatRegister scratch(*this, FloatReg0);
4869 
4870   allocator.ensureDoubleRegister(masm, inputId, scratch);
4871 
4872   return emitMathFunctionNumberResultShared(fun, scratch, output.valueReg());
4873 }
4874 
EmitStoreDenseElement(MacroAssembler & masm,const ConstantOrRegister & value,BaseObjectElementIndex target)4875 static void EmitStoreDenseElement(MacroAssembler& masm,
4876                                   const ConstantOrRegister& value,
4877                                   BaseObjectElementIndex target) {
4878   if (value.constant()) {
4879     Value v = value.value();
4880     masm.storeValue(v, target);
4881     return;
4882   }
4883 
4884   TypedOrValueRegister reg = value.reg();
4885   masm.storeTypedOrValue(reg, target);
4886 }
4887 
// Store |rhs| into an existing (initialized, non-hole) dense element.
// Bails out on out-of-bounds indices or when the slot currently holds the
// magic hole value; pre/post GC barriers are emitted around the store.
bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId,
                                            Int32OperandId indexId,
                                            ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unfortunately we don't have more registers available on
  // x86, so use InvalidReg and emit slightly slower code on x86.
  Register spectreTemp = InvalidReg;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());

  // Hole check: fail if the slot contains the magic hole value.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  // Perform the store, with a pre-barrier on the old value and a
  // post-barrier for the (possibly nursery-allocated) new value.
  EmitPreBarrier(masm, element, MIRType::Value);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}
4924 
EmitAssertExtensibleElements(MacroAssembler & masm,Register elementsReg)4925 static void EmitAssertExtensibleElements(MacroAssembler& masm,
4926                                          Register elementsReg) {
4927 #ifdef DEBUG
4928   // Preceding shape guards ensure the object elements are extensible.
4929   Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
4930   Label ok;
4931   masm.branchTest32(Assembler::Zero, elementsFlags,
4932                     Imm32(ObjectElements::Flags::NOT_EXTENSIBLE), &ok);
4933   masm.assumeUnreachable("Unexpected non-extensible elements");
4934   masm.bind(&ok);
4935 #endif
4936 }
4937 
EmitAssertWritableArrayLengthElements(MacroAssembler & masm,Register elementsReg)4938 static void EmitAssertWritableArrayLengthElements(MacroAssembler& masm,
4939                                                   Register elementsReg) {
4940 #ifdef DEBUG
4941   // Preceding shape guards ensure the array length is writable.
4942   Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
4943   Label ok;
4944   masm.branchTest32(Assembler::Zero, elementsFlags,
4945                     Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
4946                     &ok);
4947   masm.assumeUnreachable("Unexpected non-writable array length elements");
4948   masm.bind(&ok);
4949 #endif
4950 }
4951 
emitStoreDenseElementHole(ObjOperandId objId,Int32OperandId indexId,ValOperandId rhsId,bool handleAdd)4952 bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
4953                                                 Int32OperandId indexId,
4954                                                 ValOperandId rhsId,
4955                                                 bool handleAdd) {
4956   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
4957 
4958   Register obj = allocator.useRegister(masm, objId);
4959   Register index = allocator.useRegister(masm, indexId);
4960   ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
4961 
4962   AutoScratchRegister scratch(allocator, masm);
4963 
4964   FailurePath* failure;
4965   if (!addFailurePath(&failure)) {
4966     return false;
4967   }
4968 
4969   // Load obj->elements in scratch.
4970   masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
4971 
4972   EmitAssertExtensibleElements(masm, scratch);
4973   if (handleAdd) {
4974     EmitAssertWritableArrayLengthElements(masm, scratch);
4975   }
4976 
4977   BaseObjectElementIndex element(scratch, index);
4978   Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
4979   Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
4980 
4981   // We don't have enough registers on x86 so use InvalidReg. This will emit
4982   // slightly less efficient code on x86.
4983   Register spectreTemp = InvalidReg;
4984 
4985   Label storeSkipPreBarrier;
4986   if (handleAdd) {
4987     // Bounds check.
4988     Label inBounds, outOfBounds;
4989     masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
4990     masm.jump(&inBounds);
4991 
4992     // If we're out-of-bounds, only handle the index == initLength case.
4993     masm.bind(&outOfBounds);
4994     masm.branch32(Assembler::NotEqual, initLength, index, failure->label());
4995 
4996     // If index < capacity, we can add a dense element inline. If not we
4997     // need to allocate more elements.
4998     Label allocElement, addNewElement;
4999     Address capacity(scratch, ObjectElements::offsetOfCapacity());
5000     masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
5001     masm.jump(&addNewElement);
5002 
5003     masm.bind(&allocElement);
5004 
5005     LiveRegisterSet save(GeneralRegisterSet::Volatile(),
5006                          liveVolatileFloatRegs());
5007     save.takeUnchecked(scratch);
5008     masm.PushRegsInMask(save);
5009 
5010     using Fn = bool (*)(JSContext * cx, NativeObject * obj);
5011     masm.setupUnalignedABICall(scratch);
5012     masm.loadJSContext(scratch);
5013     masm.passABIArg(scratch);
5014     masm.passABIArg(obj);
5015     masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
5016     masm.mov(ReturnReg, scratch);
5017 
5018     masm.PopRegsInMask(save);
5019     masm.branchIfFalseBool(scratch, failure->label());
5020 
5021     // Load the reallocated elements pointer.
5022     masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
5023 
5024     masm.bind(&addNewElement);
5025 
5026     // Increment initLength.
5027     masm.add32(Imm32(1), initLength);
5028 
5029     // If length is now <= index, increment length too.
5030     Label skipIncrementLength;
5031     Address length(scratch, ObjectElements::offsetOfLength());
5032     masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
5033     masm.add32(Imm32(1), length);
5034     masm.bind(&skipIncrementLength);
5035 
5036     // Skip EmitPreBarrier as the memory is uninitialized.
5037     masm.jump(&storeSkipPreBarrier);
5038 
5039     masm.bind(&inBounds);
5040   } else {
5041     // Fail if index >= initLength.
5042     masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
5043   }
5044 
5045   EmitPreBarrier(masm, element, MIRType::Value);
5046 
5047   masm.bind(&storeSkipPreBarrier);
5048   EmitStoreDenseElement(masm, val, element);
5049 
5050   emitPostBarrierElement(obj, val, scratch, index);
5051   return true;
5052 }
5053 
emitArrayPush(ObjOperandId objId,ValOperandId rhsId)5054 bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
5055   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5056 
5057   AutoOutputRegister output(*this);
5058   Register obj = allocator.useRegister(masm, objId);
5059   ValueOperand val = allocator.useValueRegister(masm, rhsId);
5060 
5061   AutoScratchRegisterMaybeOutput scratchLength(allocator, masm, output);
5062   AutoScratchRegisterMaybeOutputType scratch(allocator, masm, output);
5063 
5064   FailurePath* failure;
5065   if (!addFailurePath(&failure)) {
5066     return false;
5067   }
5068 
5069   // Load obj->elements in scratch.
5070   masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
5071 
5072   EmitAssertExtensibleElements(masm, scratch);
5073   EmitAssertWritableArrayLengthElements(masm, scratch);
5074 
5075   Address elementsInitLength(scratch,
5076                              ObjectElements::offsetOfInitializedLength());
5077   Address elementsLength(scratch, ObjectElements::offsetOfLength());
5078   Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
5079 
5080   // Fail if length != initLength.
5081   masm.load32(elementsInitLength, scratchLength);
5082   masm.branch32(Assembler::NotEqual, elementsLength, scratchLength,
5083                 failure->label());
5084 
5085   // If scratchLength < capacity, we can add a dense element inline. If not we
5086   // need to allocate more elements.
5087   Label allocElement, addNewElement;
5088   Address capacity(scratch, ObjectElements::offsetOfCapacity());
5089   masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
5090   masm.jump(&addNewElement);
5091 
5092   masm.bind(&allocElement);
5093 
5094   LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
5095   save.takeUnchecked(scratch);
5096   masm.PushRegsInMask(save);
5097 
5098   using Fn = bool (*)(JSContext * cx, NativeObject * obj);
5099   masm.setupUnalignedABICall(scratch);
5100   masm.loadJSContext(scratch);
5101   masm.passABIArg(scratch);
5102   masm.passABIArg(obj);
5103   masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
5104   masm.mov(ReturnReg, scratch);
5105 
5106   masm.PopRegsInMask(save);
5107   masm.branchIfFalseBool(scratch, failure->label());
5108 
5109   // Load the reallocated elements pointer.
5110   masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
5111 
5112   masm.bind(&addNewElement);
5113 
5114   // Increment initLength and length.
5115   masm.add32(Imm32(1), elementsInitLength);
5116   masm.add32(Imm32(1), elementsLength);
5117 
5118   // Store the value.
5119   BaseObjectElementIndex element(scratch, scratchLength);
5120   masm.storeValue(val, element);
5121   emitPostBarrierElement(obj, val, scratch, scratchLength);
5122 
5123   // Return value is new length.
5124   masm.add32(Imm32(1), scratchLength);
5125   masm.tagValue(JSVAL_TYPE_INT32, scratchLength, output.valueReg());
5126 
5127   return true;
5128 }
5129 
// Store a value into a typed-array element. |rhsId| is interpreted per
// |elementType|: int32 register for integer types, double (via
// ensureDoubleRegister) for float types, BigInt register for 64-bit types.
// When |handleOOB| is true, out-of-bounds stores are silently ignored;
// otherwise they bail out.
bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
                                                 Scalar::Type elementType,
                                                 IntPtrOperandId indexId,
                                                 uint32_t rhsId,
                                                 bool handleOOB) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  // Claim the RHS operand in the representation matching the element type.
  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
      break;

    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported TypedArray type");
  }

  // BigInt stores need a second scratch for the 64-bit digit; other types
  // only need a Spectre-mitigation scratch.
  AutoScratchRegister scratch1(allocator, masm);
  Maybe<AutoScratchRegister> scratch2;
  Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
  if (Scalar::isBigIntType(elementType)) {
    scratch2.emplace(allocator, masm);
  } else {
    spectreScratch.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check. With |handleOOB| we just skip the store instead of bailing.
  Label done;
  Register spectreTemp = scratch2 ? scratch2->get() : spectreScratch->get();
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.spectreBoundsCheckPtr(index, scratch1, spectreTemp,
                             handleOOB ? &done : failure->label());

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  BaseIndex dest(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2->get());
#else
    // We don't have more registers available on x86, so spill |obj|.
    masm.push(obj);
    Register64 temp(scratch2->get(), obj);
#endif

    masm.loadBigInt64(*valBigInt, temp);
    masm.storeToTypedBigIntArray(elementType, temp, dest);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif
  } else if (elementType == Scalar::Float32) {
    // Narrow the double RHS to float32 before the store.
    ScratchFloat32Scope fpscratch(masm);
    masm.convertDoubleToFloat32(floatScratch0, fpscratch);
    masm.storeToTypedFloatArray(elementType, fpscratch, dest);
  } else if (elementType == Scalar::Float64) {
    masm.storeToTypedFloatArray(elementType, floatScratch0, dest);
  } else {
    masm.storeToTypedIntArray(elementType, *valInt32, dest);
  }

  masm.bind(&done);
  return true;
}
5225 
CanNurseryAllocateBigInt(JSContext * cx)5226 static bool CanNurseryAllocateBigInt(JSContext* cx) {
5227   JS::Zone* zone = cx->zone();
5228   return zone->runtimeFromAnyThread()->gc.nursery().canAllocateBigInts() &&
5229          zone->allocNurseryBigInts;
5230 }
5231 
// Allocate a BigInt cell into |result|. Tries the inline fast path first
// (newGCBigInt); on failure falls back to an ABI call to AllocateBigIntNoGC
// and jumps to |fail| if that also returns null. |liveSet| is saved/restored
// around the fallback call.
static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
                               Register temp, const LiveRegisterSet& liveSet,
                               Label* fail, bool attemptNursery) {
  Label fallback, done;
  masm.newGCBigInt(result, temp, &fallback, attemptNursery);
  masm.jump(&done);
  {
    masm.bind(&fallback);
    masm.PushRegsInMask(liveSet);

    using Fn = void* (*)(JSContext * cx, bool requestMinorGC);
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    // Reuse |result| to pass the bool argument; it is overwritten with the
    // call's pointer result below.
    masm.move32(Imm32(attemptNursery), result);
    masm.passABIArg(result);
    masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
    masm.storeCallPointerResult(result);

    masm.PopRegsInMask(liveSet);
    masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);
  }
  masm.bind(&done);
}
5256 
// Load a typed-array element into the output value. With |handleOOB|,
// out-of-bounds reads yield |undefined| instead of bailing. For BigInt
// element types a BigInt cell is allocated up front so the remaining code
// is infallible. |forceDoubleForUint32| controls whether uint32 values that
// fit an int32 are still boxed as doubles.
bool CacheIRCompiler::emitLoadTypedArrayElementResult(
    ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
    bool handleOOB, bool forceDoubleForUint32) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegister scratch1(allocator, masm);
#ifdef JS_PUNBOX64
  AutoScratchRegister scratch2(allocator, masm);
#else
  // There are too few registers available on x86, so we may need to reuse the
  // output's scratch register.
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  Label outOfBounds;
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.spectreBoundsCheckPtr(index, scratch1, scratch2,
                             handleOOB ? &outOfBounds : failure->label());

  // Allocate BigInt if needed. The code after this should be infallible.
  Maybe<Register> bigInt;
  if (Scalar::isBigIntType(elementType)) {
    bigInt.emplace(output.valueReg().scratchReg());

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    save.takeUnchecked(scratch1);
    save.takeUnchecked(scratch2);
    save.takeUnchecked(output);

    bool attemptNursery = CanNurseryAllocateBigInt(cx_);
    EmitAllocateBigInt(masm, *bigInt, scratch1, save, failure->label(),
                       attemptNursery);
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  // Load the value.
  BaseIndex source(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2);
#else
    // We don't have more registers available on x86, so spill |obj| and
    // additionally use the output's type register.
    MOZ_ASSERT(output.valueReg().scratchReg() != output.valueReg().typeReg());
    masm.push(obj);
    Register64 temp(output.valueReg().typeReg(), obj);
#endif

    masm.loadFromTypedBigIntArray(elementType, source, *bigInt, temp);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif

    masm.tagValue(JSVAL_TYPE_BIGINT, *bigInt, output.valueReg());
  } else {
    MacroAssembler::Uint32Mode uint32Mode =
        forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                             : MacroAssembler::Uint32Mode::FailOnDouble;
    masm.loadFromTypedArray(elementType, source, output.valueReg(), uint32Mode,
                            scratch1, failure->label());
  }

  // Out-of-bounds reads produce |undefined| when tolerated.
  if (handleOOB) {
    Label done;
    masm.jump(&done);

    masm.bind(&outOfBounds);
    masm.moveValue(UndefinedValue(), output.valueReg());

    masm.bind(&done);
  }

  return true;
}
5345 
EmitDataViewBoundsCheck(MacroAssembler & masm,size_t byteSize,Register obj,Register offset,Register scratch,Label * fail)5346 static void EmitDataViewBoundsCheck(MacroAssembler& masm, size_t byteSize,
5347                                     Register obj, Register offset,
5348                                     Register scratch, Label* fail) {
5349   // Ensure both offset < length and offset + (byteSize - 1) < length.
5350   masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
5351   if (byteSize == 1) {
5352     masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
5353   } else {
5354     // temp := length - (byteSize - 1)
5355     // if temp < 0: fail
5356     // if offset >= temp: fail
5357     masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), scratch, fail);
5358     masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
5359   }
5360 }
5361 
// Implement DataView.prototype.get* inline: bounds-check the access, load
// the raw (possibly unaligned) bytes, byte-swap if the requested endianness
// differs from the host's, and box the result per |elementType|. For
// BigInt64/BigUint64 a BigInt cell is allocated, reusing the obj and
// littleEndian registers as scratch.
bool CacheIRCompiler::emitLoadDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId,
    BooleanOperandId littleEndianId, Scalar::Type elementType,
    bool forceDoubleForUint32) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  // Use the output value register's GPR(s) as scratch for the raw load.
  Register64 outputReg64 = output.valueReg().toRegister64();
  Register outputScratch = outputReg64.scratchReg();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  EmitDataViewBoundsCheck(masm, byteSize, obj, offset, outputScratch,
                          failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);

  // Load the value.
  BaseIndex source(outputScratch, offset, TimesOne);
  switch (elementType) {
    case Scalar::Int8:
      masm.load8SignExtend(source, outputScratch);
      break;
    case Scalar::Uint8:
      masm.load8ZeroExtend(source, outputScratch);
      break;
    case Scalar::Int16:
      masm.load16UnalignedSignExtend(source, outputScratch);
      break;
    case Scalar::Uint16:
      masm.load16UnalignedZeroExtend(source, outputScratch);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.load32Unaligned(source, outputScratch);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.load64Unaligned(source, outputReg64);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  // Swap the bytes in the loaded value when the requested endianness
  // differs from the host's (single-byte loads never need a swap).
  if (byteSize > 1) {
    Label skip;
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);

    switch (elementType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(outputScratch);
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(outputScratch);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(outputScratch);
        break;
      case Scalar::Float64:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
        masm.byteSwap64(outputReg64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      default:
        MOZ_CRASH("Invalid type");
    }

    masm.bind(&skip);
  }

  // Move the value into the output register.
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      masm.tagValue(JSVAL_TYPE_INT32, outputScratch, output.valueReg());
      break;
    case Scalar::Uint32: {
      MacroAssembler::Uint32Mode uint32Mode =
          forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                               : MacroAssembler::Uint32Mode::FailOnDouble;
      masm.boxUint32(outputScratch, output.valueReg(), uint32Mode,
                     failure->label());
      break;
    }
    case Scalar::Float32: {
      // Widen float32 to a canonical double before boxing.
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.moveGPRToFloat32(outputScratch, scratchFloat32);
      masm.canonicalizeFloat(scratchFloat32);
      masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
      break;
    }
    case Scalar::Float64:
      masm.moveGPR64ToDouble(outputReg64, floatScratch0);
      masm.canonicalizeDouble(floatScratch0);
      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64: {
      // We need two extra registers. Reuse the obj/littleEndian registers.
      Register bigInt = obj;
      Register bigIntScratch = littleEndian;
      masm.push(bigInt);
      masm.push(bigIntScratch);
      Label fail, done;
      LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                           liveVolatileFloatRegs());
      save.takeUnchecked(bigInt);
      save.takeUnchecked(bigIntScratch);
      bool attemptNursery = CanNurseryAllocateBigInt(cx_);
      EmitAllocateBigInt(masm, bigInt, bigIntScratch, save, &fail,
                         attemptNursery);
      masm.jump(&done);

      // Restore the spilled registers before taking the failure path.
      masm.bind(&fail);
      masm.pop(bigIntScratch);
      masm.pop(bigInt);
      masm.jump(failure->label());

      masm.bind(&done);
      masm.initializeBigInt64(elementType, bigInt, outputReg64);
      masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
      masm.pop(bigIntScratch);
      masm.pop(bigInt);
      break;
    }
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

  return true;
}
5519 
bool CacheIRCompiler::emitStoreDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
    BooleanOperandId littleEndianId, Scalar::Type elementType) {
  // Store a value of |elementType| into the DataView |objId| at byte offset
  // |offsetId|, byte-swapping as requested by |littleEndianId|, then set the
  // IC result to |undefined|.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of the registers.
  Register obj = output.valueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  // |valueId| is carried as a raw uint32_t because the operand kind depends
  // on |elementType|: an Int32 operand for the integer types, a Number
  // operand for the float types, and a BigInt operand for the 64-bit types.
  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
      break;

    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported type");
  }

  Register scratch1 = output.valueReg().scratchReg();
  MOZ_ASSERT(scratch1 != obj, "scratchReg must not be typeReg");

  // On platforms with enough registers, |scratch2| is an extra scratch register
  // (pair) used for byte-swapping the value.
#ifndef JS_CODEGEN_X86
  mozilla::MaybeOneOf<AutoScratchRegister, AutoScratchRegister64> scratch2;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
      // Single-byte stores need no swap scratch at all.
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      scratch2.construct<AutoScratchRegister>(allocator, masm);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      scratch2.construct<AutoScratchRegister64>(allocator, masm);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  // Bail out to the failure path when the access would be out of bounds.
  EmitDataViewBoundsCheck(masm, byteSize, obj, offset, scratch1,
                          failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
  BaseIndex dest(scratch1, offset, TimesOne);

  if (byteSize == 1) {
    // Byte swapping has no effect, so just do the byte store.
    masm.store8(*valInt32, dest);
    masm.moveValue(UndefinedValue(), output.valueReg());
    return true;
  }

  // On 32-bit x86, |obj| is already a scratch register so use that. If we need
  // a Register64 we also use the littleEndian register and use the stack
  // location for the check below.
  bool pushedLittleEndian = false;
#ifdef JS_CODEGEN_X86
  if (byteSize == 8) {
    masm.push(littleEndian);
    pushedLittleEndian = true;
  }
  auto valScratch32 = [&]() -> Register { return obj; };
  auto valScratch64 = [&]() -> Register64 {
    return Register64(obj, littleEndian);
  };
#else
  auto valScratch32 = [&]() -> Register {
    return scratch2.ref<AutoScratchRegister>();
  };
  auto valScratch64 = [&]() -> Register64 {
    return scratch2.ref<AutoScratchRegister64>();
  };
#endif

  // Load the value into a gpr register.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.move32(*valInt32, valScratch32());
      break;
    case Scalar::Float32: {
      // Narrow the double input to float32 before moving its bits to a GPR.
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.convertDoubleToFloat32(floatScratch0, scratchFloat32);
      masm.canonicalizeFloatIfDeterministic(scratchFloat32);
      masm.moveFloat32ToGPR(scratchFloat32, valScratch32());
      break;
    }
    case Scalar::Float64: {
      masm.canonicalizeDoubleIfDeterministic(floatScratch0);
      masm.moveDoubleToGPR64(floatScratch0, valScratch64());
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(*valBigInt, valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }

  // Swap the bytes in the loaded value. The swap is skipped when the requested
  // endianness matches the native byte order; on x86 with a 64-bit store the
  // littleEndian flag was pushed above and is tested on the stack.
  Label skip;
  if (pushedLittleEndian) {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  Address(masm.getStackPointer(), 0), Imm32(0), &skip);
  } else {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);
  }
  switch (elementType) {
    case Scalar::Int16:
      masm.byteSwap16SignExtend(valScratch32());
      break;
    case Scalar::Uint16:
      masm.byteSwap16ZeroExtend(valScratch32());
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.byteSwap32(valScratch32());
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.byteSwap64(valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
  masm.bind(&skip);

  // Store the value. The destination may be unaligned, so use the unaligned
  // store variants.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.store16Unaligned(valScratch32(), dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(valScratch32(), dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(valScratch64(), dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

#ifdef JS_CODEGEN_X86
  // Restore registers.
  if (pushedLittleEndian) {
    masm.pop(littleEndian);
  }
#endif

  // The store expression's result value is |undefined|.
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
5736 
bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId,
                                                        uint32_t offsetOffset,
                                                        ValOperandId rhsId) {
  // Store |rhsId| into a fixed slot of |objId| (the slot offset is read from
  // the stub data at |offsetOffset|) and produce |undefined| as the IC result.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  // The slot offset lives in the stub data, not in the JIT code, so the same
  // code can be shared across stubs with different offsets.
  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(offset, scratch);

  // GC barriers: pre-barrier on the overwritten slot value before the store,
  // post-barrier afterwards for a possible nursery pointer in |val|.
  BaseIndex slot(obj, scratch, TimesOne);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeValue(val, slot);
  emitPostBarrierSlot(obj, val, scratch);

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
5758 
emitLoadObjectResult(ObjOperandId objId)5759 bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId) {
5760   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5761   AutoOutputRegister output(*this);
5762   Register obj = allocator.useRegister(masm, objId);
5763 
5764   EmitStoreResult(masm, obj, JSVAL_TYPE_OBJECT, output);
5765 
5766   return true;
5767 }
5768 
emitLoadStringResult(StringOperandId strId)5769 bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId) {
5770   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5771   AutoOutputRegister output(*this);
5772   Register str = allocator.useRegister(masm, strId);
5773 
5774   masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
5775 
5776   return true;
5777 }
5778 
emitLoadSymbolResult(SymbolOperandId symId)5779 bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId) {
5780   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5781   AutoOutputRegister output(*this);
5782   Register sym = allocator.useRegister(masm, symId);
5783 
5784   masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
5785 
5786   return true;
5787 }
5788 
emitLoadInt32Result(Int32OperandId valId)5789 bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId) {
5790   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5791   AutoOutputRegister output(*this);
5792   Register val = allocator.useRegister(masm, valId);
5793 
5794   masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
5795 
5796   return true;
5797 }
5798 
emitLoadBigIntResult(BigIntOperandId valId)5799 bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId) {
5800   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5801   AutoOutputRegister output(*this);
5802   Register val = allocator.useRegister(masm, valId);
5803 
5804   masm.tagValue(JSVAL_TYPE_BIGINT, val, output.valueReg());
5805 
5806   return true;
5807 }
5808 
emitLoadDoubleResult(NumberOperandId valId)5809 bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId) {
5810   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5811   AutoOutputRegister output(*this);
5812   ValueOperand val = allocator.useValueRegister(masm, valId);
5813 
5814 #ifdef DEBUG
5815   Label ok;
5816   masm.branchTestDouble(Assembler::Equal, val, &ok);
5817   masm.branchTestInt32(Assembler::Equal, val, &ok);
5818   masm.assumeUnreachable("input must be double or int32");
5819   masm.bind(&ok);
5820 #endif
5821 
5822   masm.moveValue(val, output.valueReg());
5823   masm.convertInt32ValueToDouble(output.valueReg());
5824 
5825   return true;
5826 }
5827 
bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
  // Compute |typeof obj| for an object operand. typeOfObject classifies the
  // common cases inline ("object", "function", "undefined"); anything it
  // cannot classify falls back to the C++ helper TypeOfObject.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slowCheck, isObject, isCallable, isUndefined, done;
  masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.moveValue(StringValue(cx_->names().function), output.valueReg());
  masm.jump(&done);

  masm.bind(&isUndefined);
  masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
  masm.jump(&done);

  masm.bind(&isObject);
  masm.moveValue(StringValue(cx_->names().object), output.valueReg());
  masm.jump(&done);

  {
    // Slow path: call TypeOfObject(obj, rt) out of line, saving all volatile
    // registers around the ABI call.
    masm.bind(&slowCheck);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    using Fn = JSString* (*)(JSObject * obj, JSRuntime * rt);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.movePtr(ImmPtr(cx_->runtime()), scratch);
    masm.passABIArg(scratch);
    masm.callWithABI<Fn, TypeOfObject>();
    masm.mov(ReturnReg, scratch);

    // Restore the saved registers but keep the returned string in |scratch|.
    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
5874 
emitLoadInt32TruthyResult(ValOperandId inputId)5875 bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
5876   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5877   AutoOutputRegister output(*this);
5878   ValueOperand val = allocator.useValueRegister(masm, inputId);
5879 
5880   Label ifFalse, done;
5881   masm.branchTestInt32Truthy(false, val, &ifFalse);
5882   masm.moveValue(BooleanValue(true), output.valueReg());
5883   masm.jump(&done);
5884 
5885   masm.bind(&ifFalse);
5886   masm.moveValue(BooleanValue(false), output.valueReg());
5887 
5888   masm.bind(&done);
5889   return true;
5890 }
5891 
emitLoadStringTruthyResult(StringOperandId strId)5892 bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId) {
5893   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5894   AutoOutputRegister output(*this);
5895   Register str = allocator.useRegister(masm, strId);
5896 
5897   Label ifFalse, done;
5898   masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
5899                 Imm32(0), &ifFalse);
5900   masm.moveValue(BooleanValue(true), output.valueReg());
5901   masm.jump(&done);
5902 
5903   masm.bind(&ifFalse);
5904   masm.moveValue(BooleanValue(false), output.valueReg());
5905 
5906   masm.bind(&done);
5907   return true;
5908 }
5909 
emitLoadDoubleTruthyResult(NumberOperandId inputId)5910 bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId) {
5911   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5912   AutoOutputRegister output(*this);
5913 
5914   AutoScratchFloatRegister floatReg(this);
5915 
5916   allocator.ensureDoubleRegister(masm, inputId, floatReg);
5917 
5918   Label ifFalse, done;
5919 
5920   masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
5921   masm.moveValue(BooleanValue(true), output.valueReg());
5922   masm.jump(&done);
5923 
5924   masm.bind(&ifFalse);
5925   masm.moveValue(BooleanValue(false), output.valueReg());
5926 
5927   masm.bind(&done);
5928   return true;
5929 }
5930 
bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId) {
  // ToBoolean for an object: true unless the object emulates undefined.
  // Objects the inline check cannot classify fall back to a C++ call.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label emulatesUndefined, slowPath, done;
  masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
                                       &emulatesUndefined);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&emulatesUndefined);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&slowPath);
  {
    // Call js::EmulatesUndefined out of line. |scratch| and |output| are
    // excluded from the save set so the result survives the restore.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject * obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, js::EmulatesUndefined>();
    masm.convertBoolToInt32(ReturnReg, scratch);
    // Invert the result: EmulatesUndefined == true means the object is falsy.
    masm.xor32(Imm32(1), scratch);

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
5970 
emitLoadBigIntTruthyResult(BigIntOperandId bigIntId)5971 bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId) {
5972   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
5973   AutoOutputRegister output(*this);
5974   Register bigInt = allocator.useRegister(masm, bigIntId);
5975 
5976   Label ifFalse, done;
5977   masm.branch32(Assembler::Equal,
5978                 Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(0),
5979                 &ifFalse);
5980   masm.moveValue(BooleanValue(true), output.valueReg());
5981   masm.jump(&done);
5982 
5983   masm.bind(&ifFalse);
5984   masm.moveValue(BooleanValue(false), output.valueReg());
5985 
5986   masm.bind(&done);
5987   return true;
5988 }
5989 
bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId) {
  // ToBoolean for a boxed value of unknown type: dispatch on the value's tag
  // and test each type's truthiness inline, calling into C++ only for
  // objects that might emulate undefined.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand value = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchFloatRegister floatReg(this);

  Label ifFalse, ifTrue, done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    // undefined and null are always falsy.
    masm.branchTestUndefined(Assembler::Equal, tag, &ifFalse);
    masm.branchTestNull(Assembler::Equal, tag, &ifFalse);

    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      // Release the tag scope before touching the payload.
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBooleanTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBoolean);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestInt32Truthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notInt32);

    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      Register obj = masm.extractObject(value, scratch1);

      // The inline check either classifies the object directly or sends it to
      // the slow path below.
      Label slowPath;
      masm.branchIfObjectEmulatesUndefined(obj, scratch2, &slowPath, &ifFalse);
      masm.jump(&ifTrue);

      masm.bind(&slowPath);
      {
        // Call js::EmulatesUndefined. The scratch registers and the output
        // are excluded from the save set so the result survives the restore.
        LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                     liveVolatileFloatRegs());
        volatileRegs.takeUnchecked(scratch1);
        volatileRegs.takeUnchecked(scratch2);
        volatileRegs.takeUnchecked(output);
        masm.PushRegsInMask(volatileRegs);

        using Fn = bool (*)(JSObject * obj);
        masm.setupUnalignedABICall(scratch2);
        masm.passABIArg(obj);
        masm.callWithABI<Fn, js::EmulatesUndefined>();
        masm.storeCallBoolResult(scratch2);

        masm.PopRegsInMask(volatileRegs);

        // EmulatesUndefined == true means the object is falsy.
        masm.branchTest32(Assembler::NonZero, scratch2, scratch2, &ifFalse);
        masm.jump(&ifTrue);
      }
    }
    masm.bind(&notObject);

    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestStringTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notString);

    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBigIntTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBigInt);

    // Symbols are always truthy.
    masm.branchTestSymbol(Assembler::Equal, tag, &ifTrue);

#ifdef DEBUG
    // At this point only doubles remain; assert that in debug builds.
    Label isDouble;
    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.assumeUnreachable("Unexpected value type");
    masm.bind(&isDouble);
#endif

    {
      ScratchTagScopeRelease _(&tag);
      masm.unboxDouble(value, floatReg);
      masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
    }

    // Fall through to true case.
  }

  masm.bind(&ifTrue);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}
6106 
emitComparePointerResultShared(JSOp op,TypedOperandId lhsId,TypedOperandId rhsId)6107 bool CacheIRCompiler::emitComparePointerResultShared(JSOp op,
6108                                                      TypedOperandId lhsId,
6109                                                      TypedOperandId rhsId) {
6110   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6111   AutoOutputRegister output(*this);
6112 
6113   Register left = allocator.useRegister(masm, lhsId);
6114   Register right = allocator.useRegister(masm, rhsId);
6115 
6116   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
6117 
6118   Label ifTrue, done;
6119   masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
6120                  &ifTrue);
6121 
6122   EmitStoreBoolean(masm, false, output);
6123   masm.jump(&done);
6124 
6125   masm.bind(&ifTrue);
6126   EmitStoreBoolean(masm, true, output);
6127   masm.bind(&done);
6128   return true;
6129 }
6130 
bool CacheIRCompiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
                                              ObjOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Object equality is pointer identity; defer to the shared pointer-compare
  // helper.
  return emitComparePointerResultShared(op, lhsId, rhsId);
}
6136 
bool CacheIRCompiler::emitCompareSymbolResult(JSOp op, SymbolOperandId lhsId,
                                              SymbolOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Symbol equality is pointer identity; defer to the shared pointer-compare
  // helper.
  return emitComparePointerResultShared(op, lhsId, rhsId);
}
6142 
emitCompareInt32Result(JSOp op,Int32OperandId lhsId,Int32OperandId rhsId)6143 bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
6144                                              Int32OperandId rhsId) {
6145   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6146   AutoOutputRegister output(*this);
6147   Register left = allocator.useRegister(masm, lhsId);
6148   Register right = allocator.useRegister(masm, rhsId);
6149 
6150   Label ifTrue, done;
6151   masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);
6152 
6153   EmitStoreBoolean(masm, false, output);
6154   masm.jump(&done);
6155 
6156   masm.bind(&ifTrue);
6157   EmitStoreBoolean(masm, true, output);
6158   masm.bind(&done);
6159   return true;
6160 }
6161 
bool CacheIRCompiler::emitCompareDoubleResult(JSOp op, NumberOperandId lhsId,
                                              NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  // Float register must be preserved. The Compare ICs use the fact that
  // baseline has them available, as well as fixed temps on LBinaryBoolCache.
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  // NOTE(review): |failure| is not referenced below; the failure path is
  // presumably reserved for the operand-unboxing performed by
  // ensureDoubleRegister — confirm before simplifying.
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  // Double compare-and-branch; each arm stores the boolean result.
  Label done, ifTrue;
  masm.branchDouble(JSOpToDoubleCondition(op), floatScratch0, floatScratch1,
                    &ifTrue);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);
  masm.bind(&done);
  return true;
}
6191 
bool CacheIRCompiler::emitCompareBigIntResult(JSOp op, BigIntOperandId lhsId,
                                              BigIntOperandId rhsId) {
  // Compare two BigInts by calling one of the jit::BigInt* helpers out of
  // line; the boolean result becomes the IC result.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(rhs);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
  }

  // Only four helper kinds exist; the remaining orderings are covered by the
  // argument swap above.
  using Fn = bool (*)(BigInt*, BigInt*);
  Fn fn;
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    fn = jit::BigIntEqual<EqualityKind::Equal>;
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    fn = jit::BigIntEqual<EqualityKind::NotEqual>;
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    fn = jit::BigIntCompare<ComparisonKind::LessThan>;
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    fn = jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>;
  }

  masm.callWithABI(DynamicFunction<Fn>(fn));
  masm.storeCallBoolResult(scratch);

  // Restore the saved registers, but keep the result in |scratch|.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}
6241 
emitCompareBigIntInt32Result(JSOp op,BigIntOperandId lhsId,Int32OperandId rhsId)6242 bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op,
6243                                                    BigIntOperandId lhsId,
6244                                                    Int32OperandId rhsId) {
6245   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6246   AutoOutputRegister output(*this);
6247   Register bigInt = allocator.useRegister(masm, lhsId);
6248   Register int32 = allocator.useRegister(masm, rhsId);
6249 
6250   AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
6251   AutoScratchRegister scratch2(allocator, masm);
6252 
6253   Label ifTrue, ifFalse;
6254   masm.compareBigIntAndInt32(op, bigInt, int32, scratch1, scratch2, &ifTrue,
6255                              &ifFalse);
6256 
6257   Label done;
6258   masm.bind(&ifFalse);
6259   EmitStoreBoolean(masm, false, output);
6260   masm.jump(&done);
6261 
6262   masm.bind(&ifTrue);
6263   EmitStoreBoolean(masm, true, output);
6264 
6265   masm.bind(&done);
6266   return true;
6267 }
6268 
bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    NumberOperandId rhsId) {
  // Compare a BigInt with a number by calling one of the
  // jit::BigIntNumber* / jit::NumberBigInt* helpers out of line.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  // Float register must be preserved. The Compare ICs use the fact that
  // baseline has them available, as well as fixed temps on LBinaryBoolCache.
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register lhs = allocator.useRegister(masm, lhsId);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch0);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  }

  // Two function types are needed because the helper's parameter order must
  // match the argument order chosen above.
  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(scratch);

  // Restore the saved registers, but keep the result in |scratch|.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}
6348 
bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    StringOperandId rhsId) {
  // Compare a BigInt with a string. The callees take a JSContext* and can
  // fail, so this is emitted as a VM call (AutoCallVM) rather than a plain
  // ABI call.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(lhs);
    masm.Push(rhs);
  } else {
    masm.Push(rhs);
    masm.Push(lhs);
  }

  // Two VM function types are needed because each callee's parameter order
  // must match the argument order pushed above.
  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callvm.call<FnBigIntString, BigIntStringEqual<Equal>>();
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callvm.call<FnBigIntString, BigIntStringEqual<NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnBigIntString, BigIntStringCompare<LessThan>>();
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnStringBigInt, StringBigIntCompare<LessThan>>();
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }
  return true;
}
6412 
// Compile a comparison of |input| against null or undefined (selected by
// |isUndefined|). Strict equality is a simple tag test; loose equality also
// treats null/undefined as equal to each other and must consult objects that
// emulate undefined.
bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op, bool isUndefined,
                                                     ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  if (IsStrictEqualityOp(op)) {
    // Strict case: only an exact undefined (resp. null) tag compares equal.
    if (isUndefined) {
      masm.testUndefinedSet(JSOpToCondition(op, false), input, scratch);
    } else {
      masm.testNullSet(JSOpToCondition(op, false), input, scratch);
    }
    EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
    return true;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  MOZ_ASSERT(IsLooseEqualityOp(op));

  Label nullOrLikeUndefined, notNullOrLikeUndefined, done;
  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    // Loose equality: null and undefined compare equal to each other, so both
    // tags lead to |nullOrLikeUndefined|. The tested tag is checked first
    // only as a (branch-ordering) preference.
    if (isUndefined) {
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
    } else {
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
    }
    // Any other non-object value is never loosely equal to null/undefined.
    masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);

    {
      ScratchTagScopeRelease _(&tag);

      // Objects that emulate undefined (e.g. document.all) compare loosely
      // equal to null/undefined; bail to the failure path when the check
      // itself cannot be done inline.
      masm.unboxObject(input, scratch);
      masm.branchIfObjectEmulatesUndefined(scratch, scratch, failure->label(),
                                           &nullOrLikeUndefined);
      masm.jump(&notNullOrLikeUndefined);
    }
  }

  masm.bind(&nullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Eq, output);
  masm.jump(&done);

  masm.bind(&notNullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Ne, output);

  masm.bind(&done);
  return true;
}
6472 
emitCompareDoubleSameValueResult(NumberOperandId lhsId,NumberOperandId rhsId)6473 bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId,
6474                                                        NumberOperandId rhsId) {
6475   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6476 
6477   AutoOutputRegister output(*this);
6478   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
6479   AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
6480   AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
6481   AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
6482 
6483   allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
6484   allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
6485 
6486   masm.sameValueDouble(floatScratch0, floatScratch1, floatScratch2, scratch);
6487   masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
6488   return true;
6489 }
6490 
emitIndirectTruncateInt32Result(Int32OperandId valId)6491 bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId) {
6492   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
6493   AutoOutputRegister output(*this);
6494   Register val = allocator.useRegister(masm, valId);
6495 
6496   if (output.hasValue()) {
6497     masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
6498   } else {
6499     masm.mov(val, output.typedReg().gpr());
6500   }
6501   return true;
6502 }
6503 
// Debugging aid: emit code that prints the (stub-baked) C string |str| at
// runtime.
bool CacheIRCompiler::emitCallPrintString(const char* str) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.printf(str);
  return true;
}
6509 
// Debugging aid: emit a hardware breakpoint instruction into the stub.
bool CacheIRCompiler::emitBreakpoint() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.breakpoint();
  return true;
}
6515 
// Emit code that links the NativeIterator |iter| into the doubly-linked
// enumerators list, inserting it just before the list head
// |enumeratorsList|. |scratch| is clobbered (it holds list->prev).
void CacheIRCompiler::emitRegisterEnumerator(Register enumeratorsList,
                                             Register iter, Register scratch) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // iter->next = list
  masm.storePtr(enumeratorsList, Address(iter, NativeIterator::offsetOfNext()));

  // iter->prev = list->prev
  masm.loadPtr(Address(enumeratorsList, NativeIterator::offsetOfPrev()),
               scratch);
  masm.storePtr(scratch, Address(iter, NativeIterator::offsetOfPrev()));

  // list->prev->next = iter
  masm.storePtr(iter, Address(scratch, NativeIterator::offsetOfNext()));

  // list->prev = ni
  masm.storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
}
6533 
// Emit a GC post-write barrier for storing |val| into |obj|. The barrier is
// skipped entirely when the stored value can never be a nursery cell; at
// runtime it is skipped when the value is not in the nursery or when |obj|
// itself is in the nursery. |maybeIndex| selects the element barrier
// (PostWriteElementBarrier) over the plain one when valid.
void CacheIRCompiler::emitPostBarrierShared(Register obj,
                                            const ConstantOrRegister& val,
                                            Register scratch,
                                            Register maybeIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  if (val.constant()) {
    // Constants baked into a stub are expected to be tenured, so no barrier
    // is needed.
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    // Statically known non-GC-pointer type: no barrier.
    return;
  }

  Label skipBarrier;
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
                                  &skipBarrier);
  } else {
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
                                 scratch, &skipBarrier);
  }
  // Nursery objects don't need to be added to the store buffer.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);

  // Call one of these, depending on maybeIndex:
  //
  //   void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
  //   void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
  //                                int32_t index);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);
  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(cx_->runtime()), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  if (maybeIndex != InvalidReg) {
    masm.passABIArg(maybeIndex);
    using Fn = void (*)(JSRuntime * rt, JSObject * obj, int32_t index);
    masm.callWithABI<Fn, PostWriteElementBarrier<IndexInBounds::Yes>>();
  } else {
    using Fn = void (*)(JSRuntime * rt, js::gc::Cell * cell);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }
  masm.PopRegsInMask(save);

  masm.bind(&skipBarrier);
}
6584 
// Emit code that rewraps an object result into the current compartment via
// WrapObjectPure. Non-object values pass through unchanged; a failed wrap
// (nullptr return) takes the failure path.
bool CacheIRCompiler::emitWrapResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  // We only have to wrap objects, because we are in the same zone.
  masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);

  Register obj = output.valueReg().scratchReg();
  masm.unboxObject(output.valueReg(), obj);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  using Fn = JSObject* (*)(JSContext * cx, JSObject * obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, WrapObjectPure>();
  masm.mov(ReturnReg, obj);

  // Restore volatile registers, but keep the wrapped object in |obj|.
  LiveRegisterSet ignore;
  ignore.add(obj);
  masm.PopRegsInMaskIgnore(save, ignore);

  // We could not get a wrapper for this object.
  masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());

  // We clobbered the output register, so we have to retag.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());

  masm.bind(&done);
  return true;
}
6626 
// Emit a megamorphic property load keyed by a Value, via the pure (no-GC)
// VM helper GetNativeDataPropertyByValuePure. The id and a result slot are
// passed on the stack; a false return from the helper takes the failure
// path.
bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId,
                                                           ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The object must be Native.
  masm.branchIfNonNativeObj(obj, scratch, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext * cx, JSObject * obj, Value * vp);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(idVal.scratchReg());
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  masm.mov(ReturnReg, scratch);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  // On failure, pop the reserved result slot before jumping so the stack is
  // balanced on the failure path.
  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  if (JitOptions.spectreJitToCxxCalls) {
    // Defend against speculative execution across the C++ call boundary.
    masm.speculationBarrier();
  }
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}
6684 
// Emit a megamorphic |in| / hasOwnProperty check via the pure VM helper
// HasNativeDataPropertyPure (the template parameter selects own-property-only
// lookup). The id and a result slot are passed on the stack; a false return
// from the helper takes the failure path.
bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext * cx, JSObject * obj, Value * vp);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(idVal.scratchReg());
  if (hasOwn) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }
  masm.mov(ReturnReg, scratch);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  // On failure, pop the reserved result slot before jumping so the stack is
  // balanced on the failure path.
  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}
6740 
// Emit a sparse-element |hasOwnProperty| check via the pure VM helper
// HasNativeElementPure. The result is written into a stack-reserved Value
// slot; a false return from the helper takes the failure path.
bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Reserve a Value slot for the result; scratch2 points at it.
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(scratch2.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(index);
  masm.PushRegsInMask(volatileRegs);

  using Fn =
      bool (*)(JSContext * cx, NativeObject * obj, int32_t index, Value * vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(index);
  masm.passABIArg(scratch2);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.mov(ReturnReg, scratch1);
  masm.PopRegsInMask(volatileRegs);

  // On failure, pop the reserved result slot before jumping so the stack is
  // balanced on the failure path.
  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}
6790 
6791 /*
6792  * Move a constant value into register dest.
6793  */
// Load the stub field |val| into |dest| as a baked-in immediate. Only valid
// for Ion, which doesn't share ICs and so can embed constants directly
// (see the comment on emitLoadStubField below).
void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
                                                Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_ASSERT(mode_ == Mode::Ion);
  switch (val.getStubFieldType()) {
    case StubField::Type::Shape:
      masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
      break;
    case StubField::Type::GetterSetter:
      masm.movePtr(ImmGCPtr(getterSetterStubField(val.getOffset())), dest);
      break;
    case StubField::Type::String:
      masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
      break;
    case StubField::Type::JSObject:
      masm.movePtr(ImmGCPtr(objectStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawPointer:
      masm.movePtr(ImmPtr(pointerStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawInt32:
      masm.move32(Imm32(int32StubField(val.getOffset())), dest);
      break;
    case StubField::Type::Id:
      masm.movePropertyKey(idStubField(val.getOffset()), dest);
      break;
    default:
      MOZ_CRASH("Unhandled stub field constant type");
  }
}
6824 
6825 /*
6826  * After this is done executing, dest contains the value; either through a
6827  * constant load or through the load from the stub data.
6828  *
6829  * The current policy is that Baseline will use loads from the stub data (to
6830  * allow IC sharing), where as Ion doesn't share ICs, and so we can safely use
6831  * constants in the IC.
6832  */
// Load the stub field |val| into |dest|, either as a baked-in constant (Ion)
// or as a load from the stub data (Baseline); see the policy comment above.
void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    emitLoadStubFieldConstant(val, dest);
  } else {
    // Baseline: load the field from the stub data at runtime so the code can
    // be shared between ICs.
    Address load(ICStubReg, stubDataOffset_ + val.getOffset());

    switch (val.getStubFieldType()) {
      case StubField::Type::RawPointer:
      case StubField::Type::Shape:
      case StubField::Type::GetterSetter:
      case StubField::Type::JSObject:
      case StubField::Type::Symbol:
      case StubField::Type::String:
      case StubField::Type::Id:
        masm.loadPtr(load, dest);
        break;
      case StubField::Type::RawInt32:
        masm.load32(load, dest);
        break;
      default:
        MOZ_CRASH("Unhandled stub field constant type");
    }
  }
}
6858 
// Load a Value-typed stub field into |dest|: a baked-in constant for Ion, a
// load from the stub data for Baseline (same policy as emitLoadStubField).
void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val,
                                             ValueOperand dest) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Value);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    masm.moveValue(valueStubField(val.getOffset()), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadValue(addr, dest);
  }
}
6871 
// Emit an |instanceof| prototype-chain walk: starting from lhs's proto, walk
// until |proto| is found (true), the chain ends at null (false), or a lazy
// proto is hit (failure path, needs VM help). Non-object lhs is always false.
bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId,
                                                     ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  Register proto = allocator.useRegister(masm, protoId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label returnFalse, returnTrue, done;
  masm.fallibleUnboxObject(lhs, scratch, &returnFalse);

  // LHS is an object. Load its proto.
  masm.loadObjProto(scratch, scratch);
  {
    // Walk the proto chain until we either reach the target object,
    // nullptr or LazyProto.
    Label loop;
    masm.bind(&loop);

    masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);

    // A lazy proto can't be resolved without calling into the VM; take the
    // failure path.
    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
    masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());

    masm.loadObjProto(scratch, scratch);
    masm.jump(&loop);
  }

  masm.bind(&returnFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&returnTrue);
  EmitStoreBoolean(masm, true, output);
  // fallthrough
  masm.bind(&done);
  return true;
}
6917 
// Emit a megamorphic named-property load via the pure VM helper
// GetNativeDataPropertyPure. The property name comes from a stub field; the
// result is written into a stack-reserved Value slot. A false return from
// the helper takes the failure path.
bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
                                                    uint32_t nameOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset name(nameOffset, StubField::Type::String);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The object must be Native.
  masm.branchIfNonNativeObj(obj, scratch3, failure->label());

  // Reserve the result slot (initialized to undefined); scratch3 points at
  // it.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(scratch3.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  volatileRegs.takeUnchecked(scratch3);
  masm.PushRegsInMask(volatileRegs);

  using Fn =
      bool (*)(JSContext * cx, JSObject * obj, PropertyName * name, Value * vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(name, scratch2);
  masm.passABIArg(scratch2);
  masm.passABIArg(scratch3);
  masm.callWithABI<Fn, GetNativeDataPropertyPure>();

  masm.mov(ReturnReg, scratch2);
  masm.PopRegsInMask(volatileRegs);

  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  masm.branchIfFalseBool(scratch2, failure->label());
  if (JitOptions.spectreJitToCxxCalls) {
    // Defend against speculative execution across the C++ call boundary.
    masm.speculationBarrier();
  }

  return true;
}
6972 
// Emit a megamorphic named-property store via the pure VM helper
// SetNativeDataPropertyPure. The value is passed (and returned) through a
// stack slot; a false return from the helper takes the failure path.
bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId,
                                               uint32_t nameOffset,
                                               ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset name(nameOffset, StubField::Type::String);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Spill |val| to the stack and pass its address to the helper.
  masm.Push(val);
  masm.moveStackPtrTo(val.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  volatileRegs.takeUnchecked(val);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext * cx, JSObject * obj, PropertyName * name,
                      Value * val);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(name, scratch2);
  masm.passABIArg(scratch2);
  masm.passABIArg(val.scratchReg());
  masm.callWithABI<Fn, SetNativeDataPropertyPure>();

  masm.mov(ReturnReg, scratch1);
  masm.PopRegsInMask(volatileRegs);

  // Restore |val| and pop the stack slot.
  masm.loadValue(Address(masm.getStackPointer(), 0), val);
  masm.adjustStack(sizeof(Value));

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}
7019 
// Emit a guard that |obj| still has the expected GetterSetter for the given
// property id, by calling the pure VM helper ObjectHasGetterSetterPure. A
// false return takes the failure path.
bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t getterSetterOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  StubFieldOffset id(idOffset, StubField::Type::Id);
  StubFieldOffset getterSetter(getterSetterOffset,
                               StubField::Type::GetterSetter);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext * cx, JSObject * obj, jsid id,
                      GetterSetter * getterSetter);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  emitLoadStubField(getterSetter, scratch3);
  masm.passABIArg(scratch3);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
  masm.mov(ReturnReg, scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}
7063 
// Emit a guard that the JS value |argId| can be converted to the Wasm type
// |kind| without bailing out in Warp-compiled code. Values that cannot be
// converted take the failure path.
bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId,
                                       wasm::ValType::Kind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // All values can be boxed as AnyRef.
  if (kind == wasm::ValType::Ref) {
    return true;
  }
  MOZ_ASSERT(kind != wasm::ValType::V128);

  ValueOperand arg = allocator.useValueRegister(masm, argId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Check that the argument can be converted to the Wasm type in Warp code
  // without bailing out.
  Label done;
  switch (kind) {
    case wasm::ValType::I32:
    case wasm::ValType::F32:
    case wasm::ValType::F64: {
      // Argument must be number, bool, or undefined.
      // Note: the final test inverts polarity — anything that is not
      // undefined at that point fails.
      masm.branchTestNumber(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestUndefined(Assembler::NotEqual, arg, failure->label());
      break;
    }
    case wasm::ValType::I64: {
      // Argument must be bigint, bool, or string.
      masm.branchTestBigInt(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestString(Assembler::NotEqual, arg, failure->label());
      break;
    }
    default:
      MOZ_CRASH("Unexpected kind");
  }
  masm.bind(&done);

  return true;
}
7108 
emitLoadObject(ObjOperandId resultId,uint32_t objOffset)7109 bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId,
7110                                      uint32_t objOffset) {
7111   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7112   Register reg = allocator.defineRegister(masm, resultId);
7113   StubFieldOffset obj(objOffset, StubField::Type::JSObject);
7114   emitLoadStubField(obj, reg);
7115   return true;
7116 }
7117 
emitLoadInt32Constant(uint32_t valOffset,Int32OperandId resultId)7118 bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset,
7119                                             Int32OperandId resultId) {
7120   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7121   Register reg = allocator.defineRegister(masm, resultId);
7122   StubFieldOffset val(valOffset, StubField::Type::RawInt32);
7123   emitLoadStubField(val, reg);
7124   return true;
7125 }
7126 
emitLoadBooleanConstant(bool val,BooleanOperandId resultId)7127 bool CacheIRCompiler::emitLoadBooleanConstant(bool val,
7128                                               BooleanOperandId resultId) {
7129   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7130   Register reg = allocator.defineRegister(masm, resultId);
7131   masm.move32(Imm32(val), reg);
7132   return true;
7133 }
7134 
emitLoadUndefined(ValOperandId resultId)7135 bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId) {
7136   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7137 
7138   ValueOperand reg = allocator.defineValueRegister(masm, resultId);
7139   masm.moveValue(UndefinedValue(), reg);
7140   return true;
7141 }
7142 
emitLoadConstantString(uint32_t strOffset,StringOperandId resultId)7143 bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset,
7144                                              StringOperandId resultId) {
7145   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7146   Register reg = allocator.defineRegister(masm, resultId);
7147   StubFieldOffset str(strOffset, StubField::Type::String);
7148   emitLoadStubField(str, reg);
7149   return true;
7150 }
7151 
// Convert an int32 to a string by calling the pure (no-GC) VM helper
// Int32ToStringPure. A nullptr result (allocation failed without reporting)
// takes the failure path.
bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId,
                                            StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register input = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  // |result| doubles as the scratch/context register for the ABI call; it is
  // overwritten by the return value below.
  using Fn = JSLinearString* (*)(JSContext * cx, int32_t i);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(input);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.mov(ReturnReg, result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(0), failure->label());
  return true;
}
7181 
// Convert a number (as double) to a string by calling the pure VM helper
// NumberToStringPure. A nullptr result takes the failure path.
bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Float register must be preserved. The BinaryArith ICs use
  // the fact that baseline has them available, as well as fixed temps on
  // LBinaryCache.
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, floatScratch0);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(result);
  // Explicitly save the input double across the call (see preservation note
  // above).
  volatileRegs.addUnchecked(floatScratch0);
  masm.PushRegsInMask(volatileRegs);

  // |result| doubles as the scratch/context register for the ABI call; it is
  // overwritten by the return value below.
  using Fn = JSString* (*)(JSContext * cx, double d);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(floatScratch0, MoveOp::DOUBLE);
  masm.callWithABI<Fn, js::NumberToStringPure>();

  masm.mov(ReturnReg, result);
  masm.PopRegsInMask(volatileRegs);

  masm.branchPtr(Assembler::Equal, result, ImmPtr(0), failure->label());
  return true;
}
7218 
emitBooleanToString(BooleanOperandId inputId,StringOperandId resultId)7219 bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId,
7220                                           StringOperandId resultId) {
7221   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7222   Register boolean = allocator.useRegister(masm, inputId);
7223   Register result = allocator.defineRegister(masm, resultId);
7224   const JSAtomState& names = cx_->names();
7225   Label true_, done;
7226 
7227   masm.branchTest32(Assembler::NonZero, boolean, boolean, &true_);
7228 
7229   // False case
7230   masm.movePtr(ImmGCPtr(names.false_), result);
7231   masm.jump(&done);
7232 
7233   // True case
7234   masm.bind(&true_);
7235   masm.movePtr(ImmGCPtr(names.true_), result);
7236   masm.bind(&done);
7237 
7238   return true;
7239 }
7240 
bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Produces the class-name string for |obj| by calling
  // js::ObjectClassToString out-of-line; a null return diverts to the
  // failure path.
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Save volatile registers around the ABI call; the output value register
  // and |scratch| are excluded because they receive the result.
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(output.valueReg());
  volatileRegs.takeUnchecked(scratch);
  masm.PushRegsInMask(volatileRegs);

  // |scratch| serves as ABI setup register, cx argument, and finally the
  // returned string pointer.
  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(volatileRegs);

  // Null return means the call failed; otherwise box the string result.
  masm.branchPtr(Assembler::Equal, scratch, ImmPtr(0), failure->label());
  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());

  return true;
}
7274 
emitCallStringConcatResult(StringOperandId lhsId,StringOperandId rhsId)7275 bool CacheIRCompiler::emitCallStringConcatResult(StringOperandId lhsId,
7276                                                  StringOperandId rhsId) {
7277   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7278   AutoCallVM callvm(masm, this, allocator);
7279 
7280   Register lhs = allocator.useRegister(masm, lhsId);
7281   Register rhs = allocator.useRegister(masm, rhsId);
7282 
7283   callvm.prepare();
7284 
7285   masm.Push(static_cast<js::jit::Imm32>(js::gc::DefaultHeap));
7286   masm.Push(rhs);
7287   masm.Push(lhs);
7288 
7289   using Fn = JSString* (*)(JSContext*, HandleString, HandleString,
7290                            js::gc::InitialHeap);
7291   callvm.call<Fn, ConcatStrings<CanGC>>();
7292 
7293   return true;
7294 }
7295 
bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Computes, fully inline (no VM call), a boolean: is |val| a generator
  // object in the suspended state?
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  ValueOperand input = allocator.useValueRegister(masm, valId);

  // Test if it's an object.
  Label returnFalse, done;
  masm.fallibleUnboxObject(input, scratch, &returnFalse);

  // Test if it's a GeneratorObject. Note: |scratch| holds the unboxed object
  // and is also passed as the spectre-mitigation register here.
  masm.branchTestObjClass(Assembler::NotEqual, scratch,
                          &GeneratorObject::class_, scratch2, scratch,
                          &returnFalse);

  // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
  // the generator is suspended. |scratch| is clobbered with the unboxed
  // resume index from here on.
  Address addr(scratch, AbstractGeneratorObject::offsetOfResumeIndexSlot());
  masm.fallibleUnboxInt32(addr, scratch, &returnFalse);
  masm.branch32(Assembler::AboveOrEqual, scratch,
                Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
                &returnFalse);

  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&returnFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}
7329 
7330 // This op generates no code. It is consumed by the transpiler.
emitMetaTwoByte(uint32_t,uint32_t)7331 bool CacheIRCompiler::emitMetaTwoByte(uint32_t, uint32_t) { return true; }
7332 
bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Reads obj[index] via the NativeGetElement VM function.
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  callvm.prepare();

  // Arguments are pushed last-to-first. |obj| is pushed twice: once boxed as
  // an object Value (the HandleValue receiver argument), once as the raw
  // object pointer (the HandleNativeObject argument).
  masm.Push(index);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleNativeObject, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}
7353 
emitProxyHasPropResult(ObjOperandId objId,ValOperandId idId,bool hasOwn)7354 bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId,
7355                                              ValOperandId idId, bool hasOwn) {
7356   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7357   AutoCallVM callvm(masm, this, allocator);
7358 
7359   Register obj = allocator.useRegister(masm, objId);
7360   ValueOperand idVal = allocator.useValueRegister(masm, idId);
7361 
7362   callvm.prepare();
7363 
7364   masm.Push(idVal);
7365   masm.Push(obj);
7366 
7367   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
7368   if (hasOwn) {
7369     callvm.call<Fn, ProxyHasOwn>();
7370   } else {
7371     callvm.call<Fn, ProxyHas>();
7372   }
7373   return true;
7374 }
7375 
emitProxyGetByValueResult(ObjOperandId objId,ValOperandId idId)7376 bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId,
7377                                                 ValOperandId idId) {
7378   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7379   AutoCallVM callvm(masm, this, allocator);
7380 
7381   Register obj = allocator.useRegister(masm, objId);
7382   ValueOperand idVal = allocator.useValueRegister(masm, idId);
7383 
7384   callvm.prepare();
7385   masm.Push(idVal);
7386   masm.Push(obj);
7387 
7388   using Fn =
7389       bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
7390   callvm.call<Fn, ProxyGetPropertyByValue>();
7391   return true;
7392 }
7393 
emitCallGetSparseElementResult(ObjOperandId objId,Int32OperandId indexId)7394 bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId,
7395                                                      Int32OperandId indexId) {
7396   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7397 
7398   AutoCallVM callvm(masm, this, allocator);
7399 
7400   Register obj = allocator.useRegister(masm, objId);
7401   Register id = allocator.useRegister(masm, indexId);
7402 
7403   callvm.prepare();
7404   masm.Push(id);
7405   masm.Push(obj);
7406 
7407   using Fn = bool (*)(JSContext * cx, HandleArrayObject obj, int32_t int_id,
7408                       MutableHandleValue result);
7409   callvm.call<Fn, GetSparseElementHelper>();
7410   return true;
7411 }
7412 
bool CacheIRCompiler::emitCallRegExpMatcherResult(ObjOperandId regexpId,
                                                  StringOperandId inputId,
                                                  Int32OperandId lastIndexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Runs |regexp| against |input| from |lastIndex| via the RegExpMatcherRaw
  // VM function, which produces the match result Value.
  AutoCallVM callvm(masm, this, allocator);

  Register regexp = allocator.useRegister(masm, regexpId);
  Register input = allocator.useRegister(masm, inputId);
  Register lastIndex = allocator.useRegister(masm, lastIndexId);

  callvm.prepare();
  // Arguments are pushed last-to-first. No caller-provided MatchPairs are
  // passed — presumably the callee supplies its own; confirm in
  // RegExpMatcherRaw if this matters.
  masm.Push(ImmWord(0));  // nullptr MatchPairs.
  masm.Push(lastIndex);
  masm.Push(input);
  masm.Push(regexp);

  using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs * pairs,
                      MutableHandleValue output);
  callvm.call<Fn, RegExpMatcherRaw>();
  return true;
}
7436 
emitCallRegExpSearcherResult(ObjOperandId regexpId,StringOperandId inputId,Int32OperandId lastIndexId)7437 bool CacheIRCompiler::emitCallRegExpSearcherResult(ObjOperandId regexpId,
7438                                                    StringOperandId inputId,
7439                                                    Int32OperandId lastIndexId) {
7440   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7441 
7442   AutoCallVM callvm(masm, this, allocator);
7443 
7444   Register regexp = allocator.useRegister(masm, regexpId);
7445   Register input = allocator.useRegister(masm, inputId);
7446   Register lastIndex = allocator.useRegister(masm, lastIndexId);
7447 
7448   callvm.prepare();
7449   masm.Push(ImmWord(0));  // nullptr MatchPairs.
7450   masm.Push(lastIndex);
7451   masm.Push(input);
7452   masm.Push(regexp);
7453 
7454   using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
7455                       int32_t lastIndex, MatchPairs * pairs, int32_t * result);
7456   callvm.call<Fn, RegExpSearcherRaw>();
7457   return true;
7458 }
7459 
emitCallRegExpTesterResult(ObjOperandId regexpId,StringOperandId inputId,Int32OperandId lastIndexId)7460 bool CacheIRCompiler::emitCallRegExpTesterResult(ObjOperandId regexpId,
7461                                                  StringOperandId inputId,
7462                                                  Int32OperandId lastIndexId) {
7463   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7464 
7465   AutoCallVM callvm(masm, this, allocator);
7466 
7467   Register regexp = allocator.useRegister(masm, regexpId);
7468   Register input = allocator.useRegister(masm, inputId);
7469   Register lastIndex = allocator.useRegister(masm, lastIndexId);
7470 
7471   callvm.prepare();
7472   masm.Push(lastIndex);
7473   masm.Push(input);
7474   masm.Push(regexp);
7475 
7476   using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
7477                       int32_t lastIndex, int32_t * result);
7478   callvm.call<Fn, RegExpTesterRaw>();
7479   return true;
7480 }
7481 
emitRegExpFlagResult(ObjOperandId regexpId,int32_t flagsMask)7482 bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId,
7483                                            int32_t flagsMask) {
7484   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7485 
7486   AutoOutputRegister output(*this);
7487   Register regexp = allocator.useRegister(masm, regexpId);
7488   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
7489 
7490   Address flagsAddr(
7491       regexp, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
7492   masm.unboxInt32(flagsAddr, scratch);
7493 
7494   Label ifFalse, done;
7495   masm.branchTest32(Assembler::Zero, scratch, Imm32(flagsMask), &ifFalse);
7496   masm.moveValue(BooleanValue(true), output.valueReg());
7497   masm.jump(&done);
7498 
7499   masm.bind(&ifFalse);
7500   masm.moveValue(BooleanValue(false), output.valueReg());
7501 
7502   masm.bind(&done);
7503   return true;
7504 }
7505 
emitCallSubstringKernelResult(StringOperandId strId,Int32OperandId beginId,Int32OperandId lengthId)7506 bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId,
7507                                                     Int32OperandId beginId,
7508                                                     Int32OperandId lengthId) {
7509   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7510 
7511   AutoCallVM callvm(masm, this, allocator);
7512 
7513   Register str = allocator.useRegister(masm, strId);
7514   Register begin = allocator.useRegister(masm, beginId);
7515   Register length = allocator.useRegister(masm, lengthId);
7516 
7517   callvm.prepare();
7518   masm.Push(length);
7519   masm.Push(begin);
7520   masm.Push(str);
7521 
7522   using Fn = JSString* (*)(JSContext * cx, HandleString str, int32_t begin,
7523                            int32_t len);
7524   callvm.call<Fn, SubstringKernel>();
7525   return true;
7526 }
7527 
emitStringReplaceStringResult(StringOperandId strId,StringOperandId patternId,StringOperandId replacementId)7528 bool CacheIRCompiler::emitStringReplaceStringResult(
7529     StringOperandId strId, StringOperandId patternId,
7530     StringOperandId replacementId) {
7531   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7532 
7533   AutoCallVM callvm(masm, this, allocator);
7534 
7535   Register str = allocator.useRegister(masm, strId);
7536   Register pattern = allocator.useRegister(masm, patternId);
7537   Register replacement = allocator.useRegister(masm, replacementId);
7538 
7539   callvm.prepare();
7540   masm.Push(replacement);
7541   masm.Push(pattern);
7542   masm.Push(str);
7543 
7544   using Fn =
7545       JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
7546   callvm.call<Fn, jit::StringReplace>();
7547   return true;
7548 }
7549 
emitStringSplitStringResult(StringOperandId strId,StringOperandId separatorId)7550 bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId,
7551                                                   StringOperandId separatorId) {
7552   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7553 
7554   AutoCallVM callvm(masm, this, allocator);
7555 
7556   Register str = allocator.useRegister(masm, strId);
7557   Register separator = allocator.useRegister(masm, separatorId);
7558 
7559   callvm.prepare();
7560   masm.Push(Imm32(INT32_MAX));
7561   masm.Push(separator);
7562   masm.Push(str);
7563 
7564   using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
7565   callvm.call<Fn, js::StringSplitString>();
7566   return true;
7567 }
7568 
bool CacheIRCompiler::emitRegExpPrototypeOptimizableResult(
    ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Produces a boolean: is the RegExp prototype still in its optimizable
  // state? Fast path is an inline branch; the slow path calls
  // RegExpPrototypeOptimizableRaw out-of-line.
  AutoOutputRegister output(*this);
  Register proto = allocator.useRegister(masm, protoId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slow, done;
  masm.branchIfNotRegExpPrototypeOptimizable(proto, scratch, &slow);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slow);

    // Preserve volatile registers across the ABI call; |scratch| is excluded
    // because it receives the boolean result.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext * cx, JSObject * proto);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(proto);
    masm.callWithABI<Fn, RegExpPrototypeOptimizableRaw>();
    masm.storeCallBoolResult(scratch);

    masm.PopRegsInMask(volatileRegs);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
7605 
bool CacheIRCompiler::emitRegExpInstanceOptimizableResult(
    ObjOperandId regexpId, ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Produces a boolean: is this RegExp instance (with the given prototype)
  // still optimizable? Mirrors emitRegExpPrototypeOptimizableResult: inline
  // fast path, out-of-line RegExpInstanceOptimizableRaw slow path.
  AutoOutputRegister output(*this);
  Register regexp = allocator.useRegister(masm, regexpId);
  Register proto = allocator.useRegister(masm, protoId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label slow, done;
  masm.branchIfNotRegExpInstanceOptimizable(regexp, scratch, &slow);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slow);

    // Preserve volatile registers across the ABI call; |scratch| is excluded
    // because it receives the boolean result.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext * cx, JSObject * obj, JSObject * proto);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(regexp);
    masm.passABIArg(proto);
    masm.callWithABI<Fn, RegExpInstanceOptimizableRaw>();
    masm.storeCallBoolResult(scratch);

    masm.PopRegsInMask(volatileRegs);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
7644 
emitGetFirstDollarIndexResult(StringOperandId strId)7645 bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
7646   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7647 
7648   AutoCallVM callvm(masm, this, allocator);
7649 
7650   Register str = allocator.useRegister(masm, strId);
7651 
7652   callvm.prepare();
7653   masm.Push(str);
7654 
7655   using Fn = bool (*)(JSContext*, JSString*, int32_t*);
7656   callvm.call<Fn, GetFirstDollarIndexRaw>();
7657   return true;
7658 }
7659 
bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
    uint32_t replacementId, Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Emits Atomics.compareExchange on a typed-array element. Non-BigInt
  // element types use an out-of-line ABI call; BigInt (64-bit) element
  // types use a VM call so the result BigInt can be allocated.
  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of registers.
  Register obj = output ? output->valueReg().typeReg()
                        : callvm->outputValueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register index = allocator.useRegister(masm, indexId);
  // |expectedId|/|replacementId| are raw operand ids; their concrete operand
  // kind depends on the element type.
  Register expected;
  Register replacement;
  if (!Scalar::isBigIntType(elementType)) {
    expected = allocator.useRegister(masm, Int32OperandId(expectedId));
    replacement = allocator.useRegister(masm, Int32OperandId(replacementId));
  } else {
    expected = allocator.useRegister(masm, BigIntOperandId(expectedId));
    replacement = allocator.useRegister(masm, BigIntOperandId(replacementId));
  }

  Register scratch = output ? output->valueReg().scratchReg()
                            : callvm->outputValueReg().scratchReg();
  MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/x64 has
  // specific requirements on which registers are used; MIPS needs multiple
  // additional temporaries. Therefore we're using either an ABI or VM call here
  // instead of handling each platform separately.

  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    // VM calling convention: arguments are pushed last-to-first.
    masm.Push(replacement);
    masm.Push(expected);
    masm.Push(index);
    masm.Push(obj);

    using Fn =
        BigInt* (*)(JSContext*, TypedArrayObject*, size_t, BigInt*, BigInt*);
    callvm->call<Fn, jit::AtomicsCompareExchange64>();
    return true;
  }

  {
    // Preserve volatile registers across the ABI call; the output value and
    // |scratch| are excluded because they receive the result.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output->valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(expected);
    masm.passABIArg(replacement);
    masm.callWithABI(DynamicFunction<AtomicsCompareExchangeFn>(
        AtomicsCompareExchange(elementType)));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }

  // Uint32 results can exceed INT32_MAX, so box them as doubles.
  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output->valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output->valueReg(), fpscratch);
  }

  return true;
}
7760 
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
    Scalar::Type elementType, AtomicsReadWriteModifyFn fn) {
  // Shared helper for the non-BigInt atomic read-modify-write ops
  // (exchange/add/sub/and/or/xor); |fn| is the out-of-line function that
  // performs the actual atomic operation.
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  // |valueId| is always an Int32 operand on this (non-BigInt) path.
  Register value = allocator.useRegister(masm, Int32OperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use an ABI call.
  {
    // Preserve volatile registers across the call; the output value and
    // |scratch| are excluded because they receive the result.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(value);
    masm.callWithABI(DynamicFunction<AtomicsReadWriteModifyFn>(fn));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }

  // Uint32 results can exceed INT32_MAX, so box them as doubles.
  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  }

  return true;
}
7810 
// Shared helper for the BigInt (64-bit) atomic read-modify-write ops; |fn|
// is the VM function that performs the actual atomic operation and returns
// the old element value as a BigInt.
template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId) {
  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  // |valueId| is always a BigInt operand on this path.
  Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use a VM call.

  callvm.prepare();

  // VM calling convention: arguments are pushed last-to-first.
  masm.Push(value);
  masm.Push(index);
  masm.Push(obj);

  callvm.call<AtomicsReadWriteModify64Fn, fn>();
  return true;
}
7848 
emitAtomicsExchangeResult(ObjOperandId objId,IntPtrOperandId indexId,uint32_t valueId,Scalar::Type elementType)7849 bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
7850                                                 IntPtrOperandId indexId,
7851                                                 uint32_t valueId,
7852                                                 Scalar::Type elementType) {
7853   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7854 
7855   if (Scalar::isBigIntType(elementType)) {
7856     return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
7857         objId, indexId, valueId);
7858   }
7859   return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
7860                                           AtomicsExchange(elementType));
7861 }
7862 
emitAtomicsAddResult(ObjOperandId objId,IntPtrOperandId indexId,uint32_t valueId,Scalar::Type elementType,bool forEffect)7863 bool CacheIRCompiler::emitAtomicsAddResult(ObjOperandId objId,
7864                                            IntPtrOperandId indexId,
7865                                            uint32_t valueId,
7866                                            Scalar::Type elementType,
7867                                            bool forEffect) {
7868   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7869 
7870   if (Scalar::isBigIntType(elementType)) {
7871     return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(objId, indexId,
7872                                                                  valueId);
7873   }
7874   return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
7875                                           AtomicsAdd(elementType));
7876 }
7877 
emitAtomicsSubResult(ObjOperandId objId,IntPtrOperandId indexId,uint32_t valueId,Scalar::Type elementType,bool forEffect)7878 bool CacheIRCompiler::emitAtomicsSubResult(ObjOperandId objId,
7879                                            IntPtrOperandId indexId,
7880                                            uint32_t valueId,
7881                                            Scalar::Type elementType,
7882                                            bool forEffect) {
7883   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7884 
7885   if (Scalar::isBigIntType(elementType)) {
7886     return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(objId, indexId,
7887                                                                  valueId);
7888   }
7889   return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
7890                                           AtomicsSub(elementType));
7891 }
7892 
emitAtomicsAndResult(ObjOperandId objId,IntPtrOperandId indexId,uint32_t valueId,Scalar::Type elementType,bool forEffect)7893 bool CacheIRCompiler::emitAtomicsAndResult(ObjOperandId objId,
7894                                            IntPtrOperandId indexId,
7895                                            uint32_t valueId,
7896                                            Scalar::Type elementType,
7897                                            bool forEffect) {
7898   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7899 
7900   if (Scalar::isBigIntType(elementType)) {
7901     return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(objId, indexId,
7902                                                                  valueId);
7903   }
7904   return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
7905                                           AtomicsAnd(elementType));
7906 }
7907 
emitAtomicsOrResult(ObjOperandId objId,IntPtrOperandId indexId,uint32_t valueId,Scalar::Type elementType,bool forEffect)7908 bool CacheIRCompiler::emitAtomicsOrResult(ObjOperandId objId,
7909                                           IntPtrOperandId indexId,
7910                                           uint32_t valueId,
7911                                           Scalar::Type elementType,
7912                                           bool forEffect) {
7913   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7914 
7915   if (Scalar::isBigIntType(elementType)) {
7916     return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(objId, indexId,
7917                                                                 valueId);
7918   }
7919   return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
7920                                           AtomicsOr(elementType));
7921 }
7922 
emitAtomicsXorResult(ObjOperandId objId,IntPtrOperandId indexId,uint32_t valueId,Scalar::Type elementType,bool forEffect)7923 bool CacheIRCompiler::emitAtomicsXorResult(ObjOperandId objId,
7924                                            IntPtrOperandId indexId,
7925                                            uint32_t valueId,
7926                                            Scalar::Type elementType,
7927                                            bool forEffect) {
7928   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
7929 
7930   if (Scalar::isBigIntType(elementType)) {
7931     return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(objId, indexId,
7932                                                                  valueId);
7933   }
7934   return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
7935                                           AtomicsXor(elementType));
7936 }
7937 
bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
                                            IntPtrOperandId indexId,
                                            Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // BigInt (64-bit) elements are loaded through a VM call and therefore need
  // an AutoCallVM; every other element type is handled inline and uses the
  // output register directly. Exactly one of the two is constructed.
  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm,
                                         output ? *output : callvm->output());
  AutoSpectreBoundsScratchRegister spectreTemp(allocator, masm);
  AutoAvailableFloatRegister floatReg(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/arm32 has
  // specific requirements on which registers are used. Therefore we're using a
  // VM call here instead of handling each platform separately.
  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t);
    callvm->call<Fn, jit::AtomicsLoad64>();
    return true;
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

  // Load the value.
  BaseIndex source(scratch, index, ScaleFromScalarType(elementType));

  // Surround the plain load with load-synchronizing memory barriers.
  auto sync = Synchronization::Load();

  masm.memoryBarrierBefore(sync);

  // ForceDouble boxes an out-of-int32-range uint32 as a double, so the
  // uint32 fallback label is never taken and may be null.
  Label* failUint32 = nullptr;
  MacroAssembler::Uint32Mode mode = MacroAssembler::Uint32Mode::ForceDouble;
  masm.loadFromTypedArray(elementType, source, output->valueReg(), mode,
                          scratch, failUint32);
  masm.memoryBarrierAfter(sync);

  return true;
}
8003 
bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
                                             IntPtrOperandId indexId,
                                             uint32_t valueId,
                                             Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  // |valueId| is an untyped operand id: it names an Int32 operand for
  // ordinary element types and a BigInt operand for 64-bit elements.
  Maybe<Register> valueInt32;
  Maybe<Register> valueBigInt;
  if (!Scalar::isBigIntType(elementType)) {
    valueInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
  } else {
    valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
  }
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Not enough registers on X86.
  Register spectreTemp = Register::Invalid();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  masm.spectreBoundsCheckPtr(index, scratch, spectreTemp, failure->label());

  if (!Scalar::isBigIntType(elementType)) {
    // Load the elements vector.
    masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

    // Store the value.
    BaseIndex dest(scratch, index, ScaleFromScalarType(elementType));

    // Surround the plain store with store-synchronizing memory barriers.
    auto sync = Synchronization::Store();

    masm.memoryBarrierBefore(sync);
    masm.storeToTypedIntArray(elementType, *valueInt32, dest);
    masm.memoryBarrierAfter(sync);

    // The IC result is the stored value, boxed as an int32.
    masm.tagValue(JSVAL_TYPE_INT32, *valueInt32, output.valueReg());
  } else {
    // See comment in emitAtomicsCompareExchange for why we use an ABI call.

    // Save all volatile registers across the call, except the ones we
    // deliberately clobber afterwards: the output register and the scratch.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                                 liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = void (*)(TypedArrayObject*, size_t, BigInt*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(*valueBigInt);
    masm.callWithABI<Fn, jit::AtomicsStore64>();

    masm.PopRegsInMask(volatileRegs);

    // The IC result is the stored value, boxed as a BigInt. |valueBigInt| was
    // preserved by the Push/PopRegsInMask pair above if it was volatile.
    masm.tagValue(JSVAL_TYPE_BIGINT, *valueBigInt, output.valueReg());
  }

  return true;
}
8071 
emitAtomicsIsLockFreeResult(Int32OperandId valueId)8072 bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId) {
8073   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8074 
8075   AutoOutputRegister output(*this);
8076   Register value = allocator.useRegister(masm, valueId);
8077   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
8078 
8079   masm.atomicIsLockFreeJS(value, scratch);
8080   masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
8081 
8082   return true;
8083 }
8084 
emitBigIntAsIntNResult(Int32OperandId bitsId,BigIntOperandId bigIntId)8085 bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
8086                                              BigIntOperandId bigIntId) {
8087   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8088 
8089   AutoCallVM callvm(masm, this, allocator);
8090 
8091   Register bits = allocator.useRegister(masm, bitsId);
8092   Register bigInt = allocator.useRegister(masm, bigIntId);
8093 
8094   callvm.prepare();
8095   masm.Push(bits);
8096   masm.Push(bigInt);
8097 
8098   using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
8099   callvm.call<Fn, jit::BigIntAsIntN>();
8100   return true;
8101 }
8102 
emitBigIntAsUintNResult(Int32OperandId bitsId,BigIntOperandId bigIntId)8103 bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
8104                                               BigIntOperandId bigIntId) {
8105   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8106 
8107   AutoCallVM callvm(masm, this, allocator);
8108 
8109   Register bits = allocator.useRegister(masm, bitsId);
8110   Register bigInt = allocator.useRegister(masm, bigIntId);
8111 
8112   callvm.prepare();
8113   masm.Push(bits);
8114   masm.Push(bigInt);
8115 
8116   using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
8117   callvm.call<Fn, jit::BigIntAsUintN>();
8118   return true;
8119 }
8120 
emitBailout()8121 bool CacheIRCompiler::emitBailout() {
8122   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8123 
8124   // Generates no code.
8125 
8126   return true;
8127 }
8128 
emitAssertRecoveredOnBailoutResult(ValOperandId valId,bool mustBeRecovered)8129 bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId,
8130                                                          bool mustBeRecovered) {
8131   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
8132 
8133   AutoOutputRegister output(*this);
8134 
8135   // NOP when not in IonMonkey
8136   masm.moveValue(UndefinedValue(), output.valueReg());
8137 
8138   return true;
8139 }
8140 
8141 template <typename Fn, Fn fn>
callVM(MacroAssembler & masm)8142 void CacheIRCompiler::callVM(MacroAssembler& masm) {
8143   VMFunctionId id = VMFunctionToId<Fn, fn>::id;
8144   callVMInternal(masm, id);
8145 }
8146 
// Emits a call to the VM wrapper trampoline for |id|. Ion and Baseline use
// different frame layouts, so each mode has its own calling sequence.
void CacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
  if (mode_ == Mode::Ion) {
    MOZ_ASSERT(preparedForVMCall_);
    TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
    const VMFunctionData& fun = GetVMFunction(id);
    // The arguments were already pushed; the frame descriptor records their
    // size so the callee can walk the frame.
    uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
    uint32_t descriptor = MakeFrameDescriptor(frameSize, FrameType::IonICCall,
                                              ExitFrameLayout::Size());
    masm.Push(Imm32(descriptor));
    masm.callJit(code);

    // Remove rest of the frame left on the stack. We remove the return address
    // which is implicitly popped when returning.
    int framePop = sizeof(ExitFrameLayout) - sizeof(void*);

    // Pop arguments from framePushed.
    masm.implicitPop(frameSize + framePop);
    masm.freeStack(IonICCallFrameLayout::Size());
    return;
  }

  MOZ_ASSERT(mode_ == Mode::Baseline);

  MOZ_ASSERT(preparedForVMCall_);

  // Baseline calls the VM through the shared non-tail-call trampoline.
  TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
  MOZ_ASSERT(GetVMFunction(id).expectTailCall == NonTailCall);

  EmitBaselineCallVM(code, masm);
}
8177 
isBaseline()8178 bool CacheIRCompiler::isBaseline() { return mode_ == Mode::Baseline; }
8179 
isIon()8180 bool CacheIRCompiler::isIon() { return mode_ == Mode::Ion; }
8181 
asBaseline()8182 BaselineCacheIRCompiler* CacheIRCompiler::asBaseline() {
8183   MOZ_ASSERT(this->isBaseline());
8184   return static_cast<BaselineCacheIRCompiler*>(this);
8185 }
8186 
asIon()8187 IonCacheIRCompiler* CacheIRCompiler::asIon() {
8188   MOZ_ASSERT(this->isIon());
8189   return static_cast<IonCacheIRCompiler*>(this);
8190 }
8191 
8192 #ifdef DEBUG
assertFloatRegisterAvailable(FloatRegister reg)8193 void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
8194   if (isBaseline()) {
8195     // Baseline does not have any FloatRegisters live when calling an IC stub.
8196     return;
8197   }
8198 
8199   asIon()->assertFloatRegisterAvailable(reg);
8200 }
8201 #endif
8202 
AutoCallVM::AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
                       CacheRegisterAllocator& allocator)
    : masm_(masm), compiler_(compiler), allocator_(allocator) {
  // Ion needs to `prepareVMCall` before it can callVM and it also needs to
  // initialize AutoSaveLiveRegisters.
  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
    // Will need to use a downcast here as well, in order to pass the
    // stub to AutoSaveLiveRegisters
    save_.emplace(*compiler_->asIon());
  }

  // Claim the IC's output register in both modes.
  output_.emplace(*compiler);

  // Baseline calls the VM through a stub frame, which needs a scratch
  // register for the frame setup.
  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
    stubFrame_.emplace(*compiler_->asBaseline());
    scratch_.emplace(allocator_, masm_, output_.ref());
  }
}
8221 
prepare()8222 void AutoCallVM::prepare() {
8223   allocator_.discardStack(masm_);
8224   MOZ_ASSERT(compiler_ != nullptr);
8225   if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
8226     compiler_->asIon()->prepareVMCall(masm_, *save_.ptr());
8227     return;
8228   }
8229   MOZ_ASSERT(compiler_->mode_ == CacheIRCompiler::Mode::Baseline);
8230   stubFrame_->enter(masm_, scratch_.ref());
8231 }
8232 
storeResult(JSValueType returnType)8233 void AutoCallVM::storeResult(JSValueType returnType) {
8234   MOZ_ASSERT(returnType != JSVAL_TYPE_DOUBLE);
8235 
8236   if (returnType == JSVAL_TYPE_UNKNOWN) {
8237     masm_.storeCallResultValue(output_.ref());
8238   } else {
8239     if (output_->hasValue()) {
8240       masm_.tagValue(returnType, ReturnReg, output_->valueReg());
8241     } else {
8242       Register out = output_->typedReg().gpr();
8243       if (out != ReturnReg) {
8244         masm_.mov(ReturnReg, out);
8245       }
8246     }
8247   }
8248 }
8249 
leaveBaselineStubFrame()8250 void AutoCallVM::leaveBaselineStubFrame() {
8251   if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
8252     stubFrame_->leave(masm_);
8253   }
8254 }
8255 
// Computes the C++ type a VM function effectively "returns": either its
// direct return type, or — for bool-returning functions — the type of its
// trailing output parameter.
template <typename...>
struct VMFunctionReturnType;

template <class R, typename... Args>
struct VMFunctionReturnType<R (*)(JSContext*, Args...)> {
  using LastArgument = typename LastArg<Args...>::Type;

  // By convention VMFunctions returning `bool` use an output parameter.
  using ReturnType =
      std::conditional_t<std::is_same_v<R, bool>, LastArgument, R>;
};
8267 
// Maps a VM function's (effective) return type to the JSValueType tag used
// when boxing the call result into the IC's output Value register.
template <class>
struct ReturnTypeToJSValueType;

// Definitions for the currently used return types.
template <>
struct ReturnTypeToJSValueType<MutableHandleValue> {
  static constexpr JSValueType result = JSVAL_TYPE_UNKNOWN;
};
template <>
struct ReturnTypeToJSValueType<bool*> {
  static constexpr JSValueType result = JSVAL_TYPE_BOOLEAN;
};
template <>
struct ReturnTypeToJSValueType<int32_t*> {
  static constexpr JSValueType result = JSVAL_TYPE_INT32;
};
template <>
struct ReturnTypeToJSValueType<JSString*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<BigInt*> {
  static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
};
template <>
struct ReturnTypeToJSValueType<JSObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<StringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<RegExpStringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PlainObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<TypedArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
8320 
8321 template <typename Fn>
storeResult()8322 void AutoCallVM::storeResult() {
8323   using ReturnType = typename VMFunctionReturnType<Fn>::ReturnType;
8324   storeResult(ReturnTypeToJSValueType<ReturnType>::result);
8325 }
8326 
AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler* compiler,
                                                   FailurePath* failure)
    : compiler_(compiler), failure_(failure) {
  // If we're compiling a Baseline IC, FloatReg0 is always available.
  if (!compiler_->isBaseline()) {
    // In Ion, FloatReg0 may be live, so spill it to the stack here; the
    // destructor pops it again.
    MacroAssembler& masm = compiler_->masm;
    masm.push(FloatReg0);
    compiler->allocator.setHasAutoScratchFloatRegisterSpill(true);
  }

  // Record that failure paths must restore FloatReg0 first (see failure()).
  if (failure_) {
    failure_->setHasAutoScratchFloatRegister();
  }
}
8341 
AutoScratchFloatRegister::~AutoScratchFloatRegister() {
  if (failure_) {
    failure_->clearHasAutoScratchFloatRegister();
  }

  if (!compiler_->isBaseline()) {
    // Restore the caller's FloatReg0 value saved in the constructor.
    MacroAssembler& masm = compiler_->masm;
    masm.pop(FloatReg0);
    compiler_->allocator.setHasAutoScratchFloatRegisterSpill(false);

    if (failure_) {
      // Emit an out-of-line stub: code that jumped to |failurePopReg_| (see
      // failure()) pops the saved FloatReg0 before continuing to the real
      // failure path. Fall-through code skips over it via |done|.
      Label done;
      masm.jump(&done);
      masm.bind(&failurePopReg_);
      masm.pop(FloatReg0);
      masm.jump(failure_->label());
      masm.bind(&done);
    }
  }
}
8362 
failure()8363 Label* AutoScratchFloatRegister::failure() {
8364   MOZ_ASSERT(failure_);
8365 
8366   if (!compiler_->isBaseline()) {
8367     return &failurePopReg_;
8368   }
8369   return failure_->labelUnchecked();
8370 }
8371