/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/CacheIRCompiler.h"

#include "jit/IonIC.h"
#include "jit/SharedICHelpers.h"

#include "jsboolinlines.h"

#include "jit/MacroAssembler-inl.h"
#include "vm/JSCompartment-inl.h"

using namespace js;
using namespace js::jit;

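// Load an operand into a Value register, boxing or unboxing as needed based
// on where the operand currently lives, and mark the register as in use by
// the current instruction.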
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      ValueOperand reg = allocateValueRegister(masm);
      masm.boxDouble(loc.doubleReg(), reg, ScratchDoubleReg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

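// Like useValueRegister, but the caller specifies the ValueOperand to use.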
ValueOperand CacheRegisterAllocator::useFixedValueRegister(MacroAssembler& masm,
                                                           ValOperandId valId,
                                                           ValueOperand reg) {
  allocateFixedValueRegister(masm, reg);

  OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), reg);
      MOZ_ASSERT(!currentOpRegs_.aliases(loc.valueReg()),
                 "Register shouldn't be in use");
      availableRegs_.add(loc.valueReg());
      break;
    case OperandLocation::ValueStack:
      popValue(masm, &loc, reg);
      break;
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), reg);
      break;
    case OperandLocation::PayloadReg:
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      MOZ_ASSERT(!currentOpRegs_.has(loc.payloadReg()),
                 "Register shouldn't be in use");
      availableRegs_.add(loc.payloadReg());
      break;
    case OperandLocation::PayloadStack:
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      break;
    case OperandLocation::DoubleReg:
      masm.boxDouble(loc.doubleReg(), reg, ScratchDoubleReg);
      break;
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }

  loc.setValueReg(reg);
  return reg;
}

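// Load a typed operand into a payload register, unboxing it first if it is
// currently stored as a boxed Value.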
Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::Constant: {
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString())
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      else if (v.isSymbol())
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      else
        MOZ_CRASH("Unexpected Value");
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

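// Return the operand as a constant when possible; otherwise materialize it
// as a typed register or a boxed Value register.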
ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
    MacroAssembler& masm, ValOperandId val) {
  OperandLocation& loc = operandLocations_[val.id()];
  switch (loc.kind()) {
    case OperandLocation::Constant:
      return loc.constant();

    case OperandLocation::PayloadReg:
    case OperandLocation::PayloadStack: {
      JSValueType payloadType = loc.payloadType();
      Register reg = useRegister(masm, TypedOperandId(val, payloadType));
      return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
                                  AnyRegister(reg));
    }

    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return TypedOrValueRegister(useValueRegister(masm, val));

    case OperandLocation::DoubleReg:
      return TypedOrValueRegister(MIRType::Double,
                                  AnyRegister(loc.doubleReg()));

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}

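// Assign a fresh register (or Value register, below) to an operand that
// does not have a location yet.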
Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
                                                TypedOperandId typedId) {
  OperandLocation& loc = operandLocations_[typedId.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  Register reg = allocateRegister(masm);
  loc.setPayloadReg(reg, typedId.type());
  return reg;
}

ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
                                                         ValOperandId val) {
  OperandLocation& loc = operandLocations_[val.id()];
  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

  ValueOperand reg = allocateValueRegister(masm);
  loc.setValueReg(reg);
  return reg;
}

void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
  // See if any operands are dead so we can reuse their registers. Note that
  // we skip the input operands, as those are also used by failure paths, and
  // we currently don't track those uses.
  for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
       i++) {
    if (!writer_.operandIsDead(i, currentInstruction_)) continue;

    OperandLocation& loc = operandLocations_[i];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        availableRegs_.add(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        availableRegs_.add(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
        masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
        break;
      case OperandLocation::ValueStack:
        masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
        break;
      case OperandLocation::Uninitialized:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        break;
    }
    loc.setUninitialized();
  }
}

void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
  // This should only be called when we are no longer using the operands,
  // as we're discarding everything from the native stack. Set all operand
  // locations to Uninitialized to catch bugs.
  for (size_t i = 0; i < operandLocations_.length(); i++)
    operandLocations_[i].setUninitialized();

  if (stackPushed_ > 0) {
    masm.addToStackPtr(Imm32(stackPushed_));
    stackPushed_ = 0;
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();
}

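// Allocate any available register. If none are free, try to release dead
// operands first, then spill an operand that's not used by the current
// instruction, and finally push a register from availableRegsAfterSpill_.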
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  if (availableRegs_.empty()) freeDeadOperandLocations(masm);

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) continue;

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) continue;

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}

void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) continue;

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) continue;

      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}

void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}

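// Allocate registers to hold a boxed Value: two registers on NUNBOX32
// platforms, a single register elsewhere.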
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}

bool CacheRegisterAllocator::init() {
  if (!origInputLocations_.resize(writer_.numInputOperands())) return false;
  if (!operandLocations_.resize(writer_.numOperandIds())) return false;
  return true;
}

void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
  // Registers not in availableRegs_ and not used by input operands are
  // available after being spilled.
  availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
      GeneralRegisterSet::Not(availableRegs_.set()),
      GeneralRegisterSet::Not(inputRegisterSet()));
}

void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the allocator.
  //
  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) continue;

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) continue;

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }
}

GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet result;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        result.addUnchecked(loc.payloadReg());
        continue;
      case OperandLocation::ValueReg:
        result.addUnchecked(loc.valueReg());
        continue;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        continue;
      case OperandLocation::Uninitialized:
        break;
    }
    MOZ_CRASH("Invalid kind");
  }

  return result.set();
}

JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
  const OperandLocation& loc = operandLocations_[val.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      return JSVAL_TYPE_UNKNOWN;

    case OperandLocation::PayloadStack:
    case OperandLocation::PayloadReg:
      return loc.payloadType();

    case OperandLocation::Constant:
      return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
                                       : loc.constant().extractNonDoubleType();

    case OperandLocation::DoubleReg:
      return JSVAL_TYPE_DOUBLE;

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const TypedOrValueRegister& reg) {
  if (reg.hasValue()) {
    initInputLocation(i, reg.valueReg());
  } else if (reg.typedReg().isFloat()) {
    MOZ_ASSERT(reg.type() == MIRType::Double);
    initInputLocation(i, reg.typedReg().fpu());
  } else {
    initInputLocation(i, reg.typedReg().gpr(),
                      ValueTypeFromMIRType(reg.type()));
  }
}

void CacheRegisterAllocator::initInputLocation(
    size_t i, const ConstantOrRegister& value) {
  if (value.constant())
    initInputLocation(i, value.value());
  else
    initInputLocation(i, value.reg());
}

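// Spill a register-backed operand to the native stack, reusing a free slot
// left behind by a previously popped operand when possible.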
void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}

void CacheRegisterAllocator::spillOperandToStackOrRegister(
    MacroAssembler& masm, OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  // If enough registers are available, use them.
  if (loc->kind() == OperandLocation::ValueReg) {
    static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
    if (availableRegs_.set().size() >= BoxPieces) {
      ValueOperand reg = availableRegs_.takeAnyValue();
      masm.moveValue(loc->valueReg(), reg);
      loc->setValueReg(reg);
      return;
    }
  } else {
    MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
    if (!availableRegs_.empty()) {
      Register reg = availableRegs_.takeAny();
      masm.movePtr(loc->payloadReg(), reg);
      loc->setPayloadReg(reg, loc->payloadType());
      return;
    }
  }

  // Not enough registers available, spill to the stack.
  spillOperandToStack(masm, loc);
}

void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(
        Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack()),
        dest);
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}

void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}

bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  switch (other.kind_) {
    case PayloadReg:
      return aliasesReg(other.payloadReg());
    case ValueReg:
      return aliasesReg(other.valueReg());
    case PayloadStack:
    case ValueStack:
    case BaselineFrame:
    case Constant:
    case DoubleReg:
      return false;
    case Uninitialized:
      break;
  }

  MOZ_CRASH("Invalid kind");
}

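// Move all input operands back to their original locations, spilling
// operands to resolve cycles, and restore any registers that were spilled
// by allocateRegister. Used when emitting failure paths.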
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
                                               bool shouldDiscardStack) {
  size_t numInputOperands = origInputLocations_.length();
  MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);

  for (size_t j = 0; j < numInputOperands; j++) {
    const OperandLocation& dest = origInputLocations_[j];
    OperandLocation& cur = operandLocations_[j];
    if (dest == cur) continue;

    auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });

    // We have a cycle if a destination register will be used later
    // as source register. If that happens, just push the current value
    // on the stack and later get it from there.
    for (size_t k = j + 1; k < numInputOperands; k++) {
      OperandLocation& laterSource = operandLocations_[k];
      if (dest.aliasesReg(laterSource)) spillOperandToStack(masm, &laterSource);
    }

    if (dest.kind() == OperandLocation::ValueReg) {
      // We have to restore a Value register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          masm.moveValue(cur.valueReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadReg:
          masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadStack: {
          Register scratch = dest.valueReg().scratchReg();
          popPayload(masm, &cur, scratch);
          masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
          continue;
        }
        case OperandLocation::ValueStack:
          popValue(masm, &cur, dest.valueReg());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::PayloadReg) {
      // We have to restore a payload register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
                              dest.payloadType());
          continue;
        case OperandLocation::PayloadReg:
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          masm.mov(cur.payloadReg(), dest.payloadReg());
          continue;
        case OperandLocation::PayloadStack: {
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          popPayload(masm, &cur, dest.payloadReg());
          continue;
        }
        case OperandLocation::ValueStack:
          MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
          MOZ_ASSERT(cur.valueStack() <= stackPushed_);
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(
              Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
              dest.payloadReg(), dest.payloadType());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::Constant ||
               dest.kind() == OperandLocation::BaselineFrame ||
               dest.kind() == OperandLocation::DoubleReg) {
      // Nothing to do.
      continue;
    }

    MOZ_CRASH("Invalid kind");
  }

  for (const SpilledRegister& spill : spilledRegs_) {
    MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

    if (spill.stackPushed == stackPushed_) {
      masm.pop(spill.reg);
      stackPushed_ -= sizeof(uintptr_t);
    } else {
      MOZ_ASSERT(spill.stackPushed < stackPushed_);
      masm.loadPtr(
          Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
          spill.reg);
    }
  }

  if (shouldDiscardStack) discardStack(masm);
}

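// Sum the sizes of all stub fields up to the Type::Limit terminator.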
size_t CacheIRStubInfo::stubDataSize() const {
  size_t field = 0;
  size_t size = 0;
  while (true) {
    StubField::Type type = fieldType(field++);
    if (type == StubField::Type::Limit) return size;
    size += StubField::sizeInBytes(type);
  }
}

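// Copy stub data from an existing stub into a new stub, initializing GC
// pointer fields with GCPtr::init rather than a raw memcpy.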
void CacheIRStubInfo::copyStubData(ICStub* src, ICStub* dest) const {
  uint8_t* srcBytes = reinterpret_cast<uint8_t*>(src);
  uint8_t* destBytes = reinterpret_cast<uint8_t*>(dest);

  size_t field = 0;
  size_t offset = 0;
  while (true) {
    StubField::Type type = fieldType(field);
    switch (type) {
      case StubField::Type::RawWord:
        *reinterpret_cast<uintptr_t*>(destBytes + offset) =
            *reinterpret_cast<uintptr_t*>(srcBytes + offset);
        break;
      case StubField::Type::RawInt64:
      case StubField::Type::DOMExpandoGeneration:
        *reinterpret_cast<uint64_t*>(destBytes + offset) =
            *reinterpret_cast<uint64_t*>(srcBytes + offset);
        break;
      case StubField::Type::Shape:
        getStubField<ICStub, Shape*>(dest, offset)
            .init(getStubField<ICStub, Shape*>(src, offset));
        break;
      case StubField::Type::JSObject:
        getStubField<ICStub, JSObject*>(dest, offset)
            .init(getStubField<ICStub, JSObject*>(src, offset));
        break;
      case StubField::Type::ObjectGroup:
        getStubField<ICStub, ObjectGroup*>(dest, offset)
            .init(getStubField<ICStub, ObjectGroup*>(src, offset));
        break;
      case StubField::Type::Symbol:
        getStubField<ICStub, JS::Symbol*>(dest, offset)
            .init(getStubField<ICStub, JS::Symbol*>(src, offset));
        break;
      case StubField::Type::String:
        getStubField<ICStub, JSString*>(dest, offset)
            .init(getStubField<ICStub, JSString*>(src, offset));
        break;
      case StubField::Type::Id:
        getStubField<ICStub, jsid>(dest, offset)
            .init(getStubField<ICStub, jsid>(src, offset));
        break;
      case StubField::Type::Value:
        getStubField<ICStub, Value>(dest, offset)
            .init(getStubField<ICStub, Value>(src, offset));
        break;
      case StubField::Type::Limit:
        return;  // Done.
    }
    field++;
    offset += StubField::sizeInBytes(type);
  }
}

template <typename T>
static GCPtr<T>* AsGCPtr(uintptr_t* ptr) {
  return reinterpret_cast<GCPtr<T>*>(ptr);
}

uintptr_t CacheIRStubInfo::getStubRawWord(ICStub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData) % sizeof(uintptr_t) == 0);
  return *(uintptr_t*)(stubData + offset);
}

template <class Stub, class T>
GCPtr<T>& CacheIRStubInfo::getStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData) % sizeof(uintptr_t) == 0);

  return *AsGCPtr<T>((uintptr_t*)(stubData + offset));
}

template GCPtr<Shape*>& CacheIRStubInfo::getStubField<ICStub>(
    ICStub* stub, uint32_t offset) const;
template GCPtr<ObjectGroup*>& CacheIRStubInfo::getStubField<ICStub>(
    ICStub* stub, uint32_t offset) const;
template GCPtr<JSObject*>& CacheIRStubInfo::getStubField<ICStub>(
    ICStub* stub, uint32_t offset) const;
template GCPtr<JSString*>& CacheIRStubInfo::getStubField<ICStub>(
    ICStub* stub, uint32_t offset) const;
template GCPtr<JS::Symbol*>& CacheIRStubInfo::getStubField<ICStub>(
    ICStub* stub, uint32_t offset) const;
template GCPtr<JS::Value>& CacheIRStubInfo::getStubField<ICStub>(
    ICStub* stub, uint32_t offset) const;
template GCPtr<jsid>& CacheIRStubInfo::getStubField<ICStub>(
    ICStub* stub, uint32_t offset) const;

template <typename T, typename V>
static void InitGCPtr(uintptr_t* ptr, V val) {
  AsGCPtr<T>(ptr)->init(mozilla::BitwiseCast<T>(val));
}

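// Write the writer's collected stub fields into freshly allocated stub data,
// initializing GC pointer fields via GCPtr::init.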
void CacheIRWriter::copyStubData(uint8_t* dest) const {
  MOZ_ASSERT(!failed());

  uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);

  for (const StubField& field : stubFields_) {
    switch (field.type()) {
      case StubField::Type::RawWord:
        *destWords = field.asWord();
        break;
      case StubField::Type::Shape:
        InitGCPtr<Shape*>(destWords, field.asWord());
        break;
      case StubField::Type::JSObject:
        InitGCPtr<JSObject*>(destWords, field.asWord());
        break;
      case StubField::Type::ObjectGroup:
        InitGCPtr<ObjectGroup*>(destWords, field.asWord());
        break;
      case StubField::Type::Symbol:
        InitGCPtr<JS::Symbol*>(destWords, field.asWord());
        break;
      case StubField::Type::String:
        InitGCPtr<JSString*>(destWords, field.asWord());
        break;
      case StubField::Type::Id:
        InitGCPtr<jsid>(destWords, field.asWord());
        break;
      case StubField::Type::RawInt64:
      case StubField::Type::DOMExpandoGeneration:
        *reinterpret_cast<uint64_t*>(destWords) = field.asInt64();
        break;
      case StubField::Type::Value:
        AsGCPtr<Value>(destWords)->init(
            Value::fromRawBits(uint64_t(field.asInt64())));
        break;
      case StubField::Type::Limit:
        MOZ_CRASH("Invalid type");
    }
    destWords += StubField::sizeInBytes(field.type()) / sizeof(uintptr_t);
  }
}

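// Trace every GC thing stored in a stub's stub data, using the field types
// recorded in the CacheIRStubInfo to find the GC pointers.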
template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
                           const CacheIRStubInfo* stubInfo) {
  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    StubField::Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case StubField::Type::RawWord:
      case StubField::Type::RawInt64:
      case StubField::Type::DOMExpandoGeneration:
        break;
      case StubField::Type::Shape:
        TraceNullableEdge(trc, &stubInfo->getStubField<T, Shape*>(stub, offset),
                          "cacheir-shape");
        break;
      case StubField::Type::ObjectGroup:
        TraceNullableEdge(
            trc, &stubInfo->getStubField<T, ObjectGroup*>(stub, offset),
            "cacheir-group");
        break;
      case StubField::Type::JSObject:
        TraceNullableEdge(trc,
                          &stubInfo->getStubField<T, JSObject*>(stub, offset),
                          "cacheir-object");
        break;
      case StubField::Type::Symbol:
        TraceNullableEdge(trc,
                          &stubInfo->getStubField<T, JS::Symbol*>(stub, offset),
                          "cacheir-symbol");
        break;
      case StubField::Type::String:
        TraceNullableEdge(trc,
                          &stubInfo->getStubField<T, JSString*>(stub, offset),
                          "cacheir-string");
        break;
      case StubField::Type::Id:
        TraceEdge(trc, &stubInfo->getStubField<T, jsid>(stub, offset),
                  "cacheir-id");
        break;
      case StubField::Type::Value:
        TraceEdge(trc, &stubInfo->getStubField<T, JS::Value>(stub, offset),
                  "cacheir-value");
        break;
      case StubField::Type::Limit:
        return;  // Done.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template void jit::TraceCacheIRStub(JSTracer* trc, ICStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
                                    const CacheIRStubInfo* stubInfo);

bool CacheIRWriter::stubDataEqualsMaybeUpdate(uint8_t* stubData,
                                              bool* updated) const {
  MOZ_ASSERT(!failed());

  *updated = false;
  const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);

  // If DOMExpandoGeneration fields are different but all other stub fields
  // are exactly the same, we overwrite the old stub data instead of attaching
  // a new stub, as the old stub is never going to succeed. This works because
  // even Ion stubs read the DOMExpandoGeneration field from the stub instead
  // of baking it in.
  bool expandoGenerationIsDifferent = false;

  for (const StubField& field : stubFields_) {
    if (field.sizeIsWord()) {
      if (field.asWord() != *stubDataWords) return false;
      stubDataWords++;
      continue;
    }

    if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
      if (field.type() != StubField::Type::DOMExpandoGeneration) return false;
      expandoGenerationIsDifferent = true;
    }
    stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
  }

  if (expandoGenerationIsDifferent) {
    copyStubData(stubData);
    *updated = true;
  }

  return true;
}

HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
  HashNumber hash = mozilla::HashBytes(l.code, l.length);
  hash = mozilla::AddToHash(hash, uint32_t(l.kind));
  hash = mozilla::AddToHash(hash, uint32_t(l.engine));
  return hash;
}

bool CacheIRStubKey::match(const CacheIRStubKey& entry,
                           const CacheIRStubKey::Lookup& l) {
  if (entry.stubInfo->kind() != l.kind) return false;

  if (entry.stubInfo->engine() != l.engine) return false;

  if (entry.stubInfo->codeLength() != l.length) return false;

  if (!mozilla::PodEqual(entry.stubInfo->code(), l.code, l.length))
    return false;

  return true;
}

CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
    : CacheIRReader(stubInfo->code(),
                    stubInfo->code() + stubInfo->codeLength()) {}

CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
                                      bool makesGCCalls,
                                      uint32_t stubDataOffset,
                                      const CacheIRWriter& writer) {
  size_t numStubFields = writer.numStubFields();
  size_t bytesNeeded =
      sizeof(CacheIRStubInfo) + writer.codeLength() +
      (numStubFields + 1);  // +1 for the GCType::Limit terminator.
  uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
  if (!p) return nullptr;

  // Copy the CacheIR code.
  uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
  mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

  static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                "StubField::Type must fit in uint8_t");

  // Copy the stub field types.
  uint8_t* fieldTypes = codeStart + writer.codeLength();
  for (size_t i = 0; i < numStubFields; i++)
    fieldTypes[i] = uint8_t(writer.stubFieldType(i));
  fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

  return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
                                 codeStart, writer.codeLength(), fieldTypes);
}

bool OperandLocation::operator==(const OperandLocation& other) const {
  if (kind_ != other.kind_) return false;

  switch (kind()) {
    case Uninitialized:
      return true;
    case PayloadReg:
      return payloadReg() == other.payloadReg() &&
             payloadType() == other.payloadType();
    case ValueReg:
      return valueReg() == other.valueReg();
    case PayloadStack:
      return payloadStack() == other.payloadStack() &&
             payloadType() == other.payloadType();
    case ValueStack:
      return valueStack() == other.valueStack();
    case BaselineFrame:
      return baselineFrameSlot() == other.baselineFrameSlot();
    case Constant:
      return constant() == other.constant();
    case DoubleReg:
      return doubleReg() == other.doubleReg();
  }

  MOZ_CRASH("Invalid OperandLocation kind");
}

AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
  if (output_.hasValue())
    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
  else if (!output_.typedReg().isFloat())
    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
}

AutoOutputRegister::~AutoOutputRegister() {
  if (output_.hasValue())
    alloc_.releaseValueRegister(output_.valueReg());
  else if (!output_.typedReg().isFloat())
    alloc_.releaseRegister(output_.typedReg().gpr());
}

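// Two failure paths can be shared if they restore the same state: the same
// stack height, spilled registers, and input operand locations.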
bool FailurePath::canShareFailurePath(const FailurePath& other) const {
  if (stackPushed_ != other.stackPushed_) return false;

  if (spilledRegs_.length() != other.spilledRegs_.length()) return false;

  for (size_t i = 0; i < spilledRegs_.length(); i++) {
    if (spilledRegs_[i] != other.spilledRegs_[i]) return false;
  }

  MOZ_ASSERT(inputs_.length() == other.inputs_.length());

  for (size_t i = 0; i < inputs_.length(); i++) {
    if (inputs_[i] != other.inputs_[i]) return false;
  }
  return true;
}

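// Snapshot the allocator's current state (input operand locations, spilled
// registers, stack height) so a failure path can later restore it; identical
// consecutive snapshots share a single path.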
bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
  FailurePath newFailure;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    if (!newFailure.appendInput(allocator.operandLocation(i))) return false;
  }
  if (!newFailure.setSpilledRegs(allocator.spilledRegs())) return false;
  newFailure.setStackPushed(allocator.stackPushed());

  // Reuse the previous failure path if the current one is the same, to
  // avoid emitting duplicate code.
  if (failurePaths.length() > 0 &&
      failurePaths.back().canShareFailurePath(newFailure)) {
    *failure = &failurePaths.back();
    return true;
  }

  if (!failurePaths.append(Move(newFailure))) return false;

  *failure = &failurePaths.back();
  return true;
}

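// Emit the code for a failure path: restore the allocator to the state
// captured by addFailurePath, bind the failure label, and move the inputs
// back to their original locations.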
bool CacheIRCompiler::emitFailurePath(size_t index) {
  FailurePath& failure = failurePaths[index];

  allocator.setStackPushed(failure.stackPushed());

  for (size_t i = 0; i < writer_.numInputOperands(); i++)
    allocator.setOperandLocation(i, failure.input(i));

  if (!allocator.setSpilledRegs(failure.spilledRegs())) return false;

  masm.bind(failure.label());
  allocator.restoreInputState(masm);
  return true;
}

bool CacheIRCompiler::emitGuardIsNumber() {
  ValOperandId inputId = reader.valOperandId();
  JSValueType knownType = allocator.knownType(inputId);

  // Doubles and ints are numbers!
  if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32)
    return true;

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsObject() {
  ValOperandId inputId = reader.valOperandId();
  if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) return true;

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;
  masm.branchTestObject(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNullOrUndefined() {
  ValOperandId inputId = reader.valOperandId();
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL)
    return true;

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  Label success;
  masm.branchTestNull(Assembler::Equal, input, &success);
  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());

  masm.bind(&success);
  return true;
}

bool CacheIRCompiler::emitGuardIsObjectOrNull() {
  ValOperandId inputId = reader.valOperandId();
  JSValueType knownType = allocator.knownType(inputId);
  if (knownType == JSVAL_TYPE_OBJECT || knownType == JSVAL_TYPE_NULL)
    return true;

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  Label done;
  masm.branchTestObject(Assembler::Equal, input, &done);
  masm.branchTestNull(Assembler::NotEqual, input, failure->label());
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitGuardIsString() {
  ValOperandId inputId = reader.valOperandId();
  if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) return true;

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;
  masm.branchTestString(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsSymbol() {
  ValOperandId inputId = reader.valOperandId();
  if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) return true;

  ValueOperand input = allocator.useValueRegister(masm, inputId);
  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;
  masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsInt32Index() {
  ValOperandId inputId = reader.valOperandId();
  Register output = allocator.defineRegister(masm, reader.int32OperandId());

  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
    Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
    masm.move32(input, output);
    return true;
  }

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  Label notInt32, done;
  masm.branchTestInt32(Assembler::NotEqual, input, &notInt32);
  masm.unboxInt32(input, output);
  masm.jump(&done);

  masm.bind(&notInt32);

  if (cx_->runtime()->jitSupportsFloatingPoint) {
    masm.branchTestDouble(Assembler::NotEqual, input, failure->label());

    // If we're compiling a Baseline IC, FloatReg0 is always available.
    Label failurePopReg;
    if (mode_ != Mode::Baseline) masm.push(FloatReg0);

    masm.unboxDouble(input, FloatReg0);
    // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
    masm.convertDoubleToInt32(
        FloatReg0, output,
        (mode_ == Mode::Baseline) ? failure->label() : &failurePopReg, false);
    if (mode_ != Mode::Baseline) {
      masm.pop(FloatReg0);
      masm.jump(&done);

      masm.bind(&failurePopReg);
      masm.pop(FloatReg0);
      masm.jump(failure->label());
    }
  } else {
    masm.jump(failure->label());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitGuardType() {
  ValOperandId inputId = reader.valOperandId();
  JSValueType type = reader.valueType();

  if (allocator.knownType(inputId) == type) return true;

  ValueOperand input = allocator.useValueRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  switch (type) {
    case JSVAL_TYPE_STRING:
      masm.branchTestString(Assembler::NotEqual, input, failure->label());
      break;
    case JSVAL_TYPE_SYMBOL:
      masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
      break;
    case JSVAL_TYPE_INT32:
      masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
      break;
    case JSVAL_TYPE_DOUBLE:
      masm.branchTestDouble(Assembler::NotEqual, input, failure->label());
      break;
    case JSVAL_TYPE_BOOLEAN:
      masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
      break;
    case JSVAL_TYPE_UNDEFINED:
      masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
      break;
    case JSVAL_TYPE_NULL:
      masm.branchTestNull(Assembler::NotEqual, input, failure->label());
      break;
    default:
      MOZ_CRASH("Unexpected type");
  }

  return true;
}

bool CacheIRCompiler::emitGuardClass() {
  ObjOperandId objId = reader.objOperandId();
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  const Class* clasp = nullptr;
  switch (reader.guardClassKind()) {
    case GuardClassKind::Array:
      clasp = &ArrayObject::class_;
      break;
    case GuardClassKind::MappedArguments:
      clasp = &MappedArgumentsObject::class_;
      break;
    case GuardClassKind::UnmappedArguments:
      clasp = &UnmappedArgumentsObject::class_;
      break;
    case GuardClassKind::WindowProxy:
      clasp = cx_->runtime()->maybeWindowProxyClass();
      break;
    case GuardClassKind::JSFunction:
      clasp = &JSFunction::class_;
      break;
  }
  MOZ_ASSERT(clasp);

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}

bool CacheIRCompiler::emitGuardIsNativeFunction() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  JSNative nativeFunc = reinterpret_cast<JSNative>(reader.pointer());
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Ensure obj is a function.
  const Class* clasp = &JSFunction::class_;
  masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                          failure->label());

  // Ensure function native matches.
  masm.branchPtr(Assembler::NotEqual,
                 Address(obj, JSFunction::offsetOfNativeOrEnv()),
                 ImmPtr(nativeFunc), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsNativeObject() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  masm.branchIfNonNativeObj(obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardIsProxy() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNotDOMProxy() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
                                    GetDOMProxyHandlerFamily(),
                                    failure->label());
  return true;
}

emitGuardSpecificInt32Immediate()1419 bool CacheIRCompiler::emitGuardSpecificInt32Immediate() {
1420   Register reg = allocator.useRegister(masm, reader.int32OperandId());
1421   int32_t ival = reader.int32Immediate();
1422   Assembler::Condition cond = (Assembler::Condition)reader.readByte();
1423 
1424   FailurePath* failure;
1425   if (!addFailurePath(&failure)) return false;
1426 
  masm.branch32(Assembler::InvertCondition(cond), reg, Imm32(ival),
                failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardMagicValue() {
  ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
  JSWhyMagic magic = reader.whyMagic();

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  masm.branchTestMagicValue(Assembler::NotEqual, val, magic, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNoUnboxedExpando() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando());
  masm.branchPtr(Assembler::NotEqual, expandoAddr, ImmWord(0),
                 failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardAndLoadUnboxedExpando() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register output = allocator.defineRegister(masm, reader.objOperandId());

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

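  // Load the expando object and fail if the object does not have one.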
  Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando());
  masm.loadPtr(expandoAddr, output);
  masm.branchTestPtr(Assembler::Zero, output, output, failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNoDetachedTypedObjects() {
  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // All stubs manipulating typed objects must check the compartment-wide
  // flag indicating whether their underlying storage might be detached, to
  // bail out if needed.
  int32_t* address = &cx_->compartment()->detachedTypedObjects;
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(address), Imm32(0),
                failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardNoDenseElements() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Make sure there are no dense elements.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardAndGetIndexFromString() {
  Register str = allocator.useRegister(masm, reader.stringOperandId());
  Register output = allocator.defineRegister(masm, reader.int32OperandId());

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  Label vmCall, done;
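  // Fast path: strings can cache a previously computed index in their flags
  // word; loadStringIndexValue jumps to vmCall if no index is cached there.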
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    masm.setupUnalignedABICall(output);
    masm.passABIArg(str);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GetIndexFromString));
    masm.mov(ReturnReg, output);

    LiveRegisterSet ignore;
    ignore.add(output);
    masm.PopRegsInMaskIgnore(save, ignore);

    // GetIndexFromString returns a negative value on failure.
    masm.branchTest32(Assembler::Signed, output, output, failure->label());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadProto() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register reg = allocator.defineRegister(masm, reader.objOperandId());
  masm.loadObjProto(obj, reg);
  return true;
}

bool CacheIRCompiler::emitLoadEnclosingEnvironment() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register reg = allocator.defineRegister(masm, reader.objOperandId());
  masm.extractObject(
      Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
  return true;
}

bool CacheIRCompiler::emitLoadWrapperTarget() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register reg = allocator.defineRegister(masm, reader.objOperandId());

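  // The wrapped target object is stored in the proxy's private reserved slot.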
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
  masm.unboxObject(
      Address(reg, detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
  return true;
}

bool CacheIRCompiler::emitLoadValueTag() {
  ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
  Register res = allocator.defineRegister(masm, reader.valueTagOperandId());

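  // On some platforms extractTag returns the tag in a register other than res
  // (e.g. the value's type register on 32-bit); copy it over if needed.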
  Register tag = masm.extractTag(val, res);
  if (tag != res) masm.mov(tag, res);
  return true;
}

bool CacheIRCompiler::emitLoadDOMExpandoValue() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  ValueOperand val = allocator.defineValueRegister(masm, reader.valOperandId());

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               val.scratchReg());
  masm.loadValue(Address(val.scratchReg(),
                         detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                 val);
  return true;
}

bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration() {
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  ValueOperand output =
      allocator.defineValueRegister(masm, reader.valOperandId());

  // Determine the expando's Address.
  Register scratch = output.scratchReg();
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address expandoAddr(scratch,
                      detail::ProxyReservedSlots::offsetOfPrivateSlot());

#ifdef DEBUG
  // Private values are stored as doubles, so assert we have a double.
  Label ok;
  masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
  masm.assumeUnreachable("DOM expando is not a PrivateValue!");
  masm.bind(&ok);
#endif

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(expandoAddr, scratch);

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
                 output);
  return true;
}

bool CacheIRCompiler::emitLoadUndefinedResult() {
  AutoOutputRegister output(*this);
  if (output.hasValue())
    masm.moveValue(UndefinedValue(), output.valueReg());
  else
    masm.assumeUnreachable("Should have monitored undefined result");
  return true;
}

static void EmitStoreBoolean(MacroAssembler& masm, bool b,
                             const AutoOutputRegister& output) {
  if (output.hasValue()) {
    Value val = BooleanValue(b);
    masm.moveValue(val, output.valueReg());
  } else {
    MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
    masm.movePtr(ImmWord(b), output.typedReg().gpr());
  }
}

bool CacheIRCompiler::emitLoadBooleanResult() {
  AutoOutputRegister output(*this);
  bool b = reader.readBool();
  EmitStoreBoolean(masm, b, output);

  return true;
}

static void EmitStoreResult(MacroAssembler& masm, Register reg,
                            JSValueType type,
                            const AutoOutputRegister& output) {
  if (output.hasValue()) {
    masm.tagValue(type, reg, output.valueReg());
    return;
  }
  if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
    masm.convertInt32ToDouble(reg, output.typedReg().fpu());
    return;
  }
  if (type == output.type()) {
    masm.mov(reg, output.typedReg().gpr());
    return;
  }
  masm.assumeUnreachable("Should have monitored result");
}

bool CacheIRCompiler::emitLoadInt32ArrayLengthResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
  masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);

  // Guard length fits in an int32.
  masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
  EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Get initial length value.
  masm.unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
                  scratch);

  // Test if length has been overridden.
  masm.branchTest32(Assembler::NonZero, scratch,
                    Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT),
                    failure->label());

  // Shift out arguments length and return it. No need to type monitor
  // because this stub always returns int32.
  masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratch);
  EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
  return true;
}

bool CacheIRCompiler::emitLoadFunctionLengthResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Get the JSFunction flags.
  masm.load16ZeroExtend(Address(obj, JSFunction::offsetOfFlags()), scratch);

  // Functions with lazy scripts don't store their length.
  // If the length was resolved before, the length property might be shadowed.
  masm.branchTest32(
      Assembler::NonZero, scratch,
      Imm32(JSFunction::INTERPRETED_LAZY | JSFunction::RESOLVED_LENGTH),
      failure->label());

  Label boundFunction;
  masm.branchTest32(Assembler::NonZero, scratch, Imm32(JSFunction::BOUND_FUN),
                    &boundFunction);
  Label interpreted;
  masm.branchTest32(Assembler::NonZero, scratch, Imm32(JSFunction::INTERPRETED),
                    &interpreted);

  // Load the length of the native function.
  masm.load16ZeroExtend(Address(obj, JSFunction::offsetOfNargs()), scratch);
  Label done;
  masm.jump(&done);

  masm.bind(&boundFunction);
  // Bound functions might have a non-int32 length.
  Address boundLength(
      obj, FunctionExtended::offsetOfExtendedSlot(BOUND_FUN_LENGTH_SLOT));
  masm.branchTestInt32(Assembler::NotEqual, boundLength, failure->label());
  masm.unboxInt32(boundLength, scratch);
  masm.jump(&done);

  masm.bind(&interpreted);
  // Load the length from the function's script.
  masm.loadPtr(Address(obj, JSFunction::offsetOfScript()), scratch);
  masm.load16ZeroExtend(Address(scratch, JSScript::offsetOfFunLength()),
                        scratch);

  masm.bind(&done);
  EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
  return true;
}

bool CacheIRCompiler::emitLoadStringLengthResult() {
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, reader.stringOperandId());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadStringLength(str, scratch);
  EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
  return true;
}

bool CacheIRCompiler::emitLoadStringCharResult() {
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, reader.stringOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Bounds check, load string char.
  masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                            scratch1, failure->label());
  masm.loadStringChar(str, index, scratch1, scratch2, failure->label());

  // Load StaticString for this char.
  masm.boundsCheck32PowerOfTwo(scratch1, StaticStrings::UNIT_STATIC_LIMIT,
                               failure->label());
  masm.movePtr(ImmPtr(&cx_->staticStrings().unitStaticTable), scratch2);
  masm.loadPtr(BaseIndex(scratch2, scratch1, ScalePointer), scratch2);

  EmitStoreResult(masm, scratch2, JSVAL_TYPE_STRING, output);
  return true;
}

bool CacheIRCompiler::emitLoadArgumentsObjectArgResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Get initial length value.
  masm.unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()),
                  scratch1);

  // Ensure no overridden length/element.
  masm.branchTest32(Assembler::NonZero, scratch1,
                    Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
                          ArgumentsObject::ELEMENT_OVERRIDDEN_BIT),
                    failure->label());

  // Bounds check.
  masm.rshift32(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratch1);
  masm.spectreBoundsCheck32(index, scratch1, scratch2, failure->label());

  // Load ArgumentsData.
  masm.loadPrivate(Address(obj, ArgumentsObject::getDataSlotOffset()),
                   scratch1);

  // Fail if we have a RareArgumentsData (elements were deleted).
  masm.branchPtr(Assembler::NotEqual,
                 Address(scratch1, offsetof(ArgumentsData, rareData)),
                 ImmWord(0), failure->label());

  // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
  BaseValueIndex argValue(scratch1, index, ArgumentsData::offsetOfArgs());
  masm.branchTestMagic(Assembler::Equal, argValue, failure->label());
  masm.loadValue(argValue, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Bounds check.
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch1, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());
  masm.loadTypedOrValue(element, output);
  return true;
}

bool CacheIRCompiler::emitGuardIndexIsNonNegative() {
  Register index = allocator.useRegister(masm, reader.int32OperandId());

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
  return true;
}

bool CacheIRCompiler::emitGuardTagNotEqual() {
  Register lhs = allocator.useRegister(masm, reader.valueTagOperandId());
  Register rhs = allocator.useRegister(masm, reader.valueTagOperandId());

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  Label done;
  masm.branch32(Assembler::Equal, lhs, rhs, failure->label());

  // If both lhs and rhs are numbers, we can't use tag comparison for
  // inequality: an Int32 tag and a Double tag differ even when the values
  // are numerically equal.
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  masm.jump(failure->label());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementHoleResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  if (!output.hasValue()) {
    masm.assumeUnreachable(
        "Should have monitored undefined value after attaching stub");
    return true;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);

  // Load the value.
  Label done;
  masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
  masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);

  // Load undefined for the hole.
  masm.bind(&hole);
  masm.moveValue(UndefinedValue(), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadTypedElementExistsResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());
  TypedThingLayout layout = reader.typedThingLayout();
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label outOfBounds, done;

  // Bounds check.
  LoadTypedThingLength(masm, layout, obj, scratch);
  masm.branch32(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementExistsResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unsigned compare sends negative indices to next IC.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());

  // Hole check.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  EmitStoreBoolean(masm, true, output);
  return true;
}

bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);

  // Load value and replace with true.
  Label done;
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &hole);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  // Load false for the hole.
  masm.bind(&hole);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitArrayJoinResult() {
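  // Array.prototype.join needs no separator logic for arrays of length 0 or
  // 1, so only those cases are handled inline; anything else falls back to
  // the generic path.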
  ObjOperandId objId = reader.objOperandId();

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
  Address lengthAddr(scratch, ObjectElements::offsetOfLength());

  // If array length is 0, return empty string.
  Label finished;

  {
    Label arrayNotEmpty;
    masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(0), &arrayNotEmpty);
    masm.movePtr(ImmGCPtr(cx_->names().empty), scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
    masm.jump(&finished);
    masm.bind(&arrayNotEmpty);
  }

  // Otherwise, handle array length 1 case.
  masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(1), failure->label());

  // But only if initializedLength is also 1.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(1), failure->label());

  // And only if elem0 is a string.
  Address elementAddr(scratch, 0);
  masm.branchTestString(Assembler::NotEqual, elementAddr, failure->label());

  // Store the value.
  masm.loadValue(elementAddr, output.valueReg());

  masm.bind(&finished);

  return true;
}

bool CacheIRCompiler::emitLoadTypedElementResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());
  TypedThingLayout layout = reader.typedThingLayout();
  Scalar::Type type = reader.scalarType();

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  if (!output.hasValue()) {
    if (type == Scalar::Float32 || type == Scalar::Float64) {
      if (output.type() != JSVAL_TYPE_DOUBLE) {
        masm.assumeUnreachable(
            "Should have monitored double after attaching stub");
        return true;
      }
    } else {
      if (output.type() != JSVAL_TYPE_INT32 &&
          output.type() != JSVAL_TYPE_DOUBLE) {
        masm.assumeUnreachable(
            "Should have monitored int32 after attaching stub");
        return true;
      }
    }
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // Bounds check.
  LoadTypedThingLength(masm, layout, obj, scratch1);
  masm.spectreBoundsCheck32(index, scratch1, scratch2, failure->label());

  // Load the elements vector.
  LoadTypedThingData(masm, layout, obj, scratch1);

  // Load the value.
  BaseIndex source(scratch1, index, ScaleFromElemWidth(Scalar::byteSize(type)));
  if (output.hasValue()) {
    masm.loadFromTypedArray(type, source, output.valueReg(),
                            *allowDoubleResult_, scratch1, failure->label());
  } else {
    bool needGpr = (type == Scalar::Int8 || type == Scalar::Uint8 ||
                    type == Scalar::Int16 || type == Scalar::Uint16 ||
                    type == Scalar::Uint8Clamped || type == Scalar::Int32);
    if (needGpr && output.type() == JSVAL_TYPE_DOUBLE) {
      // Load the element as integer, then convert it to double. Convert from
      // the loaded register, not from memory: the load handles sub-int32
      // element widths, and it clobbers scratch1, the base of source.
      masm.loadFromTypedArray(type, source, AnyRegister(scratch1), scratch1,
                              failure->label());
      masm.convertInt32ToDouble(scratch1, output.typedReg().fpu());
    } else {
      masm.loadFromTypedArray(type, source, output.typedReg(), scratch1,
                              failure->label());
    }
  }
  return true;
}

void CacheIRCompiler::emitLoadTypedObjectResultShared(
    const Address& fieldAddr, Register scratch, uint32_t typeDescr,
    const AutoOutputRegister& output) {
  MOZ_ASSERT(output.hasValue());

  if (SimpleTypeDescrKeyIsScalar(typeDescr)) {
    Scalar::Type type = ScalarTypeFromSimpleTypeDescrKey(typeDescr);
    masm.loadFromTypedArray(type, fieldAddr, output.valueReg(),
                            /* allowDouble = */ true, scratch, nullptr);
  } else {
    ReferenceTypeDescr::Type type =
        ReferenceTypeFromSimpleTypeDescrKey(typeDescr);
    switch (type) {
      case ReferenceTypeDescr::TYPE_ANY:
        masm.loadValue(fieldAddr, output.valueReg());
        break;

      case ReferenceTypeDescr::TYPE_OBJECT: {
        Label notNull, done;
        masm.loadPtr(fieldAddr, scratch);
        masm.branchTestPtr(Assembler::NonZero, scratch, scratch, &notNull);
        masm.moveValue(NullValue(), output.valueReg());
        masm.jump(&done);
        masm.bind(&notNull);
        masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
        masm.bind(&done);
        break;
      }

      case ReferenceTypeDescr::TYPE_STRING:
        masm.loadPtr(fieldAddr, scratch);
        masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
        break;

      default:
        MOZ_CRASH("Invalid ReferenceTypeDescr");
    }
  }
}

bool CacheIRCompiler::emitLoadObjectResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());

  if (output.hasValue())
    masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  else
    masm.mov(obj, output.typedReg().gpr());

  return true;
}

bool CacheIRCompiler::emitLoadTypeOfObjectResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

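  // typeOfObject dispatches on the object's class: callables are "function",
  // classes that emulate undefined are "undefined", everything else is
  // "object". Objects whose answer can't be decided from the class alone
  // (e.g. proxies) fall through to slowCheck.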
  Label slowCheck, isObject, isCallable, isUndefined, done;
  masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.moveValue(StringValue(cx_->names().function), output.valueReg());
  masm.jump(&done);

  masm.bind(&isUndefined);
  masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
  masm.jump(&done);

  masm.bind(&isObject);
  masm.moveValue(StringValue(cx_->names().object), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slowCheck);
    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.movePtr(ImmPtr(cx_->runtime()), scratch);
    masm.passABIArg(scratch);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, TypeOfObject));
    masm.mov(ReturnReg, scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadInt32TruthyResult() {
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

  Label ifFalse, done;
  masm.branchTestInt32Truthy(false, val, &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadStringTruthyResult() {
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, reader.stringOperandId());

  Label ifFalse, done;
  masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
                Imm32(0), &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitLoadDoubleTruthyResult() {
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

  Label ifFalse, done;

  // If we're compiling a Baseline IC, FloatReg0 is always available.
  if (mode_ != Mode::Baseline) masm.push(FloatReg0);

  masm.unboxDouble(val, FloatReg0);

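  // NaN and both zeroes are falsy; branchTestDoubleTruthy handles all of them.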
  masm.branchTestDoubleTruthy(false, FloatReg0, &ifFalse);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  // Bind done before the pop so both paths restore FloatReg0.
  masm.bind(&done);
  if (mode_ != Mode::Baseline) masm.pop(FloatReg0);
  return true;
}

bool CacheIRCompiler::emitLoadObjectTruthyResult() {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label emulatesUndefined, slowPath, done;
  masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
                                       &emulatesUndefined);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&emulatesUndefined);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&slowPath);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(obj);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined));
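  // EmulatesUndefined returns true when the object is falsy, so invert the
  // returned bool to get the object's truthiness.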
  masm.convertBoolToInt32(ReturnReg, ReturnReg);
  masm.xor32(Imm32(1), ReturnReg);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareStringResult() {
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, reader.stringOperandId());
  Register right = allocator.useRegister(masm, reader.stringOperandId());
  JSOp op = reader.jsop();

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  masm.compareStrings(op, left, right, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}

bool CacheIRCompiler::emitComparePointerResultShared(bool symbol) {
  AutoOutputRegister output(*this);

  Register left = symbol ? allocator.useRegister(masm, reader.symbolOperandId())
                         : allocator.useRegister(masm, reader.objOperandId());
  Register right = symbol
                       ? allocator.useRegister(masm, reader.symbolOperandId())
                       : allocator.useRegister(masm, reader.objOperandId());
  JSOp op = reader.jsop();

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label ifTrue, done;
  masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
                 &ifTrue);

  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifTrue);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitCompareObjectResult() {
  return emitComparePointerResultShared(false);
}

bool CacheIRCompiler::emitCompareSymbolResult() {
  return emitComparePointerResultShared(true);
}

bool CacheIRCompiler::emitCallPrintString() {
  const char* str = reinterpret_cast<char*>(reader.pointer());
  masm.printf(str);
  return true;
}

bool CacheIRCompiler::emitBreakpoint() {
  masm.breakpoint();
  return true;
}

void CacheIRCompiler::emitStoreTypedObjectReferenceProp(
    ValueOperand val, ReferenceTypeDescr::Type type, const Address& dest,
    Register scratch) {
  // Callers will post-barrier this store.

  switch (type) {
    case ReferenceTypeDescr::TYPE_ANY:
      EmitPreBarrier(masm, dest, MIRType::Value);
      masm.storeValue(val, dest);
      break;

    case ReferenceTypeDescr::TYPE_OBJECT: {
      EmitPreBarrier(masm, dest, MIRType::Object);
      Label isNull, done;
      masm.branchTestObject(Assembler::NotEqual, val, &isNull);
      masm.unboxObject(val, scratch);
      masm.storePtr(scratch, dest);
      masm.jump(&done);
      masm.bind(&isNull);
      masm.storePtr(ImmWord(0), dest);
      masm.bind(&done);
      break;
    }

    case ReferenceTypeDescr::TYPE_STRING:
      EmitPreBarrier(masm, dest, MIRType::String);
      masm.unboxString(val, scratch);
      masm.storePtr(scratch, dest);
      break;
  }
}

void CacheIRCompiler::emitRegisterEnumerator(Register enumeratorsList,
                                             Register iter, Register scratch) {
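  // Splice iter into the compartment's circular doubly-linked list of active
  // enumerators, just before the list head.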
  // iter->next = list
  masm.storePtr(enumeratorsList, Address(iter, NativeIterator::offsetOfNext()));

  // iter->prev = list->prev
  masm.loadPtr(Address(enumeratorsList, NativeIterator::offsetOfPrev()),
               scratch);
  masm.storePtr(scratch, Address(iter, NativeIterator::offsetOfPrev()));

  // list->prev->next = iter
  masm.storePtr(iter, Address(scratch, NativeIterator::offsetOfNext()));

  // list->prev = iter
  masm.storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
}

void CacheIRCompiler::emitPostBarrierShared(Register obj,
                                            const ConstantOrRegister& val,
                                            Register scratch,
                                            Register maybeIndex) {
  if (!cx_->nursery().exists()) return;

  if (val.constant()) {
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

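  // Only objects and strings can be allocated in the nursery here, so other
  // typed values never require a barrier.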
  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped()) {
    if (reg.type() != MIRType::Object && reg.type() != MIRType::String) return;
  }

  Label skipBarrier;
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
                                  &skipBarrier);
  } else {
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
                                 scratch, &skipBarrier);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);

  // Call one of these, depending on maybeIndex:
  //
  //   void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
  //   void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
  //                                int32_t index);
  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);
  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(cx_->runtime()), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  if (maybeIndex != InvalidReg) {
    masm.passABIArg(maybeIndex);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(
        void*, (PostWriteElementBarrier<IndexInBounds::Yes>)));
  } else {
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
  }
  masm.PopRegsInMask(save);

  masm.bind(&skipBarrier);
}

bool CacheIRCompiler::emitWrapResult() {
  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  Label done;
  // We only have to wrap objects, because we are in the same zone.
  masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);

  Register obj = output.valueReg().scratchReg();
  masm.unboxObject(output.valueReg(), obj);

  LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, WrapObjectPure));
  masm.mov(ReturnReg, obj);

  LiveRegisterSet ignore;
  ignore.add(obj);
  masm.PopRegsInMaskIgnore(save, ignore);

  // A null return value means we could not get a wrapper for this object.
  masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());

  // We clobbered the output register, so we have to retag.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());

  masm.bind(&done);
  return true;
}

bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult() {
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
  bool handleMissing = reader.readBool();

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // The object must be Native.
  masm.branchIfNonNativeObj(obj, scratch, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(idVal.scratchReg());
  if (handleMissing)
    masm.callWithABI(
        JS_FUNC_TO_DATA_PTR(void*, (GetNativeDataPropertyByValue<true>)));
  else
    masm.callWithABI(
        JS_FUNC_TO_DATA_PTR(void*, (GetNativeDataPropertyByValue<false>)));
  masm.mov(ReturnReg, scratch);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
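  // With Spectre mitigations for JIT-to-C++ calls enabled, block speculation
  // before using the call's result.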
  if (JitOptions.spectreJitToCxxCalls) masm.speculationBarrier();
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}

bool CacheIRCompiler::emitMegamorphicHasPropResult() {
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
  bool hasOwn = reader.readBool();

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.passABIArg(idVal.scratchReg());
  if (hasOwn)
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, HasNativeDataProperty<true>));
  else
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, HasNativeDataProperty<false>));
  masm.mov(ReturnReg, scratch);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}

bool CacheIRCompiler::emitCallObjectHasSparseElementResult() {
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

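  // Reserve a stack Value for the result; scratch2 points at it and is passed
  // to HasNativeElement as an out-param.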
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(scratch2.get());

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(index);
  masm.PushRegsInMask(volatileRegs);

  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(index);
  masm.passABIArg(scratch2);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, HasNativeElement));
  masm.mov(ReturnReg, scratch1);
  masm.PopRegsInMask(volatileRegs);

  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}

bool CacheIRCompiler::emitLoadInstanceOfObjectResult() {
  AutoOutputRegister output(*this);
  ValueOperand lhs = allocator.useValueRegister(masm, reader.valOperandId());
  Register proto = allocator.useRegister(masm, reader.objOperandId());

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) return false;

  Label returnFalse, returnTrue, done;
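  // A primitive LHS is never an instance of anything.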
  masm.branchTestObject(Assembler::NotEqual, lhs, &returnFalse);

  // LHS is an object. Load its proto.
  masm.unboxObject(lhs, scratch);
  masm.loadObjProto(scratch, scratch);
  {
    // Walk the proto chain until we either reach the target object,
    // nullptr or LazyProto.
    Label loop;
    masm.bind(&loop);

    masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
    masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());

    masm.loadObjProto(scratch, scratch);
    masm.jump(&loop);
  }

  masm.bind(&returnFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&returnTrue);
  EmitStoreBoolean(masm, true, output);
  // fallthrough
  masm.bind(&done);
  return true;
}