/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/BaselineCacheIR.h"

#include "jit/CacheIR.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;

// OperandLocation represents the location of an OperandId. The operand is
// either in a register or on the stack, and is either boxed or unboxed.
class OperandLocation
{
  public:
    enum Kind {
        Uninitialized = 0,
        PayloadReg,
        ValueReg,
        PayloadStack,
        ValueStack,
    };

  private:
    Kind kind_;

    union Data {
        struct {
            Register reg;
            JSValueType type;
        } payloadReg;
        ValueOperand valueReg;
        struct {
            uint32_t stackPushed;
            JSValueType type;
        } payloadStack;
        uint32_t valueStackPushed;

        Data() : valueStackPushed(0) {}
    };
    Data data_;

  public:
    OperandLocation() : kind_(Uninitialized) {}

    Kind kind() const { return kind_; }

    void setUninitialized() {
        kind_ = Uninitialized;
    }

    ValueOperand valueReg() const {
        MOZ_ASSERT(kind_ == ValueReg);
        return data_.valueReg;
    }
    Register payloadReg() const {
        MOZ_ASSERT(kind_ == PayloadReg);
        return data_.payloadReg.reg;
    }
    uint32_t payloadStack() const {
        MOZ_ASSERT(kind_ == PayloadStack);
        return data_.payloadStack.stackPushed;
    }
    uint32_t valueStack() const {
        MOZ_ASSERT(kind_ == ValueStack);
        return data_.valueStackPushed;
    }
    JSValueType payloadType() const {
        if (kind_ == PayloadReg)
            return data_.payloadReg.type;
        MOZ_ASSERT(kind_ == PayloadStack);
        return data_.payloadStack.type;
    }
    void setPayloadReg(Register reg, JSValueType type) {
        kind_ = PayloadReg;
        data_.payloadReg.reg = reg;
        data_.payloadReg.type = type;
    }
    void setValueReg(ValueOperand reg) {
        kind_ = ValueReg;
        data_.valueReg = reg;
    }
    void setPayloadStack(uint32_t stackPushed, JSValueType type) {
        kind_ = PayloadStack;
        data_.payloadStack.stackPushed = stackPushed;
        data_.payloadStack.type = type;
    }
    void setValueStack(uint32_t stackPushed) {
        kind_ = ValueStack;
        data_.valueStackPushed = stackPushed;
    }

    bool aliasesReg(Register reg) {
        if (kind_ == PayloadReg)
            return payloadReg() == reg;
        if (kind_ == ValueReg)
            return valueReg().aliases(reg);
        return false;
    }
    bool aliasesReg(ValueOperand reg) {
#if defined(JS_NUNBOX32)
        return aliasesReg(reg.typeReg()) || aliasesReg(reg.payloadReg());
#else
        return aliasesReg(reg.valueReg());
#endif
    }

    bool operator==(const OperandLocation& other) const {
        if (kind_ != other.kind_)
            return false;
        switch (kind()) {
          case Uninitialized:
            return true;
          case PayloadReg:
            return payloadReg() == other.payloadReg() && payloadType() == other.payloadType();
          case ValueReg:
            return valueReg() == other.valueReg();
          case PayloadStack:
            return payloadStack() == other.payloadStack() && payloadType() == other.payloadType();
          case ValueStack:
            return valueStack() == other.valueStack();
        }
        MOZ_CRASH("Invalid OperandLocation kind");
    }
    bool operator!=(const OperandLocation& other) const { return !operator==(other); }
};

// Class to track and allocate registers while emitting IC code.
class MOZ_RAII CacheRegisterAllocator
{
    // The original location of the inputs to the cache.
    Vector<OperandLocation, 4, SystemAllocPolicy> origInputLocations_;

    // The current location of each operand.
    Vector<OperandLocation, 8, SystemAllocPolicy> operandLocations_;

    // The registers allocated while emitting the current CacheIR op.
    // This prevents us from allocating a register and then immediately
    // clobbering it for something else, while we're still holding on to it.
    LiveGeneralRegisterSet currentOpRegs_;

    // Registers that are currently unused and available.
    AllocatableGeneralRegisterSet availableRegs_;

    // The number of bytes pushed on the native stack.
    uint32_t stackPushed_;

    // The index of the CacheIR instruction we're currently emitting.
    uint32_t currentInstruction_;

    const CacheIRWriter& writer_;

    CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
    CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;

  public:
    friend class AutoScratchRegister;

    explicit CacheRegisterAllocator(const CacheIRWriter& writer)
      : stackPushed_(0),
        currentInstruction_(0),
        writer_(writer)
    {}

    MOZ_MUST_USE bool init(const AllocatableGeneralRegisterSet& available) {
        availableRegs_ = available;
        if (!origInputLocations_.resize(writer_.numInputOperands()))
            return false;
        if (!operandLocations_.resize(writer_.numOperandIds()))
            return false;
        return true;
    }

    OperandLocation operandLocation(size_t i) const {
        return operandLocations_[i];
    }
    OperandLocation origInputLocation(size_t i) const {
        return origInputLocations_[i];
    }
    void initInputLocation(size_t i, ValueOperand reg) {
        origInputLocations_[i].setValueReg(reg);
        operandLocations_[i] = origInputLocations_[i];
    }

    void nextOp() {
        currentOpRegs_.clear();
        currentInstruction_++;
    }

    uint32_t stackPushed() const {
        return stackPushed_;
    }

    // Allocates a new register.
    Register allocateRegister(MacroAssembler& masm);
    ValueOperand allocateValueRegister(MacroAssembler& masm);

    // Returns the register for the given operand. If the operand is currently
    // not in a register, it will load it into one.
    ValueOperand useRegister(MacroAssembler& masm, ValOperandId val);
    Register useRegister(MacroAssembler& masm, ObjOperandId obj);

    // Allocates an output register for the given operand.
    Register defineRegister(MacroAssembler& masm, ObjOperandId obj);
};

// RAII class to put a scratch register back in the allocator's availableRegs
// set when we're done with it.
class MOZ_RAII AutoScratchRegister
{
    CacheRegisterAllocator& alloc_;
    Register reg_;

  public:
    AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm)
      : alloc_(alloc)
    {
        reg_ = alloc.allocateRegister(masm);
        MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
    }
    ~AutoScratchRegister() {
        MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
        alloc_.availableRegs_.add(reg_);
    }
    operator Register() const { return reg_; }
};

// The FailurePath class stores everything we need to generate a failure path
// at the end of the IC code. The failure path restores the input registers, if
// needed, and jumps to the next stub.
class FailurePath
{
    Vector<OperandLocation, 4, SystemAllocPolicy> inputs_;
    NonAssertingLabel label_;
    uint32_t stackPushed_;

  public:
    FailurePath() = default;

    FailurePath(FailurePath&& other)
      : inputs_(Move(other.inputs_)),
        label_(other.label_),
        stackPushed_(other.stackPushed_)
    {}

    Label* label() { return &label_; }

    void setStackPushed(uint32_t i) { stackPushed_ = i; }
    uint32_t stackPushed() const { return stackPushed_; }

    bool appendInput(OperandLocation loc) {
        return inputs_.append(loc);
    }
    OperandLocation input(size_t i) const {
        return inputs_[i];
    }

    // If canShareFailurePath(other) returns true, the same machine code will
    // be emitted for two failure paths, so we can share them.
    bool canShareFailurePath(const FailurePath& other) const {
        if (stackPushed_ != other.stackPushed_)
            return false;

        MOZ_ASSERT(inputs_.length() == other.inputs_.length());

        for (size_t i = 0; i < inputs_.length(); i++) {
            if (inputs_[i] != other.inputs_[i])
                return false;
        }
        return true;
    }
};

// Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
class MOZ_RAII CacheIRCompiler
{
  protected:
    JSContext* cx_;
    CacheIRReader reader;
    const CacheIRWriter& writer_;
    MacroAssembler masm;

    CacheRegisterAllocator allocator;
    Vector<FailurePath, 4, SystemAllocPolicy> failurePaths;

    CacheIRCompiler(JSContext* cx, const CacheIRWriter& writer)
      : cx_(cx),
        reader(writer),
        writer_(writer),
        allocator(writer_)
    {}

    void emitFailurePath(size_t i);
};

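// Move each input operand back to its original ValueOperand register and pop
// anything the main path left on the native stack, so the next stub sees the
// machine state it expects.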
void
CacheIRCompiler::emitFailurePath(size_t i)
{
    FailurePath& failure = failurePaths[i];

    masm.bind(failure.label());

    uint32_t stackPushed = failure.stackPushed();
    size_t numInputOperands = writer_.numInputOperands();

    for (size_t j = 0; j < numInputOperands; j++) {
        OperandLocation orig = allocator.origInputLocation(j);
        OperandLocation cur = failure.input(j);

        MOZ_ASSERT(orig.kind() == OperandLocation::ValueReg);

        // We have a cycle if a destination register will be used later as a
        // source register. If that happens, just push the current value on
        // the stack and later get it from there.
        for (size_t k = j + 1; k < numInputOperands; k++) {
            OperandLocation laterSource = failure.input(k);
            switch (laterSource.kind()) {
              case OperandLocation::ValueReg:
                if (orig.aliasesReg(laterSource.valueReg())) {
                    stackPushed += sizeof(js::Value);
                    masm.pushValue(laterSource.valueReg());
                    laterSource.setValueStack(stackPushed);
                }
                break;
              case OperandLocation::PayloadReg:
                if (orig.aliasesReg(laterSource.payloadReg())) {
                    stackPushed += sizeof(uintptr_t);
                    masm.push(laterSource.payloadReg());
                    laterSource.setPayloadStack(stackPushed, laterSource.payloadType());
                }
                break;
              case OperandLocation::PayloadStack:
              case OperandLocation::ValueStack:
              case OperandLocation::Uninitialized:
                break;
            }
        }

        switch (cur.kind()) {
          case OperandLocation::ValueReg:
            masm.moveValue(cur.valueReg(), orig.valueReg());
            break;
          case OperandLocation::PayloadReg:
            masm.tagValue(cur.payloadType(), cur.payloadReg(), orig.valueReg());
            break;
          case OperandLocation::PayloadStack: {
            MOZ_ASSERT(stackPushed >= sizeof(uintptr_t));
            Register scratch = orig.valueReg().scratchReg();
            if (cur.payloadStack() == stackPushed) {
                masm.pop(scratch);
                stackPushed -= sizeof(uintptr_t);
            } else {
                MOZ_ASSERT(cur.payloadStack() < stackPushed);
                masm.loadPtr(Address(masm.getStackPointer(), stackPushed - cur.payloadStack()),
                             scratch);
            }
            masm.tagValue(cur.payloadType(), scratch, orig.valueReg());
            break;
          }
          case OperandLocation::ValueStack:
            MOZ_ASSERT(stackPushed >= sizeof(js::Value));
            if (cur.valueStack() == stackPushed) {
                masm.popValue(orig.valueReg());
                stackPushed -= sizeof(js::Value);
            } else {
                MOZ_ASSERT(cur.valueStack() < stackPushed);
                masm.loadValue(Address(masm.getStackPointer(), stackPushed - cur.valueStack()),
                               orig.valueReg());
            }
            break;
          default:
            MOZ_CRASH();
        }
    }

    if (stackPushed > 0)
        masm.addToStackPtr(Imm32(stackPushed));
}

// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
{
    uint32_t stubDataOffset_;

  public:
    BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, uint32_t stubDataOffset)
      : CacheIRCompiler(cx, writer),
        stubDataOffset_(stubDataOffset)
    {}

    MOZ_MUST_USE bool init(CacheKind kind);

    JitCode* compile();

  private:
#define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
    CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

    Address stubAddress(uint32_t offset) const {
        return Address(ICStubReg, stubDataOffset_ + offset * sizeof(uintptr_t));
    }

    bool addFailurePath(FailurePath** failure) {
        FailurePath newFailure;
        for (size_t i = 0; i < writer_.numInputOperands(); i++) {
            if (!newFailure.appendInput(allocator.operandLocation(i)))
                return false;
        }
        newFailure.setStackPushed(allocator.stackPushed());

        // Reuse the previous failure path if the current one is the same, to
        // avoid emitting duplicate code.
        if (failurePaths.length() > 0 && failurePaths.back().canShareFailurePath(newFailure)) {
            *failure = &failurePaths.back();
            return true;
        }

        if (!failurePaths.append(Move(newFailure)))
            return false;

        *failure = &failurePaths.back();
        return true;
    }
    void emitEnterTypeMonitorIC() {
        if (allocator.stackPushed() > 0)
            masm.addToStackPtr(Imm32(allocator.stackPushed()));
        EmitEnterTypeMonitorIC(masm);
    }
    void emitReturnFromIC() {
        if (allocator.stackPushed() > 0)
            masm.addToStackPtr(Imm32(allocator.stackPushed()));
        EmitReturnFromIC(masm);
    }
};

JitCode*
BaselineCacheIRCompiler::compile()
{
#ifndef JS_USE_LINK_REGISTER
    // The first value contains the return address,
    // which we pull into ICTailCallReg for tail calls.
    masm.adjustFrame(sizeof(intptr_t));
#endif
#ifdef JS_CODEGEN_ARM
    masm.setSecondScratchReg(BaselineSecondScratchReg);
#endif

    do {
        switch (reader.readOp()) {
#define DEFINE_OP(op)                   \
          case CacheOp::op:             \
            if (!emit##op())            \
                return nullptr;         \
            break;
    CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

          default:
            MOZ_CRASH("Invalid op");
        }

        allocator.nextOp();
    } while (reader.more());

    // Done emitting the main IC code. Now emit the failure paths.
    for (size_t i = 0; i < failurePaths.length(); i++) {
        emitFailurePath(i);
        EmitStubGuardFailure(masm);
    }

    Linker linker(masm);
    AutoFlushICache afc("getStubCode");
    Rooted<JitCode*> newStubCode(cx_, linker.newCode<NoGC>(cx_, BASELINE_CODE));
    if (!newStubCode) {
        cx_->recoverFromOutOfMemory();
        return nullptr;
    }

    // All barriers are emitted off-by-default, enable them if needed.
    if (cx_->zone()->needsIncrementalBarrier())
        newStubCode->togglePreBarriers(true, DontReprotect);

    return newStubCode;
}

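// Make sure the given boxed value operand is held in a ValueOperand register,
// reloading it from its stack slot if it was spilled.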
ValueOperand
CacheRegisterAllocator::useRegister(MacroAssembler& masm, ValOperandId op)
{
    OperandLocation& loc = operandLocations_[op.id()];

    switch (loc.kind()) {
      case OperandLocation::ValueReg:
        currentOpRegs_.add(loc.valueReg());
        return loc.valueReg();

      case OperandLocation::ValueStack: {
        // The Value is on the stack. If it's on top of the stack we can just
        // pop it, else we emit a load. If we need the register again later,
        // we can always spill back.
        ValueOperand reg = allocateValueRegister(masm);
        if (loc.valueStack() == stackPushed_) {
            masm.popValue(reg);
            MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
            stackPushed_ -= sizeof(js::Value);
        } else {
            MOZ_ASSERT(loc.valueStack() < stackPushed_);
            masm.loadValue(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()), reg);
        }
        loc.setValueReg(reg);
        return reg;
      }

      // The operand should never be unboxed.
      case OperandLocation::PayloadStack:
      case OperandLocation::PayloadReg:
      case OperandLocation::Uninitialized:
        break;
    }

    MOZ_CRASH();
}

Register
CacheRegisterAllocator::useRegister(MacroAssembler& masm, ObjOperandId op)
{
    OperandLocation& loc = operandLocations_[op.id()];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        currentOpRegs_.add(loc.payloadReg());
        return loc.payloadReg();

      case OperandLocation::ValueReg: {
        // It's possible the value is still boxed: as an optimization, we unbox
        // the first time we use a value as object.
        ValueOperand val = loc.valueReg();
        availableRegs_.add(val);
        Register reg = val.scratchReg();
        availableRegs_.take(reg);
        masm.unboxObject(val, reg);
        loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT);
        currentOpRegs_.add(reg);
        return reg;
      }

      case OperandLocation::PayloadStack: {
        // The payload is on the stack. If it's on top of the stack we can just
        // pop it, else we emit a load.
        Register reg = allocateRegister(masm);
        if (loc.payloadStack() == stackPushed_) {
            masm.pop(reg);
            MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
            stackPushed_ -= sizeof(uintptr_t);
        } else {
            MOZ_ASSERT(loc.payloadStack() < stackPushed_);
            masm.loadPtr(Address(masm.getStackPointer(), stackPushed_ - loc.payloadStack()), reg);
        }
        loc.setPayloadReg(reg, loc.payloadType());
        return reg;
      }

      case OperandLocation::ValueStack: {
        // The value is on the stack, but boxed. If it's on top of the stack we
        // unbox it and then remove it from the stack, else we just unbox.
        Register reg = allocateRegister(masm);
        if (loc.valueStack() == stackPushed_) {
            masm.unboxObject(Address(masm.getStackPointer(), 0), reg);
            masm.addToStackPtr(Imm32(sizeof(js::Value)));
            MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
            stackPushed_ -= sizeof(js::Value);
        } else {
            MOZ_ASSERT(loc.valueStack() < stackPushed_);
            masm.unboxObject(Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
                             reg);
        }
        loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT);
        return reg;
      }

      case OperandLocation::Uninitialized:
        break;
    }

    MOZ_CRASH();
}

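// Reserve a payload register for an operand that has not been defined yet;
// the emitted code for the current op is expected to store the object in it.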
Register
CacheRegisterAllocator::defineRegister(MacroAssembler& masm, ObjOperandId op)
{
    OperandLocation& loc = operandLocations_[op.id()];
    MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);

    Register reg = allocateRegister(masm);
    loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT);
    return reg;
}

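// Pick a free register: prefer the available set, then recycle registers of
// operands that are dead after the current instruction, and as a last resort
// spill an operand the current op is not using to the native stack.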
Register
CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
{
    if (availableRegs_.empty()) {
        // No registers available. See if any operands are dead so we can reuse
        // their registers. Note that we skip the input operands, as those are
        // also used by failure paths, and we currently don't track those uses.
        for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) {
            if (!writer_.operandIsDead(i, currentInstruction_))
                continue;

            OperandLocation& loc = operandLocations_[i];
            switch (loc.kind()) {
              case OperandLocation::PayloadReg:
                availableRegs_.add(loc.payloadReg());
                break;
              case OperandLocation::ValueReg:
                availableRegs_.add(loc.valueReg());
                break;
              case OperandLocation::Uninitialized:
              case OperandLocation::PayloadStack:
              case OperandLocation::ValueStack:
                break;
            }
            loc.setUninitialized();
        }
    }

    if (availableRegs_.empty()) {
        // Still no registers available, try to spill unused operands to
        // the stack.
        for (size_t i = 0; i < operandLocations_.length(); i++) {
            OperandLocation& loc = operandLocations_[i];
            if (loc.kind() == OperandLocation::PayloadReg) {
                Register reg = loc.payloadReg();
                if (currentOpRegs_.has(reg))
                    continue;

                masm.push(reg);
                stackPushed_ += sizeof(uintptr_t);
                loc.setPayloadStack(stackPushed_, loc.payloadType());
                availableRegs_.add(reg);
                break; // We got a register, so break out of the loop.
            }
            if (loc.kind() == OperandLocation::ValueReg) {
                ValueOperand reg = loc.valueReg();
                if (currentOpRegs_.aliases(reg))
                    continue;

                masm.pushValue(reg);
                stackPushed_ += sizeof(js::Value);
                loc.setValueStack(stackPushed_);
                availableRegs_.add(reg);
                break; // Break out of the loop.
            }
        }
    }

    // At this point, there must be a free register. (Ion ICs don't have as
    // many registers available, so once we support Ion code generation, we may
    // have to spill some unrelated registers.)
    MOZ_RELEASE_ASSERT(!availableRegs_.empty());

    Register reg = availableRegs_.takeAny();
    currentOpRegs_.add(reg);
    return reg;
}

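// A boxed Value needs two general-purpose registers on NUNBOX32 platforms and
// a single register on 64-bit platforms.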
ValueOperand
CacheRegisterAllocator::allocateValueRegister(MacroAssembler& masm)
{
#ifdef JS_NUNBOX32
    Register reg1 = allocateRegister(masm);
    Register reg2 = allocateRegister(masm);
    return ValueOperand(reg1, reg2);
#else
    Register reg = allocateRegister(masm);
    return ValueOperand(reg);
#endif
}

bool
BaselineCacheIRCompiler::emitGuardIsObject()
{
    ValueOperand input = allocator.useRegister(masm, reader.valOperandId());
    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;
    masm.branchTestObject(Assembler::NotEqual, input, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardType()
{
    ValueOperand input = allocator.useRegister(masm, reader.valOperandId());
    JSValueType type = reader.valueType();

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    switch (type) {
      case JSVAL_TYPE_STRING:
        masm.branchTestString(Assembler::NotEqual, input, failure->label());
        break;
      case JSVAL_TYPE_SYMBOL:
        masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
        break;
      case JSVAL_TYPE_DOUBLE:
        masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
        break;
      case JSVAL_TYPE_BOOLEAN:
        masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
        break;
      default:
        MOZ_CRASH("Unexpected type");
    }

    return true;
}

bool
BaselineCacheIRCompiler::emitGuardShape()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.loadPtr(addr, scratch);
    masm.branchTestObjShape(Assembler::NotEqual, obj, scratch, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardGroup()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.loadPtr(addr, scratch);
    masm.branchTestObjGroup(Assembler::NotEqual, obj, scratch, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardProto()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.loadObjProto(obj, scratch);
    masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardClass()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    const Class* clasp = nullptr;
    switch (reader.guardClassKind()) {
      case GuardClassKind::Array:
        clasp = &ArrayObject::class_;
        break;
      case GuardClassKind::UnboxedArray:
        clasp = &UnboxedArrayObject::class_;
        break;
      case GuardClassKind::MappedArguments:
        clasp = &MappedArgumentsObject::class_;
        break;
      case GuardClassKind::UnmappedArguments:
        clasp = &UnmappedArgumentsObject::class_;
        break;
    }

    MOZ_ASSERT(clasp);
    masm.branchTestObjClass(Assembler::NotEqual, obj, scratch, clasp, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardSpecificObject()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardNoUnboxedExpando()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando());
    masm.branchPtr(Assembler::NotEqual, expandoAddr, ImmWord(0), failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardAndLoadUnboxedExpando()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Register output = allocator.defineRegister(masm, reader.objOperandId());

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando());
    masm.loadPtr(expandoAddr, output);
    masm.branchTestPtr(Assembler::Zero, output, output, failure->label());
    return true;
}

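// The stub field holds the fixed slot's byte offset from the object, so the
// slot Value can be loaded with a single base+index access into R0.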
bool
BaselineCacheIRCompiler::emitLoadFixedSlotResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    masm.loadValue(BaseIndex(obj, scratch, TimesOne), R0);
    emitEnterTypeMonitorIC();
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadDynamicSlotResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    // We're about to return, so it's safe to clobber obj now.
    masm.load32(stubAddress(reader.stubOffset()), scratch);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
    masm.loadValue(BaseIndex(obj, scratch, TimesOne), R0);
    emitEnterTypeMonitorIC();
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadUnboxedPropertyResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    JSValueType fieldType = reader.valueType();

    Address fieldOffset(stubAddress(reader.stubOffset()));
    masm.load32(fieldOffset, scratch);
    masm.loadUnboxedProperty(BaseIndex(obj, scratch, TimesOne), fieldType, R0);

    if (fieldType == JSVAL_TYPE_OBJECT)
        emitEnterTypeMonitorIC();
    else
        emitReturnFromIC();

    return true;
}

bool
BaselineCacheIRCompiler::emitGuardNoDetachedTypedObjects()
{
    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    CheckForTypedObjectWithDetachedStorage(cx_, masm, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadTypedObjectResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    TypedThingLayout layout = reader.typedThingLayout();
    uint32_t typeDescr = reader.typeDescrKey();
    Address fieldOffset(stubAddress(reader.stubOffset()));

    // Get the object's data pointer.
    LoadTypedThingData(masm, layout, obj, scratch1);

    // Get the address being loaded from.
    masm.load32(fieldOffset, scratch2);
    masm.addPtr(scratch2, scratch1);

    // Only monitor the result if the type produced by this stub might vary.
    bool monitorLoad;
    if (SimpleTypeDescrKeyIsScalar(typeDescr)) {
        Scalar::Type type = ScalarTypeFromSimpleTypeDescrKey(typeDescr);
        monitorLoad = type == Scalar::Uint32;

        masm.loadFromTypedArray(type, Address(scratch1, 0), R0, /* allowDouble = */ true,
                                scratch2, nullptr);
    } else {
        ReferenceTypeDescr::Type type = ReferenceTypeFromSimpleTypeDescrKey(typeDescr);
        monitorLoad = type != ReferenceTypeDescr::TYPE_STRING;

        switch (type) {
          case ReferenceTypeDescr::TYPE_ANY:
            masm.loadValue(Address(scratch1, 0), R0);
            break;

          case ReferenceTypeDescr::TYPE_OBJECT: {
            Label notNull, done;
            masm.loadPtr(Address(scratch1, 0), scratch1);
            masm.branchTestPtr(Assembler::NonZero, scratch1, scratch1, &notNull);
            masm.moveValue(NullValue(), R0);
            masm.jump(&done);
            masm.bind(&notNull);
            masm.tagValue(JSVAL_TYPE_OBJECT, scratch1, R0);
            masm.bind(&done);
            break;
          }

          case ReferenceTypeDescr::TYPE_STRING:
            masm.loadPtr(Address(scratch1, 0), scratch1);
            masm.tagValue(JSVAL_TYPE_STRING, scratch1, R0);
            break;

          default:
            MOZ_CRASH("Invalid ReferenceTypeDescr");
        }
    }

    if (monitorLoad)
        emitEnterTypeMonitorIC();
    else
        emitReturnFromIC();
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadUndefinedResult()
{
    masm.moveValue(UndefinedValue(), R0);

    // Normally for this op, the result would have to be monitored by TI.
    // However, since this stub ALWAYS returns UndefinedValue(), and we can be sure
    // that undefined is already registered with the type-set, this can be avoided.
    emitReturnFromIC();
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadInt32ArrayLengthResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
    masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);

    // Guard length fits in an int32.
    masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
    masm.tagValue(JSVAL_TYPE_INT32, scratch, R0);

    // The int32 type was monitored when attaching the stub, so we can
    // just return.
    emitReturnFromIC();
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadUnboxedArrayLengthResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    masm.load32(Address(obj, UnboxedArrayObject::offsetOfLength()), R0.scratchReg());
    masm.tagValue(JSVAL_TYPE_INT32, R0.scratchReg(), R0);

    // The int32 type was monitored when attaching the stub, so we can
    // just return.
    emitReturnFromIC();
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadArgumentsObjectLengthResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Get initial length value.
    masm.unboxInt32(Address(obj, ArgumentsObject::getInitialLengthSlotOffset()), scratch);

    // Test if length has been overridden.
    masm.branchTest32(Assembler::NonZero,
                      scratch,
                      Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT),
                      failure->label());

    // Shift out arguments length and return it. No need to type monitor
    // because this stub always returns int32.
    masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratch);
    masm.tagValue(JSVAL_TYPE_INT32, scratch, R0);
    emitReturnFromIC();
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadObject()
{
    Register reg = allocator.defineRegister(masm, reader.objOperandId());
    masm.loadPtr(stubAddress(reader.stubOffset()), reg);
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadProto()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Register reg = allocator.defineRegister(masm, reader.objOperandId());
    masm.loadObjProto(obj, reg);
    return true;
}

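// The cache currently has exactly one input operand, which Baseline passes
// boxed in R0.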
bool
BaselineCacheIRCompiler::init(CacheKind kind)
{
    size_t numInputs = writer_.numInputOperands();
    if (!allocator.init(ICStubCompiler::availableGeneralRegs(numInputs)))
        return false;

    MOZ_ASSERT(numInputs == 1);
    allocator.initInputLocation(0, R0);

    return true;
}

template <typename T>
static GCPtr<T>*
AsGCPtr(uintptr_t* ptr)
{
    return reinterpret_cast<GCPtr<T>*>(ptr);
}

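// Return a reference to the GC pointer stored at the given word index in the
// stub's trailing stub data.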
template<class T>
GCPtr<T>&
CacheIRStubInfo::getStubField(ICStub* stub, uint32_t field) const
{
    uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
    MOZ_ASSERT(uintptr_t(stubData) % sizeof(uintptr_t) == 0);

    return *AsGCPtr<T>((uintptr_t*)stubData + field);
}

template GCPtr<Shape*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<ObjectGroup*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;
template GCPtr<JSObject*>& CacheIRStubInfo::getStubField(ICStub* stub, uint32_t offset) const;

template <typename T>
static void
InitGCPtr(uintptr_t* ptr, uintptr_t val)
{
    AsGCPtr<T*>(ptr)->init((T*)val);
}

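// Copy the stub fields recorded by the writer into a freshly allocated stub's
// data area; GC-thing fields are initialized through their GCPtr wrappers.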
void
CacheIRWriter::copyStubData(uint8_t* dest) const
{
    uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);

    for (size_t i = 0; i < stubFields_.length(); i++) {
        switch (stubFields_[i].gcType) {
          case StubField::GCType::NoGCThing:
            destWords[i] = stubFields_[i].word;
            continue;
          case StubField::GCType::Shape:
            InitGCPtr<Shape>(destWords + i, stubFields_[i].word);
            continue;
          case StubField::GCType::JSObject:
            InitGCPtr<JSObject>(destWords + i, stubFields_[i].word);
            continue;
          case StubField::GCType::ObjectGroup:
            InitGCPtr<ObjectGroup>(destWords + i, stubFields_[i].word);
            continue;
          case StubField::GCType::Limit:
            break;
        }
        MOZ_CRASH();
    }
}

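// Generated stub code is shared per compartment: the hash table key is the
// CacheKind plus the raw CacheIR bytes, compared byte-for-byte in match().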
HashNumber
CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l)
{
    HashNumber hash = mozilla::HashBytes(l.code, l.length);
    return mozilla::AddToHash(hash, uint32_t(l.kind));
}

bool
CacheIRStubKey::match(const CacheIRStubKey& entry, const CacheIRStubKey::Lookup& l)
{
    if (entry.stubInfo->kind() != l.kind)
        return false;

    if (entry.stubInfo->codeLength() != l.length)
        return false;

    if (!mozilla::PodEqual(entry.stubInfo->code(), l.code, l.length))
        return false;

    return true;
}

CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
  : CacheIRReader(stubInfo->code(), stubInfo->code() + stubInfo->codeLength())
{}

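// A CacheIRStubInfo is allocated as one buffer: the CacheIRStubInfo header,
// followed by a copy of the CacheIR code, followed by one byte per stub field
// giving its GCType, terminated by GCType::Limit.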
CacheIRStubInfo*
CacheIRStubInfo::New(CacheKind kind, uint32_t stubDataOffset, const CacheIRWriter& writer)
{
    size_t numStubFields = writer.numStubFields();
    size_t bytesNeeded = sizeof(CacheIRStubInfo) +
                         writer.codeLength() +
                         (numStubFields + 1); // +1 for the GCType::Limit terminator.
    uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
    if (!p)
        return nullptr;

    // Copy the CacheIR code.
    uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
    mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

    static_assert(uint32_t(StubField::GCType::Limit) <= UINT8_MAX,
                  "All StubField::GCTypes must fit in uint8_t");

    // Copy the GC types of the stub fields.
    uint8_t* gcTypes = codeStart + writer.codeLength();
    for (size_t i = 0; i < numStubFields; i++)
        gcTypes[i] = uint8_t(writer.stubFieldGCType(i));
    gcTypes[numStubFields] = uint8_t(StubField::GCType::Limit);

    return new(p) CacheIRStubInfo(kind, stubDataOffset, codeStart, writer.codeLength(), gcTypes);
}

static const size_t MaxOptimizedCacheIRStubs = 16;

ICStub*
jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
                               ICFallbackStub* stub)
{
    // We shouldn't GC or report OOM (or any other exception) here.
    AutoAssertNoPendingException aanpe(cx);
    JS::AutoCheckCannotGC nogc;

    if (writer.failed())
        return nullptr;

    // Just a sanity check: the caller should ensure we don't attach an
    // unlimited number of stubs.
    MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);

    MOZ_ASSERT(kind == CacheKind::GetProp);
    uint32_t stubDataOffset = sizeof(ICCacheIR_Monitored);

    JitCompartment* jitCompartment = cx->compartment()->jitCompartment();

    // Check if we already have JitCode for this stub.
    CacheIRStubInfo* stubInfo;
    CacheIRStubKey::Lookup lookup(kind, writer.codeStart(), writer.codeLength());
    JitCode* code = jitCompartment->getCacheIRStubCode(lookup, &stubInfo);
    if (!code) {
        // We have to generate stub code.
        JitContext jctx(cx, nullptr);
        BaselineCacheIRCompiler comp(cx, writer, stubDataOffset);
        if (!comp.init(kind))
            return nullptr;

        code = comp.compile();
        if (!code)
            return nullptr;

        // Allocate the shared CacheIRStubInfo. Note that the putCacheIRStubCode
        // call below will transfer ownership to the stub code HashMap, so we
        // don't have to worry about freeing it below.
        MOZ_ASSERT(!stubInfo);
        stubInfo = CacheIRStubInfo::New(kind, stubDataOffset, writer);
        if (!stubInfo)
            return nullptr;

        CacheIRStubKey key(stubInfo);
        if (!jitCompartment->putCacheIRStubCode(lookup, key, code))
            return nullptr;
    }

    // We got our shared stub code and stub info. Time to allocate and attach a
    // new stub.

    MOZ_ASSERT(code);
    MOZ_ASSERT(stubInfo);
    MOZ_ASSERT(stub->isMonitoredFallback());

    size_t bytesNeeded = stubInfo->stubDataOffset() + writer.stubDataSize();

    // For now, no stubs can make calls so they are all allocated in the
    // optimized stub space.
    void* newStub = cx->zone()->jitZone()->optimizedStubSpace()->alloc(bytesNeeded);
    if (!newStub)
        return nullptr;

    ICStub* monitorStub = stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
    new(newStub) ICCacheIR_Monitored(code, monitorStub, stubInfo);

    writer.copyStubData((uint8_t*)newStub + stubInfo->stubDataOffset());
    stub->addNewStub((ICStub*)newStub);
    return (ICStub*)newStub;
}

void
jit::TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo)
{
    uint32_t field = 0;
    while (true) {
        switch (stubInfo->gcType(field)) {
          case StubField::GCType::NoGCThing:
            break;
          case StubField::GCType::Shape:
            TraceNullableEdge(trc, &stubInfo->getStubField<Shape*>(stub, field),
                              "baseline-cacheir-shape");
            break;
          case StubField::GCType::ObjectGroup:
            TraceNullableEdge(trc, &stubInfo->getStubField<ObjectGroup*>(stub, field),
                              "baseline-cacheir-group");
            break;
          case StubField::GCType::JSObject:
            TraceNullableEdge(trc, &stubInfo->getStubField<JSObject*>(stub, field),
                              "baseline-cacheir-object");
            break;
          case StubField::GCType::Limit:
            return; // Done.
          default:
            MOZ_CRASH();
        }
        field++;
    }
}