1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #include "jit/ScalarReplacement.h"
8 
9 #include "mozilla/Vector.h"
10 
11 #include "jit/IonAnalysis.h"
12 #include "jit/JitSpewer.h"
13 #include "jit/MIR.h"
14 #include "jit/MIRGenerator.h"
15 #include "jit/MIRGraph.h"
16 
17 #include "vm/JSObject-inl.h"
18 
19 namespace js {
20 namespace jit {
21 
// Generic driver for the scalar-replacement passes: walks the MIR graph in
// reverse postorder and applies a MemoryView (abstract interpretation of one
// allocation's memory) to every node of every reachable block.
template <typename MemoryView>
class EmulateStateOf {
 private:
  using BlockState = typename MemoryView::BlockState;

  MIRGenerator* mir_;
  MIRGraph& graph_;

  // Block state at the entrance of all basic blocks.
  Vector<BlockState*, 8, SystemAllocPolicy> states_;

 public:
  EmulateStateOf(MIRGenerator* mir, MIRGraph& graph)
      : mir_(mir), graph_(graph) {}

  bool run(MemoryView& view);
};
39 
// Traverse the graph in RPO from the view's starting block, emulating the
// effect of each node on the abstract block state and merging the out-state
// of each block into its successors.  Returns false on OOM or if the
// compilation got cancelled.
template <typename MemoryView>
bool EmulateStateOf<MemoryView>::run(MemoryView& view) {
  // Initialize the current block state of each block to an unknown state.
  if (!states_.appendN(nullptr, graph_.numBlocks())) {
    return false;
  }

  // Initialize the first block which needs to be traversed in RPO.
  MBasicBlock* startBlock = view.startingBlock();
  if (!view.initStartingState(&states_[startBlock->id()])) {
    return false;
  }

  // Iterate over each basic block which has a valid entry state, and merge
  // the state in the successor blocks.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel(MemoryView::phaseName)) {
      return false;
    }

    // Get the block state as the result of the merge of all predecessors
    // which have already been visited in RPO.  This means that backedges
    // are not yet merged into the loop.
    BlockState* state = states_[block->id()];
    if (!state) {
      // No state implies the object does not flow into this block: skip it.
      continue;
    }
    view.setEntryBlockState(state);

    // Iterates over resume points, phi and instructions.
    for (MNodeIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the
      // visit function might discard itself from the basic block.
      MNode* ins = *iter++;
      if (ins->isDefinition()) {
        MDefinition* def = ins->toDefinition();
        // Dispatch to the view's visit<Op> overload for this opcode.
        switch (def->op()) {
#define MIR_OP(op)                 \
  case MDefinition::Opcode::op:    \
    view.visit##op(def->to##op()); \
    break;
          MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
        }
      } else {
        view.visitResumePoint(ins->toResumePoint());
      }
      if (view.oom()) {
        return false;
      }
    }

    // For each successor, merge the current state into the state of the
    // successors.
    for (size_t s = 0; s < block->numSuccessors(); s++) {
      MBasicBlock* succ = block->getSuccessor(s);
      if (!view.mergeIntoSuccessorState(*block, succ, &states_[succ->id()])) {
        return false;
      }
    }
  }

  states_.clear();
  return true;
}
106 
107 static bool IsObjectEscaped(MInstruction* ins, JSObject* objDefault = nullptr);
108 
109 // Returns False if the lambda is not escaped and if it is optimizable by
110 // ScalarReplacementOfObject.
IsLambdaEscaped(MInstruction * lambda,JSObject * obj)111 static bool IsLambdaEscaped(MInstruction* lambda, JSObject* obj) {
112   MOZ_ASSERT(lambda->isLambda() || lambda->isLambdaArrow() ||
113              lambda->isFunctionWithProto());
114   JitSpewDef(JitSpew_Escape, "Check lambda\n", lambda);
115   JitSpewIndent spewIndent(JitSpew_Escape);
116 
117   // The scope chain is not escaped if none of the Lambdas which are
118   // capturing it are escaped.
119   for (MUseIterator i(lambda->usesBegin()); i != lambda->usesEnd(); i++) {
120     MNode* consumer = (*i)->consumer();
121     if (!consumer->isDefinition()) {
122       // Cannot optimize if it is observable from fun.arguments or others.
123       if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
124         JitSpew(JitSpew_Escape, "Observable lambda cannot be recovered");
125         return true;
126       }
127       continue;
128     }
129 
130     MDefinition* def = consumer->toDefinition();
131     if (!def->isFunctionEnvironment()) {
132       JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
133       return true;
134     }
135 
136     if (IsObjectEscaped(def->toInstruction(), obj)) {
137       JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
138       return true;
139     }
140   }
141   JitSpew(JitSpew_Escape, "Lambda is not escaped");
142   return false;
143 }
144 
IsOptimizableObjectInstruction(MInstruction * ins)145 static inline bool IsOptimizableObjectInstruction(MInstruction* ins) {
146   return ins->isNewObject() || ins->isCreateThisWithTemplate() ||
147          ins->isNewCallObject() || ins->isNewIterator();
148 }
149 
150 // Returns False if the object is not escaped and if it is optimizable by
151 // ScalarReplacementOfObject.
152 //
153 // For the moment, this code is dumb as it only supports objects which are not
154 // changing shape, and which are known by TI at the object creation.
IsObjectEscaped(MInstruction * ins,JSObject * objDefault)155 static bool IsObjectEscaped(MInstruction* ins, JSObject* objDefault) {
156   MOZ_ASSERT(ins->type() == MIRType::Object);
157   MOZ_ASSERT(IsOptimizableObjectInstruction(ins) || ins->isGuardShape() ||
158              ins->isGuardObjectGroup() || ins->isFunctionEnvironment());
159 
160   JitSpewDef(JitSpew_Escape, "Check object\n", ins);
161   JitSpewIndent spewIndent(JitSpew_Escape);
162 
163   JSObject* obj = objDefault;
164   if (!obj) {
165     obj = MObjectState::templateObjectOf(ins);
166   }
167 
168   if (!obj) {
169     JitSpew(JitSpew_Escape, "No template object defined.");
170     return true;
171   }
172 
173   // Check if the object is escaped. If the object is not the first argument
174   // of either a known Store / Load, then we consider it as escaped. This is a
175   // cheap and conservative escape analysis.
176   for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
177     MNode* consumer = (*i)->consumer();
178     if (!consumer->isDefinition()) {
179       // Cannot optimize if it is observable from fun.arguments or others.
180       if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
181         JitSpew(JitSpew_Escape, "Observable object cannot be recovered");
182         return true;
183       }
184       continue;
185     }
186 
187     MDefinition* def = consumer->toDefinition();
188     switch (def->op()) {
189       case MDefinition::Opcode::StoreFixedSlot:
190       case MDefinition::Opcode::LoadFixedSlot:
191         // Not escaped if it is the first argument.
192         if (def->indexOf(*i) == 0) {
193           break;
194         }
195 
196         JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
197         return true;
198 
199       case MDefinition::Opcode::PostWriteBarrier:
200         break;
201 
202       case MDefinition::Opcode::Slots: {
203 #ifdef DEBUG
204         // Assert that MSlots are only used by MStoreDynamicSlot and
205         // MLoadDynamicSlot.
206         MSlots* ins = def->toSlots();
207         MOZ_ASSERT(ins->object() != 0);
208         for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
209           // toDefinition should normally never fail, since they don't get
210           // captured by resume points.
211           MDefinition* def = (*i)->consumer()->toDefinition();
212           MOZ_ASSERT(def->op() == MDefinition::Opcode::StoreDynamicSlot ||
213                      def->op() == MDefinition::Opcode::LoadDynamicSlot);
214         }
215 #endif
216         break;
217       }
218 
219       case MDefinition::Opcode::GuardShape: {
220         MGuardShape* guard = def->toGuardShape();
221         MOZ_ASSERT(!ins->isGuardShape());
222         if (obj->shape() != guard->shape()) {
223           JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
224           return true;
225         }
226         if (IsObjectEscaped(def->toInstruction(), obj)) {
227           JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
228           return true;
229         }
230         break;
231       }
232 
233       case MDefinition::Opcode::GuardObjectGroup: {
234         MGuardObjectGroup* guard = def->toGuardObjectGroup();
235         MOZ_ASSERT(!ins->isGuardObjectGroup());
236         if (obj->group() != guard->group()) {
237           JitSpewDef(JitSpew_Escape, "has a non-matching guard group\n", guard);
238           return true;
239         }
240         if (IsObjectEscaped(def->toInstruction(), obj)) {
241           JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
242           return true;
243         }
244         break;
245       }
246 
247       case MDefinition::Opcode::Lambda:
248       case MDefinition::Opcode::LambdaArrow:
249       case MDefinition::Opcode::FunctionWithProto: {
250         if (IsLambdaEscaped(def->toInstruction(), obj)) {
251           JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
252           return true;
253         }
254         break;
255       }
256 
257       // This instruction is a no-op used to verify that scalar replacement
258       // is working as expected in jit-test.
259       case MDefinition::Opcode::AssertRecoveredOnBailout:
260         break;
261 
262       default:
263         JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
264         return true;
265     }
266   }
267 
268   JitSpew(JitSpew_Escape, "Object is not escaped");
269   return false;
270 }
271 
// MemoryView which emulates the content of a scalar-replaced object: it
// rewrites slot loads/stores and guards on the object into uses of an
// MObjectState, which tracks the value held by each slot.
class ObjectMemoryView : public MDefinitionVisitorDefaultNoop {
 public:
  using BlockState = MObjectState;
  static const char phaseName[];

 private:
  TempAllocator& alloc_;
  // Constant "undefined" used as the initial value of every slot.
  MConstant* undefinedVal_;
  // The allocation instruction being scalar-replaced.
  MInstruction* obj_;
  // Block containing the allocation; traversal starts here.
  MBasicBlock* startBlock_;
  // State of the object at the instruction currently being visited.
  BlockState* state_;

  // Used to improve the memory usage by sharing common modification.
  const MResumePoint* lastResumePoint_;

  // Set when allocating a new block state fails.
  bool oom_;

 public:
  ObjectMemoryView(TempAllocator& alloc, MInstruction* obj);

  // Interface expected by EmulateStateOf<ObjectMemoryView>::run.
  MBasicBlock* startingBlock();
  bool initStartingState(BlockState** pState);

  void setEntryBlockState(BlockState* state);
  bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
                               BlockState** pSuccState);

#ifdef DEBUG
  void assertSuccess();
#else
  void assertSuccess() {}
#endif

  bool oom() const { return oom_; }

 public:
  // Visitors invoked by EmulateStateOf for each node of the graph.
  void visitResumePoint(MResumePoint* rp);
  void visitObjectState(MObjectState* ins);
  void visitStoreFixedSlot(MStoreFixedSlot* ins);
  void visitLoadFixedSlot(MLoadFixedSlot* ins);
  void visitPostWriteBarrier(MPostWriteBarrier* ins);
  void visitStoreDynamicSlot(MStoreDynamicSlot* ins);
  void visitLoadDynamicSlot(MLoadDynamicSlot* ins);
  void visitGuardShape(MGuardShape* ins);
  void visitGuardObjectGroup(MGuardObjectGroup* ins);
  void visitFunctionEnvironment(MFunctionEnvironment* ins);
  void visitLambda(MLambda* ins);
  void visitLambdaArrow(MLambdaArrow* ins);
  void visitFunctionWithProto(MFunctionWithProto* ins);

 private:
  // Common implementation of the shape / group guard visitors.
  void visitObjectGuard(MInstruction* ins, MDefinition* operand);
};
325 
// Phase name passed to MIRGenerator::shouldCancel by EmulateStateOf::run.
/* static */ const char ObjectMemoryView::phaseName[] =
    "Scalar Replacement of Object";
328 
// Record the allocation to replace and flag it so that bailout recovery and
// DCE treat it correctly.
ObjectMemoryView::ObjectMemoryView(TempAllocator& alloc, MInstruction* obj)
    : alloc_(alloc),
      undefinedVal_(nullptr),
      obj_(obj),
      startBlock_(obj->block()),
      state_(nullptr),
      lastResumePoint_(nullptr),
      oom_(false) {
  // Annotate snapshots RValue such that we recover the store first.
  obj_->setIncompleteObject();

  // Annotate the instruction such that we do not replace it by a
  // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
  obj_->setImplicitlyUsedUnchecked();
}
344 
startingBlock()345 MBasicBlock* ObjectMemoryView::startingBlock() { return startBlock_; }
346 
initStartingState(BlockState ** pState)347 bool ObjectMemoryView::initStartingState(BlockState** pState) {
348   // Uninitialized slots have an "undefined" value.
349   undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
350   startBlock_->insertBefore(obj_, undefinedVal_);
351 
352   // Create a new block state and insert at it at the location of the new
353   // object.
354   BlockState* state = BlockState::New(alloc_, obj_);
355   if (!state) {
356     return false;
357   }
358 
359   startBlock_->insertAfter(obj_, state);
360 
361   // Initialize the properties of the object state.
362   if (!state->initFromTemplateObject(alloc_, undefinedVal_)) {
363     return false;
364   }
365 
366   // Hold out of resume point until it is visited.
367   state->setInWorklist();
368 
369   *pState = state;
370   return true;
371 }
372 
setEntryBlockState(BlockState * state)373 void ObjectMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
374 
// Merge the state at the end of |curr| into the entry state of |succ|.  On
// the first visit of a merge point this creates one Phi per slot; later
// visits only fill the Phi operand corresponding to |curr|.  Returns false
// on OOM.
bool ObjectMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
                                               MBasicBlock* succ,
                                               BlockState** pSuccState) {
  BlockState* succState = *pSuccState;

  // When a block has no state yet, create an empty one for the
  // successor.
  if (!succState) {
    // If the successor is not dominated then the object cannot flow
    // in this basic block without a Phi.  We know that no Phi exist
    // in non-dominated successors as the conservative escaped
    // analysis fails otherwise.  Such condition can succeed if the
    // successor is a join at the end of a if-block and the object
    // only exists within the branch.
    if (!startBlock_->dominates(succ)) {
      return true;
    }

    // If there is only one predecessor, carry over the last state of the
    // block to the successor.  As the block state is immutable, if the
    // current block has multiple successors, they will share the same entry
    // state.
    if (succ->numPredecessors() <= 1 || !state_->numSlots()) {
      *pSuccState = state_;
      return true;
    }

    // If we have multiple predecessors, then we allocate one Phi node for
    // each predecessor, and create a new block state which only has phi
    // nodes.  These would later be removed by the removal of redundant phi
    // nodes.
    succState = BlockState::Copy(alloc_, state_);
    if (!succState) {
      return false;
    }

    size_t numPreds = succ->numPredecessors();
    for (size_t slot = 0; slot < state_->numSlots(); slot++) {
      MPhi* phi = MPhi::New(alloc_.fallible());
      if (!phi || !phi->reserveLength(numPreds)) {
        return false;
      }

      // Fill the input of the successors Phi with undefined
      // values, and each block later fills the Phi inputs.
      for (size_t p = 0; p < numPreds; p++) {
        phi->addInput(undefinedVal_);
      }

      // Add Phi in the list of Phis of the basic block.
      succ->addPhi(phi);
      succState->setSlot(slot, phi);
    }

    // Insert the newly created block state instruction at the beginning
    // of the successor block, after all the phi nodes.  Note that it
    // would be captured by the entry resume point of the successor
    // block.
    succ->insertBefore(succ->safeInsertTop(), succState);
    *pSuccState = succState;
  }

  MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
  if (succ->numPredecessors() > 1 && succState->numSlots() &&
      succ != startBlock_) {
    // We need to re-compute successorWithPhis as the previous EliminatePhis
    // phase might have removed all the Phis from the successor block.
    size_t currIndex;
    MOZ_ASSERT(!succ->phisEmpty());
    if (curr->successorWithPhis()) {
      MOZ_ASSERT(curr->successorWithPhis() == succ);
      currIndex = curr->positionInPhiSuccessor();
    } else {
      currIndex = succ->indexForPredecessor(curr);
      curr->setSuccessorWithPhis(succ, currIndex);
    }
    MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);

    // Copy the current slot states to the index of current block in all the
    // Phi created during the first visit of the successor.
    for (size_t slot = 0; slot < state_->numSlots(); slot++) {
      MPhi* phi = succState->getSlot(slot)->toPhi();
      phi->replaceOperand(currIndex, state_->getSlot(slot));
    }
  }

  return true;
}
463 
#ifdef DEBUG
// Verify that, after the pass ran, every remaining use of the allocation is
// either recoverable on bailout or removable by DCE.
void ObjectMemoryView::assertSuccess() {
  for (MUseIterator i(obj_->usesBegin()); i != obj_->usesEnd(); i++) {
    MNode* ins = (*i)->consumer();

    // Resume points have been replaced by the object state.
    if (ins->isResumePoint()) {
      MOZ_ASSERT(obj_->isIncompleteObject());
      continue;
    }

    MDefinition* def = ins->toDefinition();
    if (def->isRecoveredOnBailout()) {
      MOZ_ASSERT(obj_->isIncompleteObject());
      continue;
    }

    // The only remaining uses would be removed by DCE, which will also
    // recover the object on bailouts.
    MOZ_ASSERT(def->isSlots() || def->isLambda() || def->isLambdaArrow() ||
               def->isFunctionWithProto());
    MOZ_ASSERT(!def->hasDefUses());
  }
}
#endif
485 
visitResumePoint(MResumePoint * rp)486 void ObjectMemoryView::visitResumePoint(MResumePoint* rp) {
487   // As long as the MObjectState is not yet seen next to the allocation, we do
488   // not patch the resume point to recover the side effects.
489   if (!state_->isInWorklist()) {
490     rp->addStore(alloc_, state_, lastResumePoint_);
491     lastResumePoint_ = rp;
492   }
493 }
494 
// The traversal reached an MObjectState created by this view: from now on it
// may be captured by resume points (see visitResumePoint).
void ObjectMemoryView::visitObjectState(MObjectState* ins) {
  if (ins->isInWorklist()) {
    ins->setNotInWorklist();
  }
}
500 
visitStoreFixedSlot(MStoreFixedSlot * ins)501 void ObjectMemoryView::visitStoreFixedSlot(MStoreFixedSlot* ins) {
502   // Skip stores made on other objects.
503   if (ins->object() != obj_) {
504     return;
505   }
506 
507   // Clone the state and update the slot value.
508   if (state_->hasFixedSlot(ins->slot())) {
509     state_ = BlockState::Copy(alloc_, state_);
510     if (!state_) {
511       oom_ = true;
512       return;
513     }
514 
515     state_->setFixedSlot(ins->slot(), ins->value());
516     ins->block()->insertBefore(ins->toInstruction(), state_);
517   } else {
518     // UnsafeSetReserveSlot can access baked-in slots which are guarded by
519     // conditions, which are not seen by the escape analysis.
520     MBail* bailout = MBail::New(alloc_, Bailout_Inevitable);
521     ins->block()->insertBefore(ins, bailout);
522   }
523 
524   // Remove original instruction.
525   ins->block()->discard(ins);
526 }
527 
visitLoadFixedSlot(MLoadFixedSlot * ins)528 void ObjectMemoryView::visitLoadFixedSlot(MLoadFixedSlot* ins) {
529   // Skip loads made on other objects.
530   if (ins->object() != obj_) {
531     return;
532   }
533 
534   // Replace load by the slot value.
535   if (state_->hasFixedSlot(ins->slot())) {
536     ins->replaceAllUsesWith(state_->getFixedSlot(ins->slot()));
537   } else {
538     // UnsafeGetReserveSlot can access baked-in slots which are guarded by
539     // conditions, which are not seen by the escape analysis.
540     MBail* bailout = MBail::New(alloc_, Bailout_Inevitable);
541     ins->block()->insertBefore(ins, bailout);
542     ins->replaceAllUsesWith(undefinedVal_);
543   }
544 
545   // Remove original instruction.
546   ins->block()->discard(ins);
547 }
548 
visitPostWriteBarrier(MPostWriteBarrier * ins)549 void ObjectMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
550   // Skip loads made on other objects.
551   if (ins->object() != obj_) {
552     return;
553   }
554 
555   // Remove original instruction.
556   ins->block()->discard(ins);
557 }
558 
visitStoreDynamicSlot(MStoreDynamicSlot * ins)559 void ObjectMemoryView::visitStoreDynamicSlot(MStoreDynamicSlot* ins) {
560   // Skip stores made on other objects.
561   MSlots* slots = ins->slots()->toSlots();
562   if (slots->object() != obj_) {
563     // Guard objects are replaced when they are visited.
564     MOZ_ASSERT(!slots->object()->isGuardShape() ||
565                slots->object()->toGuardShape()->object() != obj_);
566     return;
567   }
568 
569   // Clone the state and update the slot value.
570   if (state_->hasDynamicSlot(ins->slot())) {
571     state_ = BlockState::Copy(alloc_, state_);
572     if (!state_) {
573       oom_ = true;
574       return;
575     }
576 
577     state_->setDynamicSlot(ins->slot(), ins->value());
578     ins->block()->insertBefore(ins->toInstruction(), state_);
579   } else {
580     // UnsafeSetReserveSlot can access baked-in slots which are guarded by
581     // conditions, which are not seen by the escape analysis.
582     MBail* bailout = MBail::New(alloc_, Bailout_Inevitable);
583     ins->block()->insertBefore(ins, bailout);
584   }
585 
586   // Remove original instruction.
587   ins->block()->discard(ins);
588 }
589 
visitLoadDynamicSlot(MLoadDynamicSlot * ins)590 void ObjectMemoryView::visitLoadDynamicSlot(MLoadDynamicSlot* ins) {
591   // Skip loads made on other objects.
592   MSlots* slots = ins->slots()->toSlots();
593   if (slots->object() != obj_) {
594     // Guard objects are replaced when they are visited.
595     MOZ_ASSERT(!slots->object()->isGuardShape() ||
596                slots->object()->toGuardShape()->object() != obj_);
597     return;
598   }
599 
600   // Replace load by the slot value.
601   if (state_->hasDynamicSlot(ins->slot())) {
602     ins->replaceAllUsesWith(state_->getDynamicSlot(ins->slot()));
603   } else {
604     // UnsafeGetReserveSlot can access baked-in slots which are guarded by
605     // conditions, which are not seen by the escape analysis.
606     MBail* bailout = MBail::New(alloc_, Bailout_Inevitable);
607     ins->block()->insertBefore(ins, bailout);
608     ins->replaceAllUsesWith(undefinedVal_);
609   }
610 
611   // Remove original instruction.
612   ins->block()->discard(ins);
613 }
614 
visitObjectGuard(MInstruction * ins,MDefinition * operand)615 void ObjectMemoryView::visitObjectGuard(MInstruction* ins,
616                                         MDefinition* operand) {
617   MOZ_ASSERT(ins->numOperands() == 1);
618   MOZ_ASSERT(ins->getOperand(0) == operand);
619   MOZ_ASSERT(ins->type() == MIRType::Object);
620 
621   // Skip guards on other objects.
622   if (operand != obj_) {
623     return;
624   }
625 
626   // Replace the guard by its object.
627   ins->replaceAllUsesWith(obj_);
628 
629   // Remove original instruction.
630   ins->block()->discard(ins);
631 }
632 
// Shape guards on the replaced object are pure aliases of the object.
void ObjectMemoryView::visitGuardShape(MGuardShape* ins) {
  visitObjectGuard(ins, ins->object());
}
636 
// Group guards on the replaced object are pure aliases of the object.
void ObjectMemoryView::visitGuardObjectGroup(MGuardObjectGroup* ins) {
  visitObjectGuard(ins, ins->object());
}
640 
visitFunctionEnvironment(MFunctionEnvironment * ins)641 void ObjectMemoryView::visitFunctionEnvironment(MFunctionEnvironment* ins) {
642   // Skip function environment which are not aliases of the NewCallObject.
643   MDefinition* input = ins->input();
644   if (input->isLambda()) {
645     if (input->toLambda()->environmentChain() != obj_) {
646       return;
647     }
648   } else if (input->isLambdaArrow()) {
649     if (input->toLambdaArrow()->environmentChain() != obj_) {
650       return;
651     }
652   } else if (input->isFunctionWithProto()) {
653     if (input->toFunctionWithProto()->environmentChain() != obj_) {
654       return;
655     }
656   } else {
657     return;
658   }
659 
660   // Replace the function environment by the scope chain of the lambda.
661   ins->replaceAllUsesWith(obj_);
662 
663   // Remove original instruction.
664   ins->block()->discard(ins);
665 }
666 
visitLambda(MLambda * ins)667 void ObjectMemoryView::visitLambda(MLambda* ins) {
668   if (ins->environmentChain() != obj_) {
669     return;
670   }
671 
672   // In order to recover the lambda we need to recover the scope chain, as the
673   // lambda is holding it.
674   ins->setIncompleteObject();
675 }
676 
visitLambdaArrow(MLambdaArrow * ins)677 void ObjectMemoryView::visitLambdaArrow(MLambdaArrow* ins) {
678   if (ins->environmentChain() != obj_) {
679     return;
680   }
681 
682   ins->setIncompleteObject();
683 }
684 
visitFunctionWithProto(MFunctionWithProto * ins)685 void ObjectMemoryView::visitFunctionWithProto(MFunctionWithProto* ins) {
686   if (ins->environmentChain() != obj_) {
687     return;
688   }
689 
690   ins->setIncompleteObject();
691 }
692 
IndexOf(MDefinition * ins,int32_t * res)693 static bool IndexOf(MDefinition* ins, int32_t* res) {
694   MOZ_ASSERT(ins->isLoadElement() || ins->isStoreElement());
695   MDefinition* indexDef = ins->getOperand(1);  // ins->index();
696   if (indexDef->isSpectreMaskIndex()) {
697     indexDef = indexDef->toSpectreMaskIndex()->index();
698   }
699   if (indexDef->isBoundsCheck()) {
700     indexDef = indexDef->toBoundsCheck()->index();
701   }
702   if (indexDef->isToNumberInt32()) {
703     indexDef = indexDef->toToNumberInt32()->getOperand(0);
704   }
705   MConstant* indexDefConst = indexDef->maybeConstantValue();
706   if (!indexDefConst || indexDefConst->type() != MIRType::Int32) {
707     return false;
708   }
709   *res = indexDefConst->toInt32();
710   return true;
711 }
712 
// Returns False if the elements are not escaped and if they are optimizable
// by ScalarReplacementOfArray.  |arraySize| bounds the constant indexes the
// loads/stores are allowed to use.
static bool IsElementEscaped(MDefinition* def, uint32_t arraySize) {
  MOZ_ASSERT(def->isElements() || def->isConvertElementsToDoubles());

  JitSpewDef(JitSpew_Escape, "Check elements\n", def);
  JitSpewIndent spewIndent(JitSpew_Escape);

  for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) {
    // The MIRType::Elements cannot be captured in a resume point as
    // it does not represent a value allocation.
    MDefinition* access = (*i)->consumer()->toDefinition();

    switch (access->op()) {
      case MDefinition::Opcode::LoadElement: {
        MOZ_ASSERT(access->toLoadElement()->elements() == def);

        // If we need hole checks, then the array cannot be escaped
        // as the array might refer to the prototype chain to look
        // for properties, thus it might do additional side-effects
        // which are not reflected by the alias set, if we are
        // bailing on holes.
        if (access->toLoadElement()->needsHoleCheck()) {
          JitSpewDef(JitSpew_Escape, "has a load element with a hole check\n",
                     access);
          return true;
        }

        // If the index is not a constant then this index can alias
        // all others. We do not handle this case.
        int32_t index;
        if (!IndexOf(access, &index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a load element with a non-trivial index\n", access);
          return true;
        }
        if (index < 0 || arraySize <= uint32_t(index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a load element with an out-of-bound index\n", access);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::StoreElement: {
        MOZ_ASSERT(access->toStoreElement()->elements() == def);

        // If we need hole checks, then the array cannot be escaped
        // as the array might refer to the prototype chain to look
        // for properties, thus it might do additional side-effects
        // which are not reflected by the alias set, if we are
        // bailing on holes.
        if (access->toStoreElement()->needsHoleCheck()) {
          JitSpewDef(JitSpew_Escape, "has a store element with a hole check\n",
                     access);
          return true;
        }

        // If the index is not a constant then this index can alias
        // all others. We do not handle this case.
        int32_t index;
        if (!IndexOf(access, &index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with a non-trivial index\n", access);
          return true;
        }
        if (index < 0 || arraySize <= uint32_t(index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with an out-of-bound index\n",
                     access);
          return true;
        }

        // We are not yet encoding magic hole constants in resume points.
        if (access->toStoreElement()->value()->type() == MIRType::MagicHole) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with an magic-hole constant\n",
                     access);
          return true;
        }
        break;
      }

      // Length reads and initialized-length updates do not leak the
      // elements pointer itself.
      case MDefinition::Opcode::SetInitializedLength:
        MOZ_ASSERT(access->toSetInitializedLength()->elements() == def);
        break;

      case MDefinition::Opcode::InitializedLength:
        MOZ_ASSERT(access->toInitializedLength()->elements() == def);
        break;

      case MDefinition::Opcode::ArrayLength:
        MOZ_ASSERT(access->toArrayLength()->elements() == def);
        break;

      // The double-conversion wrapper is transparent: recurse through it.
      case MDefinition::Opcode::ConvertElementsToDoubles:
        MOZ_ASSERT(access->toConvertElementsToDoubles()->elements() == def);
        if (IsElementEscaped(access, arraySize)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", access);
          return true;
        }
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", access);
        return true;
    }
  }
  JitSpew(JitSpew_Escape, "Elements is not escaped");
  return false;
}
824 
IsOptimizableArrayInstruction(MInstruction * ins)825 static inline bool IsOptimizableArrayInstruction(MInstruction* ins) {
826   return ins->isNewArray() || ins->isNewArrayCopyOnWrite();
827 }
828 
829 // Returns False if the array is not escaped and if it is optimizable by
830 // ScalarReplacementOfArray.
831 //
832 // For the moment, this code is dumb as it only supports arrays which are not
833 // changing length, with only access with known constants.
IsArrayEscaped(MInstruction * ins,MInstruction * newArray)834 static bool IsArrayEscaped(MInstruction* ins, MInstruction* newArray) {
835   MOZ_ASSERT(ins->type() == MIRType::Object);
836   MOZ_ASSERT(IsOptimizableArrayInstruction(ins) ||
837              ins->isMaybeCopyElementsForWrite());
838   MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));
839 
840   JitSpewDef(JitSpew_Escape, "Check array\n", ins);
841   JitSpewIndent spewIndent(JitSpew_Escape);
842 
843   uint32_t length;
844   if (newArray->isNewArray()) {
845     if (!newArray->toNewArray()->templateObject()) {
846       JitSpew(JitSpew_Escape, "No template object defined.");
847       return true;
848     }
849 
850     length = newArray->toNewArray()->length();
851   } else {
852     length = newArray->toNewArrayCopyOnWrite()->templateObject()->length();
853   }
854 
855   if (length >= 16) {
856     JitSpew(JitSpew_Escape, "Array has too many elements");
857     return true;
858   }
859 
860   // Check if the object is escaped. If the object is not the first argument
861   // of either a known Store / Load, then we consider it as escaped. This is a
862   // cheap and conservative escape analysis.
863   for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
864     MNode* consumer = (*i)->consumer();
865     if (!consumer->isDefinition()) {
866       // Cannot optimize if it is observable from fun.arguments or others.
867       if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
868         JitSpew(JitSpew_Escape, "Observable array cannot be recovered");
869         return true;
870       }
871       continue;
872     }
873 
874     MDefinition* def = consumer->toDefinition();
875     switch (def->op()) {
876       case MDefinition::Opcode::Elements: {
877         MElements* elem = def->toElements();
878         MOZ_ASSERT(elem->object() == ins);
879         if (IsElementEscaped(elem, length)) {
880           JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", elem);
881           return true;
882         }
883 
884         break;
885       }
886 
887       case MDefinition::Opcode::MaybeCopyElementsForWrite: {
888         MMaybeCopyElementsForWrite* copied = def->toMaybeCopyElementsForWrite();
889         MOZ_ASSERT(copied->object() == ins);
890         if (IsArrayEscaped(copied, ins)) {
891           JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", copied);
892           return true;
893         }
894         break;
895       }
896 
897       // This instruction is a no-op used to verify that scalar replacement
898       // is working as expected in jit-test.
899       case MDefinition::Opcode::AssertRecoveredOnBailout:
900         break;
901 
902       default:
903         JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
904         return true;
905     }
906   }
907 
908   JitSpew(JitSpew_Escape, "Array is not escaped");
909   return false;
910 }
911 
// This class replaces every MStoreElement and MSetInitializedLength by an
// MArrayState which emulates the content of the array. All MLoadElement,
// MInitializedLength and MArrayLength are replaced by the corresponding value.
//
// In order to restore the value of the array correctly in case of bailouts, we
// replace all reference of the allocation by the MArrayState definition.
class ArrayMemoryView : public MDefinitionVisitorDefaultNoop {
 public:
  using BlockState = MArrayState;
  // Phase name reported to MIRGenerator::shouldCancel by EmulateStateOf.
  static const char* phaseName;

 private:
  // Allocator used for every MIR node created by this view.
  TempAllocator& alloc_;
  // Constant |undefined| value, created by initStartingState, used as the
  // initial value of every element.
  MConstant* undefinedVal_;
  // Lazily created constant holding the array length (see visitArrayLength).
  MConstant* length_;
  // The array allocation which is being scalar-replaced.
  MInstruction* arr_;
  // Block containing the allocation; states only flow into blocks it
  // dominates (see mergeIntoSuccessorState).
  MBasicBlock* startBlock_;
  // State at the current position of the traversal.
  BlockState* state_;

  // Used to improve the memory usage by sharing common modification.
  const MResumePoint* lastResumePoint_;

  // Set when copying a block state fails; reported through oom().
  bool oom_;

 public:
  ArrayMemoryView(TempAllocator& alloc, MInstruction* arr);

  MBasicBlock* startingBlock();
  bool initStartingState(BlockState** pState);

  void setEntryBlockState(BlockState* state);
  bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
                               BlockState** pSuccState);

#ifdef DEBUG
  void assertSuccess();
#else
  void assertSuccess() {}
#endif

  bool oom() const { return oom_; }

 private:
  // Test whether |elements| belongs to the replaced array.
  bool isArrayStateElements(MDefinition* elements);
  // Discard |ins| and, when dead, the |elements| it consumed.
  void discardInstruction(MInstruction* ins, MDefinition* elements);

 public:
  // Visitor callbacks invoked by EmulateStateOf for each instruction kind.
  void visitResumePoint(MResumePoint* rp);
  void visitArrayState(MArrayState* ins);
  void visitStoreElement(MStoreElement* ins);
  void visitLoadElement(MLoadElement* ins);
  void visitSetInitializedLength(MSetInitializedLength* ins);
  void visitInitializedLength(MInitializedLength* ins);
  void visitArrayLength(MArrayLength* ins);
  void visitMaybeCopyElementsForWrite(MMaybeCopyElementsForWrite* ins);
  void visitConvertElementsToDoubles(MConvertElementsToDoubles* ins);
};
969 
970 const char* ArrayMemoryView::phaseName = "Scalar Replacement of Array";
971 
// |arr| is the array allocation whose memory is about to be emulated by
// MArrayState instructions.
ArrayMemoryView::ArrayMemoryView(TempAllocator& alloc, MInstruction* arr)
    : alloc_(alloc),
      undefinedVal_(nullptr),
      length_(nullptr),
      arr_(arr),
      startBlock_(arr->block()),
      state_(nullptr),
      lastResumePoint_(nullptr),
      oom_(false) {
  // Annotate snapshots RValue such that we recover the store first.
  arr_->setIncompleteObject();

  // Annotate the instruction such that we do not replace it by a
  // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
  arr_->setImplicitlyUsedUnchecked();
}
988 
startingBlock()989 MBasicBlock* ArrayMemoryView::startingBlock() { return startBlock_; }
990 
// Build the state describing the array right after its allocation: the
// initialized length is 0 (or the template object length for copy-on-write
// arrays) and every element is |undefined|.  Returns false on OOM.
bool ArrayMemoryView::initStartingState(BlockState** pState) {
  // Uninitialized elements have an "undefined" value.
  undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
  MConstant* initLength = MConstant::New(
      alloc_, Int32Value(arr_->isNewArrayCopyOnWrite()
                             ? arr_->toNewArrayCopyOnWrite()->length()
                             : 0));
  // Both constants are inserted before the allocation so they dominate it.
  arr_->block()->insertBefore(arr_, undefinedVal_);
  arr_->block()->insertBefore(arr_, initLength);

  // Create a new block state and insert it at the location of the new array.
  BlockState* state = BlockState::New(alloc_, arr_, initLength);
  if (!state) {
    return false;
  }

  startBlock_->insertAfter(arr_, state);

  // Initialize the elements of the array state.
  if (!state->initFromTemplateObject(alloc_, undefinedVal_)) {
    return false;
  }

  // Hold out of resume point until it is visited.
  state->setInWorklist();

  *pState = state;
  return true;
}
1020 
setEntryBlockState(BlockState * state)1021 void ArrayMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
1022 
// Merge the state at the end of |curr| into the entry state of |succ|.  The
// first visit of a merge point allocates one Phi per tracked element; later
// visits only fill in the Phi operand corresponding to |curr|.  Returns false
// on OOM.
bool ArrayMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
                                              MBasicBlock* succ,
                                              BlockState** pSuccState) {
  BlockState* succState = *pSuccState;

  // When a block has no state yet, create an empty one for the
  // successor.
  if (!succState) {
    // If the successor is not dominated then the array cannot flow
    // in this basic block without a Phi.  We know that no Phi exist
    // in non-dominated successors as the conservative escaped
    // analysis fails otherwise.  Such condition can succeed if the
    // successor is a join at the end of a if-block and the array
    // only exists within the branch.
    if (!startBlock_->dominates(succ)) {
      return true;
    }

    // If there is only one predecessor, carry over the last state of the
    // block to the successor.  As the block state is immutable, if the
    // current block has multiple successors, they will share the same entry
    // state.
    if (succ->numPredecessors() <= 1 || !state_->numElements()) {
      *pSuccState = state_;
      return true;
    }

    // If we have multiple predecessors, then we allocate one Phi node for
    // each predecessor, and create a new block state which only has phi
    // nodes.  These would later be removed by the removal of redundant phi
    // nodes.
    succState = BlockState::Copy(alloc_, state_);
    if (!succState) {
      return false;
    }

    size_t numPreds = succ->numPredecessors();
    for (size_t index = 0; index < state_->numElements(); index++) {
      MPhi* phi = MPhi::New(alloc_.fallible());
      if (!phi || !phi->reserveLength(numPreds)) {
        return false;
      }

      // Fill the input of the successors Phi with undefined
      // values, and each block later fills the Phi inputs.
      for (size_t p = 0; p < numPreds; p++) {
        phi->addInput(undefinedVal_);
      }

      // Add Phi in the list of Phis of the basic block.
      succ->addPhi(phi);
      succState->setElement(index, phi);
    }

    // Insert the newly created block state instruction at the beginning
    // of the successor block, after all the phi nodes.  Note that it
    // would be captured by the entry resume point of the successor
    // block.
    succ->insertBefore(succ->safeInsertTop(), succState);
    *pSuccState = succState;
  }

  MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
  if (succ->numPredecessors() > 1 && succState->numElements() &&
      succ != startBlock_) {
    // We need to re-compute successorWithPhis as the previous EliminatePhis
    // phase might have removed all the Phis from the successor block.
    size_t currIndex;
    MOZ_ASSERT(!succ->phisEmpty());
    if (curr->successorWithPhis()) {
      MOZ_ASSERT(curr->successorWithPhis() == succ);
      currIndex = curr->positionInPhiSuccessor();
    } else {
      currIndex = succ->indexForPredecessor(curr);
      curr->setSuccessorWithPhis(succ, currIndex);
    }
    MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);

    // Copy the current element states to the index of current block in all
    // the Phi created during the first visit of the successor.
    for (size_t index = 0; index < state_->numElements(); index++) {
      MPhi* phi = succState->getElement(index)->toPhi();
      phi->replaceOperand(currIndex, state_->getElement(index));
    }
  }

  return true;
}
1111 
#ifdef DEBUG
// After replacement, the allocation must have no live uses left: they have
// all been rerouted to MArrayState or replaced values.
void ArrayMemoryView::assertSuccess() { MOZ_ASSERT(!arr_->hasLiveDefUses()); }
#endif
1115 
// Attach the current array state to |rp| so the array content can be rebuilt
// when bailing out at this resume point.
void ArrayMemoryView::visitResumePoint(MResumePoint* rp) {
  // As long as the MArrayState is not yet seen next to the allocation, we do
  // not patch the resume point to recover the side effects.
  if (!state_->isInWorklist()) {
    // The previous resume point is handed over to share common stored
    // modifications (see lastResumePoint_).
    rp->addStore(alloc_, state_, lastResumePoint_);
    lastResumePoint_ = rp;
  }
}
1124 
visitArrayState(MArrayState * ins)1125 void ArrayMemoryView::visitArrayState(MArrayState* ins) {
1126   if (ins->isInWorklist()) {
1127     ins->setNotInWorklist();
1128   }
1129 }
1130 
isArrayStateElements(MDefinition * elements)1131 bool ArrayMemoryView::isArrayStateElements(MDefinition* elements) {
1132   return elements->isElements() && elements->toElements()->object() == arr_;
1133 }
1134 
discardInstruction(MInstruction * ins,MDefinition * elements)1135 void ArrayMemoryView::discardInstruction(MInstruction* ins,
1136                                          MDefinition* elements) {
1137   MOZ_ASSERT(elements->isElements());
1138   ins->block()->discard(ins);
1139   if (!elements->hasLiveDefUses()) {
1140     elements->block()->discard(elements->toInstruction());
1141   }
1142 }
1143 
// Replace a store into the array by a fresh MArrayState which records the
// stored value at the (constant) index.
void ArrayMemoryView::visitStoreElement(MStoreElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Register value of the setter in the state.
  int32_t index;
  MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
  // States are immutable: copy before recording the new element value.
  state_ = BlockState::Copy(alloc_, state_);
  if (!state_) {
    oom_ = true;
    return;
  }

  state_->setElement(index, ins->value());
  ins->block()->insertBefore(ins, state_);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1166 
// Replace a load from the array by the value recorded at the (constant)
// index in the current state.
void ArrayMemoryView::visitLoadElement(MLoadElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value contained at the index.
  int32_t index;
  MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
  ins->replaceAllUsesWith(state_->getElement(index));

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1182 
// Record a new initialized length in the array state.
void ArrayMemoryView::visitSetInitializedLength(MSetInitializedLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the new initialized length.  Note that the argument of
  // MSetInitalizedLength is the last index and not the initialized length.
  // To obtain the length, we need to add 1 to it, and thus we need to create
  // a new constant that we register in the ArrayState.
  state_ = BlockState::Copy(alloc_, state_);
  if (!state_) {
    oom_ = true;
    return;
  }

  // NOTE(review): the index is dereferenced as a constant without a null
  // check; presumably the escape analysis only admits constant indexes —
  // confirm against IsElementEscaped.
  int32_t initLengthValue = ins->index()->maybeConstantValue()->toInt32() + 1;
  MConstant* initLength = MConstant::New(alloc_, Int32Value(initLengthValue));
  ins->block()->insertBefore(ins, initLength);
  ins->block()->insertBefore(ins, state_);
  state_->setInitializedLength(initLength);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1209 
visitInitializedLength(MInitializedLength * ins)1210 void ArrayMemoryView::visitInitializedLength(MInitializedLength* ins) {
1211   // Skip other array objects.
1212   MDefinition* elements = ins->elements();
1213   if (!isArrayStateElements(elements)) {
1214     return;
1215   }
1216 
1217   // Replace by the value of the length.
1218   ins->replaceAllUsesWith(state_->initializedLength());
1219 
1220   // Remove original instruction.
1221   discardInstruction(ins, elements);
1222 }
1223 
// Replace MArrayLength by a constant: the number of elements tracked by the
// state (the pass only handles arrays whose length does not change).
void ArrayMemoryView::visitArrayLength(MArrayLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value of the length.  The constant is created once,
  // lazily, and inserted before the allocation so it dominates every use.
  if (!length_) {
    length_ = MConstant::New(alloc_, Int32Value(state_->numElements()));
    arr_->block()->insertBefore(arr_, length_);
  }
  ins->replaceAllUsesWith(length_);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1241 
// Drop the copy-on-write guard: its users are redirected to the allocation
// itself.
void ArrayMemoryView::visitMaybeCopyElementsForWrite(
    MMaybeCopyElementsForWrite* ins) {
  MOZ_ASSERT(ins->numOperands() == 1);
  MOZ_ASSERT(ins->type() == MIRType::Object);

  // Skip guards on other objects.
  if (ins->object() != arr_) {
    return;
  }

  // Nothing to do here: RArrayState::recover will copy the elements if
  // needed.

  // Replace the guard with the array.
  ins->replaceAllUsesWith(arr_);

  // Remove original instruction.
  ins->block()->discard(ins);
}
1261 
// Drop MConvertElementsToDoubles: with scalar replacement the elements never
// exist in memory, so the conversion has no effect.
void ArrayMemoryView::visitConvertElementsToDoubles(
    MConvertElementsToDoubles* ins) {
  MOZ_ASSERT(ins->numOperands() == 1);
  MOZ_ASSERT(ins->type() == MIRType::Elements);

  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // We don't have to do anything else here: MConvertElementsToDoubles just
  // exists to allow MLoadELement to use masm.loadDouble (without checking
  // for int32 elements), but since we're using scalar replacement for the
  // elements that doesn't matter.
  ins->replaceAllUsesWith(elements);

  // Remove original instruction.
  ins->block()->discard(ins);
}
1282 
// Pass entry point: walk the graph in RPO and, for every non-escaped object
// or array allocation, emulate its memory with MObjectState / MArrayState so
// the allocation itself can be recovered on bailout.  Returns false on
// cancellation or OOM.
bool ScalarReplacement(MIRGenerator* mir, MIRGraph& graph) {
  EmulateStateOf<ObjectMemoryView> replaceObject(mir, graph);
  EmulateStateOf<ArrayMemoryView> replaceArray(mir, graph);
  bool addedPhi = false;

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    if (mir->shouldCancel("Scalar Replacement (main loop)")) {
      return false;
    }

    for (MInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      if (IsOptimizableObjectInstruction(*ins) && !IsObjectEscaped(*ins)) {
        ObjectMemoryView view(graph.alloc(), *ins);
        if (!replaceObject.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }

      // The allocation doubles as the |newArray| argument since this is the
      // top-level (non-recursive) escape query.
      if (IsOptimizableArrayInstruction(*ins) && !IsArrayEscaped(*ins, *ins)) {
        ArrayMemoryView view(graph.alloc(), *ins);
        if (!replaceArray.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }
    }
  }

  if (addedPhi) {
    // Phis added by Scalar Replacement are only redundant Phis which are
    // not directly captured by any resume point but only by the MDefinition
    // state. The conservative observability only focuses on Phis which are
    // not used as resume points operands.
    AssertExtendedGraphCoherency(graph);
    if (!EliminatePhis(mir, graph, ConservativeObservability)) {
      return false;
    }
  }

  return true;
}
1331 
1332 } /* namespace jit */
1333 } /* namespace js */
1334