1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/ScalarReplacement.h"
8
9 #include "mozilla/Vector.h"
10
11 #include "jit/IonAnalysis.h"
12 #include "jit/JitSpewer.h"
13 #include "jit/MIR.h"
14 #include "jit/MIRGenerator.h"
15 #include "jit/MIRGraph.h"
16 #include "jit/WarpBuilderShared.h"
17 #include "vm/ArgumentsObject.h"
18
19 #include "vm/JSObject-inl.h"
20
21 namespace js {
22 namespace jit {
23
// Generic driver which performs a single forward abstract-interpretation pass
// over the MIR graph in reverse post-order.  The MemoryView policy object
// supplies the per-block state type (BlockState), one visit method per MIR
// opcode, and the logic for merging a block's exit state into its successors.
template <typename MemoryView>
class EmulateStateOf {
 private:
  using BlockState = typename MemoryView::BlockState;

  MIRGenerator* mir_;
  MIRGraph& graph_;

  // Block state at the entrance of all basic blocks.  Indexed by block id;
  // nullptr means the block has no entry state (not reachable from the
  // allocation being replaced).
  Vector<BlockState*, 8, SystemAllocPolicy> states_;

 public:
  EmulateStateOf(MIRGenerator* mir, MIRGraph& graph)
      : mir_(mir), graph_(graph) {}

  // Run the traversal; returns false on cancellation or OOM.
  bool run(MemoryView& view);
};
41
template <typename MemoryView>
bool EmulateStateOf<MemoryView>::run(MemoryView& view) {
  // Initialize the current block state of each block to an unknown state.
  if (!states_.appendN(nullptr, graph_.numBlocks())) {
    return false;
  }

  // Initialize the first block which needs to be traversed in RPO.
  MBasicBlock* startBlock = view.startingBlock();
  if (!view.initStartingState(&states_[startBlock->id()])) {
    return false;
  }

  // Iterate over each basic block which has a valid entry state, and merge
  // the state in the successor blocks.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel(MemoryView::phaseName)) {
      return false;
    }

    // Get the block state as the result of the merge of all predecessors
    // which have already been visited in RPO. This means that backedges
    // are not yet merged into the loop.
    BlockState* state = states_[block->id()];
    if (!state) {
      // No entry state: the allocation does not flow into this block.
      continue;
    }
    view.setEntryBlockState(state);

    // Iterates over resume points, phi and instructions.
    for (MNodeIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the
      // visit function might discard itself from the basic block.
      MNode* ins = *iter++;
      if (ins->isDefinition()) {
        MDefinition* def = ins->toDefinition();
        // Dispatch to the MemoryView's per-opcode visit method.  Views
        // derive from MDefinitionVisitorDefaultNoop, so unhandled opcodes
        // are no-ops.
        switch (def->op()) {
#define MIR_OP(op)                 \
  case MDefinition::Opcode::op:    \
    view.visit##op(def->to##op()); \
    break;
          MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
        }
      } else {
        view.visitResumePoint(ins->toResumePoint());
      }
      // Visit methods report allocation failure through the view.
      if (view.oom()) {
        return false;
      }
    }

    // For each successor, merge the current state into the state of the
    // successors.
    for (size_t s = 0; s < block->numSuccessors(); s++) {
      MBasicBlock* succ = block->getSuccessor(s);
      if (!view.mergeIntoSuccessorState(*block, succ, &states_[succ->id()])) {
        return false;
      }
    }
  }

  states_.clear();
  return true;
}
108
// Forward declaration: IsObjectEscaped and IsLambdaEscaped are mutually
// recursive, as a lambda escapes its environment object when the object read
// back out of the lambda escapes.
static bool IsObjectEscaped(MInstruction* ins,
                            const Shape* shapeDefault = nullptr);
111
112 // Returns False if the lambda is not escaped and if it is optimizable by
113 // ScalarReplacementOfObject.
IsLambdaEscaped(MInstruction * lambda,const Shape * shape)114 static bool IsLambdaEscaped(MInstruction* lambda, const Shape* shape) {
115 MOZ_ASSERT(lambda->isLambda() || lambda->isLambdaArrow() ||
116 lambda->isFunctionWithProto());
117 JitSpewDef(JitSpew_Escape, "Check lambda\n", lambda);
118 JitSpewIndent spewIndent(JitSpew_Escape);
119
120 // The scope chain is not escaped if none of the Lambdas which are
121 // capturing it are escaped.
122 for (MUseIterator i(lambda->usesBegin()); i != lambda->usesEnd(); i++) {
123 MNode* consumer = (*i)->consumer();
124 if (!consumer->isDefinition()) {
125 // Cannot optimize if it is observable from fun.arguments or others.
126 if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
127 JitSpew(JitSpew_Escape, "Observable lambda cannot be recovered");
128 return true;
129 }
130 continue;
131 }
132
133 MDefinition* def = consumer->toDefinition();
134 if (!def->isFunctionEnvironment()) {
135 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
136 return true;
137 }
138
139 if (IsObjectEscaped(def->toInstruction(), shape)) {
140 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
141 return true;
142 }
143 }
144 JitSpew(JitSpew_Escape, "Lambda is not escaped");
145 return false;
146 }
147
IsOptimizableObjectInstruction(MInstruction * ins)148 static inline bool IsOptimizableObjectInstruction(MInstruction* ins) {
149 return ins->isNewObject() || ins->isNewPlainObject() ||
150 ins->isCreateThisWithTemplate() || ins->isNewCallObject() ||
151 ins->isNewIterator();
152 }
153
// Returns False if the object is not escaped and if it is optimizable by
// ScalarReplacementOfObject.
//
// For the moment, this code is dumb as it only supports objects which are not
// changing shape, and which are known by TI at the object creation.
static bool IsObjectEscaped(MInstruction* ins, const Shape* shapeDefault) {
  MOZ_ASSERT(ins->type() == MIRType::Object);

  JitSpewDef(JitSpew_Escape, "Check object\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // Determine the shape of the allocation: either supplied by the caller
  // (when recursing through guards and aliases), read off an
  // MNewPlainObject, or taken from the template object.
  const Shape* shape = shapeDefault;
  if (!shape) {
    if (ins->isNewPlainObject()) {
      shape = ins->toNewPlainObject()->shape();
    } else if (JSObject* templateObj = MObjectState::templateObjectOf(ins)) {
      shape = templateObj->shape();
    }
  }

  if (!shape) {
    JitSpew(JitSpew_Escape, "No shape defined.");
    return true;
  }

  // Check if the object is escaped. If the object is not the first argument
  // of either a known Store / Load, then we consider it as escaped. This is a
  // cheap and conservative escape analysis.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();
    if (!consumer->isDefinition()) {
      // Cannot optimize if it is observable from fun.arguments or others.
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable object cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::StoreFixedSlot:
      case MDefinition::Opcode::LoadFixedSlot:
        // Not escaped if it is the first argument.
        if (def->indexOf(*i) == 0) {
          break;
        }

        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;

      case MDefinition::Opcode::PostWriteBarrier:
        // Barriers on the allocation will be discarded with it.
        break;

      case MDefinition::Opcode::Slots: {
#ifdef DEBUG
        // Assert that MSlots are only used by MStoreDynamicSlot and
        // MLoadDynamicSlot.
        MSlots* ins = def->toSlots();
        MOZ_ASSERT(ins->object() != 0);
        for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
          // toDefinition should normally never fail, since they don't get
          // captured by resume points.
          MDefinition* def = (*i)->consumer()->toDefinition();
          MOZ_ASSERT(def->op() == MDefinition::Opcode::StoreDynamicSlot ||
                     def->op() == MDefinition::Opcode::LoadDynamicSlot);
        }
#endif
        break;
      }

      case MDefinition::Opcode::GuardShape: {
        MGuardShape* guard = def->toGuardShape();
        // NOTE(review): this assert presumes guards-on-guards do not occur
        // here — confirm against how GuardShape uses are rewritten.
        MOZ_ASSERT(!ins->isGuardShape());
        if (shape != guard->shape()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
          return true;
        }
        // The guard aliases the object: recurse on its uses.
        if (IsObjectEscaped(def->toInstruction(), shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::CheckIsObj: {
        // The check forwards the object: recurse on its uses.
        if (IsObjectEscaped(def->toInstruction(), shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        // The unbox forwards the object: recurse on its uses.
        if (IsObjectEscaped(def->toInstruction(), shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Lambda:
      case MDefinition::Opcode::LambdaArrow:
      case MDefinition::Opcode::FunctionWithProto: {
        // A lambda capturing this object as its environment chain only
        // escapes it if the lambda itself escapes.
        if (IsLambdaEscaped(def->toInstruction(), shape)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      // This instruction is a no-op used to verify that scalar replacement
      // is working as expected in jit-test.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Object is not escaped");
  return false;
}
283
// Emulates the memory of a non-escaped object allocation: stores to the
// object's slots update an MObjectState, loads are replaced by the known
// slot values, and resume points capture the state so that the object can be
// rebuilt on bailout.  Driven by EmulateStateOf<ObjectMemoryView>.
class ObjectMemoryView : public MDefinitionVisitorDefaultNoop {
 public:
  using BlockState = MObjectState;
  static const char phaseName[];

 private:
  TempAllocator& alloc_;
  // Shared constant used as the value of not-yet-initialized slots.
  MConstant* undefinedVal_;
  // The object allocation being scalar-replaced.
  MInstruction* obj_;
  // The block containing the allocation; traversal starts here.
  MBasicBlock* startBlock_;
  // State at the currently visited instruction.
  BlockState* state_;

  // Used to improve the memory usage by sharing common modification.
  const MResumePoint* lastResumePoint_;

  // Set on allocation failure; polled by the driver after each visit.
  bool oom_;

 public:
  ObjectMemoryView(TempAllocator& alloc, MInstruction* obj);

  MBasicBlock* startingBlock();
  bool initStartingState(BlockState** pState);

  void setEntryBlockState(BlockState* state);
  bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
                               BlockState** pSuccState);

#ifdef DEBUG
  void assertSuccess();
#else
  void assertSuccess() {}
#endif

  bool oom() const { return oom_; }

 public:
  void visitResumePoint(MResumePoint* rp);
  void visitObjectState(MObjectState* ins);
  void visitStoreFixedSlot(MStoreFixedSlot* ins);
  void visitLoadFixedSlot(MLoadFixedSlot* ins);
  void visitPostWriteBarrier(MPostWriteBarrier* ins);
  void visitStoreDynamicSlot(MStoreDynamicSlot* ins);
  void visitLoadDynamicSlot(MLoadDynamicSlot* ins);
  void visitGuardShape(MGuardShape* ins);
  void visitCheckIsObj(MCheckIsObj* ins);
  void visitUnbox(MUnbox* ins);
  void visitFunctionEnvironment(MFunctionEnvironment* ins);
  void visitLambda(MLambda* ins);
  void visitLambdaArrow(MLambdaArrow* ins);
  void visitFunctionWithProto(MFunctionWithProto* ins);

 private:
  // Shared implementation for guards which merely forward their object
  // operand (GuardShape, CheckIsObj, Unbox).
  void visitObjectGuard(MInstruction* ins, MDefinition* operand);
};
338
// Phase name reported to MIRGenerator::shouldCancel by the driver.
/* static */ const char ObjectMemoryView::phaseName[] =
    "Scalar Replacement of Object";
341
ObjectMemoryView::ObjectMemoryView(TempAllocator& alloc, MInstruction* obj)
    : alloc_(alloc),
      undefinedVal_(nullptr),
      obj_(obj),
      startBlock_(obj->block()),
      state_(nullptr),
      lastResumePoint_(nullptr),
      oom_(false) {
  // Annotate snapshots RValue such that we recover the store first.
  obj_->setIncompleteObject();

  // Annotate the instruction such that we do not replace it by a
  // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
  obj_->setImplicitlyUsedUnchecked();
}
357
// The traversal starts at the block which contains the allocation.
MBasicBlock* ObjectMemoryView::startingBlock() { return startBlock_; }
359
// Create the initial MObjectState right after the allocation, with every
// slot initialized to undefined.  On success, *pState is the entry state of
// the starting block.
bool ObjectMemoryView::initStartingState(BlockState** pState) {
  // Uninitialized slots have an "undefined" value.
  undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
  startBlock_->insertBefore(obj_, undefinedVal_);

  // Create a new block state and insert at it at the location of the new
  // object.
  BlockState* state = BlockState::New(alloc_, obj_);
  if (!state) {
    return false;
  }

  startBlock_->insertAfter(obj_, state);

  // Initialize the properties of the object state.
  if (!state->initFromTemplateObject(alloc_, undefinedVal_)) {
    return false;
  }

  // Hold out of resume point until it is visited.  The flag is cleared by
  // visitObjectState once the traversal reaches the state instruction.
  state->setInWorklist();

  *pState = state;
  return true;
}
385
// Called by the driver before visiting the nodes of each block.
void ObjectMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
387
// Merge the exit state of |curr| into the entry state of |succ|.  On the
// first visit of a join block, allocate one phi per slot; on subsequent
// visits, fill in this predecessor's phi operands.
bool ObjectMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
                                               MBasicBlock* succ,
                                               BlockState** pSuccState) {
  BlockState* succState = *pSuccState;

  // When a block has no state yet, create an empty one for the
  // successor.
  if (!succState) {
    // If the successor is not dominated then the object cannot flow
    // in this basic block without a Phi. We know that no Phi exist
    // in non-dominated successors as the conservative escaped
    // analysis fails otherwise. Such condition can succeed if the
    // successor is a join at the end of a if-block and the object
    // only exists within the branch.
    if (!startBlock_->dominates(succ)) {
      return true;
    }

    // If there is only one predecessor, carry over the last state of the
    // block to the successor. As the block state is immutable, if the
    // current block has multiple successors, they will share the same entry
    // state.
    if (succ->numPredecessors() <= 1 || !state_->numSlots()) {
      *pSuccState = state_;
      return true;
    }

    // If we have multiple predecessors, then we allocate one Phi node for
    // each predecessor, and create a new block state which only has phi
    // nodes. These would later be removed by the removal of redundant phi
    // nodes.
    succState = BlockState::Copy(alloc_, state_);
    if (!succState) {
      return false;
    }

    size_t numPreds = succ->numPredecessors();
    for (size_t slot = 0; slot < state_->numSlots(); slot++) {
      MPhi* phi = MPhi::New(alloc_.fallible());
      if (!phi || !phi->reserveLength(numPreds)) {
        return false;
      }

      // Fill the input of the successors Phi with undefined
      // values, and each block later fills the Phi inputs.
      for (size_t p = 0; p < numPreds; p++) {
        phi->addInput(undefinedVal_);
      }

      // Add Phi in the list of Phis of the basic block.
      succ->addPhi(phi);
      succState->setSlot(slot, phi);
    }

    // Insert the newly created block state instruction at the beginning
    // of the successor block, after all the phi nodes. Note that it
    // would be captured by the entry resume point of the successor
    // block.
    succ->insertBefore(succ->safeInsertTop(), succState);
    *pSuccState = succState;
  }

  // A loop header may be its own successor; otherwise a merged successor
  // with slots needs this predecessor's values patched into its phis.
  MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
  if (succ->numPredecessors() > 1 && succState->numSlots() &&
      succ != startBlock_) {
    // We need to re-compute successorWithPhis as the previous EliminatePhis
    // phase might have removed all the Phis from the successor block.
    size_t currIndex;
    MOZ_ASSERT(!succ->phisEmpty());
    if (curr->successorWithPhis()) {
      MOZ_ASSERT(curr->successorWithPhis() == succ);
      currIndex = curr->positionInPhiSuccessor();
    } else {
      currIndex = succ->indexForPredecessor(curr);
      curr->setSuccessorWithPhis(succ, currIndex);
    }
    MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);

    // Copy the current slot states to the index of current block in all the
    // Phi created during the first visit of the successor.
    for (size_t slot = 0; slot < state_->numSlots(); slot++) {
      MPhi* phi = succState->getSlot(slot)->toPhi();
      phi->replaceOperand(currIndex, state_->getSlot(slot));
    }
  }

  return true;
}
476
#ifdef DEBUG
// Debug-only check run after the traversal: every remaining use of the
// replaced allocation must be either recoverable on bailout or dead code
// that DCE will remove.
void ObjectMemoryView::assertSuccess() {
  for (MUseIterator i(obj_->usesBegin()); i != obj_->usesEnd(); i++) {
    MNode* ins = (*i)->consumer();
    MDefinition* def = nullptr;

    // Resume points have been replaced by the object state.
    if (ins->isResumePoint() ||
        (def = ins->toDefinition())->isRecoveredOnBailout()) {
      MOZ_ASSERT(obj_->isIncompleteObject());
      continue;
    }

    // The only remaining uses would be removed by DCE, which will also
    // recover the object on bailouts.
    MOZ_ASSERT(def->isSlots() || def->isLambda() || def->isLambdaArrow() ||
               def->isFunctionWithProto());
    MOZ_ASSERT(!def->hasDefUses());
  }
}
#endif
498
// Make the resume point capture the current object state, so the object can
// be reconstructed on bailout.  lastResumePoint_ lets consecutive resume
// points share the common parts of the captured state.
void ObjectMemoryView::visitResumePoint(MResumePoint* rp) {
  // As long as the MObjectState is not yet seen next to the allocation, we do
  // not patch the resume point to recover the side effects.
  if (!state_->isInWorklist()) {
    rp->addStore(alloc_, state_, lastResumePoint_);
    lastResumePoint_ = rp;
  }
}
507
visitObjectState(MObjectState * ins)508 void ObjectMemoryView::visitObjectState(MObjectState* ins) {
509 if (ins->isInWorklist()) {
510 ins->setNotInWorklist();
511 }
512 }
513
visitStoreFixedSlot(MStoreFixedSlot * ins)514 void ObjectMemoryView::visitStoreFixedSlot(MStoreFixedSlot* ins) {
515 // Skip stores made on other objects.
516 if (ins->object() != obj_) {
517 return;
518 }
519
520 // Clone the state and update the slot value.
521 if (state_->hasFixedSlot(ins->slot())) {
522 state_ = BlockState::Copy(alloc_, state_);
523 if (!state_) {
524 oom_ = true;
525 return;
526 }
527
528 state_->setFixedSlot(ins->slot(), ins->value());
529 ins->block()->insertBefore(ins->toInstruction(), state_);
530 } else {
531 // UnsafeSetReserveSlot can access baked-in slots which are guarded by
532 // conditions, which are not seen by the escape analysis.
533 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
534 ins->block()->insertBefore(ins, bailout);
535 }
536
537 // Remove original instruction.
538 ins->block()->discard(ins);
539 }
540
visitLoadFixedSlot(MLoadFixedSlot * ins)541 void ObjectMemoryView::visitLoadFixedSlot(MLoadFixedSlot* ins) {
542 // Skip loads made on other objects.
543 if (ins->object() != obj_) {
544 return;
545 }
546
547 // Replace load by the slot value.
548 if (state_->hasFixedSlot(ins->slot())) {
549 ins->replaceAllUsesWith(state_->getFixedSlot(ins->slot()));
550 } else {
551 // UnsafeGetReserveSlot can access baked-in slots which are guarded by
552 // conditions, which are not seen by the escape analysis.
553 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
554 ins->block()->insertBefore(ins, bailout);
555 ins->replaceAllUsesWith(undefinedVal_);
556 }
557
558 // Remove original instruction.
559 ins->block()->discard(ins);
560 }
561
visitPostWriteBarrier(MPostWriteBarrier * ins)562 void ObjectMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
563 // Skip loads made on other objects.
564 if (ins->object() != obj_) {
565 return;
566 }
567
568 // Remove original instruction.
569 ins->block()->discard(ins);
570 }
571
visitStoreDynamicSlot(MStoreDynamicSlot * ins)572 void ObjectMemoryView::visitStoreDynamicSlot(MStoreDynamicSlot* ins) {
573 // Skip stores made on other objects.
574 MSlots* slots = ins->slots()->toSlots();
575 if (slots->object() != obj_) {
576 // Guard objects are replaced when they are visited.
577 MOZ_ASSERT(!slots->object()->isGuardShape() ||
578 slots->object()->toGuardShape()->object() != obj_);
579 return;
580 }
581
582 // Clone the state and update the slot value.
583 if (state_->hasDynamicSlot(ins->slot())) {
584 state_ = BlockState::Copy(alloc_, state_);
585 if (!state_) {
586 oom_ = true;
587 return;
588 }
589
590 state_->setDynamicSlot(ins->slot(), ins->value());
591 ins->block()->insertBefore(ins->toInstruction(), state_);
592 } else {
593 // UnsafeSetReserveSlot can access baked-in slots which are guarded by
594 // conditions, which are not seen by the escape analysis.
595 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
596 ins->block()->insertBefore(ins, bailout);
597 }
598
599 // Remove original instruction.
600 ins->block()->discard(ins);
601 }
602
visitLoadDynamicSlot(MLoadDynamicSlot * ins)603 void ObjectMemoryView::visitLoadDynamicSlot(MLoadDynamicSlot* ins) {
604 // Skip loads made on other objects.
605 MSlots* slots = ins->slots()->toSlots();
606 if (slots->object() != obj_) {
607 // Guard objects are replaced when they are visited.
608 MOZ_ASSERT(!slots->object()->isGuardShape() ||
609 slots->object()->toGuardShape()->object() != obj_);
610 return;
611 }
612
613 // Replace load by the slot value.
614 if (state_->hasDynamicSlot(ins->slot())) {
615 ins->replaceAllUsesWith(state_->getDynamicSlot(ins->slot()));
616 } else {
617 // UnsafeGetReserveSlot can access baked-in slots which are guarded by
618 // conditions, which are not seen by the escape analysis.
619 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
620 ins->block()->insertBefore(ins, bailout);
621 ins->replaceAllUsesWith(undefinedVal_);
622 }
623
624 // Remove original instruction.
625 ins->block()->discard(ins);
626 }
627
visitObjectGuard(MInstruction * ins,MDefinition * operand)628 void ObjectMemoryView::visitObjectGuard(MInstruction* ins,
629 MDefinition* operand) {
630 MOZ_ASSERT(ins->numOperands() == 1);
631 MOZ_ASSERT(ins->getOperand(0) == operand);
632 MOZ_ASSERT(ins->type() == MIRType::Object);
633
634 // Skip guards on other objects.
635 if (operand != obj_) {
636 return;
637 }
638
639 // Replace the guard by its object.
640 ins->replaceAllUsesWith(obj_);
641
642 // Remove original instruction.
643 ins->block()->discard(ins);
644 }
645
// Shape guards on the replaced object always hold; delegate to the common
// object-guard replacement.
void ObjectMemoryView::visitGuardShape(MGuardShape* ins) {
  visitObjectGuard(ins, ins->object());
}
649
visitCheckIsObj(MCheckIsObj * ins)650 void ObjectMemoryView::visitCheckIsObj(MCheckIsObj* ins) {
651 // Skip checks on other objects.
652 if (ins->input() != obj_) {
653 return;
654 }
655
656 // Replace the check by its object.
657 ins->replaceAllUsesWith(obj_);
658
659 // Remove original instruction.
660 ins->block()->discard(ins);
661 }
662
visitUnbox(MUnbox * ins)663 void ObjectMemoryView::visitUnbox(MUnbox* ins) {
664 // Skip unrelated unboxes.
665 if (ins->input() != obj_) {
666 return;
667 }
668 MOZ_ASSERT(ins->type() == MIRType::Object);
669
670 // Replace the unbox with the object.
671 ins->replaceAllUsesWith(obj_);
672
673 // Remove the unbox.
674 ins->block()->discard(ins);
675 }
676
visitFunctionEnvironment(MFunctionEnvironment * ins)677 void ObjectMemoryView::visitFunctionEnvironment(MFunctionEnvironment* ins) {
678 // Skip function environment which are not aliases of the NewCallObject.
679 MDefinition* input = ins->input();
680 if (input->isLambda()) {
681 if (input->toLambda()->environmentChain() != obj_) {
682 return;
683 }
684 } else if (input->isLambdaArrow()) {
685 if (input->toLambdaArrow()->environmentChain() != obj_) {
686 return;
687 }
688 } else if (input->isFunctionWithProto()) {
689 if (input->toFunctionWithProto()->environmentChain() != obj_) {
690 return;
691 }
692 } else {
693 return;
694 }
695
696 // Replace the function environment by the scope chain of the lambda.
697 ins->replaceAllUsesWith(obj_);
698
699 // Remove original instruction.
700 ins->block()->discard(ins);
701 }
702
visitLambda(MLambda * ins)703 void ObjectMemoryView::visitLambda(MLambda* ins) {
704 if (ins->environmentChain() != obj_) {
705 return;
706 }
707
708 // In order to recover the lambda we need to recover the scope chain, as the
709 // lambda is holding it.
710 ins->setIncompleteObject();
711 }
712
visitLambdaArrow(MLambdaArrow * ins)713 void ObjectMemoryView::visitLambdaArrow(MLambdaArrow* ins) {
714 if (ins->environmentChain() != obj_) {
715 return;
716 }
717
718 ins->setIncompleteObject();
719 }
720
visitFunctionWithProto(MFunctionWithProto * ins)721 void ObjectMemoryView::visitFunctionWithProto(MFunctionWithProto* ins) {
722 if (ins->environmentChain() != obj_) {
723 return;
724 }
725
726 ins->setIncompleteObject();
727 }
728
IndexOf(MDefinition * ins,int32_t * res)729 static bool IndexOf(MDefinition* ins, int32_t* res) {
730 MOZ_ASSERT(ins->isLoadElement() || ins->isStoreElement());
731 MDefinition* indexDef = ins->getOperand(1); // ins->index();
732 if (indexDef->isSpectreMaskIndex()) {
733 indexDef = indexDef->toSpectreMaskIndex()->index();
734 }
735 if (indexDef->isBoundsCheck()) {
736 indexDef = indexDef->toBoundsCheck()->index();
737 }
738 if (indexDef->isToNumberInt32()) {
739 indexDef = indexDef->toToNumberInt32()->getOperand(0);
740 }
741 MConstant* indexDefConst = indexDef->maybeConstantValue();
742 if (!indexDefConst || indexDefConst->type() != MIRType::Int32) {
743 return false;
744 }
745 *res = indexDefConst->toInt32();
746 return true;
747 }
748
// Returns False if the elements is not escaped and if it is optimizable by
// ScalarReplacementOfArray.
static bool IsElementEscaped(MDefinition* def, uint32_t arraySize) {
  MOZ_ASSERT(def->isElements());

  JitSpewDef(JitSpew_Escape, "Check elements\n", def);
  JitSpewIndent spewIndent(JitSpew_Escape);

  for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) {
    // The MIRType::Elements cannot be captured in a resume point as
    // it does not represent a value allocation.
    MDefinition* access = (*i)->consumer()->toDefinition();

    switch (access->op()) {
      case MDefinition::Opcode::LoadElement: {
        MOZ_ASSERT(access->toLoadElement()->elements() == def);

        // If the index is not a constant then this index can alias
        // all others. We do not handle this case.
        int32_t index;
        if (!IndexOf(access, &index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a load element with a non-trivial index\n", access);
          return true;
        }
        if (index < 0 || arraySize <= uint32_t(index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a load element with an out-of-bound index\n", access);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::StoreElement: {
        MStoreElement* storeElem = access->toStoreElement();
        MOZ_ASSERT(storeElem->elements() == def);

        // StoreElement must bail out if it stores to a hole, in case
        // there is a setter on the prototype chain. If this StoreElement
        // might store to a hole, we can't scalar-replace it.
        if (storeElem->needsHoleCheck()) {
          JitSpewDef(JitSpew_Escape, "has a store element with a hole check\n",
                     storeElem);
          return true;
        }

        // If the index is not a constant then this index can alias
        // all others. We do not handle this case.
        int32_t index;
        if (!IndexOf(storeElem, &index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with a non-trivial index\n",
                     storeElem);
          return true;
        }
        if (index < 0 || arraySize <= uint32_t(index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with an out-of-bound index\n",
                     storeElem);
          return true;
        }

        // Dense element holes are written using MStoreHoleValueElement instead
        // of MStoreElement.
        MOZ_ASSERT(storeElem->value()->type() != MIRType::MagicHole);
        break;
      }

      // Length bookkeeping instructions are modeled by ArrayMemoryView and
      // therefore do not count as escapes.
      case MDefinition::Opcode::SetInitializedLength:
        MOZ_ASSERT(access->toSetInitializedLength()->elements() == def);
        break;

      case MDefinition::Opcode::InitializedLength:
        MOZ_ASSERT(access->toInitializedLength()->elements() == def);
        break;

      case MDefinition::Opcode::ArrayLength:
        MOZ_ASSERT(access->toArrayLength()->elements() == def);
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", access);
        return true;
    }
  }
  JitSpew(JitSpew_Escape, "Elements is not escaped");
  return false;
}
837
IsOptimizableArrayInstruction(MInstruction * ins)838 static inline bool IsOptimizableArrayInstruction(MInstruction* ins) {
839 return ins->isNewArray() || ins->isNewArrayObject();
840 }
841
// Returns False if the array is not escaped and if it is optimizable by
// ScalarReplacementOfArray.
//
// For the moment, this code is dumb as it only supports arrays which are not
// changing length, with only access with known constants.
static bool IsArrayEscaped(MInstruction* ins, MInstruction* newArray) {
  MOZ_ASSERT(ins->type() == MIRType::Object);
  MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));

  JitSpewDef(JitSpew_Escape, "Check array\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // Determine the length and shape of the allocation, either directly from
  // the MNewArrayObject or from the template object of an MNewArray.
  const Shape* shape;
  uint32_t length;
  if (newArray->isNewArrayObject()) {
    length = newArray->toNewArrayObject()->length();
    shape = newArray->toNewArrayObject()->shape();
  } else {
    length = newArray->toNewArray()->length();
    JSObject* templateObject = newArray->toNewArray()->templateObject();
    if (!templateObject) {
      JitSpew(JitSpew_Escape, "No template object defined.");
      return true;
    }
    shape = templateObject->shape();
  }

  // Arbitrary limit to keep the number of emulated element slots small.
  if (length >= 16) {
    JitSpew(JitSpew_Escape, "Array has too many elements");
    return true;
  }

  // Check if the object is escaped. If the object is not the first argument
  // of either a known Store / Load, then we consider it as escaped. This is a
  // cheap and conservative escape analysis.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();
    if (!consumer->isDefinition()) {
      // Cannot optimize if it is observable from fun.arguments or others.
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable array cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::Elements: {
        // Accesses through the elements pointer are checked separately.
        MElements* elem = def->toElements();
        MOZ_ASSERT(elem->object() == ins);
        if (IsElementEscaped(elem, length)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", elem);
          return true;
        }

        break;
      }

      case MDefinition::Opcode::GuardShape: {
        MGuardShape* guard = def->toGuardShape();
        if (shape != guard->shape()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
          return true;
        }
        // The guard aliases the array: recurse on its uses.
        if (IsArrayEscaped(guard, newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }

        break;
      }

      case MDefinition::Opcode::GuardToClass: {
        MGuardToClass* guard = def->toGuardToClass();
        if (shape->getObjectClass() != guard->getClass()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
          return true;
        }
        // The guard aliases the array: recurse on its uses.
        if (IsArrayEscaped(guard, newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }

        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        // The unbox forwards the array: recurse on its uses.
        if (IsArrayEscaped(def->toInstruction(), newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      // Write barriers will be discarded along with the allocation.
      case MDefinition::Opcode::PostWriteBarrier:
      case MDefinition::Opcode::PostWriteElementBarrier:
        break;

      // This instruction is a no-op used to verify that scalar replacement
      // is working as expected in jit-test.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Array is not escaped");
  return false;
}
959
960 // This class replaces every MStoreElement and MSetInitializedLength by an
961 // MArrayState which emulates the content of the array. All MLoadElement,
962 // MInitializedLength and MArrayLength are replaced by the corresponding value.
963 //
// In order to restore the value of the array correctly in case of bailouts, we
// replace all references to the allocation with the MArrayState definition.
class ArrayMemoryView : public MDefinitionVisitorDefaultNoop {
 public:
  using BlockState = MArrayState;

  // Phase name reported when checking for compilation cancellation.
  static const char* phaseName;

 private:
  // Allocator used for all MIR nodes created by this view.
  TempAllocator& alloc_;

  // Constant "undefined" value used for uninitialized elements; created in
  // initStartingState().
  MConstant* undefinedVal_;

  // Lazily created constant holding the array length; see visitArrayLength().
  MConstant* length_;

  // The array allocation instruction being scalar-replaced.
  MInstruction* arr_;

  // Block containing the array allocation.
  MBasicBlock* startBlock_;

  // Array state at the current point of the traversal.
  BlockState* state_;

  // Used to improve the memory usage by sharing common modification.
  const MResumePoint* lastResumePoint_;

  // Set if an allocation failed while emulating the array content; checked by
  // the driver via oom().
  bool oom_;

 public:
  ArrayMemoryView(TempAllocator& alloc, MInstruction* arr);

  MBasicBlock* startingBlock();
  bool initStartingState(BlockState** pState);

  void setEntryBlockState(BlockState* state);
  bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
                               BlockState** pSuccState);

#ifdef DEBUG
  void assertSuccess();
#else
  void assertSuccess() {}
#endif

  bool oom() const { return oom_; }

 private:
  // Whether |elements| is the MElements of the array being replaced.
  bool isArrayStateElements(MDefinition* elements);

  // Discard |ins| and, if it became dead, the MElements it consumed.
  void discardInstruction(MInstruction* ins, MDefinition* elements);

 public:
  void visitResumePoint(MResumePoint* rp);
  void visitArrayState(MArrayState* ins);
  void visitStoreElement(MStoreElement* ins);
  void visitLoadElement(MLoadElement* ins);
  void visitSetInitializedLength(MSetInitializedLength* ins);
  void visitInitializedLength(MInitializedLength* ins);
  void visitArrayLength(MArrayLength* ins);
  void visitPostWriteBarrier(MPostWriteBarrier* ins);
  void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins);
  void visitGuardShape(MGuardShape* ins);
  void visitGuardToClass(MGuardToClass* ins);
  void visitUnbox(MUnbox* ins);
};
1020
// Name used by EmulateStateOf when polling MIRGenerator::shouldCancel.
const char* ArrayMemoryView::phaseName = "Scalar Replacement of Array";
1022
// Construct a view over the array allocation |arr|, which has already been
// proven non-escaping by IsArrayEscaped.
ArrayMemoryView::ArrayMemoryView(TempAllocator& alloc, MInstruction* arr)
    : alloc_(alloc),
      undefinedVal_(nullptr),
      length_(nullptr),
      arr_(arr),
      startBlock_(arr->block()),
      state_(nullptr),
      lastResumePoint_(nullptr),
      oom_(false) {
  // Annotate snapshots RValue such that we recover the store first.
  arr_->setIncompleteObject();

  // Annotate the instruction such that we do not replace it by a
  // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
  arr_->setImplicitlyUsedUnchecked();
}
1039
startingBlock()1040 MBasicBlock* ArrayMemoryView::startingBlock() { return startBlock_; }
1041
// Build the initial MArrayState describing the freshly allocated array:
// initialized length 0 and every element "undefined". Returns false on OOM.
bool ArrayMemoryView::initStartingState(BlockState** pState) {
  // Uninitialized elements have an "undefined" value.
  undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
  MConstant* initLength = MConstant::New(alloc_, Int32Value(0));
  arr_->block()->insertBefore(arr_, undefinedVal_);
  arr_->block()->insertBefore(arr_, initLength);

  // Create a new block state and insert it at the location of the new array.
  BlockState* state = BlockState::New(alloc_, arr_, initLength);
  if (!state) {
    return false;
  }

  startBlock_->insertAfter(arr_, state);

  // Initialize the elements of the array state.
  if (!state->initFromTemplateObject(alloc_, undefinedVal_)) {
    return false;
  }

  // Keep the state out of resume points until visitArrayState clears this
  // flag; see visitResumePoint.
  state->setInWorklist();

  *pState = state;
  return true;
}
1068
setEntryBlockState(BlockState * state)1069 void ArrayMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
1070
// Propagate the exit state of |curr| into |succ|. On the first visit of a
// join block this allocates one Phi per element; on later visits it fills the
// operand slot corresponding to |curr| in those Phis. Returns false on OOM.
bool ArrayMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
                                              MBasicBlock* succ,
                                              BlockState** pSuccState) {
  BlockState* succState = *pSuccState;

  // When a block has no state yet, create an empty one for the
  // successor.
  if (!succState) {
    // If the successor is not dominated then the array cannot flow
    // in this basic block without a Phi. We know that no Phi exist
    // in non-dominated successors as the conservative escaped
    // analysis fails otherwise. Such condition can succeed if the
    // successor is a join at the end of a if-block and the array
    // only exists within the branch.
    if (!startBlock_->dominates(succ)) {
      return true;
    }

    // If there is only one predecessor, carry over the last state of the
    // block to the successor. As the block state is immutable, if the
    // current block has multiple successors, they will share the same entry
    // state.
    if (succ->numPredecessors() <= 1 || !state_->numElements()) {
      *pSuccState = state_;
      return true;
    }

    // If we have multiple predecessors, then we allocate one Phi node for
    // each predecessor, and create a new block state which only has phi
    // nodes. These would later be removed by the removal of redundant phi
    // nodes.
    succState = BlockState::Copy(alloc_, state_);
    if (!succState) {
      return false;
    }

    size_t numPreds = succ->numPredecessors();
    for (size_t index = 0; index < state_->numElements(); index++) {
      MPhi* phi = MPhi::New(alloc_.fallible());
      if (!phi || !phi->reserveLength(numPreds)) {
        return false;
      }

      // Fill the input of the successors Phi with undefined
      // values, and each block later fills the Phi inputs.
      for (size_t p = 0; p < numPreds; p++) {
        phi->addInput(undefinedVal_);
      }

      // Add Phi in the list of Phis of the basic block.
      succ->addPhi(phi);
      succState->setElement(index, phi);
    }

    // Insert the newly created block state instruction at the beginning
    // of the successor block, after all the phi nodes. Note that it
    // would be captured by the entry resume point of the successor
    // block.
    succ->insertBefore(succ->safeInsertTop(), succState);
    *pSuccState = succState;
  }

  MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
  if (succ->numPredecessors() > 1 && succState->numElements() &&
      succ != startBlock_) {
    // We need to re-compute successorWithPhis as the previous EliminatePhis
    // phase might have removed all the Phis from the successor block.
    size_t currIndex;
    MOZ_ASSERT(!succ->phisEmpty());
    if (curr->successorWithPhis()) {
      MOZ_ASSERT(curr->successorWithPhis() == succ);
      currIndex = curr->positionInPhiSuccessor();
    } else {
      currIndex = succ->indexForPredecessor(curr);
      curr->setSuccessorWithPhis(succ, currIndex);
    }
    MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);

    // Copy the current element states to the index of current block in all
    // the Phi created during the first visit of the successor.
    for (size_t index = 0; index < state_->numElements(); index++) {
      MPhi* phi = succState->getElement(index)->toPhi();
      phi->replaceOperand(currIndex, state_->getElement(index));
    }
  }

  return true;
}
1159
#ifdef DEBUG
// After a successful run, the allocation may only remain referenced by resume
// points (which can recover it), never by live definitions.
void ArrayMemoryView::assertSuccess() { MOZ_ASSERT(!arr_->hasLiveDefUses()); }
#endif
1163
// Attach the current array state to |rp| so bailouts can rebuild the array.
// lastResumePoint_ lets consecutive resume points share the store list.
void ArrayMemoryView::visitResumePoint(MResumePoint* rp) {
  // As long as the MArrayState is not yet seen next to the allocation, we do
  // not patch the resume point to recover the side effects.
  if (!state_->isInWorklist()) {
    rp->addStore(alloc_, state_, lastResumePoint_);
    lastResumePoint_ = rp;
  }
}
1172
visitArrayState(MArrayState * ins)1173 void ArrayMemoryView::visitArrayState(MArrayState* ins) {
1174 if (ins->isInWorklist()) {
1175 ins->setNotInWorklist();
1176 }
1177 }
1178
isArrayStateElements(MDefinition * elements)1179 bool ArrayMemoryView::isArrayStateElements(MDefinition* elements) {
1180 return elements->isElements() && elements->toElements()->object() == arr_;
1181 }
1182
discardInstruction(MInstruction * ins,MDefinition * elements)1183 void ArrayMemoryView::discardInstruction(MInstruction* ins,
1184 MDefinition* elements) {
1185 MOZ_ASSERT(elements->isElements());
1186 ins->block()->discard(ins);
1187 if (!elements->hasLiveDefUses()) {
1188 elements->block()->discard(elements->toInstruction());
1189 }
1190 }
1191
// Replace a store into the array by a new MArrayState snapshotting the stored
// value. On OOM, sets oom_ and leaves the graph untouched.
void ArrayMemoryView::visitStoreElement(MStoreElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Register value of the setter in the state. The index is known to be a
  // constant (IndexOf asserts this analysis already held).
  int32_t index;
  MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
  state_ = BlockState::Copy(alloc_, state_);
  if (!state_) {
    oom_ = true;
    return;
  }

  state_->setElement(index, ins->value());
  ins->block()->insertBefore(ins, state_);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1214
// Replace a load from the array by the value tracked in the current state.
void ArrayMemoryView::visitLoadElement(MLoadElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value contained at the index.
  int32_t index;
  MOZ_ALWAYS_TRUE(IndexOf(ins, &index));

  // The only way to store a hole value in a new array is with
  // StoreHoleValueElement, which IsElementEscaped does not allow.
  // Therefore, we do not have to do a hole check.
  MDefinition* element = state_->getElement(index);
  MOZ_ASSERT(element->type() != MIRType::MagicHole);

  ins->replaceAllUsesWith(element);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1237
// Replace an update of the initialized length by a new MArrayState recording
// the new length. On OOM, sets oom_ and leaves the graph untouched.
void ArrayMemoryView::visitSetInitializedLength(MSetInitializedLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the new initialized length. Note that the argument of
  // MSetInitializedLength is the last index and not the initialized length.
  // To obtain the length, we need to add 1 to it, and thus we need to create
  // a new constant that we register in the ArrayState.
  state_ = BlockState::Copy(alloc_, state_);
  if (!state_) {
    oom_ = true;
    return;
  }

  int32_t initLengthValue = ins->index()->maybeConstantValue()->toInt32() + 1;
  MConstant* initLength = MConstant::New(alloc_, Int32Value(initLengthValue));
  ins->block()->insertBefore(ins, initLength);
  ins->block()->insertBefore(ins, state_);
  state_->setInitializedLength(initLength);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1264
visitInitializedLength(MInitializedLength * ins)1265 void ArrayMemoryView::visitInitializedLength(MInitializedLength* ins) {
1266 // Skip other array objects.
1267 MDefinition* elements = ins->elements();
1268 if (!isArrayStateElements(elements)) {
1269 return;
1270 }
1271
1272 // Replace by the value of the length.
1273 ins->replaceAllUsesWith(state_->initializedLength());
1274
1275 // Remove original instruction.
1276 discardInstruction(ins, elements);
1277 }
1278
// Replace a read of the array length by a constant. The length of the
// replaced array never changes, so one constant is lazily created next to the
// allocation and shared by all MArrayLength users.
void ArrayMemoryView::visitArrayLength(MArrayLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value of the length.
  if (!length_) {
    length_ = MConstant::New(alloc_, Int32Value(state_->numElements()));
    // Insert at the allocation so the constant dominates every use.
    arr_->block()->insertBefore(arr_, length_);
  }
  ins->replaceAllUsesWith(length_);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1296
visitPostWriteBarrier(MPostWriteBarrier * ins)1297 void ArrayMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
1298 // Skip barriers on other objects.
1299 if (ins->object() != arr_) {
1300 return;
1301 }
1302
1303 // Remove original instruction.
1304 ins->block()->discard(ins);
1305 }
1306
visitPostWriteElementBarrier(MPostWriteElementBarrier * ins)1307 void ArrayMemoryView::visitPostWriteElementBarrier(
1308 MPostWriteElementBarrier* ins) {
1309 // Skip barriers on other objects.
1310 if (ins->object() != arr_) {
1311 return;
1312 }
1313
1314 // Remove original instruction.
1315 ins->block()->discard(ins);
1316 }
1317
visitGuardShape(MGuardShape * ins)1318 void ArrayMemoryView::visitGuardShape(MGuardShape* ins) {
1319 // Skip guards on other objects.
1320 if (ins->object() != arr_) {
1321 return;
1322 }
1323
1324 // Replace the guard by its object.
1325 ins->replaceAllUsesWith(arr_);
1326
1327 // Remove original instruction.
1328 ins->block()->discard(ins);
1329 }
1330
visitGuardToClass(MGuardToClass * ins)1331 void ArrayMemoryView::visitGuardToClass(MGuardToClass* ins) {
1332 // Skip guards on other objects.
1333 if (ins->object() != arr_) {
1334 return;
1335 }
1336
1337 // Replace the guard by its object.
1338 ins->replaceAllUsesWith(arr_);
1339
1340 // Remove original instruction.
1341 ins->block()->discard(ins);
1342 }
1343
visitUnbox(MUnbox * ins)1344 void ArrayMemoryView::visitUnbox(MUnbox* ins) {
1345 // Skip unrelated unboxes.
1346 if (ins->getOperand(0) != arr_) {
1347 return;
1348 }
1349 MOZ_ASSERT(ins->type() == MIRType::Object);
1350
1351 // Replace the unbox with the array object.
1352 ins->replaceAllUsesWith(arr_);
1353
1354 // Remove the unbox.
1355 ins->block()->discard(ins);
1356 }
1357
IsOptimizableArgumentsInstruction(MInstruction * ins)1358 static inline bool IsOptimizableArgumentsInstruction(MInstruction* ins) {
1359 return ins->isCreateArgumentsObject() ||
1360 ins->isCreateInlinedArgumentsObject();
1361 }
1362
// Replaces every supported use of a non-escaping arguments object with a
// direct read of the frame (or of the inlined call's actual arguments),
// allowing the allocation itself to be recovered on bailout.
class ArgumentsReplacer : public MDefinitionVisitorDefaultNoop {
 private:
  MIRGenerator* mir_;
  MIRGraph& graph_;

  // The MCreateArgumentsObject / MCreateInlinedArgumentsObject being replaced.
  MInstruction* args_;

  TempAllocator& alloc() { return graph_.alloc(); }

  // True when args_ belongs to an inlined frame, in which case the actual
  // arguments are available as MIR operands of the allocation.
  bool isInlinedArguments() const {
    return args_->isCreateInlinedArgumentsObject();
  }

  void visitGuardToClass(MGuardToClass* ins);
  void visitGuardArgumentsObjectFlags(MGuardArgumentsObjectFlags* ins);
  void visitUnbox(MUnbox* ins);
  void visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins);
  void visitLoadArgumentsObjectArg(MLoadArgumentsObjectArg* ins);
  void visitArgumentsObjectLength(MArgumentsObjectLength* ins);
  void visitApplyArgsObj(MApplyArgsObj* ins);
  void visitLoadFixedSlot(MLoadFixedSlot* ins);

 public:
  ArgumentsReplacer(MIRGenerator* mir, MIRGraph& graph, MInstruction* args)
      : mir_(mir), graph_(graph), args_(args) {
    MOZ_ASSERT(IsOptimizableArgumentsInstruction(args_));
  }

  // Escape analysis: returns true if any use prevents replacement.
  bool escapes(MInstruction* ins, bool guardedForMapped = false);
  bool run();
  void assertSuccess();
};
1394
1395 // Returns false if the arguments object does not escape.
// Returns false if the arguments object does not escape. Recurses through
// transparent consumers (guards, unboxes); |guardedForMapped| records whether
// a dominating class guard proved the object is a mapped arguments object.
bool ArgumentsReplacer::escapes(MInstruction* ins, bool guardedForMapped) {
  MOZ_ASSERT(ins->type() == MIRType::Object);

  JitSpewDef(JitSpew_Escape, "Check arguments object\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // We can replace inlined arguments in scripts with OSR entries, but
  // the outermost arguments object has already been allocated before
  // we enter via OSR and can't be replaced.
  if (ins->isCreateArgumentsObject() && graph_.osrBlock()) {
    JitSpew(JitSpew_Escape, "Can't replace outermost OSR arguments");
    return true;
  }

  // Check all uses to see whether they can be supported without
  // allocating an ArgumentsObject.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();

    // If a resume point can observe this instruction, we can only optimize
    // if it is recoverable.
    if (consumer->isResumePoint()) {
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable args object cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::GuardToClass: {
        MGuardToClass* guard = def->toGuardToClass();
        if (!guard->isArgumentsObjectClass()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
          return true;
        }
        // A mapped-class guard licenses arguments.callee loads below it.
        bool isMapped = guard->getClass() == &MappedArgumentsObject::class_;
        if (escapes(guard, isMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardArgumentsObjectFlags: {
        if (escapes(def->toInstruction(), guardedForMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (escapes(def->toInstruction())) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::LoadFixedSlot: {
        MLoadFixedSlot* load = def->toLoadFixedSlot();

        // We can replace arguments.callee.
        if (load->slot() == ArgumentsObject::CALLEE_SLOT) {
          MOZ_ASSERT(guardedForMapped);
          continue;
        }
        JitSpew(JitSpew_Escape, "is escaped by unsupported LoadFixedSlot\n");
        return true;
      }

      case MDefinition::Opcode::ApplyArgsObj: {
        // Only the args-object operand is replaceable; escaping as |this|
        // would require materializing the object.
        if (ins == def->toApplyArgsObj()->getThis()) {
          JitSpew(JitSpew_Escape, "is escaped as |this| arg of ApplyArgsObj\n");
          return true;
        }
        MOZ_ASSERT(ins == def->toApplyArgsObj()->getArgsObj());
        break;
      }

      // This is a replaceable consumer.
      case MDefinition::Opcode::ArgumentsObjectLength:
      case MDefinition::Opcode::GetArgumentsObjectArg:
      case MDefinition::Opcode::LoadArgumentsObjectArg:
        break;

      // This instruction is a no-op used to test that scalar replacement
      // is working as expected.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "ArgumentsObject is not escaped");
  return false;
}
1502
1503 // Replacing the arguments object is simpler than replacing an object
1504 // or array, because the arguments object does not change state.
// Replacing the arguments object is simpler than replacing an object
// or array, because the arguments object does not change state.
// Visits every definition reachable from the allocation's block in RPO and
// dispatches to the visit* methods above. Returns false on cancellation.
bool ArgumentsReplacer::run() {
  MBasicBlock* startBlock = args_->block();

  // Iterate over each basic block.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel("Scalar replacement of Arguments Object")) {
      return false;
    }

    // Iterates over phis and instructions.
    // We do not have to visit resume points. Any resume points that capture
    // the argument object will be handled by the Sink pass.
    for (MNodeIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the
      // visit function might discard itself from the basic block.
      MNode* ins = *iter++;
      if (ins->isDefinition()) {
        MDefinition* def = ins->toDefinition();
        // Expand one dispatch case per MIR opcode; unhandled opcodes fall
        // through to MDefinitionVisitorDefaultNoop's empty visit methods.
        switch (def->op()) {
#define MIR_OP(op)              \
  case MDefinition::Opcode::op: \
    visit##op(def->to##op());   \
    break;
          MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
        }
      }
    }
  }

  assertSuccess();
  return true;
}
1539
// After replacement, the allocation must be recoverable on bailout and have
// no remaining non-resume-point uses.
void ArgumentsReplacer::assertSuccess() {
  MOZ_ASSERT(args_->canRecoverOnBailout());
  MOZ_ASSERT(!args_->hasLiveDefUses());
}
1544
visitGuardToClass(MGuardToClass * ins)1545 void ArgumentsReplacer::visitGuardToClass(MGuardToClass* ins) {
1546 // Skip guards on other objects.
1547 if (ins->object() != args_) {
1548 return;
1549 }
1550 MOZ_ASSERT(ins->isArgumentsObjectClass());
1551
1552 // Replace the guard with the args object.
1553 ins->replaceAllUsesWith(args_);
1554
1555 // Remove the guard.
1556 ins->block()->discard(ins);
1557 }
1558
// Flag guards on the args object are statically known to hold (asserted
// below), so forward the object and remove the guard.
void ArgumentsReplacer::visitGuardArgumentsObjectFlags(
    MGuardArgumentsObjectFlags* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

#ifdef DEBUG
  // Each *_OVERRIDDEN_BIT can only be set by setting or deleting a
  // property of the args object. We have already determined that the
  // args object doesn't escape, so its properties can't be mutated.
  //
  // FORWARDED_ARGUMENTS_BIT is set if any mapped argument is closed
  // over, which is an immutable property of the script. Because we
  // are replacing the args object for a known script, we can check
  // the flag once, which is done when we first attach the CacheIR,
  // and rely on it. (Note that this wouldn't be true if we didn't
  // know the origin of args_, because it could be passed in from
  // another function.)
  uint32_t supportedBits = ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
                           ArgumentsObject::ITERATOR_OVERRIDDEN_BIT |
                           ArgumentsObject::ELEMENT_OVERRIDDEN_BIT |
                           ArgumentsObject::CALLEE_OVERRIDDEN_BIT |
                           ArgumentsObject::FORWARDED_ARGUMENTS_BIT;

  MOZ_ASSERT((ins->flags() & ~supportedBits) == 0);
  MOZ_ASSERT_IF(ins->flags() & ArgumentsObject::FORWARDED_ARGUMENTS_BIT,
                !args_->block()->info().anyFormalIsForwarded());
#endif

  // Replace the guard with the args object.
  ins->replaceAllUsesWith(args_);

  // Remove the guard.
  ins->block()->discard(ins);
}
1595
visitUnbox(MUnbox * ins)1596 void ArgumentsReplacer::visitUnbox(MUnbox* ins) {
1597 // Skip unrelated unboxes.
1598 if (ins->getOperand(0) != args_) {
1599 return;
1600 }
1601 MOZ_ASSERT(ins->type() == MIRType::Object);
1602
1603 // Replace the unbox with the args object.
1604 ins->replaceAllUsesWith(args_);
1605
1606 // Remove the unbox.
1607 ins->block()->discard(ins);
1608 }
1609
// Replace a read of a known argument index by the argument value itself:
// the inlined call's actual-argument operand, or a frame-argument load.
void ArgumentsReplacer::visitGetArgumentsObjectArg(
    MGetArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  // We don't support setting arguments in ArgumentsReplacer::escapes,
  // so we can load the initial value of the argument without worrying
  // about it being stale.
  MDefinition* getArg;
  if (isInlinedArguments()) {
    // Inlined frames have direct access to the actual arguments.
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    if (ins->argno() < actualArgs->numActuals()) {
      getArg = actualArgs->getArg(ins->argno());
    } else {
      // Omitted arguments are not mapped to the arguments object, and
      // will always be undefined.
      auto* undef = MConstant::New(alloc(), UndefinedValue());
      ins->block()->insertBefore(ins, undef);
      getArg = undef;
    }
  } else {
    // Load the argument from the frame.
    auto* index = MConstant::New(alloc(), Int32Value(ins->argno()));
    ins->block()->insertBefore(ins, index);

    auto* loadArg = MGetFrameArgument::New(alloc(), index);
    ins->block()->insertBefore(ins, loadArg);
    getArg = loadArg;
  }
  ins->replaceAllUsesWith(getArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}
1647
// Replace a dynamically-indexed argument load by a bounds-checked read of the
// inlined actuals or of the frame arguments.
void ArgumentsReplacer::visitLoadArgumentsObjectArg(
    MLoadArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MDefinition* index = ins->index();

  MInstruction* loadArg;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();

    // Insert bounds check.
    auto* length =
        MConstant::New(alloc(), Int32Value(actualArgs->numActuals()));
    ins->block()->insertBefore(ins, length);

    MInstruction* check = MBoundsCheck::New(alloc(), index, length);
    check->setBailoutKind(ins->bailoutKind());
    ins->block()->insertBefore(ins, check);

    // Pin the check in place if this script already bailed out on a bounds
    // check, to avoid a hoisting-triggered bailout loop.
    if (mir_->outerInfo().hadBoundsCheckBailout()) {
      check->setNotMovable();
    }

    loadArg = MGetInlinedArgument::New(alloc(), check, actualArgs);
  } else {
    // Insert bounds check.
    auto* length = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, length);

    MInstruction* check = MBoundsCheck::New(alloc(), index, length);
    check->setBailoutKind(ins->bailoutKind());
    ins->block()->insertBefore(ins, check);

    if (mir_->outerInfo().hadBoundsCheckBailout()) {
      check->setNotMovable();
    }

    // Mask the index to blunt speculative out-of-bounds reads.
    if (JitOptions.spectreIndexMasking) {
      check = MSpectreMaskIndex::New(alloc(), check, length);
      ins->block()->insertBefore(ins, check);
    }

    loadArg = MGetFrameArgument::New(alloc(), check);
  }
  ins->block()->insertBefore(ins, loadArg);
  ins->replaceAllUsesWith(loadArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}
1701
// Replace arguments.length by the (constant) inlined actual count, or by a
// frame-argument-count read for the outermost frame.
void ArgumentsReplacer::visitArgumentsObjectLength(
    MArgumentsObjectLength* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MInstruction* length;
  if (isInlinedArguments()) {
    uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
    length = MConstant::New(alloc(), Int32Value(argc));
  } else {
    length = MArgumentsLength::New(alloc());
  }
  ins->block()->insertBefore(ins, length);
  ins->replaceAllUsesWith(length);

  // Remove original instruction.
  ins->block()->discard(ins);
}
1722
// Replace fun.apply(thisArg, argsObj) on the replaced args object: inlined
// frames become a direct MCall over the known actuals, outer frames become an
// MApplyArgs reading the frame.
void ArgumentsReplacer::visitApplyArgsObj(MApplyArgsObj* ins) {
  // Skip other arguments objects.
  if (ins->getArgsObj() != args_) {
    return;
  }

  MInstruction* newIns;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    CallInfo callInfo(alloc(), /*constructing=*/false,
                      ins->ignoresReturnValue());

    // Forward the inlined actuals as explicit call arguments.
    callInfo.initForApplyInlinedArgs(ins->getFunction(), ins->getThis(),
                                     actualArgs->numActuals());
    for (uint32_t i = 0; i < actualArgs->numActuals(); i++) {
      callInfo.initArg(i, actualArgs->getArg(i));
    }

    auto addUndefined = [this, &ins]() -> MConstant* {
      MConstant* undef = MConstant::New(alloc(), UndefinedValue());
      ins->block()->insertBefore(ins, undef);
      return undef;
    };

    bool needsThisCheck = false;
    bool isDOMCall = false;
    auto* call = MakeCall(alloc(), addUndefined, callInfo, needsThisCheck,
                          ins->getSingleTarget(), isDOMCall);
    if (!ins->maybeCrossRealm()) {
      call->setNotCrossRealm();
    }
    newIns = call;
  } else {
    auto* numArgs = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, numArgs);

    // TODO: Should we rename MApplyArgs?
    auto* apply = MApplyArgs::New(alloc(), ins->getSingleTarget(),
                                  ins->getFunction(), numArgs, ins->getThis());
    apply->setBailoutKind(ins->bailoutKind());
    if (!ins->maybeCrossRealm()) {
      apply->setNotCrossRealm();
    }
    if (ins->ignoresReturnValue()) {
      apply->setIgnoresReturnValue();
    }
    newIns = apply;
  }

  ins->block()->insertBefore(ins, newIns);
  ins->replaceAllUsesWith(newIns);

  // The replacement call is effectful: it takes over the original's resume
  // point before the original is discarded.
  newIns->stealResumePoint(ins);
  ins->block()->discard(ins);
}
1778
// Replace arguments.callee (the only fixed-slot load allowed by escapes())
// by the inlined callee operand, or by MCallee for the outermost frame.
void ArgumentsReplacer::visitLoadFixedSlot(MLoadFixedSlot* ins) {
  // Skip other arguments objects.
  if (ins->object() != args_) {
    return;
  }

  MOZ_ASSERT(ins->slot() == ArgumentsObject::CALLEE_SLOT);

  MDefinition* replacement;
  if (isInlinedArguments()) {
    replacement = args_->toCreateInlinedArgumentsObject()->getCallee();
  } else {
    auto* callee = MCallee::New(alloc());
    ins->block()->insertBefore(ins, callee);
    replacement = callee;
  }
  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}
1800
// Entry point of the Scalar Replacement pass: for every optimizable,
// non-escaping object/array/arguments allocation, replace its uses with
// emulated state so the allocation can be recovered on bailout.
// Returns false on cancellation or OOM.
bool ScalarReplacement(MIRGenerator* mir, MIRGraph& graph) {
  JitSpew(JitSpew_Escape, "Begin (ScalarReplacement)");

  EmulateStateOf<ObjectMemoryView> replaceObject(mir, graph);
  EmulateStateOf<ArrayMemoryView> replaceArray(mir, graph);
  bool addedPhi = false;

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    if (mir->shouldCancel("Scalar Replacement (main loop)")) {
      return false;
    }

    for (MInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      // Object allocations: emulate the object state through the graph.
      if (IsOptimizableObjectInstruction(*ins) && !IsObjectEscaped(*ins)) {
        ObjectMemoryView view(graph.alloc(), *ins);
        if (!replaceObject.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }

      // Array allocations: same, with element/length state.
      if (IsOptimizableArrayInstruction(*ins) && !IsArrayEscaped(*ins, *ins)) {
        ArrayMemoryView view(graph.alloc(), *ins);
        if (!replaceArray.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }

      // Arguments objects are stateless, so they use a simpler replacer that
      // does not introduce Phis.
      if (IsOptimizableArgumentsInstruction(*ins)) {
        ArgumentsReplacer replacer(mir, graph, *ins);
        if (replacer.escapes(*ins)) {
          continue;
        }
        if (!replacer.run()) {
          return false;
        }
        continue;
      }
    }
  }

  if (addedPhi) {
    // Phis added by Scalar Replacement are only redundant Phis which are
    // not directly captured by any resume point but only by the MDefinition
    // state. The conservative observability only focuses on Phis which are
    // not used as resume points operands.
    AssertExtendedGraphCoherency(graph);
    if (!EliminatePhis(mir, graph, ConservativeObservability)) {
      return false;
    }
  }

  return true;
}
1862
1863 } /* namespace jit */
1864 } /* namespace js */
1865