1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/MIRGraph.h"
8
9 #include "jit/CompileInfo.h"
10 #include "jit/InlineScriptTree.h"
11 #include "jit/Ion.h"
12 #include "jit/JitSpewer.h"
13 #include "jit/MIR.h"
14 #include "jit/MIRGenerator.h"
15 #include "wasm/WasmTypes.h"
16
17 using namespace js;
18 using namespace js::jit;
19
// Construct a MIRGenerator. |realm| may be null; every member derived from
// it below is guarded with a null check and defaults to a safe value.
MIRGenerator::MIRGenerator(CompileRealm* realm,
                           const JitCompileOptions& options,
                           TempAllocator* alloc, MIRGraph* graph,
                           const CompileInfo* info,
                           const OptimizationInfo* optimizationInfo)
    : realm(realm),
      runtime(realm ? realm->runtime() : nullptr),
      outerInfo_(info),
      optimizationInfo_(optimizationInfo),
      alloc_(alloc),
      graph_(graph),
      // Off-thread compilation status starts out as success.
      offThreadStatus_(Ok()),
      cancelBuild_(false),
      wasmMaxStackArgBytes_(0),
      needsOverrecursedCheck_(false),
      needsStaticStackAlignment_(false),
      instrumentedProfiling_(false),
      instrumentedProfilingIsCached_(false),
      // Nursery-allocation capability is a per-zone property; default to
      // false when there is no realm to query.
      stringsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateStrings()
                                   : false),
      bigIntsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateBigInts()
                                   : false),
      minWasmHeapLength_(0),
      options(options),
      gs_(alloc) {}
45
// Turn |r| into an error result for the caller to propagate, spewing the
// reason first when IonAbort spew is enabled.
mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(AbortReason r) {
  if (JitSpewEnabled(JitSpew_IonAbort)) {
    switch (r) {
      case AbortReason::Alloc:
        JitSpew(JitSpew_IonAbort, "AbortReason::Alloc");
        break;
      case AbortReason::Disable:
        JitSpew(JitSpew_IonAbort, "AbortReason::Disable");
        break;
      case AbortReason::Error:
        JitSpew(JitSpew_IonAbort, "AbortReason::Error");
        break;
      case AbortReason::NoAbort:
        // NoAbort means "success"; aborting with it is a logic error.
        MOZ_CRASH("Abort with AbortReason::NoAbort");
        break;
    }
  }
  return Err(std::move(r));
}
65
// va_list flavor of abort(): spew a printf-style |message| and return |r|
// as an error result. The caller owns the va_start/va_end bracketing.
mozilla::GenericErrorResult<AbortReason> MIRGenerator::abortFmt(
    AbortReason r, const char* message, va_list ap) {
  JitSpewVA(JitSpew_IonAbort, message, ap);
  return Err(std::move(r));
}
71
// Variadic flavor of abort(): forwards to abortFmt with a va_list. The
// result is captured before va_end so the argument list is always closed.
mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(
    AbortReason r, const char* message, ...) {
  va_list ap;
  va_start(ap, message);
  auto forward = abortFmt(r, message, ap);
  va_end(ap);
  return forward;
}
80
addBlock(MBasicBlock * block)81 void MIRGraph::addBlock(MBasicBlock* block) {
82 MOZ_ASSERT(block);
83 block->setId(blockIdGen_++);
84 blocks_.pushBack(block);
85 numBlocks_++;
86 }
87
insertBlockAfter(MBasicBlock * at,MBasicBlock * block)88 void MIRGraph::insertBlockAfter(MBasicBlock* at, MBasicBlock* block) {
89 block->setId(blockIdGen_++);
90 blocks_.insertAfter(at, block);
91 numBlocks_++;
92 }
93
insertBlockBefore(MBasicBlock * at,MBasicBlock * block)94 void MIRGraph::insertBlockBefore(MBasicBlock* at, MBasicBlock* block) {
95 block->setId(blockIdGen_++);
96 blocks_.insertBefore(at, block);
97 numBlocks_++;
98 }
99
// Remove |block| from the graph and clean it up: clear any graph-level
// references to it, discard its contents, and unlink it from the block list.
void MIRGraph::removeBlock(MBasicBlock* block) {
  // Remove a block from the graph. It will also cleanup the block.

  // Drop the graph's OSR-block reference if that is what we are removing.
  if (block == osrBlock_) {
    osrBlock_ = nullptr;
  }

  // Erase every occurrence of |block| from the return accumulator. The
  // index only advances when no erase happened, since erase() shifts the
  // following elements down into slot |i|.
  if (returnAccumulator_) {
    size_t i = 0;
    while (i < returnAccumulator_->length()) {
      if ((*returnAccumulator_)[i] == block) {
        returnAccumulator_->erase(returnAccumulator_->begin() + i);
      } else {
        i++;
      }
    }
  }

  // Discard the block's instructions, phis and resume points, then flag it
  // so later passes know it is gone.
  block->clear();
  block->markAsDead();

  // Only unlink from the graph if the block was actually linked in.
  if (block->isInList()) {
    blocks_.remove(block);
    numBlocks_--;
  }
}
126
unmarkBlocks()127 void MIRGraph::unmarkBlocks() {
128 for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++) {
129 i->unmark();
130 }
131 }
132
New(MIRGraph & graph,size_t stackDepth,const CompileInfo & info,MBasicBlock * maybePred,BytecodeSite * site,Kind kind)133 MBasicBlock* MBasicBlock::New(MIRGraph& graph, size_t stackDepth,
134 const CompileInfo& info, MBasicBlock* maybePred,
135 BytecodeSite* site, Kind kind) {
136 MOZ_ASSERT(site->pc() != nullptr);
137
138 MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
139 if (!block->init()) {
140 return nullptr;
141 }
142
143 if (!block->inherit(graph.alloc(), stackDepth, maybePred, 0)) {
144 return nullptr;
145 }
146
147 return block;
148 }
149
NewPopN(MIRGraph & graph,const CompileInfo & info,MBasicBlock * pred,BytecodeSite * site,Kind kind,uint32_t popped)150 MBasicBlock* MBasicBlock::NewPopN(MIRGraph& graph, const CompileInfo& info,
151 MBasicBlock* pred, BytecodeSite* site,
152 Kind kind, uint32_t popped) {
153 MOZ_ASSERT(site->pc() != nullptr);
154
155 MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
156 if (!block->init()) {
157 return nullptr;
158 }
159
160 if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, popped)) {
161 return nullptr;
162 }
163
164 return block;
165 }
166
NewPendingLoopHeader(MIRGraph & graph,const CompileInfo & info,MBasicBlock * pred,BytecodeSite * site)167 MBasicBlock* MBasicBlock::NewPendingLoopHeader(MIRGraph& graph,
168 const CompileInfo& info,
169 MBasicBlock* pred,
170 BytecodeSite* site) {
171 MOZ_ASSERT(site->pc() != nullptr);
172
173 MBasicBlock* block =
174 new (graph.alloc()) MBasicBlock(graph, info, site, PENDING_LOOP_HEADER);
175 if (!block->init()) {
176 return nullptr;
177 }
178
179 if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, 0)) {
180 return nullptr;
181 }
182
183 return block;
184 }
185
// Create a new block on the edge from |pred| (successor index |predEdgeIdx|)
// to |succ|, and rewire both endpoints through it. The new block ends in a
// MGoto to |succ|. Returns null on allocation failure.
MBasicBlock* MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred,
                                       size_t predEdgeIdx, MBasicBlock* succ) {
  MBasicBlock* split = nullptr;
  if (!succ->pc()) {
    // The successor does not have a PC, this is a Wasm compilation.
    // New() appends |pred| as a predecessor itself.
    split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
    if (!split) {
      return nullptr;
    }
  } else {
    // The successor has a PC, this is a Warp compilation.
    MResumePoint* succEntry = succ->entryResumePoint();

    // Give the split block a bytecode site at the successor's entry pc.
    BytecodeSite* site =
        new (graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
    split =
        new (graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);

    if (!split->init()) {
      return nullptr;
    }

    // A split edge is used to simplify the graph to avoid having a
    // predecessor with multiple successors as well as a successor with
    // multiple predecessors. As instructions can be moved in this
    // split-edge block, we need to give this block a resume point. To do
    // so, we copy the entry resume points of the successor and filter the
    // phis to keep inputs from the current edge.

    // Propagate the caller resume point from the inherited block.
    split->callerResumePoint_ = succ->callerResumePoint();

    // Split-edge are created after the interpreter stack emulation. Thus,
    // there is no need for creating slots.
    split->stackPosition_ = succEntry->stackDepth();

    // Create a resume point using our initial stack position.
    MResumePoint* splitEntry = new (graph.alloc())
        MResumePoint(split, succEntry->pc(), MResumePoint::ResumeAt);
    if (!splitEntry->init(graph.alloc())) {
      return nullptr;
    }
    split->entryResumePoint_ = splitEntry;

    // The target entry resume point might have phi operands, keep the
    // operands of the phi coming from our edge.
    size_t succEdgeIdx = succ->indexForPredecessor(pred);

    for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
      MDefinition* def = succEntry->getOperand(i);
      // This early in the pipeline, we have no recover instructions in
      // any entry resume point.
      MOZ_ASSERT_IF(def->block() == succ, def->isPhi());
      if (def->block() == succ) {
        def = def->toPhi()->getOperand(succEdgeIdx);
      }

      splitEntry->initOperand(i, def);
    }

    // This is done in the New variant for wasm, so we cannot keep this
    // line below, where the rest of the graph is modified.
    if (!split->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  // The split block sits at the same loop depth as the successor.
  split->setLoopDepth(succ->loopDepth());

  // Insert the split edge block in-between.
  split->end(MGoto::New(graph.alloc(), succ));

  graph.insertBlockAfter(pred, split);

  // Rewire both endpoints of the original edge through the new block.
  pred->replaceSuccessor(predEdgeIdx, split);
  succ->replacePredecessor(pred, split);
  return split;
}
264
// Create a new block with an empty BytecodeSite (no pc), optionally
// inheriting |pred|'s stack. For PENDING_LOOP_HEADER blocks, a typed phi is
// created per stack slot, recycling phis from the graph's free list where
// possible. Returns null on allocation failure.
MBasicBlock* MBasicBlock::New(MIRGraph& graph, const CompileInfo& info,
                              MBasicBlock* pred, Kind kind) {
  BytecodeSite* site = new (graph.alloc()) BytecodeSite();
  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (pred) {
    block->stackPosition_ = pred->stackPosition_;

    if (block->kind_ == PENDING_LOOP_HEADER) {
      // One phi per live stack slot.
      size_t nphis = block->stackPosition_;

      size_t nfree = graph.phiFreeListLength();

      // Allocate fresh storage only for the phis the free list cannot
      // supply.
      TempAllocator& alloc = graph.alloc();
      MPhi* phis = nullptr;
      if (nphis > nfree) {
        phis = alloc.allocateArray<MPhi>(nphis - nfree);
        if (!phis) {
          return nullptr;
        }
      }

      // Note: Phis are inserted in the same order as the slots.
      for (size_t i = 0; i < nphis; i++) {
        MDefinition* predSlot = pred->getSlot(i);

        // Slot types must already be known here; no MIRType::Value phis.
        MOZ_ASSERT(predSlot->type() != MIRType::Value);

        // Take a recycled phi first, then fall back to the fresh array.
        // Either way the object is (re)constructed in place below.
        MPhi* phi;
        if (i < nfree) {
          phi = graph.takePhiFromFreeList();
        } else {
          phi = phis + (i - nfree);
        }
        new (phi) MPhi(alloc, predSlot->type());

        phi->addInlineInput(predSlot);

        // Append the phi to the block and make it the definition of slot i.
        block->addPhi(phi);
        block->setSlot(i, phi);
      }
    } else {
      // Non-loop-header: just mirror the predecessor's slot definitions.
      if (!block->ensureHasSlots(0)) {
        return nullptr;
      }
      block->copySlots(pred);
    }

    if (!block->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  return block;
}
324
325 // Create an empty and unreachable block which jumps to |header|. Used
326 // when the normal entry into a loop is removed (but the loop is still
327 // reachable due to OSR) to preserve the invariant that every loop
328 // header has two predecessors, which is needed for building the
329 // dominator tree. The new block is inserted immediately before the
330 // header, which preserves the graph ordering (post-order/RPO). These
331 // blocks will all be removed before lowering.
MBasicBlock* MBasicBlock::NewFakeLoopPredecessor(MIRGraph& graph,
                                                 MBasicBlock* header) {
  // Only meaningful for OSR compilations (see the comment above).
  MOZ_ASSERT(graph.osrBlock());

  // Remember the current backedge; adding the fake predecessor below will
  // displace it from the last-predecessor position, which we restore at the
  // end.
  MBasicBlock* backedge = header->backedge();
  MBasicBlock* fake = MBasicBlock::New(graph, header->info(), nullptr,
                                       MBasicBlock::FAKE_LOOP_PRED);
  if (!fake) {
    return nullptr;
  }

  // Keep the RPO ordering by placing the fake block right before the header,
  // and mark it unreachable so later passes can recognize it.
  graph.insertBlockBefore(header, fake);
  fake->setUnreachable();

  // Create fake defs to use as inputs for any phis in |header|.
  for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd());
       iter != end; ++iter) {
    MPhi* phi = *iter;
    auto* fakeDef = MUnreachableResult::New(graph.alloc(), phi->type());
    fake->add(fakeDef);
    if (!phi->addInputSlow(fakeDef)) {
      return nullptr;
    }
  }

  fake->end(MGoto::New(graph.alloc(), header));

  // Phi inputs were already appended above, so link the edge without
  // touching phis again.
  if (!header->addPredecessorWithoutPhis(fake)) {
    return nullptr;
  }

  // The backedge is always the last predecessor, but we have added a
  // new pred. Restore |backedge| as |header|'s loop backedge.
  header->clearLoopHeader();
  header->setLoopHeader(backedge);

  return fake;
}
370
// Remove all FAKE_LOOP_PRED blocks created by NewFakeLoopPredecessor, and
// renumber the remaining blocks with consecutive ids in RPO.
void MIRGraph::removeFakeLoopPredecessors() {
  MOZ_ASSERT(osrBlock());
  size_t id = 0;
  // Advance the iterator before possibly removing |block| so iteration
  // stays valid.
  for (ReversePostorderIterator it = rpoBegin(); it != rpoEnd();) {
    MBasicBlock* block = *it++;
    if (block->isFakeLoopPred()) {
      MOZ_ASSERT(block->unreachable());
      // A fake predecessor ends in a single MGoto to the loop header;
      // unhook that edge before removing the block.
      MBasicBlock* succ = block->getSingleSuccessor();
      succ->removePredecessor(block);
      removeBlock(block);
    } else {
      block->setId(id++);
    }
  }
#ifdef DEBUG
  // Loop headers may now have a single predecessor again, so building the
  // dominator tree is no longer allowed (debug-only flag).
  canBuildDominators_ = false;
#endif
}
389
// Construct a basic block. Most state starts out empty/zero; predecessors,
// slots and the entry resume point are filled in later (init()/inherit()).
MBasicBlock::MBasicBlock(MIRGraph& graph, const CompileInfo& info,
                         BytecodeSite* site, Kind kind)
    : graph_(graph),
      info_(info),
      predecessors_(graph.alloc()),
      // The stack position starts at the first stack slot: only the fixed
      // slots are considered live initially.
      stackPosition_(info_.firstStackSlot()),
      id_(0),
      domIndex_(0),
      numDominated_(0),
      lir_(nullptr),
      callerResumePoint_(nullptr),
      entryResumePoint_(nullptr),
      outerResumePoint_(nullptr),
      successorWithPhis_(nullptr),
      positionInPhiSuccessor_(0),
      loopDepth_(0),
      kind_(kind),
      mark_(false),
      immediatelyDominated_(graph.alloc()),
      immediateDominator_(nullptr),
      trackedSite_(site)
#if defined(JS_ION_PERF) || defined(DEBUG)
      ,
      lineno_(0u),
      columnIndex_(0u)
#endif
{
  MOZ_ASSERT(trackedSite_, "trackedSite_ is non-nullptr");
}
419
// Allocate this block's slot array to the script's full frame size.
bool MBasicBlock::init() { return slots_.init(graph_.alloc(), info_.nslots()); }
421
// Grow the slot array by |num| entries. Returns false on allocation failure.
bool MBasicBlock::increaseSlots(size_t num) {
  return slots_.growBy(graph_.alloc(), num);
}
425
ensureHasSlots(size_t num)426 bool MBasicBlock::ensureHasSlots(size_t num) {
427 size_t depth = stackDepth() + num;
428 if (depth > nslots()) {
429 if (!increaseSlots(depth - nslots())) {
430 return false;
431 }
432 }
433 return true;
434 }
435
copySlots(MBasicBlock * from)436 void MBasicBlock::copySlots(MBasicBlock* from) {
437 MOZ_ASSERT(stackPosition_ <= from->stackPosition_);
438 MOZ_ASSERT(stackPosition_ <= nslots());
439
440 MDefinition** thisSlots = slots_.begin();
441 MDefinition** fromSlots = from->slots_.begin();
442 for (size_t i = 0, e = stackPosition_; i < e; ++i) {
443 thisSlots[i] = fromSlots[i];
444 }
445 }
446
// Set up this block's stack state and entry resume point, inheriting slot
// definitions from |maybePred| (if non-null) after popping |popped| values.
// PENDING_LOOP_HEADER blocks get a fresh phi per slot instead of copied
// definitions. Returns false on allocation failure.
bool MBasicBlock::inherit(TempAllocator& alloc, size_t stackDepth,
                          MBasicBlock* maybePred, uint32_t popped) {
  MOZ_ASSERT_IF(maybePred, maybePred->stackDepth() == stackDepth);

  MOZ_ASSERT(stackDepth >= popped);
  stackDepth -= popped;
  stackPosition_ = stackDepth;

  // Loop headers get phis below rather than copied slot definitions.
  if (maybePred && kind_ != PENDING_LOOP_HEADER) {
    copySlots(maybePred);
  }

  MOZ_ASSERT(info_.nslots() >= stackPosition_);
  MOZ_ASSERT(!entryResumePoint_);

  // Propagate the caller resume point from the inherited block.
  callerResumePoint_ = maybePred ? maybePred->callerResumePoint() : nullptr;

  // Create a resume point using our initial stack state.
  entryResumePoint_ =
      new (alloc) MResumePoint(this, pc(), MResumePoint::ResumeAt);
  if (!entryResumePoint_->init(alloc)) {
    return false;
  }

  if (maybePred) {
    if (!predecessors_.append(maybePred)) {
      return false;
    }

    if (kind_ == PENDING_LOOP_HEADER) {
      // One phi per slot, seeded with the predecessor's definition; the
      // backedge input is added later (see setBackedge).
      for (size_t i = 0; i < stackDepth; i++) {
        MPhi* phi = MPhi::New(alloc.fallible());
        if (!phi) {
          return false;
        }
        phi->addInlineInput(maybePred->getSlot(i));
        addPhi(phi);
        setSlot(i, phi);
        entryResumePoint()->initOperand(i, phi);
      }
    } else {
      // The resume point captures the inherited slot definitions directly.
      for (size_t i = 0; i < stackDepth; i++) {
        entryResumePoint()->initOperand(i, getSlot(i));
      }
    }
  } else {
    /*
     * Don't leave the operands uninitialized for the caller, as it may not
     * initialize them later on.
     */
    for (size_t i = 0; i < stackDepth; i++) {
      entryResumePoint()->clearOperand(i);
    }
  }

  return true;
}
505
// Adopt |parent|'s stack position and copy its slot definitions.
void MBasicBlock::inheritSlots(MBasicBlock* parent) {
  stackPosition_ = parent->stackPosition_;
  copySlots(parent);
}
510
initEntrySlots(TempAllocator & alloc)511 bool MBasicBlock::initEntrySlots(TempAllocator& alloc) {
512 // Remove the previous resume point.
513 discardResumePoint(entryResumePoint_);
514
515 // Create a resume point using our initial stack state.
516 entryResumePoint_ =
517 MResumePoint::New(alloc, this, pc(), MResumePoint::ResumeAt);
518 if (!entryResumePoint_) {
519 return false;
520 }
521 return true;
522 }
523
// Current definition of the environment-chain slot.
MDefinition* MBasicBlock::environmentChain() {
  return getSlot(info().environmentChainSlot());
}
527
// Current definition of the arguments-object slot.
MDefinition* MBasicBlock::argumentsObject() {
  return getSlot(info().argsObjSlot());
}
531
// Redefine the environment-chain slot to |scopeObj|.
void MBasicBlock::setEnvironmentChain(MDefinition* scopeObj) {
  setSlot(info().environmentChainSlot(), scopeObj);
}
535
// Redefine the arguments-object slot to |argsObj|.
void MBasicBlock::setArgumentsObject(MDefinition* argsObj) {
  setSlot(info().argsObjSlot(), argsObj);
}
539
pick(int32_t depth)540 void MBasicBlock::pick(int32_t depth) {
541 // pick takes a value and moves it to the top.
542 // pick(-2):
543 // A B C D E
544 // A B D C E [ swapAt(-2) ]
545 // A B D E C [ swapAt(-1) ]
546 for (; depth < 0; depth++) {
547 swapAt(depth);
548 }
549 }
550
unpick(int32_t depth)551 void MBasicBlock::unpick(int32_t depth) {
552 // unpick takes the value on top of the stack and moves it under the depth-th
553 // element;
554 // unpick(-2):
555 // A B C D E
556 // A B C E D [ swapAt(-1) ]
557 // A B E C D [ swapAt(-2) ]
558 for (int32_t n = -1; n >= depth; n--) {
559 swapAt(n);
560 }
561 }
562
swapAt(int32_t depth)563 void MBasicBlock::swapAt(int32_t depth) {
564 uint32_t lhsDepth = stackPosition_ + depth - 1;
565 uint32_t rhsDepth = stackPosition_ + depth;
566
567 MDefinition* temp = slots_[lhsDepth];
568 slots_[lhsDepth] = slots_[rhsDepth];
569 slots_[rhsDepth] = temp;
570 }
571
discardLastIns()572 void MBasicBlock::discardLastIns() { discard(lastIns()); }
573
optimizedOutConstant(TempAllocator & alloc)574 MConstant* MBasicBlock::optimizedOutConstant(TempAllocator& alloc) {
575 // If the first instruction is a MConstant(MagicValue(JS_OPTIMIZED_OUT))
576 // then reuse it.
577 MInstruction* ins = *begin();
578 if (ins->type() == MIRType::MagicOptimizedOut) {
579 return ins->toConstant();
580 }
581
582 MConstant* constant = MConstant::New(alloc, MagicValue(JS_OPTIMIZED_OUT));
583 insertBefore(ins, constant);
584 return constant;
585 }
586
// Move |ins| (currently in this block) so it executes immediately before
// |at|, which may live in a different block.
void MBasicBlock::moveBefore(MInstruction* at, MInstruction* ins) {
  // Remove |ins| from the current block.
  MOZ_ASSERT(ins->block() == this);
  instructions_.remove(ins);

  // Insert into new block, which may be distinct.
  // Uses and operands are untouched.
  ins->setInstructionBlock(at->block(), at->trackedSite());
  at->block()->instructions_.insertBefore(at, ins);
}
597
// Find the earliest instruction in this block before which a new
// instruction may safely be inserted: after |ins| (if given and not a phi),
// and after any leading beta/interrupt-check/constant/parameter (and,
// unless IgnoreRecover is set, recovered-on-bailout) instructions.
MInstruction* MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore) {
  MOZ_ASSERT(graph().osrBlock() != this,
             "We are not supposed to add any instruction in OSR blocks.");

  // Beta nodes and interrupt checks are required to be located at the
  // beginnings of basic blocks, so we must insert new instructions after any
  // such instructions.
  MInstructionIterator insertIter =
      !ins || ins->isPhi() ? begin() : begin(ins->toInstruction());
  while (insertIter->isBeta() || insertIter->isInterruptCheck() ||
         insertIter->isConstant() || insertIter->isParameter() ||
         (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout())) {
    insertIter++;
  }

  return *insertIter;
}
615
// Discard resume point |rp|: optionally release its operand uses, and (in
// debug builds only, where the resume-point list is maintained) unlink it
// from resumePoints_.
void MBasicBlock::discardResumePoint(
    MResumePoint* rp, ReferencesType refType /* = RefType_Default */) {
  if (refType & RefType_DiscardOperands) {
    rp->releaseUses();
  }
#ifdef DEBUG
  // Walk to |rp|; it must be present in the list.
  MResumePointIterator iter = resumePointsBegin();
  while (*iter != rp) {
    // We should reach it before reaching the end.
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
  }
  resumePoints_.removeAt(iter);
#endif
}
631
// Shared tail for the discard* methods: drop |ins|'s resume point and
// operand references as requested by |refType| and flag the instruction as
// discarded. The caller is responsible for unlinking |ins| from the
// instruction list.
void MBasicBlock::prepareForDiscard(
    MInstruction* ins, ReferencesType refType /* = RefType_Default */) {
  // Only remove instructions from the same basic block. This is needed for
  // correctly removing the resume point if any.
  MOZ_ASSERT(ins->block() == this);

  MResumePoint* rp = ins->resumePoint();
  if ((refType & RefType_DiscardResumePoint) && rp) {
    discardResumePoint(rp, refType);
  }

  // We need to assert that instructions have no uses after removing the their
  // resume points operands as they could be captured by their own resume
  // point.
  MOZ_ASSERT_IF(refType & RefType_AssertNoUses, !ins->hasUses());

  // Operands are only released when both discard flags are requested.
  const uint32_t InstructionOperands =
      RefType_DiscardOperands | RefType_DiscardInstruction;
  if ((refType & InstructionOperands) == InstructionOperands) {
    for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
      ins->releaseOperand(i);
    }
  }

  ins->setDiscarded();
}
658
// Discard |ins|: release its references (default policy) and unlink it from
// this block's instruction list.
void MBasicBlock::discard(MInstruction* ins) {
  prepareForDiscard(ins);
  instructions_.remove(ins);
}
663
// Discard |ins| without touching its operands; the caller must already have
// released them (checked in debug builds).
void MBasicBlock::discardIgnoreOperands(MInstruction* ins) {
#ifdef DEBUG
  for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
    MOZ_ASSERT(!ins->hasOperand(i));
  }
#endif

  prepareForDiscard(ins, RefType_IgnoreOperands);
  instructions_.remove(ins);
}
674
discardDef(MDefinition * at)675 void MBasicBlock::discardDef(MDefinition* at) {
676 if (at->isPhi()) {
677 at->block()->discardPhi(at->toPhi());
678 } else {
679 at->block()->discard(at->toInstruction());
680 }
681 }
682
discardAllInstructions()683 void MBasicBlock::discardAllInstructions() {
684 MInstructionIterator iter = begin();
685 discardAllInstructionsStartingAt(iter);
686 }
687
// Discard every instruction from |iter| to the end of the block, without
// asserting use-freedom (blocks may be removed in reverse post order).
void MBasicBlock::discardAllInstructionsStartingAt(MInstructionIterator iter) {
  while (iter != end()) {
    // Discard operands and resume point operands and flag the instruction
    // as discarded. Also we do not assert that we have no uses as blocks
    // might be removed in reverse post order.
    MInstruction* ins = *iter++;
    prepareForDiscard(ins, RefType_DefaultNoAssert);
    instructions_.remove(ins);
  }
}
698
// Discard every phi in this block: release their operands, clear the
// successor-with-phis link on all predecessors, and empty the phi list.
void MBasicBlock::discardAllPhis() {
  for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
    iter->removeAllOperands();
  }

  // No phis remain, so predecessors no longer have a phi successor here.
  for (MBasicBlock** pred = predecessors_.begin(); pred != predecessors_.end();
       pred++) {
    (*pred)->clearSuccessorWithPhis();
  }

  phis_.clear();
}
711
// Discard the outer resume point and, when |discardEntry| is set, the entry
// resume point as well. In debug builds, verify that afterwards the
// resume-point list contains exactly the entry point (or nothing).
void MBasicBlock::discardAllResumePoints(bool discardEntry) {
  if (outerResumePoint_) {
    clearOuterResumePoint();
  }

  if (discardEntry && entryResumePoint_) {
    clearEntryResumePoint();
  }

#ifdef DEBUG
  if (!entryResumePoint()) {
    MOZ_ASSERT(resumePointsEmpty());
  } else {
    // The entry resume point must be the only remaining one.
    MResumePointIterator iter(resumePointsBegin());
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
    MOZ_ASSERT(iter == resumePointsEnd());
  }
#endif
}
732
// Discard everything in this block: instructions, resume points, then phis
// (in that order; see the individual discardAll* methods).
void MBasicBlock::clear() {
  discardAllInstructions();
  discardAllResumePoints();
  discardAllPhis();
}
738
// Insert |ins| immediately before |at| in this block, giving it |at|'s
// tracked site and a fresh definition id.
void MBasicBlock::insertBefore(MInstruction* at, MInstruction* ins) {
  MOZ_ASSERT(at->block() == this);
  ins->setInstructionBlock(this, at->trackedSite());
  graph().allocDefinitionId(ins);
  instructions_.insertBefore(at, ins);
}
745
// Insert |ins| immediately after |at| in this block, giving it |at|'s
// tracked site and a fresh definition id.
void MBasicBlock::insertAfter(MInstruction* at, MInstruction* ins) {
  MOZ_ASSERT(at->block() == this);
  ins->setInstructionBlock(this, at->trackedSite());
  graph().allocDefinitionId(ins);
  instructions_.insertAfter(at, ins);
}
752
insertAtEnd(MInstruction * ins)753 void MBasicBlock::insertAtEnd(MInstruction* ins) {
754 if (hasLastIns()) {
755 insertBefore(lastIns(), ins);
756 } else {
757 add(ins);
758 }
759 }
760
// Append |phi| to this block's phi list, linking it to the block and
// assigning it a fresh definition id.
void MBasicBlock::addPhi(MPhi* phi) {
  phis_.pushBack(phi);
  phi->setPhiBlock(this);
  graph().allocDefinitionId(phi);
}
766
// Discard a single phi: release its operands, flag it, and unlink it. If it
// was the last phi, predecessors no longer have a phi successor here.
void MBasicBlock::discardPhi(MPhi* phi) {
  MOZ_ASSERT(!phis_.empty());

  phi->removeAllOperands();
  phi->setDiscarded();

  phis_.remove(phi);

  if (phis_.empty()) {
    for (MBasicBlock* pred : predecessors_) {
      pred->clearSuccessorWithPhis();
    }
  }
}
781
// Mark every operand of the resume point covering |ins| (and of all caller
// resume points up the inlining chain) as implicitly used, so they survive
// when the branch containing |ins| is pruned.
void MBasicBlock::flagOperandsOfPrunedBranches(MInstruction* ins) {
  // Find the previous resume point which would be used for bailing out.
  MResumePoint* rp = nullptr;
  for (MInstructionReverseIterator iter = rbegin(ins); iter != rend(); iter++) {
    rp = iter->resumePoint();
    if (rp) {
      break;
    }
  }

  // If none, take the entry resume point.
  if (!rp) {
    rp = entryResumePoint();
  }

  // The only blocks which do not have any entryResumePoint in Ion, are the
  // SplitEdge blocks. SplitEdge blocks only have a Goto instruction before
  // Range Analysis phase. In adjustInputs, we are manipulating instructions
  // which have a TypePolicy. So, as a Goto has no operand and no type
  // policy, the entry resume point should exist.
  MOZ_ASSERT(rp);

  // Flag all operands as being potentially used.
  while (rp) {
    for (size_t i = 0, end = rp->numOperands(); i < end; i++) {
      rp->getOperand(i)->setImplicitlyUsedUnchecked();
    }
    // Walk outward through the inlined-caller chain.
    rp = rp->caller();
  }
}
812
// Add |pred| as a predecessor without popping any values; see
// addPredecessorPopN for the general case.
bool MBasicBlock::addPredecessor(TempAllocator& alloc, MBasicBlock* pred) {
  return addPredecessorPopN(alloc, pred, 0);
}
816
// Add |pred| as a predecessor whose stack agrees with ours after popping
// |popped| values. For every slot where the two blocks disagree, merge the
// definitions through a phi (reusing a phi already created here, or making
// a new one primed with our definition for each existing predecessor).
// Returns false on allocation failure.
bool MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred,
                                     uint32_t popped) {
  MOZ_ASSERT(pred);
  MOZ_ASSERT(predecessors_.length() > 0);

  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

  for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
    MDefinition* mine = getSlot(i);
    MDefinition* other = pred->getSlot(i);

    if (mine != other) {
      // Widen to MIRType::Value when the incoming types disagree.
      MIRType phiType = mine->type();
      if (phiType != other->type()) {
        phiType = MIRType::Value;
      }

      // If the current instruction is a phi, and it was created in this
      // basic block, then we have already placed this phi and should
      // instead append to its operands.
      if (mine->isPhi() && mine->block() == this) {
        MOZ_ASSERT(predecessors_.length());
        MOZ_ASSERT(!mine->hasDefUses(),
                   "should only change type of newly created phis");
        mine->setResultType(phiType);
        if (!mine->toPhi()->addInputSlow(other)) {
          return false;
        }
      } else {
        // Otherwise, create a new phi node.
        MPhi* phi = MPhi::New(alloc.fallible(), phiType);
        if (!phi) {
          return false;
        }
        addPhi(phi);

        // Prime the phi for each predecessor, so input(x) comes from
        // predecessor(x).
        if (!phi->reserveLength(predecessors_.length() + 1)) {
          return false;
        }

        for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds;
             ++j) {
          MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
          phi->addInput(mine);
        }
        phi->addInput(other);

        // The phi becomes this slot's definition, and the entry resume
        // point (if any) must observe it too.
        setSlot(i, phi);
        if (entryResumePoint()) {
          entryResumePoint()->replaceOperand(i, phi);
        }
      }
    }
  }

  return predecessors_.append(pred);
}
878
addPredecessorSameInputsAs(MBasicBlock * pred,MBasicBlock * existingPred)879 bool MBasicBlock::addPredecessorSameInputsAs(MBasicBlock* pred,
880 MBasicBlock* existingPred) {
881 MOZ_ASSERT(pred);
882 MOZ_ASSERT(predecessors_.length() > 0);
883
884 // Predecessors must be finished, and at the correct stack depth.
885 MOZ_ASSERT(pred->hasLastIns());
886 MOZ_ASSERT(!pred->successorWithPhis());
887
888 if (!phisEmpty()) {
889 size_t existingPosition = indexForPredecessor(existingPred);
890 for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
891 if (!iter->addInputSlow(iter->getOperand(existingPosition))) {
892 return false;
893 }
894 }
895 }
896
897 if (!predecessors_.append(pred)) {
898 return false;
899 }
900 return true;
901 }
902
// Add |pred| as a predecessor without creating or extending any phis; the
// caller must have handled phi inputs already (or know there are none).
bool MBasicBlock::addPredecessorWithoutPhis(MBasicBlock* pred) {
  // Predecessors must be finished.
  MOZ_ASSERT(pred && pred->hasLastIns());
  return predecessors_.append(pred);
}
908
// Record |child| as immediately dominated by this block. Returns false on
// allocation failure.
bool MBasicBlock::addImmediatelyDominatedBlock(MBasicBlock* child) {
  return immediatelyDominated_.append(child);
}
912
// Remove |child| from the immediately-dominated list using swap-with-last
// removal (order is not preserved). |child| must be present: the loop has
// no other exit, and the debug assert fires before any out-of-bounds read.
void MBasicBlock::removeImmediatelyDominatedBlock(MBasicBlock* child) {
  for (size_t i = 0;; ++i) {
    MOZ_ASSERT(i < immediatelyDominated_.length(),
               "Dominated block to remove not present");
    if (immediatelyDominated_[i] == child) {
      immediatelyDominated_[i] = immediatelyDominated_.back();
      immediatelyDominated_.popBack();
      return;
    }
  }
}
924
// Attach the loop backedge |pred| to this PENDING_LOOP_HEADER, completing
// each entry phi with its exit definition and promoting the block to a
// proper LOOP_HEADER. Returns false on allocation failure.
bool MBasicBlock::setBackedge(MBasicBlock* pred) {
  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(hasLastIns());
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  // Add exit definitions to each corresponding phi at the entry.
  if (!inheritPhisFromBackedge(pred)) {
    return false;
  }

  // We are now a loop header proper
  kind_ = LOOP_HEADER;

  // The backedge becomes the last predecessor.
  return predecessors_.append(pred);
}
944
// Wasm variant of setBackedge: the phis (one per slot, plus |paramCount|
// loop parameters) were already created with known types, so each just gets
// its second (backedge) input here. Returns false on allocation failure.
bool MBasicBlock::setBackedgeWasm(MBasicBlock* pred, size_t paramCount) {
  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(hasLastIns());
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(stackDepth() + paramCount == pred->stackDepth());

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  // Add exit definitions to each corresponding phi at the entry.
  // Note: Phis are inserted in the same order as the slots. (see
  // MBasicBlock::New)
  size_t slot = 0;
  for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
    MPhi* entryDef = *phi;
    MDefinition* exitDef = pred->getSlot(slot);

    // Assert that we already placed phis for each slot.
    MOZ_ASSERT(entryDef->block() == this);

    // Assert that the phi already has the correct type.
    MOZ_ASSERT(entryDef->type() == exitDef->type());
    MOZ_ASSERT(entryDef->type() != MIRType::Value);

    if (entryDef == exitDef) {
      // If the exit def is the same as the entry def, make a redundant
      // phi. Since loop headers have exactly two incoming edges, we
      // know that that's just the first input.
      //
      // Note that we eliminate later rather than now, to avoid any
      // weirdness around pending continue edges which might still hold
      // onto phis.
      exitDef = entryDef->getOperand(0);
    }

    // Phis always have room for 2 operands, so this can't fail.
    MOZ_ASSERT(phi->numOperands() == 1);
    entryDef->addInlineInput(exitDef);

    // Two cases here: phis that correspond to locals, and phis that correspond
    // to loop parameters. Only phis for locals go in slots.
    if (slot < stackDepth()) {
      setSlot(slot, entryDef);
    }
  }

  // We are now a loop header proper
  kind_ = LOOP_HEADER;

  // The backedge becomes the last predecessor.
  return predecessors_.append(pred);
}
996
void MBasicBlock::clearLoopHeader() {
  // Demote this loop header back to a normal block (e.g. when its last
  // backedge is removed; see removePredecessorWithoutPhiOperands()).
  MOZ_ASSERT(isLoopHeader());
  kind_ = NORMAL;
}
1001
void MBasicBlock::setLoopHeader(MBasicBlock* newBackedge) {
  // Convert this normal block into a loop header whose backedge is
  // `newBackedge`. The backedge must already be a predecessor; it is moved
  // to the last predecessor position (where backedge() expects it), and phi
  // operands are permuted to match the new predecessor ordering.
  MOZ_ASSERT(!isLoopHeader());
  kind_ = LOOP_HEADER;

  size_t numPreds = numPredecessors();
  MOZ_ASSERT(numPreds != 0);

  size_t lastIndex = numPreds - 1;
  size_t oldIndex = 0;
  // Find the backedge's current index; the assert fires if the caller passed
  // a block that is not a predecessor.
  for (;; ++oldIndex) {
    MOZ_ASSERT(oldIndex < numPreds);
    MBasicBlock* pred = getPredecessor(oldIndex);
    if (pred == newBackedge) {
      break;
    }
  }

  // Set the loop backedge to be the last element in predecessors_.
  std::swap(predecessors_[oldIndex], predecessors_[lastIndex]);

  // If we have phis, reorder their operands accordingly.
  if (!phisEmpty()) {
    // Keep each predecessor's cached phi-successor position in sync with
    // the swap above.
    getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
    getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
    for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
      MPhi* phi = *iter;
      MDefinition* last = phi->getOperand(oldIndex);
      MDefinition* old = phi->getOperand(lastIndex);
      phi->replaceOperand(oldIndex, old);
      phi->replaceOperand(lastIndex, last);
    }
  }

  MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
  MOZ_ASSERT(backedge() == newBackedge);
}
1038
getSuccessorIndex(MBasicBlock * block) const1039 size_t MBasicBlock::getSuccessorIndex(MBasicBlock* block) const {
1040 MOZ_ASSERT(lastIns());
1041 for (size_t i = 0; i < numSuccessors(); i++) {
1042 if (getSuccessor(i) == block) {
1043 return i;
1044 }
1045 }
1046 MOZ_CRASH("Invalid successor");
1047 }
1048
getPredecessorIndex(MBasicBlock * block) const1049 size_t MBasicBlock::getPredecessorIndex(MBasicBlock* block) const {
1050 for (size_t i = 0, e = numPredecessors(); i < e; ++i) {
1051 if (getPredecessor(i) == block) {
1052 return i;
1053 }
1054 }
1055 MOZ_CRASH("Invalid predecessor");
1056 }
1057
void MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock* split) {
  // Redirect successor edge `pos` of this block's terminator to `split`.
  MOZ_ASSERT(lastIns());

  // Note, during split-critical-edges, successors-with-phis is not yet set.
  // During PAA, this case is handled before we enter.
  MOZ_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));

  lastIns()->replaceSuccessor(pos, split);
}
1067
replacePredecessor(MBasicBlock * old,MBasicBlock * split)1068 void MBasicBlock::replacePredecessor(MBasicBlock* old, MBasicBlock* split) {
1069 for (size_t i = 0; i < numPredecessors(); i++) {
1070 if (getPredecessor(i) == old) {
1071 predecessors_[i] = split;
1072
1073 #ifdef DEBUG
1074 // The same block should not appear twice in the predecessor list.
1075 for (size_t j = i; j < numPredecessors(); j++) {
1076 MOZ_ASSERT(predecessors_[j] != old);
1077 }
1078 #endif
1079
1080 return;
1081 }
1082 }
1083
1084 MOZ_CRASH("predecessor was not found");
1085 }
1086
void MBasicBlock::clearDominatorInfo() {
  // Reset all dominator-tree state for this block so that dominator
  // information can be recomputed from scratch.
  setImmediateDominator(nullptr);
  immediatelyDominated_.clear();
  numDominated_ = 0;
}
1092
void MBasicBlock::removePredecessorWithoutPhiOperands(MBasicBlock* pred,
                                                      size_t predIndex) {
  // Remove the edge from `pred` (at position `predIndex`) without touching
  // phi operands; the caller must already have removed the corresponding
  // operand from every phi (see removePredecessor()).

  // If we're removing the last backedge, this is no longer a loop.
  if (isLoopHeader() && hasUniqueBackedge() && backedge() == pred) {
    clearLoopHeader();
  }

  // Adjust phis. Note that this can leave redundant phis behind.
  // Don't adjust successorWithPhis() if we haven't constructed this
  // information yet.
  if (pred->successorWithPhis()) {
    MOZ_ASSERT(pred->positionInPhiSuccessor() == predIndex);
    pred->clearSuccessorWithPhis();
    // Every later predecessor shifts down by one slot; update their cached
    // phi positions to match.
    for (size_t j = predIndex + 1; j < numPredecessors(); j++) {
      getPredecessor(j)->setSuccessorWithPhis(this, j - 1);
    }
  }

  // Remove from pred list.
  predecessors_.erase(predecessors_.begin() + predIndex);
}
1114
removePredecessor(MBasicBlock * pred)1115 void MBasicBlock::removePredecessor(MBasicBlock* pred) {
1116 size_t predIndex = getPredecessorIndex(pred);
1117
1118 // Remove the phi operands.
1119 for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
1120 iter->removeOperand(predIndex);
1121 }
1122
1123 // Now we can call the underlying function, which expects that phi
1124 // operands have been removed.
1125 removePredecessorWithoutPhiOperands(pred, predIndex);
1126 }
1127
bool MBasicBlock::inheritPhisFromBackedge(MBasicBlock* backedge) {
  // For every stack slot of this pending loop header, append the value that
  // flows in along `backedge` as the second operand of the header's phi.
  // Returns false on OOM (addInputSlow failure).

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  size_t stackDepth = entryResumePoint()->stackDepth();
  for (size_t slot = 0; slot < stackDepth; slot++) {
    // Get the value stack-slot of the back edge.
    MDefinition* exitDef = backedge->getSlot(slot);

    // Get the value of the loop header.
    MDefinition* loopDef = entryResumePoint()->getOperand(slot);
    if (loopDef->block() != this) {
      // If we are finishing a pending loop header, then we need to ensure
      // that all operands are phis. This is usually the case, except for
      // object/arrays build with generators, in which case we share the
      // same allocations across all blocks.
      MOZ_ASSERT(loopDef->block()->id() < id());
      MOZ_ASSERT(loopDef == exitDef);
      continue;
    }

    // Phis are allocated by NewPendingLoopHeader.
    MPhi* entryDef = loopDef->toPhi();
    MOZ_ASSERT(entryDef->block() == this);

    if (entryDef == exitDef) {
      // If the exit def is the same as the entry def, make a redundant
      // phi. Since loop headers have exactly two incoming edges, we
      // know that that's just the first input.
      //
      // Note that we eliminate later rather than now, to avoid any
      // weirdness around pending continue edges which might still hold
      // onto phis.
      exitDef = entryDef->getOperand(0);
    }

    if (!entryDef->addInputSlow(exitDef)) {
      return false;
    }
  }

  return true;
}
1171
immediateDominatorBranch(BranchDirection * pdirection)1172 MTest* MBasicBlock::immediateDominatorBranch(BranchDirection* pdirection) {
1173 *pdirection = FALSE_BRANCH;
1174
1175 if (numPredecessors() != 1) {
1176 return nullptr;
1177 }
1178
1179 MBasicBlock* dom = immediateDominator();
1180 if (dom != getPredecessor(0)) {
1181 return nullptr;
1182 }
1183
1184 // Look for a trailing MTest branching to this block.
1185 MInstruction* ins = dom->lastIns();
1186 if (ins->isTest()) {
1187 MTest* test = ins->toTest();
1188
1189 MOZ_ASSERT(test->ifTrue() == this || test->ifFalse() == this);
1190 if (test->ifTrue() == this && test->ifFalse() == this) {
1191 return nullptr;
1192 }
1193
1194 *pdirection = (test->ifTrue() == this) ? TRUE_BRANCH : FALSE_BRANCH;
1195 return test;
1196 }
1197
1198 return nullptr;
1199 }
1200
dumpStack(GenericPrinter & out)1201 void MBasicBlock::dumpStack(GenericPrinter& out) {
1202 #ifdef DEBUG
1203 out.printf(" %-3s %-16s %-6s %-10s\n", "#", "name", "copyOf", "first/next");
1204 out.printf("-------------------------------------------\n");
1205 for (uint32_t i = 0; i < stackPosition_; i++) {
1206 out.printf(" %-3u", i);
1207 out.printf(" %-16p\n", (void*)slots_[i]);
1208 }
1209 #endif
1210 }
1211
void MBasicBlock::dumpStack() {
  // Convenience wrapper: dump the virtual stack to stderr.
  Fprinter out(stderr);
  dumpStack(out);
  out.finish();
}
1217
dump(GenericPrinter & out)1218 void MIRGraph::dump(GenericPrinter& out) {
1219 #ifdef JS_JITSPEW
1220 for (MBasicBlockIterator iter(begin()); iter != end(); iter++) {
1221 iter->dump(out);
1222 out.printf("\n");
1223 }
1224 #endif
1225 }
1226
void MIRGraph::dump() {
  // Convenience wrapper: dump the whole MIR graph to stderr.
  Fprinter out(stderr);
  dump(out);
  out.finish();
}
1232
dump(GenericPrinter & out)1233 void MBasicBlock::dump(GenericPrinter& out) {
1234 #ifdef JS_JITSPEW
1235 out.printf("block%u:%s%s%s\n", id(), isLoopHeader() ? " (loop header)" : "",
1236 unreachable() ? " (unreachable)" : "",
1237 isMarked() ? " (marked)" : "");
1238 if (MResumePoint* resume = entryResumePoint()) {
1239 resume->dump(out);
1240 }
1241 for (MPhiIterator iter(phisBegin()); iter != phisEnd(); iter++) {
1242 iter->dump(out);
1243 }
1244 for (MInstructionIterator iter(begin()); iter != end(); iter++) {
1245 iter->dump(out);
1246 }
1247 #endif
1248 }
1249
void MBasicBlock::dump() {
  // Convenience wrapper: dump this block to stderr.
  Fprinter out(stderr);
  dump(out);
  out.finish();
}
1255