1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/ValueNumbering.h"
8
9 #include "jit/AliasAnalysis.h"
10 #include "jit/IonAnalysis.h"
11 #include "jit/JitSpewer.h"
12 #include "jit/MIRGenerator.h"
13
14 using namespace js;
15 using namespace js::jit;
16
17 /*
18 * Some notes on the main algorithm here:
19 * - The SSA identifier id() is the value number. We do replaceAllUsesWith as
20 * we go, so there's always at most one visible value with a given number.
21 *
22 * - Consequently, the GVN algorithm is effectively pessimistic. This means it
23 * is not as powerful as an optimistic GVN would be, but it is simpler and
24 * faster.
25 *
26 * - We iterate in RPO, so that when visiting a block, we've already optimized
27 * and hashed all values in dominating blocks. With occasional exceptions,
28 * this allows us to do everything in a single pass.
29 *
30 * - When we do use multiple passes, we just re-run the algorithm on the whole
31 * graph instead of doing sparse propagation. This is a tradeoff to keep the
32 * algorithm simpler and lighter on inputs that don't have a lot of
33 * interesting unreachable blocks or degenerate loop induction variables, at
34 * the expense of being slower on inputs that do. The loop for this always
35 * terminates, because it only iterates when code is or will be removed, so
36 * eventually it must stop iterating.
37 *
38 * - Values are not immediately removed from the hash set when they go out of
39 * scope. Instead, we check for dominance after a lookup. If the dominance
40 * check fails, the value is removed.
41 */
42
// Hash an MDefinition for the visible-values set by delegating to the
// definition's own valueHash(), which accounts for opcode and operands.
HashNumber ValueNumberer::VisibleValues::ValueHasher::hash(Lookup ins) {
  return ins->valueHash();
}
46
47 // Test whether two MDefinitions are congruent.
match(Key k,Lookup l)48 bool ValueNumberer::VisibleValues::ValueHasher::match(Key k, Lookup l) {
49 // If one of the instructions depends on a store, and the other instruction
50 // does not depend on the same store, the instructions are not congruent.
51 if (k->dependency() != l->dependency()) return false;
52
53 bool congruent =
54 k->congruentTo(l); // Ask the values themselves what they think.
55 #ifdef JS_JITSPEW
56 if (congruent != l->congruentTo(k)) {
57 JitSpew(
58 JitSpew_GVN,
59 " congruentTo relation is not symmetric between %s%u and %s%u!!",
60 k->opName(), k->id(), l->opName(), l->id());
61 }
62 #endif
63 return congruent;
64 }
65
// Replace the stored key |k| with |newKey| in place; the hash set calls this
// when an entry's key object is swapped for an equivalent one.
void ValueNumberer::VisibleValues::ValueHasher::rekey(Key& k, Key newKey) {
  k = newKey;
}
69
// Construct an empty visible-values set backed by |alloc|.
ValueNumberer::VisibleValues::VisibleValues(TempAllocator& alloc)
    : set_(alloc) {}
72
// Initialize the set. Returns false on allocation failure.
bool ValueNumberer::VisibleValues::init() { return set_.init(); }
75
// Look up the entry congruent to |def|, if any (read-only lookup).
ValueNumberer::VisibleValues::Ptr ValueNumberer::VisibleValues::findLeader(
    const MDefinition* def) const {
  return set_.lookup(def);
}
81
// Look up the entry congruent to |def|, returning an AddPtr so that |def|
// can be inserted via add() if no congruent entry exists.
ValueNumberer::VisibleValues::AddPtr
ValueNumberer::VisibleValues::findLeaderForAdd(MDefinition* def) {
  return set_.lookupForAdd(def);
}
87
// Insert a value into the set at the position found by findLeaderForAdd.
// Returns false on allocation failure.
bool ValueNumberer::VisibleValues::add(AddPtr p, MDefinition* def) {
  return set_.add(p, def);
}
92
// Insert a value into the set, overwriting the existing congruent entry at
// |p| (used when the old entry no longer dominates and can never match again).
void ValueNumberer::VisibleValues::overwrite(AddPtr p, MDefinition* def) {
  set_.replaceKey(p, def);
}
97
// |def| will be discarded, so remove it from any sets.
void ValueNumberer::VisibleValues::forget(const MDefinition* def) {
  Ptr p = set_.lookup(def);
  // The entry found may be a different-but-congruent def; only remove the
  // entry if it is |def| itself.
  if (p && *p == def) set_.remove(p);
}
103
// Clear all state.
void ValueNumberer::VisibleValues::clear() { set_.clear(); }
106
#ifdef DEBUG
// Test whether |def| itself (not merely a congruent value) is in the set.
bool ValueNumberer::VisibleValues::has(const MDefinition* def) const {
  Ptr p = set_.lookup(def);
  return p && *p == def;
}
#endif
114
// Call MDefinition::justReplaceAllUsesWith, and add some GVN-specific asserts.
static void ReplaceAllUsesWith(MDefinition* from, MDefinition* to) {
  MOZ_ASSERT(from != to, "GVN shouldn't try to replace a value with itself");
  MOZ_ASSERT(from->type() == to->type(), "Def replacement has different type");
  MOZ_ASSERT(!to->isDiscarded(),
             "GVN replaces an instruction by a removed instruction");

  // We don't need the extra setting of UseRemoved flags that the regular
  // replaceAllUsesWith does because we do it ourselves.
  from->justReplaceAllUsesWith(to);
}
126
127 // Test whether |succ| is a successor of |block|.
HasSuccessor(const MControlInstruction * block,const MBasicBlock * succ)128 static bool HasSuccessor(const MControlInstruction* block,
129 const MBasicBlock* succ) {
130 for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
131 if (block->getSuccessor(i) == succ) return true;
132 }
133 return false;
134 }
135
// Given a block which has had predecessors removed but is still reachable, test
// whether the block's new dominator will be closer than its old one and whether
// it will expose potential optimization opportunities.
static MBasicBlock* ComputeNewDominator(MBasicBlock* block, MBasicBlock* old) {
  // Start from the first predecessor and refine |now| toward a common
  // dominator of all remaining predecessors.
  MBasicBlock* now = block->getPredecessor(0);
  for (size_t i = 1, e = block->numPredecessors(); i < e; ++i) {
    MBasicBlock* pred = block->getPredecessor(i);
    // Note that dominators haven't been recomputed yet, so we have to check
    // whether now dominates pred, not block.
    while (!now->dominates(pred)) {
      MBasicBlock* next = now->immediateDominator();
      // Climbed all the way back to the old dominator: no refinement.
      if (next == old) return old;
      if (next == now) {
        MOZ_ASSERT(block == old,
                   "Non-self-dominating block became self-dominating");
        return block;
      }
      now = next;
    }
  }
  MOZ_ASSERT(old != block || old != now,
             "Missed self-dominating block staying self-dominating");
  return now;
}
160
// Test for any defs which look potentially interesting to GVN: any phis, or
// any instruction beyond the single control instruction.
static bool BlockHasInterestingDefs(MBasicBlock* block) {
  return !block->phisEmpty() || *block->begin() != block->lastIns();
}
165
166 // Walk up the dominator tree from |block| to the root and test for any defs
167 // which look potentially interesting to GVN.
ScanDominatorsForDefs(MBasicBlock * block)168 static bool ScanDominatorsForDefs(MBasicBlock* block) {
169 for (MBasicBlock* i = block;;) {
170 if (BlockHasInterestingDefs(block)) return true;
171
172 MBasicBlock* immediateDominator = i->immediateDominator();
173 if (immediateDominator == i) break;
174 i = immediateDominator;
175 }
176 return false;
177 }
178
179 // Walk up the dominator tree from |now| to |old| and test for any defs which
180 // look potentially interesting to GVN.
ScanDominatorsForDefs(MBasicBlock * now,MBasicBlock * old)181 static bool ScanDominatorsForDefs(MBasicBlock* now, MBasicBlock* old) {
182 MOZ_ASSERT(old->dominates(now),
183 "Refined dominator not dominated by old dominator");
184
185 for (MBasicBlock* i = now; i != old; i = i->immediateDominator()) {
186 if (BlockHasInterestingDefs(i)) return true;
187 }
188 return false;
189 }
190
// Given a block which has had predecessors removed but is still reachable, test
// whether the block's new dominator will be closer than its old one and whether
// it will expose potential optimization opportunities.
static bool IsDominatorRefined(MBasicBlock* block) {
  MBasicBlock* old = block->immediateDominator();
  MBasicBlock* now = ComputeNewDominator(block, old);

  // If this block is just a goto and it doesn't dominate its destination,
  // removing its predecessors won't refine the dominators of anything
  // interesting.
  MControlInstruction* control = block->lastIns();
  if (*block->begin() == control && block->phisEmpty() && control->isGoto() &&
      !block->dominates(control->toGoto()->target())) {
    return false;
  }

  // We've computed block's new dominator. Test whether there are any
  // newly-dominating definitions which look interesting.
  if (block == old) return block != now && ScanDominatorsForDefs(now);
  MOZ_ASSERT(block != now, "Non-self-dominating block became self-dominating");
  return ScanDominatorsForDefs(now, old);
}
213
214 // |def| has just had one of its users release it. If it's now dead, enqueue it
215 // for discarding, otherwise just make note of it.
handleUseReleased(MDefinition * def,UseRemovedOption useRemovedOption)216 bool ValueNumberer::handleUseReleased(MDefinition* def,
217 UseRemovedOption useRemovedOption) {
218 if (IsDiscardable(def)) {
219 values_.forget(def);
220 if (!deadDefs_.append(def)) return false;
221 } else {
222 if (useRemovedOption == SetUseRemoved) def->setUseRemovedUnchecked();
223 }
224 return true;
225 }
226
// Discard |def| and anything in its use-def subtree which is no longer needed.
bool ValueNumberer::discardDefsRecursively(MDefinition* def) {
  MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");

  // discardDef may push newly-dead operands onto deadDefs_; drain them all.
  return discardDef(def) && processDeadDefs();
}
233
// Assuming |resume| is unreachable, release its operands.
// It might be nice to integrate this code with prepareForDiscard, however GVN
// needs it to call handleUseReleased so that it can observe when a definition
// becomes unused, so it isn't trivial to do.
bool ValueNumberer::releaseResumePointOperands(MResumePoint* resume) {
  for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
    // Skip operands which have already been released.
    if (!resume->hasOperand(i)) continue;
    MDefinition* op = resume->getOperand(i);
    resume->releaseOperand(i);

    // We set the UseRemoved flag when removing resume point operands,
    // because even though we may think we're certain that a particular
    // branch might not be taken, the type information might be incomplete.
    if (!handleUseReleased(op, SetUseRemoved)) return false;
  }
  return true;
}
251
// Assuming |phi| is dead, release and remove its operands. If an operand
// becomes dead, push it to the discard worklist.
bool ValueNumberer::releaseAndRemovePhiOperands(MPhi* phi) {
  // MPhi saves operands in a vector so we iterate in reverse; removing from
  // the end keeps the earlier indices stable.
  for (int o = phi->numOperands() - 1; o >= 0; --o) {
    MDefinition* op = phi->getOperand(o);
    phi->removeOperand(o);
    if (!handleUseReleased(op, DontSetUseRemoved)) return false;
  }
  return true;
}
263
// Assuming |def| is dead, release its operands. If an operand becomes dead,
// push it to the discard worklist.
bool ValueNumberer::releaseOperands(MDefinition* def) {
  for (size_t o = 0, e = def->numOperands(); o < e; ++o) {
    MDefinition* op = def->getOperand(o);
    // Release (don't remove) so |def|'s operand count stays valid while the
    // block later discards it ignoring operands.
    def->releaseOperand(o);
    if (!handleUseReleased(op, DontSetUseRemoved)) return false;
  }
  return true;
}
274
// Discard |def| and mine its operands for any subsequently dead defs.
bool ValueNumberer::discardDef(MDefinition* def) {
#ifdef JS_JITSPEW
  JitSpew(JitSpew_GVN, " Discarding %s %s%u",
          def->block()->isMarked() ? "unreachable" : "dead", def->opName(),
          def->id());
#endif
#ifdef DEBUG
  MOZ_ASSERT(def != nextDef_, "Invalidating the MDefinition iterator");
  if (def->block()->isMarked()) {
    MOZ_ASSERT(!def->hasUses(), "Discarding def that still has uses");
  } else {
    MOZ_ASSERT(IsDiscardable(def), "Discarding non-discardable definition");
    MOZ_ASSERT(!values_.has(def), "Discarding a definition still in the set");
  }
#endif

  // Phis and instructions are removed through different block APIs; an
  // instruction may also carry a resume point whose operands must be
  // released first.
  MBasicBlock* block = def->block();
  if (def->isPhi()) {
    MPhi* phi = def->toPhi();
    if (!releaseAndRemovePhiOperands(phi)) return false;
    block->discardPhi(phi);
  } else {
    MInstruction* ins = def->toInstruction();
    if (MResumePoint* resume = ins->resumePoint()) {
      if (!releaseResumePointOperands(resume)) return false;
    }
    if (!releaseOperands(ins)) return false;
    block->discardIgnoreOperands(ins);
  }

  // If that was the last definition in the block, it can be safely removed
  // from the graph.
  if (block->phisEmpty() && block->begin() == block->end()) {
    MOZ_ASSERT(block->isMarked(),
               "Reachable block lacks at least a control instruction");

    // As a special case, don't remove a block which is a dominator tree
    // root so that we don't invalidate the iterator in visitGraph. We'll
    // check for this and remove it later.
    if (block->immediateDominator() != block) {
      JitSpew(JitSpew_GVN, " Block block%u is now empty; discarding",
              block->id());
      graph_.removeBlock(block);
      blocksRemoved_ = true;
    } else {
      JitSpew(JitSpew_GVN,
              " Dominator root block%u is now empty; will discard later",
              block->id());
    }
  }

  return true;
}
329
// Recursively discard all the defs on the deadDefs_ worklist.
bool ValueNumberer::processDeadDefs() {
  // Snapshot the pinned def so we can skip it below even if nextDef_ changes.
  MDefinition* nextDef = nextDef_;
  while (!deadDefs_.empty()) {
    MDefinition* def = deadDefs_.popCopy();

    // Don't invalidate the MDefinition iterator. This is what we're going
    // to visit next, so we won't miss anything.
    if (def == nextDef) continue;

    if (!discardDef(def)) return false;
  }
  return true;
}
344
345 // Test whether |block|, which is a loop header, has any predecessors other than
346 // |loopPred|, the loop predecessor, which it doesn't dominate.
hasNonDominatingPredecessor(MBasicBlock * block,MBasicBlock * loopPred)347 static bool hasNonDominatingPredecessor(MBasicBlock* block,
348 MBasicBlock* loopPred) {
349 MOZ_ASSERT(block->isLoopHeader());
350 MOZ_ASSERT(block->loopPredecessor() == loopPred);
351
352 for (uint32_t i = 0, e = block->numPredecessors(); i < e; ++i) {
353 MBasicBlock* pred = block->getPredecessor(i);
354 if (pred != loopPred && !block->dominates(pred)) return true;
355 }
356 return false;
357 }
358
// A loop is about to be made reachable only through an OSR entry into one of
// its nested loops. Fix everything up.
bool ValueNumberer::fixupOSROnlyLoop(MBasicBlock* block,
                                     MBasicBlock* backedge) {
  // Create an empty and unreachable(!) block which jumps to |block|. This
  // allows |block| to remain marked as a loop header, so we don't have to
  // worry about moving a different block into place as the new loop header,
  // which is hard, especially if the OSR is into a nested loop. Doing all
  // that would produce slightly more optimal code, but this is so
  // extraordinarily rare that it isn't worth the complexity.
  MBasicBlock* fake =
      MBasicBlock::New(graph_, block->info(), nullptr, MBasicBlock::NORMAL);
  if (fake == nullptr) return false;

  graph_.insertBlockBefore(block, fake);
  // The fake block dominates only itself and is never actually executed.
  fake->setImmediateDominator(fake);
  fake->addNumDominated(1);
  fake->setDomIndex(fake->id());
  fake->setUnreachable();

  // Create zero-input phis to use as inputs for any phis in |block|.
  // Again, this is a little odd, but it's the least-odd thing we can do
  // without significant complexity.
  for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
       iter != end; ++iter) {
    MPhi* phi = *iter;
    // NOTE(review): MPhi::New's result isn't null-checked here, unlike
    // MBasicBlock::New above — confirm allocation is infallible here.
    MPhi* fakePhi = MPhi::New(graph_.alloc(), phi->type());
    fake->addPhi(fakePhi);
    if (!phi->addInputSlow(fakePhi)) return false;
  }

  fake->end(MGoto::New(graph_.alloc(), block));

  if (!block->addPredecessorWithoutPhis(fake)) return false;

  // Restore |backedge| as |block|'s loop backedge.
  block->clearLoopHeader();
  block->setLoopHeader(backedge);

  JitSpew(JitSpew_GVN, " Created fake block%u", fake->id());
  hasOSRFixups_ = true;
  return true;
}
402
// Remove the CFG edge between |pred| and |block|, after releasing the phi
// operands on that edge and discarding any definitions consequently made dead.
bool ValueNumberer::removePredecessorAndDoDCE(MBasicBlock* block,
                                              MBasicBlock* pred,
                                              size_t predIndex) {
  MOZ_ASSERT(
      !block->isMarked(),
      "Block marked unreachable should have predecessors removed already");

  // Before removing the predecessor edge, scan the phi operands for that edge
  // for dead code before they get removed.
  MOZ_ASSERT(nextDef_ == nullptr);
  for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
       iter != end;) {
    MPhi* phi = *iter++;
    MOZ_ASSERT(!values_.has(phi),
               "Visited phi in block having predecessor removed");
    MOZ_ASSERT(!phi->isGuard());

    MDefinition* op = phi->getOperand(predIndex);
    phi->removeOperand(predIndex);

    // Pin the next phi in |nextDef_| so that discarding dead defs can't
    // invalidate the iterator out from under us (processDeadDefs skips it).
    nextDef_ = iter != end ? *iter : nullptr;
    if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs())
      return false;

    // If |nextDef_| became dead while we had it pinned, advance the
    // iterator and discard it now.
    while (nextDef_ && !nextDef_->hasUses() &&
           !nextDef_->isGuardRangeBailouts()) {
      phi = nextDef_->toPhi();
      iter++;
      nextDef_ = iter != end ? *iter : nullptr;
      if (!discardDefsRecursively(phi)) return false;
    }
  }
  nextDef_ = nullptr;

  // The phi operands for this edge are gone; now drop the edge itself.
  block->removePredecessorWithoutPhiOperands(pred, predIndex);
  return true;
}
444
// Remove the CFG edge between |pred| and |block|, and if this makes |block|
// unreachable, mark it so, and remove the rest of its incoming edges too. And
// discard any instructions made dead by the entailed release of any phi
// operands.
bool ValueNumberer::removePredecessorAndCleanUp(MBasicBlock* block,
                                                MBasicBlock* pred) {
  MOZ_ASSERT(!block->isMarked(),
             "Removing predecessor on block already marked unreachable");

  // We'll be removing a predecessor, so anything we know about phis in this
  // block will be wrong.
  for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
       iter != end; ++iter)
    values_.forget(*iter);

  // If this is a loop header, test whether it will become an unreachable
  // loop, or whether it needs special OSR-related fixups.
  bool isUnreachableLoop = false;
  if (block->isLoopHeader()) {
    if (block->loopPredecessor() == pred) {
      if (MOZ_UNLIKELY(hasNonDominatingPredecessor(block, pred))) {
        JitSpew(JitSpew_GVN,
                " "
                "Loop with header block%u is now only reachable through an "
                "OSR entry into the middle of the loop!!",
                block->id());
      } else {
        // Deleting the entry into the loop makes the loop unreachable.
        isUnreachableLoop = true;
        JitSpew(JitSpew_GVN,
                " "
                "Loop with header block%u is no longer reachable",
                block->id());
      }
      // Note: the closing brace for this if lives inside the #ifdef below so
      // the else-if branch exists only in spew-enabled builds.
#ifdef JS_JITSPEW
    } else if (block->hasUniqueBackedge() && block->backedge() == pred) {
      JitSpew(JitSpew_GVN, " Loop with header block%u is no longer a loop",
              block->id());
#endif
    }
  }

  // Actually remove the CFG edge.
  if (!removePredecessorAndDoDCE(block, pred, block->getPredecessorIndex(pred)))
    return false;

  // We've now edited the CFG; check to see if |block| became unreachable.
  if (block->numPredecessors() == 0 || isUnreachableLoop) {
    JitSpew(JitSpew_GVN, " Disconnecting block%u", block->id());

    // Remove |block| from its dominator parent's subtree. This is the only
    // immediately-dominated-block information we need to update, because
    // everything dominated by this block is about to be swept away.
    MBasicBlock* parent = block->immediateDominator();
    if (parent != block) parent->removeImmediatelyDominatedBlock(block);

    // Completely disconnect it from the CFG. We do this now rather than
    // just doing it later when we arrive there in visitUnreachableBlock
    // so that we don't leave a partially broken loop sitting around. This
    // also lets visitUnreachableBlock assert that numPredecessors() == 0,
    // which is a nice invariant.
    if (block->isLoopHeader()) block->clearLoopHeader();
    // NOTE(review): predecessors are removed while iterating with a cached
    // bound — confirm removePredecessorWithoutPhiOperands keeps the indices
    // visited here stable.
    for (size_t i = 0, e = block->numPredecessors(); i < e; ++i) {
      if (!removePredecessorAndDoDCE(block, block->getPredecessor(i), i))
        return false;
    }

    // Clear out the resume point operands, as they can hold things that
    // don't appear to dominate them live.
    if (MResumePoint* resume = block->entryResumePoint()) {
      if (!releaseResumePointOperands(resume) || !processDeadDefs())
        return false;
      if (MResumePoint* outer = block->outerResumePoint()) {
        if (!releaseResumePointOperands(outer) || !processDeadDefs())
          return false;
      }
      // Walk the instructions, pinning the next one in |nextDef_| so that
      // discarding dead defs can't invalidate the iterator.
      MOZ_ASSERT(nextDef_ == nullptr);
      for (MInstructionIterator iter(block->begin()), end(block->end());
           iter != end;) {
        MInstruction* ins = *iter++;
        nextDef_ = iter != end ? *iter : nullptr;
        if (MResumePoint* resume = ins->resumePoint()) {
          if (!releaseResumePointOperands(resume) || !processDeadDefs())
            return false;
        }
      }
      nextDef_ = nullptr;
    } else {
#ifdef DEBUG
      MOZ_ASSERT(block->outerResumePoint() == nullptr,
                 "Outer resume point in block without an entry resume point");
      for (MInstructionIterator iter(block->begin()), end(block->end());
           iter != end; ++iter) {
        MOZ_ASSERT(iter->resumePoint() == nullptr,
                   "Instruction with resume point in block without entry "
                   "resume point");
      }
#endif
    }

    // Use the mark to note that we've already removed all its predecessors,
    // and we know it's unreachable.
    block->mark();
  }

  return true;
}
552
// Return a simplified form of |def|, if we can. foldsTo returns |def| itself
// when no simplification applies, or nullptr on allocation failure.
MDefinition* ValueNumberer::simplified(MDefinition* def) const {
  return def->foldsTo(graph_.alloc());
}
557
// If an equivalent and dominating value already exists in the set, return it.
// Otherwise insert |def| into the set and return it. Returns nullptr on
// allocation failure.
MDefinition* ValueNumberer::leader(MDefinition* def) {
  // If the value isn't suitable for eliminating, don't bother hashing it. The
  // convention is that congruentTo returns false for node kinds that wish to
  // opt out of redundance elimination.
  // TODO: It'd be nice to clean up that convention (bug 1031406).
  if (!def->isEffectful() && def->congruentTo(def)) {
    // Look for a match.
    VisibleValues::AddPtr p = values_.findLeaderForAdd(def);
    if (p) {
      MDefinition* rep = *p;
      // Stale entries are pruned lazily: a hit only counts if the entry is
      // still live and dominates |def|.
      if (!rep->isDiscarded() && rep->block()->dominates(def->block())) {
        // We found a dominating congruent value.
        return rep;
      }

      // The congruent value doesn't dominate. It never will again in this
      // dominator tree, so overwrite it.
      values_.overwrite(p, def);
    } else {
      // No match. Add a new entry.
      if (!values_.add(p, def)) return nullptr;
    }

#ifdef JS_JITSPEW
    JitSpew(JitSpew_GVN, " Recording %s%u", def->opName(), def->id());
#endif
  }

  return def;
}
590
591 // Test whether |phi| is dominated by a congruent phi.
hasLeader(const MPhi * phi,const MBasicBlock * phiBlock) const592 bool ValueNumberer::hasLeader(const MPhi* phi,
593 const MBasicBlock* phiBlock) const {
594 if (VisibleValues::Ptr p = values_.findLeader(phi)) {
595 const MDefinition* rep = *p;
596 return rep != phi && rep->block()->dominates(phiBlock);
597 }
598 return false;
599 }
600
// Test whether there are any phis in |header| which are newly optimizable, as a
// result of optimizations done inside the loop. This is not a sparse approach,
// but restarting is rare enough in practice. Termination is ensured by
// discarding the phi triggering the iteration.
bool ValueNumberer::loopHasOptimizablePhi(MBasicBlock* header) const {
  // If the header is unreachable, don't bother re-optimizing it.
  if (header->isMarked()) return false;

  // Rescan the phis for any that can be simplified, since they may be reading
  // values from backedges.
  for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd());
       iter != end; ++iter) {
    MPhi* phi = *iter;
    MOZ_ASSERT_IF(!phi->hasUses(), !DeadIfUnused(phi));

    // A phi is newly optimizable if all its inputs collapsed to one value,
    // or if a dominating congruent phi now exists.
    if (phi->operandIfRedundant() || hasLeader(phi, header))
      return true;  // Phi can be simplified.
  }
  return false;
}
621
// Visit |def|: prune redundant Nops, fold via foldsTo, then eliminate it if a
// dominating congruent value exists. Returns false on allocation failure.
bool ValueNumberer::visitDefinition(MDefinition* def) {
  // Nop does not fit in any of the previous optimization, as its only purpose
  // is to reduce the register pressure by keeping additional resume
  // point. Still, there is no need for a consecutive list of MNop
  // instructions, and this will slow down every other iteration on the Graph.
  if (def->isNop()) {
    MNop* nop = def->toNop();
    MBasicBlock* block = nop->block();

    // We look backward to know if we can remove the previous Nop, we do not
    // look forward as we would not benefit from the folding made by GVN.
    MInstructionReverseIterator iter = ++block->rbegin(nop);

    // This nop is at the beginning of the basic block, just replace the
    // resume point of the basic block by the one from the resume point.
    if (iter == block->rend()) {
      JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
      nop->moveResumePointAsEntry();
      block->discard(nop);
      return true;
    }

    // The previous instruction is also a Nop, no need to keep it anymore.
    MInstruction* prev = *iter;
    if (prev->isNop()) {
      JitSpew(JitSpew_GVN, " Removing Nop%u", prev->id());
      block->discard(prev);
      return true;
    }

    // The Nop is introduced to capture the result and make sure the operands
    // are not live anymore when there are no further uses. Though when
    // all operands are still needed the Nop doesn't decrease the liveness
    // and can get removed.
    MResumePoint* rp = nop->resumePoint();
    if (rp && rp->numOperands() > 0 &&
        rp->getOperand(rp->numOperands() - 1) == prev &&
        !nop->block()->lastIns()->isThrow() &&
        !prev->isAssertRecoveredOnBailout()) {
      // Count how many of |prev|'s operands the resume point keeps live
      // anyway.
      size_t numOperandsLive = 0;
      for (size_t j = 0; j < prev->numOperands(); j++) {
        for (size_t i = 0; i < rp->numOperands(); i++) {
          if (prev->getOperand(j) == rp->getOperand(i)) {
            numOperandsLive++;
            break;
          }
        }
      }

      // All operands are held live by the resume point itself, so the Nop
      // adds nothing.
      if (numOperandsLive == prev->numOperands()) {
        JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
        block->discard(nop);
      }
    }

    return true;
  }

  // Skip optimizations on instructions which are recovered on bailout, to
  // avoid mixing instructions which are recovered on bailouts with
  // instructions which are not.
  if (def->isRecoveredOnBailout()) return true;

  // If this instruction has a dependency() into an unreachable block, we'll
  // need to update AliasAnalysis.
  MDefinition* dep = def->dependency();
  if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
    JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
    if (updateAliasAnalysis_ && !dependenciesBroken_) {
      // TODO: Recomputing alias-analysis could theoretically expose more
      // GVN opportunities.
      JitSpew(JitSpew_GVN, " Will recompute!");
      dependenciesBroken_ = true;
    }
    // Temporarily clear its dependency, to protect foldsTo, which may
    // wish to use the dependency to do store-to-load forwarding. Pointing
    // the dependency at itself stands in for "none"; it is restored below.
    def->setDependency(def->toInstruction());
  } else {
    dep = nullptr;
  }

  // Look for a simplified form of |def|.
  MDefinition* sim = simplified(def);
  if (sim != def) {
    if (sim == nullptr) return false;

    bool isNewInstruction = sim->block() == nullptr;

    // If |sim| doesn't belong to a block, insert it next to |def|.
    if (isNewInstruction)
      def->block()->insertAfter(def->toInstruction(), sim->toInstruction());

#ifdef JS_JITSPEW
    JitSpew(JitSpew_GVN, " Folded %s%u to %s%u", def->opName(), def->id(),
            sim->opName(), sim->id());
#endif
    MOZ_ASSERT(!sim->isDiscarded());
    ReplaceAllUsesWith(def, sim);

    // The node's foldsTo said |def| can be replaced by |sim|. If |def| is a
    // guard, then either |sim| is also a guard, or a guard isn't actually
    // needed, so we can clear |def|'s guard flag and let it be discarded.
    def->setNotGuardUnchecked();

    if (def->isGuardRangeBailouts()) sim->setGuardRangeBailoutsUnchecked();

    if (DeadIfUnused(def)) {
      if (!discardDefsRecursively(def)) return false;

      // If that ended up discarding |sim|, then we're done here.
      if (sim->isDiscarded()) return true;
    }

    if (!rerun_ && def->isPhi() && !sim->isPhi()) {
      rerun_ = true;
      JitSpew(JitSpew_GVN,
              " Replacing phi%u may have enabled cascading optimisations; "
              "will re-run",
              def->id());
    }

    // Otherwise, proceed to optimize with |sim| in place of |def|.
    def = sim;

    // If the simplified instruction was already part of the graph, then we
    // probably already visited and optimized this instruction.
    if (!isNewInstruction) return true;
  }

  // Now that foldsTo is done, re-enable the original dependency. Even though
  // it may be pointing into a discarded block, it's still valid for the
  // purposes of detecting congruent loads.
  if (dep != nullptr) def->setDependency(dep);

  // Look for a dominating def which makes |def| redundant.
  MDefinition* rep = leader(def);
  if (rep != def) {
    if (rep == nullptr) return false;
    if (rep->updateForReplacement(def)) {
#ifdef JS_JITSPEW
      JitSpew(JitSpew_GVN, " Replacing %s%u with %s%u", def->opName(),
              def->id(), rep->opName(), rep->id());
#endif
      ReplaceAllUsesWith(def, rep);

      // The node's congruentTo said |def| is congruent to |rep|, and it's
      // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
      // so we can clear |def|'s guard flag and let it be discarded.
      def->setNotGuardUnchecked();

      if (DeadIfUnused(def)) {
        // discardDef should not add anything to the deadDefs, as the
        // redundant operation should have the same input operands.
        mozilla::DebugOnly<bool> r = discardDef(def);
        MOZ_ASSERT(
            r,
            "discardDef shouldn't have tried to add anything to the worklist, "
            "so it shouldn't have failed");
        MOZ_ASSERT(deadDefs_.empty(),
                   "discardDef shouldn't have added anything to the worklist");
      }
      def = rep;
    }
  }

  return true;
}
790
// Visit the control instruction at the end of |block|: if foldsTo produces a
// replacement, splice it in and prune any CFG edges it drops.
bool ValueNumberer::visitControlInstruction(MBasicBlock* block) {
  // Look for a simplified form of the control instruction.
  MControlInstruction* control = block->lastIns();
  MDefinition* rep = simplified(control);
  if (rep == control) return true;

  if (rep == nullptr) return false;

  MControlInstruction* newControl = rep->toControlInstruction();
  MOZ_ASSERT(!newControl->block(),
             "Control instruction replacement shouldn't already be in a block");
#ifdef JS_JITSPEW
  // NOTE(review): |newControl| has no id yet; getNumInstructionIds() is
  // presumably the id it will receive when added to the graph — confirm.
  JitSpew(JitSpew_GVN, " Folded control instruction %s%u to %s%u",
          control->opName(), control->id(), newControl->opName(),
          graph_.getNumInstructionIds());
#endif

  // If the simplification removes any CFG edges, update the CFG and remove
  // any blocks that become dead.
  size_t oldNumSuccs = control->numSuccessors();
  size_t newNumSuccs = newControl->numSuccessors();
  if (newNumSuccs != oldNumSuccs) {
    MOZ_ASSERT(newNumSuccs < oldNumSuccs,
               "New control instruction has too many successors");
    for (size_t i = 0; i != oldNumSuccs; ++i) {
      MBasicBlock* succ = control->getSuccessor(i);
      // Successors kept by the new control need no edge removal; marked
      // successors are already known unreachable.
      if (HasSuccessor(newControl, succ)) continue;
      if (succ->isMarked()) continue;
      if (!removePredecessorAndCleanUp(succ, block)) return false;
      // Removing the edge may have just marked |succ| unreachable.
      if (succ->isMarked()) continue;
      if (!rerun_) {
        if (!remainingBlocks_.append(succ)) return false;
      }
    }
  }

  if (!releaseOperands(control)) return false;
  block->discardIgnoreOperands(control);
  block->end(newControl);
  if (block->entryResumePoint() && newNumSuccs != oldNumSuccs)
    block->flagOperandsOfPrunedBranches(newControl);
  return processDeadDefs();
}
835
// |block| is unreachable. Mine it for opportunities to delete more dead
// code, and then discard it.
//
// Disconnects the block's outgoing edges (possibly cascading
// unreachability to successors), then discards its dead definitions and
// finally its control instruction. Returns false on failure.
bool ValueNumberer::visitUnreachableBlock(MBasicBlock* block) {
  JitSpew(JitSpew_GVN, " Visiting unreachable block%u%s%s%s", block->id(),
          block->isLoopHeader() ? " (loop header)" : "",
          block->isSplitEdge() ? " (split edge)" : "",
          block->immediateDominator() == block ? " (dominator root)" : "");

  MOZ_ASSERT(block->isMarked(),
             "Visiting unmarked (and therefore reachable?) block");
  MOZ_ASSERT(block->numPredecessors() == 0,
             "Block marked unreachable still has predecessors");
  MOZ_ASSERT(block != graph_.entryBlock(), "Removing normal entry block");
  MOZ_ASSERT(block != graph_.osrBlock(), "Removing OSR entry block");
  MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");

  // Disconnect all outgoing CFG edges.
  for (size_t i = 0, e = block->numSuccessors(); i < e; ++i) {
    MBasicBlock* succ = block->getSuccessor(i);
    // Skip successors already dead or already known-unreachable.
    if (succ->isDead() || succ->isMarked()) continue;
    if (!removePredecessorAndCleanUp(succ, block)) return false;
    // The call above may have marked |succ| unreachable in turn.
    if (succ->isMarked()) continue;
    // |succ| is still reachable. Make a note of it so that we can scan
    // it for interesting dominator tree changes later.
    if (!rerun_) {
      if (!remainingBlocks_.append(succ)) return false;
    }
  }

  // Discard any instructions with no uses. The remaining instructions will be
  // discarded when their last use is discarded.
  MOZ_ASSERT(nextDef_ == nullptr);
  for (MDefinitionIterator iter(block); iter;) {
    MDefinition* def = *iter++;
    if (def->hasUses()) continue;
    // Stash the next definition so the recursive discard (which may delete
    // defs in this block) can keep our traversal position valid.
    nextDef_ = iter ? *iter : nullptr;
    if (!discardDefsRecursively(def)) return false;
  }

  nextDef_ = nullptr;
  // Finally discard the terminator, releasing its operand uses too.
  MControlInstruction* control = block->lastIns();
  return discardDefsRecursively(control);
}
879
880 // Visit all the phis and instructions |block|.
visitBlock(MBasicBlock * block)881 bool ValueNumberer::visitBlock(MBasicBlock* block) {
882 MOZ_ASSERT(!block->isMarked(), "Blocks marked unreachable during GVN");
883 MOZ_ASSERT(!block->isDead(), "Block to visit is already dead");
884
885 JitSpew(JitSpew_GVN, " Visiting block%u", block->id());
886
887 // Visit the definitions in the block top-down.
888 MOZ_ASSERT(nextDef_ == nullptr);
889 for (MDefinitionIterator iter(block); iter;) {
890 if (!graph_.alloc().ensureBallast()) return false;
891 MDefinition* def = *iter++;
892
893 // Remember where our iterator is so that we don't invalidate it.
894 nextDef_ = iter ? *iter : nullptr;
895
896 // If the definition is dead, discard it.
897 if (IsDiscardable(def)) {
898 if (!discardDefsRecursively(def)) return false;
899 continue;
900 }
901
902 if (!visitDefinition(def)) return false;
903 }
904 nextDef_ = nullptr;
905
906 if (!graph_.alloc().ensureBallast()) return false;
907
908 return visitControlInstruction(block);
909 }
910
// Visit all the blocks dominated by dominatorRoot.
//
// Walks the tree in RPO so every block is seen before the blocks it
// dominates, visiting reachable blocks and specially discarding blocks
// that became unreachable. Returns false on failure.
bool ValueNumberer::visitDominatorTree(MBasicBlock* dominatorRoot) {
  JitSpew(JitSpew_GVN,
          " Visiting dominator tree (with %" PRIu64
          " blocks) rooted at block%u%s",
          uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
          dominatorRoot == graph_.entryBlock()
              ? " (normal entry block)"
              : dominatorRoot == graph_.osrBlock()
                    ? " (OSR entry block)"
                    : dominatorRoot->numPredecessors() == 0
                        ? " (odd unreachable block)"
                        : " (merge point from normal entry and OSR entry)");
  MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
             "root is not a dominator tree root");

  // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
  // property that we'll always visit a block before any block it dominates,
  // so we can make a single pass through the list and see every full
  // redundance.
  size_t numVisited = 0;
  size_t numDiscarded = 0;
  for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot));;) {
    MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
    MBasicBlock* block = *iter++;
    // We're only visiting blocks in dominatorRoot's tree right now.
    if (!dominatorRoot->dominates(block)) continue;

    // If this is a loop backedge, remember the header, as we may not be able
    // to find it after we simplify the block.
    MBasicBlock* header =
        block->isLoopBackedge() ? block->loopHeaderOfBackedge() : nullptr;

    if (block->isMarked()) {
      // This block has become unreachable; handle it specially.
      if (!visitUnreachableBlock(block)) return false;
      ++numDiscarded;
    } else {
      // Visit the block!
      if (!visitBlock(block)) return false;
      ++numVisited;
    }

    // If the block is/was a loop backedge, check to see if the block that
    // is/was its header has optimizable phis, which would want a re-run.
    if (!rerun_ && header && loopHasOptimizablePhi(header)) {
      JitSpew(JitSpew_GVN,
              " Loop phi in block%u can now be optimized; will re-run GVN!",
              header->id());
      rerun_ = true;
      // A full re-run supersedes the incremental dominator-refinement
      // checks, so drop the pending block list.
      remainingBlocks_.clear();
    }

    // Stop once every dominated block has been either visited or
    // discarded; discards reduce the number of blocks we still expect
    // to visit.
    MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numDiscarded,
               "Visited blocks too many times");
    if (numVisited >= dominatorRoot->numDominated() - numDiscarded) break;
  }

  totalNumVisited_ += numVisited;
  // The value set does not carry over between dominator trees.
  values_.clear();
  return true;
}
973
// Visit all the blocks in the graph.
//
// Performs one visitDominatorTree() traversal per dominator tree root,
// and removes root blocks that were marked unreachable during their own
// traversal. Returns false on failure.
bool ValueNumberer::visitGraph() {
  // Due to OSR blocks, the set of blocks dominated by a block may not be
  // contiguous in the RPO. Do a separate traversal for each dominator tree
  // root. There's always the main entry, and sometimes there's an OSR entry,
  // and then there are the roots formed where the OSR paths merge with the
  // main entry paths.
  for (ReversePostorderIterator iter(graph_.rpoBegin());;) {
    MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
    MBasicBlock* block = *iter;
    // A block is a dominator tree root iff it is its own immediate dominator.
    if (block->immediateDominator() == block) {
      if (!visitDominatorTree(block)) return false;

      // Normally unreachable blocks would be removed by now, but if this
      // block is a dominator tree root, it has been special-cased and left
      // in place in order to avoid invalidating our iterator. Now that
      // we've finished the tree, increment the iterator, and then if it's
      // marked for removal, remove it.
      ++iter;
      if (block->isMarked()) {
        JitSpew(JitSpew_GVN, " Discarding dominator root block%u",
                block->id());
        MOZ_ASSERT(
            block->begin() == block->end(),
            "Unreachable dominator tree root has instructions after tree walk");
        MOZ_ASSERT(block->phisEmpty(),
                   "Unreachable dominator tree root has phis after tree walk");
        graph_.removeBlock(block);
        blocksRemoved_ = true;
      }

      // Stop once every block in the graph has been visited.
      MOZ_ASSERT(totalNumVisited_ <= graph_.numBlocks(),
                 "Visited blocks too many times");
      if (totalNumVisited_ >= graph_.numBlocks()) break;
    } else {
      // This block is not a dominator tree root; it is (or will be) covered
      // by its root's traversal. Proceed to the next one.
      ++iter;
    }
  }
  totalNumVisited_ = 0;
  return true;
}
1016
insertOSRFixups()1017 bool ValueNumberer::insertOSRFixups() {
1018 ReversePostorderIterator end(graph_.end());
1019 for (ReversePostorderIterator iter(graph_.begin()); iter != end;) {
1020 MBasicBlock* block = *iter++;
1021
1022 // Only add fixup block above for loops which can be reached from OSR.
1023 if (!block->isLoopHeader()) continue;
1024
1025 // If the loop header is not self-dominated, then this loop does not
1026 // have to deal with a second entry point, so there is no need to add a
1027 // second entry point with a fixup block.
1028 if (block->immediateDominator() != block) continue;
1029
1030 if (!fixupOSROnlyLoop(block, block->backedge())) return false;
1031 }
1032
1033 return true;
1034 }
1035
// OSR fixups serve the purpose of representing the non-OSR entry into a loop
// when the only real entry is an OSR entry into the middle. However, if the
// entry into the middle is subsequently folded away, the loop may actually
// have become unreachable. Mark-and-sweep all blocks to remove all such code.
bool ValueNumberer::cleanupOSRFixups() {
  // Mark phase: flood-fill reachability from both entry points. Note that
  // both the normal entry and the OSR entry are dereferenced unconditionally
  // below, so this must only be called when an OSR block exists.
  Vector<MBasicBlock*, 0, JitAllocPolicy> worklist(graph_.alloc());
  // The two seed blocks are counted as marked up front.
  unsigned numMarked = 2;
  graph_.entryBlock()->mark();
  graph_.osrBlock()->mark();
  if (!worklist.append(graph_.entryBlock()) ||
      !worklist.append(graph_.osrBlock()))
    return false;
  while (!worklist.empty()) {
    MBasicBlock* block = worklist.popCopy();
    for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
      MBasicBlock* succ = block->getSuccessor(i);
      if (!succ->isMarked()) {
        ++numMarked;
        succ->mark();
        if (!worklist.append(succ)) return false;
      } else if (succ->isLoopHeader() && succ->loopPredecessor() == block &&
                 succ->numPredecessors() == 3) {
        // Unmark fixup blocks if the loop predecessor is marked after
        // the loop header.
        // NOTE(review): with 3 predecessors, predecessor 1 is taken to be
        // the fixup block — confirm against fixupOSROnlyLoop's layout.
        succ->getPredecessor(1)->unmarkUnchecked();
      }
    }

    // OSR fixup blocks are needed if and only if the loop header is
    // reachable from its backedge (via the OSR block) and not from its
    // original loop predecessor.
    //
    // Thus OSR fixup blocks are removed if the loop header is not
    // reachable, or if the loop header is reachable from both its backedge
    // and its original loop predecessor.
    if (block->isLoopHeader()) {
      MBasicBlock* maybeFixupBlock = nullptr;
      if (block->numPredecessors() == 2) {
        maybeFixupBlock = block->getPredecessor(0);
      } else {
        MOZ_ASSERT(block->numPredecessors() == 3);
        if (!block->loopPredecessor()->isMarked())
          maybeFixupBlock = block->getPredecessor(1);
      }

      // Re-mark a fixup block that is still needed: it is unmarked, has
      // lost all its predecessors, and is not one of the entry blocks.
      if (maybeFixupBlock && !maybeFixupBlock->isMarked() &&
          maybeFixupBlock->numPredecessors() == 0) {
        MOZ_ASSERT(maybeFixupBlock->numSuccessors() == 1,
                   "OSR fixup block should have exactly one successor");
        MOZ_ASSERT(maybeFixupBlock != graph_.entryBlock(),
                   "OSR fixup block shouldn't be the entry block");
        MOZ_ASSERT(maybeFixupBlock != graph_.osrBlock(),
                   "OSR fixup block shouldn't be the OSR entry block");
        maybeFixupBlock->mark();
      }
    }
  }

  // And sweep.
  return RemoveUnmarkedBlocks(mir_, graph_, numMarked);
}
1098
// Construct a ValueNumberer over |graph|. All fallible setup is deferred
// to init(); the constructor only wires up allocators and clears state.
ValueNumberer::ValueNumberer(MIRGenerator* mir, MIRGraph& graph)
    : mir_(mir),
      graph_(graph),
      values_(graph.alloc()),           // hash set of visible values
      deadDefs_(graph.alloc()),         // worklist of defs to discard
      remainingBlocks_(graph.alloc()),  // blocks to re-check for refined dominators
      nextDef_(nullptr),                // traversal cursor preserved across deletions
      totalNumVisited_(0),
      rerun_(false),                    // set when a whole-graph re-run is warranted
      blocksRemoved_(false),
      updateAliasAnalysis_(false),
      dependenciesBroken_(false),
      hasOSRFixups_(false) {}
1112
// Perform fallible initialization. Returns false on failure.
bool ValueNumberer::init() {
  // Initialize the value set. It's tempting to pass in a size here of some
  // function of graph_.getNumInstructionIds(), however if we start out with a
  // large capacity, it will be far larger than the actual element count for
  // most of the pass, so when we remove elements, it would often think it
  // needs to compact itself. Empirically, just letting the HashTable grow as
  // needed on its own seems to work pretty well.
  return values_.init();
}
1122
run(UpdateAliasAnalysisFlag updateAliasAnalysis)1123 bool ValueNumberer::run(UpdateAliasAnalysisFlag updateAliasAnalysis) {
1124 updateAliasAnalysis_ = updateAliasAnalysis == UpdateAliasAnalysis;
1125
1126 JitSpew(JitSpew_GVN, "Running GVN on graph (with %" PRIu64 " blocks)",
1127 uint64_t(graph_.numBlocks()));
1128
1129 // Adding fixup blocks only make sense iff we have a second entry point into
1130 // the graph which cannot be reached any more from the entry point.
1131 if (graph_.osrBlock()) {
1132 if (!insertOSRFixups()) return false;
1133 }
1134
1135 // Top level non-sparse iteration loop. If an iteration performs a
1136 // significant change, such as discarding a block which changes the
1137 // dominator tree and may enable more optimization, this loop takes another
1138 // iteration.
1139 int runs = 0;
1140 for (;;) {
1141 if (!visitGraph()) return false;
1142
1143 // Test whether any block which was not removed but which had at least
1144 // one predecessor removed will have a new dominator parent.
1145 while (!remainingBlocks_.empty()) {
1146 MBasicBlock* block = remainingBlocks_.popCopy();
1147 if (!block->isDead() && IsDominatorRefined(block)) {
1148 JitSpew(JitSpew_GVN,
1149 " Dominator for block%u can now be refined; will re-run GVN!",
1150 block->id());
1151 rerun_ = true;
1152 remainingBlocks_.clear();
1153 break;
1154 }
1155 }
1156
1157 if (blocksRemoved_) {
1158 if (!AccountForCFGChanges(mir_, graph_, dependenciesBroken_,
1159 /* underValueNumberer = */ true))
1160 return false;
1161
1162 blocksRemoved_ = false;
1163 dependenciesBroken_ = false;
1164 }
1165
1166 if (mir_->shouldCancel("GVN (outer loop)")) return false;
1167
1168 // If no further opportunities have been discovered, we're done.
1169 if (!rerun_) break;
1170
1171 rerun_ = false;
1172
1173 // Enforce an arbitrary iteration limit. This is rarely reached, and
1174 // isn't even strictly necessary, as the algorithm is guaranteed to
1175 // terminate on its own in a finite amount of time (since every time we
1176 // re-run we discard the construct which triggered the re-run), but it
1177 // does help avoid slow compile times on pathological code.
1178 ++runs;
1179 if (runs == 6) {
1180 JitSpew(JitSpew_GVN, "Re-run cutoff of %d reached. Terminating GVN!",
1181 runs);
1182 break;
1183 }
1184
1185 JitSpew(JitSpew_GVN,
1186 "Re-running GVN on graph (run %d, now with %" PRIu64 " blocks)",
1187 runs, uint64_t(graph_.numBlocks()));
1188 }
1189
1190 if (MOZ_UNLIKELY(hasOSRFixups_)) {
1191 if (!cleanupOSRFixups()) return false;
1192 hasOSRFixups_ = false;
1193 }
1194
1195 return true;
1196 }
1197