1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/IonAnalysis.h"
8
9 #include <algorithm>
10 #include <utility> // for ::std::pair
11
12 #include "jit/AliasAnalysis.h"
13 #include "jit/BaselineJIT.h"
14 #include "jit/CompileInfo.h"
15 #include "jit/InlineScriptTree.h"
16 #include "jit/Ion.h"
17 #include "jit/IonOptimizationLevels.h"
18 #include "jit/LIR.h"
19 #include "jit/Lowering.h"
20 #include "jit/MIRGraph.h"
21 #include "jit/WarpBuilder.h"
22 #include "jit/WarpOracle.h"
23 #include "util/CheckedArithmetic.h"
24 #include "vm/PlainObject.h" // js::PlainObject
25 #include "vm/RegExpObject.h"
26 #include "vm/SelfHosting.h"
27
28 #include "jit/InlineScriptTree-inl.h"
29 #include "jit/JitScript-inl.h"
30 #include "jit/shared/Lowering-shared-inl.h"
31 #include "vm/BytecodeUtil-inl.h"
32 #include "vm/JSObject-inl.h"
33 #include "vm/JSScript-inl.h"
34
35 using namespace js;
36 using namespace js::jit;
37
38 using mozilla::DebugOnly;
39
40 // Stack used by FlagPhiInputsAsImplicitlyUsed. It stores the Phi instruction
41 // pointer and the MUseIterator which should be visited next.
42 using MPhiUseIteratorStack =
43 Vector<std::pair<MPhi*, MUseIterator>, 16, SystemAllocPolicy>;
44
45 // Look for Phi uses with a depth-first search. If any uses are found the stack
46 // of MPhi instructions is returned in the |worklist| argument.
static bool DepthFirstSearchUse(MIRGenerator* mir,
                                MPhiUseIteratorStack& worklist, MPhi* phi) {
49 // Push a Phi and the next use to iterate over in the worklist.
50 auto push = [&worklist](MPhi* phi, MUseIterator use) -> bool {
51 phi->setInWorklist();
52 return worklist.append(std::make_pair(phi, use));
53 };
54
55 #ifdef DEBUG
56 // Used to assert that when we have no uses, we at least visited all the
57 // transitive uses.
58 size_t refUseCount = phi->useCount();
59 size_t useCount = 0;
60 #endif
61 MOZ_ASSERT(worklist.empty());
62 if (!push(phi, phi->usesBegin())) {
63 return false;
64 }
65
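  // Each worklist entry is a (phi, next-use) pair: popping an entry resumes
  // the scan of that phi's uses exactly where it left off.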
66 while (!worklist.empty()) {
67 // Resume iterating over the last phi-use pair added by the next loop.
68 auto pair = worklist.popCopy();
69 MPhi* producer = pair.first;
70 MUseIterator use = pair.second;
71 MUseIterator end(producer->usesEnd());
72 producer->setNotInWorklist();
73
    // Keep going down the tree of uses, skipping (continue)
    // non-observable/unused cases and Phis which are already listed in the
    // worklist. Stop (return) as soon as one use is found.
77 while (use != end) {
78 MNode* consumer = (*use)->consumer();
79 MUseIterator it = use;
80 use++;
81 #ifdef DEBUG
82 useCount++;
83 #endif
84 if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed inner loop")) {
85 return false;
86 }
87
88 if (consumer->isResumePoint()) {
89 MResumePoint* rp = consumer->toResumePoint();
90 // Observable operands are similar to potential uses.
91 if (rp->isObservableOperand(*it)) {
92 return push(producer, use);
93 }
94 continue;
95 }
96
97 MDefinition* cdef = consumer->toDefinition();
98 if (!cdef->isPhi()) {
99 // The producer is explicitly used by a definition.
100 return push(producer, use);
101 }
102
103 MPhi* cphi = cdef->toPhi();
104 if (cphi->getUsageAnalysis() == PhiUsage::Used ||
105 cphi->isImplicitlyUsed()) {
106 // The information got cached on the Phi the last time it
107 // got visited, or when flagging operands of implicitly used
108 // instructions.
109 return push(producer, use);
110 }
111
112 if (cphi->isInWorklist() || cphi == producer) {
        // We are already iterating over the uses of this Phi instruction,
        // which are part of a loop. Instead of trying to handle loops,
        // conservatively mark them as used.
116 return push(producer, use);
117 }
118
119 if (cphi->getUsageAnalysis() == PhiUsage::Unused) {
120 // The instruction already got visited and is known to have
121 // no uses. Skip it.
122 continue;
123 }
124
      // We found another Phi instruction; move the use iterator to
      // the next use and push it to the worklist stack. Then, continue
      // with a depth-first search.
128 if (!push(producer, use)) {
129 return false;
130 }
131 producer = cphi;
132 use = producer->usesBegin();
133 end = producer->usesEnd();
134 #ifdef DEBUG
135 refUseCount += producer->useCount();
136 #endif
137 }
138
139 // When unused, we cannot bubble up this information without iterating
140 // over the rest of the previous Phi instruction consumers.
141 MOZ_ASSERT(use == end);
142 producer->setUsageAnalysis(PhiUsage::Unused);
143 }
144
145 MOZ_ASSERT(useCount == refUseCount);
146 return true;
147 }
148
static bool FlagPhiInputsAsImplicitlyUsed(MIRGenerator* mir, MBasicBlock* block,
                                          MBasicBlock* succ,
                                          MPhiUseIteratorStack& worklist) {
152 // When removing an edge between 2 blocks, we might remove the ability of
153 // later phases to figure out that the uses of a Phi should be considered as
154 // a use of all its inputs. Thus we need to mark the Phi inputs as being
155 // implicitly used iff the phi has any uses.
156 //
157 //
158 // +--------------------+ +---------------------+
159 // |12 MFoo 6 | |32 MBar 5 |
160 // | | | |
161 // | ... | | ... |
162 // | | | |
163 // |25 MGoto Block 4 | |43 MGoto Block 4 |
164 // +--------------------+ +---------------------+
165 // | |
166 // | | |
167 // | | |
168 // | +-----X------------------------+
169 // | Edge |
170 // | Removed |
171 // | |
172 // | +------------v-----------+
173 // | |50 MPhi 12 32 |
174 // | | |
175 // | | ... |
176 // | | |
177 // | |70 MReturn 50 |
178 // | +------------------------+
179 // |
180 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
181 // |
182 // v
183 //
184 // ^ +--------------------+ +---------------------+
185 // /!\ |12 MConst opt-out | |32 MBar 5 |
186 // '---' | | | |
187 // | ... | | ... |
188 // |78 MBail | | |
189 // |80 MUnreachable | |43 MGoto Block 4 |
190 // +--------------------+ +---------------------+
191 // |
192 // |
193 // |
194 // +---------------+
195 // |
196 // |
197 // |
198 // +------------v-----------+
199 // |50 MPhi 32 |
200 // | |
201 // | ... |
202 // | |
203 // |70 MReturn 50 |
204 // +------------------------+
205 //
206 //
  // If the inputs of the Phi are not flagged as implicitly used, then
  // later compilation phases might optimize them out. The problem is that a
  // bailout will use this value and give it back to baseline, which will then
  // use the OptimizedOut magic value in a computation.
211 //
212 // Unfortunately, we cannot be too conservative about flagging Phi inputs as
213 // having implicit uses, as this would prevent many optimizations from being
214 // used. Thus, the following code is in charge of flagging Phi instructions
215 // as Unused or Used, and setting ImplicitlyUsed accordingly.
216 size_t predIndex = succ->getPredecessorIndex(block);
217 MPhiIterator end = succ->phisEnd();
218 MPhiIterator it = succ->phisBegin();
219 for (; it != end; it++) {
220 MPhi* phi = *it;
221
222 if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed outer loop")) {
223 return false;
224 }
225
226 // We are looking to mark the Phi inputs which are used across the edge
227 // between the |block| and its successor |succ|.
228 MDefinition* def = phi->getOperand(predIndex);
229 if (def->isImplicitlyUsed()) {
230 continue;
231 }
232
233 // If the Phi is either Used or Unused, set the ImplicitlyUsed flag
234 // accordingly.
235 if (phi->getUsageAnalysis() == PhiUsage::Used || phi->isImplicitlyUsed()) {
236 def->setImplicitlyUsedUnchecked();
237 continue;
238 } else if (phi->getUsageAnalysis() == PhiUsage::Unused) {
239 continue;
240 }
241
    // We do not know if the Phi is Used or Unused; iterate over all uses
    // with a depth-first search. The search returns the matching stack in
    // the worklist as soon as one use is found.
245 MOZ_ASSERT(worklist.empty());
246 if (!DepthFirstSearchUse(mir, worklist, phi)) {
247 return false;
248 }
249
250 MOZ_ASSERT_IF(worklist.empty(),
251 phi->getUsageAnalysis() == PhiUsage::Unused);
252 if (!worklist.empty()) {
253 // One of the Phis is used, set Used flags on all the Phis which are
254 // in the use chain.
255 def->setImplicitlyUsedUnchecked();
256 do {
257 auto pair = worklist.popCopy();
258 MPhi* producer = pair.first;
259 producer->setUsageAnalysis(PhiUsage::Used);
260 producer->setNotInWorklist();
261 } while (!worklist.empty());
262 }
263 MOZ_ASSERT(phi->getUsageAnalysis() != PhiUsage::Unknown);
264 }
265
266 return true;
267 }
268
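// Return an iterator just past the unconditional MBail in a block that always
// bails. Everything from that point onwards is about to be discarded.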
static MInstructionIterator FindFirstInstructionAfterBail(MBasicBlock* block) {
270 MOZ_ASSERT(block->alwaysBails());
271 for (MInstructionIterator it = block->begin(); it != block->end(); it++) {
272 MInstruction* ins = *it;
273 if (ins->isBail()) {
274 it++;
275 return it;
276 }
277 }
278 MOZ_CRASH("Expected MBail in alwaysBails block");
279 }
280
281 // Given an iterator pointing to the first removed instruction, mark
282 // the operands of each removed instruction as having implicit uses.
static bool FlagOperandsAsImplicitlyUsedAfter(
    MIRGenerator* mir, MBasicBlock* block, MInstructionIterator firstRemoved) {
285 MOZ_ASSERT(firstRemoved->block() == block);
286
287 const CompileInfo& info = block->info();
288
289 // Flag operands of removed instructions as having implicit uses.
290 MInstructionIterator end = block->end();
291 for (MInstructionIterator it = firstRemoved; it != end; it++) {
292 if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 1)")) {
293 return false;
294 }
295
296 MInstruction* ins = *it;
297 for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
298 ins->getOperand(i)->setImplicitlyUsedUnchecked();
299 }
300
301 // Flag observable resume point operands as having implicit uses.
302 if (MResumePoint* rp = ins->resumePoint()) {
      // Note: no need to iterate over the callers of the resume point as
      // this is the same as the entry resume point.
305 MOZ_ASSERT(&rp->block()->info() == &info);
306 for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
307 if (info.isObservableSlot(i)) {
308 rp->getOperand(i)->setImplicitlyUsedUnchecked();
309 }
310 }
311 }
312 }
313
314 // Flag Phi inputs of the successors as having implicit uses.
315 MPhiUseIteratorStack worklist;
316 for (size_t i = 0, e = block->numSuccessors(); i < e; i++) {
317 if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 2)")) {
318 return false;
319 }
320
321 if (!FlagPhiInputsAsImplicitlyUsed(mir, block, block->getSuccessor(i),
322 worklist)) {
323 return false;
324 }
325 }
326
327 return true;
328 }
329
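// Flag observable operands of a block's entry resume point as having implicit
// uses, walking up the chain of caller resume points for inlined frames.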
static bool FlagEntryResumePointOperands(MIRGenerator* mir,
                                         MBasicBlock* block) {
332 // Flag observable operands of the entry resume point as having implicit uses.
333 MResumePoint* rp = block->entryResumePoint();
334 while (rp) {
335 if (mir->shouldCancel("FlagEntryResumePointOperands")) {
336 return false;
337 }
338
339 const CompileInfo& info = rp->block()->info();
340 for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
341 if (info.isObservableSlot(i)) {
342 rp->getOperand(i)->setImplicitlyUsedUnchecked();
343 }
344 }
345
346 rp = rp->caller();
347 }
348
349 return true;
350 }
351
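// Flag as implicitly used everything a block that is about to be removed
// entirely may still need after a bailout: its entry resume point operands,
// the operands of all of its instructions, and the phi inputs it feeds into
// its successors.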
static bool FlagAllOperandsAsImplicitlyUsed(MIRGenerator* mir,
                                            MBasicBlock* block) {
354 return FlagEntryResumePointOperands(mir, block) &&
355 FlagOperandsAsImplicitlyUsedAfter(mir, block, block->begin());
356 }
357
358 // WarpBuilder sets the alwaysBails flag on blocks that contain an
359 // unconditional bailout. We trim any instructions in those blocks
360 // after the first unconditional bailout, and remove any blocks that
361 // are only reachable through bailing blocks.
bool jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph) {
363 JitSpew(JitSpew_Prune, "Begin");
364
365 // Pruning is guided by unconditional bailouts. Wasm does not have bailouts.
366 MOZ_ASSERT(!mir->compilingWasm());
367
368 Vector<MBasicBlock*, 16, SystemAllocPolicy> worklist;
369 uint32_t numMarked = 0;
370 bool needsTrim = false;
371
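  // Mark a block as reachable and queue it for the traversal below. Remember
  // whether any reachable block always bails, so we know trimming is needed.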
372 auto markReachable = [&](MBasicBlock* block) -> bool {
373 block->mark();
374 numMarked++;
375 if (block->alwaysBails()) {
376 needsTrim = true;
377 }
378 return worklist.append(block);
379 };
380
381 // The entry block is always reachable.
382 if (!markReachable(graph.entryBlock())) {
383 return false;
384 }
385
386 // The OSR entry block is always reachable if it exists.
387 if (graph.osrBlock() && !markReachable(graph.osrBlock())) {
388 return false;
389 }
390
391 // Iteratively mark all reachable blocks.
392 while (!worklist.empty()) {
393 if (mir->shouldCancel("Prune unused branches (marking reachable)")) {
394 return false;
395 }
396 MBasicBlock* block = worklist.popCopy();
397
398 JitSpew(JitSpew_Prune, "Visit block %u:", block->id());
399 JitSpewIndent indent(JitSpew_Prune);
400
401 // If this block always bails, then it does not reach its successors.
402 if (block->alwaysBails()) {
403 continue;
404 }
405
406 for (size_t i = 0; i < block->numSuccessors(); i++) {
407 MBasicBlock* succ = block->getSuccessor(i);
408 if (succ->isMarked()) {
409 continue;
410 }
411 JitSpew(JitSpew_Prune, "Reaches block %u", succ->id());
412 if (!markReachable(succ)) {
413 return false;
414 }
415 }
416 }
417
418 if (!needsTrim && numMarked == graph.numBlocks()) {
419 // There is nothing to prune.
420 graph.unmarkBlocks();
421 return true;
422 }
423
424 JitSpew(JitSpew_Prune, "Remove unreachable instructions and blocks:");
425 JitSpewIndent indent(JitSpew_Prune);
426
427 // The operands of removed instructions may be needed in baseline
428 // after bailing out.
429 for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
430 if (mir->shouldCancel("Prune unused branches (marking operands)")) {
431 return false;
432 }
433
434 MBasicBlock* block = *it++;
435 if (!block->isMarked()) {
436 // If we are removing the block entirely, mark the operands of every
437 // instruction as being implicitly used.
438 FlagAllOperandsAsImplicitlyUsed(mir, block);
439 } else if (block->alwaysBails()) {
440 // If we are only trimming instructions after a bail, only mark operands
441 // of removed instructions.
442 MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
443 FlagOperandsAsImplicitlyUsedAfter(mir, block, firstRemoved);
444 }
445 }
446
447 // Remove the blocks in post-order such that consumers are visited before
448 // the predecessors, the only exception being the Phi nodes of loop headers.
449 for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
450 if (mir->shouldCancel("Prune unused branches (removal loop)")) {
451 return false;
452 }
453 if (!graph.alloc().ensureBallast()) {
454 return false;
455 }
456
457 MBasicBlock* block = *it++;
458 if (block->isMarked() && !block->alwaysBails()) {
459 continue;
460 }
461
462 // As we are going to replace/remove the last instruction, we first have
463 // to remove this block from the predecessor list of its successors.
464 size_t numSucc = block->numSuccessors();
465 for (uint32_t i = 0; i < numSucc; i++) {
466 MBasicBlock* succ = block->getSuccessor(i);
467 if (succ->isDead()) {
468 continue;
469 }
470
471 // Our dominators code expects all loop headers to have two predecessors.
472 // If we are removing the normal entry to a loop, but can still reach
473 // the loop header via OSR, we create a fake unreachable predecessor.
474 if (succ->isLoopHeader() && block != succ->backedge()) {
475 MOZ_ASSERT(graph.osrBlock());
476 if (!graph.alloc().ensureBallast()) {
477 return false;
478 }
479
480 MBasicBlock* fake = MBasicBlock::NewFakeLoopPredecessor(graph, succ);
481 if (!fake) {
482 return false;
483 }
484 // Mark the block to avoid removing it as unreachable.
485 fake->mark();
486
487 JitSpew(JitSpew_Prune,
488 "Header %u only reachable by OSR. Add fake predecessor %u",
489 succ->id(), fake->id());
490 }
491
492 JitSpew(JitSpew_Prune, "Remove block edge %u -> %u.", block->id(),
493 succ->id());
494 succ->removePredecessor(block);
495 }
496
497 if (!block->isMarked()) {
498 // Remove unreachable blocks from the CFG.
499 JitSpew(JitSpew_Prune, "Remove block %u.", block->id());
500 graph.removeBlock(block);
501 } else {
502 // Remove unreachable instructions after unconditional bailouts.
503 JitSpew(JitSpew_Prune, "Trim block %u.", block->id());
504
505 // Discard all instructions after the first MBail.
506 MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
507 block->discardAllInstructionsStartingAt(firstRemoved);
508
509 if (block->outerResumePoint()) {
510 block->clearOuterResumePoint();
511 }
512
513 block->end(MUnreachable::New(graph.alloc()));
514 }
515 }
516 graph.unmarkBlocks();
517
518 return true;
519 }
520
static bool SplitCriticalEdgesForBlock(MIRGraph& graph, MBasicBlock* block) {
522 if (block->numSuccessors() < 2) {
523 return true;
524 }
525 for (size_t i = 0; i < block->numSuccessors(); i++) {
526 MBasicBlock* target = block->getSuccessor(i);
527 if (target->numPredecessors() < 2) {
528 continue;
529 }
530
    // Create a simple new block which contains a goto and which splits the
    // edge between block and target.
533 MBasicBlock* split = MBasicBlock::NewSplitEdge(graph, block, i, target);
534 if (!split) {
535 return false;
536 }
537 }
538 return true;
539 }
540
541 // A critical edge is an edge which is neither its successor's only predecessor
542 // nor its predecessor's only successor. Critical edges must be split to
543 // prevent copy-insertion and code motion from affecting other edges.
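// For example, if block A has successors {B, C} and block B has predecessors
// {A, D}, then the edge A -> B is critical: a new block containing only a
// goto is inserted between A and B.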
bool jit::SplitCriticalEdges(MIRGraph& graph) {
545 for (MBasicBlockIterator iter(graph.begin()); iter != graph.end(); iter++) {
546 MBasicBlock* block = *iter;
547 if (!SplitCriticalEdgesForBlock(graph, block)) {
548 return false;
549 }
550 }
551 return true;
552 }
553
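// Test whether |def| (looking through a beta node) is an int32 value produced
// by |x >>> 0|, i.e. an MUrsh whose right operand is the constant zero.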
bool jit::IsUint32Type(const MDefinition* def) {
555 if (def->isBeta()) {
556 def = def->getOperand(0);
557 }
558
559 if (def->type() != MIRType::Int32) {
560 return false;
561 }
562
563 return def->isUrsh() && def->getOperand(1)->isConstant() &&
564 def->getOperand(1)->toConstant()->type() == MIRType::Int32 &&
565 def->getOperand(1)->toConstant()->toInt32() == 0;
566 }
567
568 // Return whether a block simply computes the specified constant value.
static bool BlockComputesConstant(MBasicBlock* block, MDefinition* value,
                                  bool* constBool) {
  // Look for values with no uses. This is used to eliminate
  // constant-computing blocks in condition statements; by this point, the
  // phi which used to consume the constant has already been removed.
574 if (value->hasUses()) {
575 return false;
576 }
577
578 if (!value->isConstant() || value->block() != block) {
579 return false;
580 }
581 if (!block->phisEmpty()) {
582 return false;
583 }
584 for (MInstructionIterator iter = block->begin(); iter != block->end();
585 ++iter) {
586 if (*iter != value || !iter->isGoto()) {
587 return false;
588 }
589 }
590 return value->toConstant()->valueToBoolean(constBool);
591 }
592
593 // Determine whether phiBlock/testBlock simply compute a phi and perform a
594 // test on it.
static bool BlockIsSingleTest(MBasicBlock* phiBlock, MBasicBlock* testBlock,
                              MPhi** pphi, MTest** ptest) {
597 *pphi = nullptr;
598 *ptest = nullptr;
599
600 if (phiBlock != testBlock) {
601 MOZ_ASSERT(phiBlock->numSuccessors() == 1 &&
602 phiBlock->getSuccessor(0) == testBlock);
603 if (!phiBlock->begin()->isGoto()) {
604 return false;
605 }
606 }
607
608 MInstruction* ins = *testBlock->begin();
609 if (!ins->isTest()) {
610 return false;
611 }
612 MTest* test = ins->toTest();
613 if (!test->input()->isPhi()) {
614 return false;
615 }
616 MPhi* phi = test->input()->toPhi();
617 if (phi->block() != phiBlock) {
618 return false;
619 }
620
621 for (MUseIterator iter = phi->usesBegin(); iter != phi->usesEnd(); ++iter) {
622 MUse* use = *iter;
623 if (use->consumer() == test) {
624 continue;
625 }
626 if (use->consumer()->isResumePoint()) {
627 MBasicBlock* useBlock = use->consumer()->block();
628 if (useBlock == phiBlock || useBlock == testBlock) {
629 continue;
630 }
631 }
632 return false;
633 }
634
635 for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd();
636 ++iter) {
637 if (*iter != phi) {
638 return false;
639 }
640 }
641
642 if (phiBlock != testBlock && !testBlock->phisEmpty()) {
643 return false;
644 }
645
646 *pphi = phi;
647 *ptest = test;
648
649 return true;
650 }
651
652 // Change block so that it ends in a goto to the specific target block.
653 // existingPred is an existing predecessor of the block.
[[nodiscard]] static bool UpdateGotoSuccessor(TempAllocator& alloc,
                                              MBasicBlock* block,
                                              MBasicBlock* target,
                                              MBasicBlock* existingPred) {
658 MInstruction* ins = block->lastIns();
659 MOZ_ASSERT(ins->isGoto());
660 ins->toGoto()->target()->removePredecessor(block);
661 block->discardLastIns();
662
663 MGoto* newGoto = MGoto::New(alloc, target);
664 block->end(newGoto);
665
666 return target->addPredecessorSameInputsAs(block, existingPred);
667 }
668
669 // Change block so that it ends in a test of the specified value, going to
670 // either ifTrue or ifFalse. existingPred is an existing predecessor of ifTrue
671 // or ifFalse with the same values incoming to ifTrue/ifFalse as block.
672 // existingPred is not required to be a predecessor of ifTrue/ifFalse if block
673 // already ends in a test going to that block on a true/false result.
[[nodiscard]] static bool UpdateTestSuccessors(
    TempAllocator& alloc, MBasicBlock* block, MDefinition* value,
    MBasicBlock* ifTrue, MBasicBlock* ifFalse, MBasicBlock* existingPred) {
677 MInstruction* ins = block->lastIns();
678 if (ins->isTest()) {
679 MTest* test = ins->toTest();
680 MOZ_ASSERT(test->input() == value);
681
682 if (ifTrue != test->ifTrue()) {
683 test->ifTrue()->removePredecessor(block);
684 if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
685 return false;
686 }
687 MOZ_ASSERT(test->ifTrue() == test->getSuccessor(0));
688 test->replaceSuccessor(0, ifTrue);
689 }
690
691 if (ifFalse != test->ifFalse()) {
692 test->ifFalse()->removePredecessor(block);
693 if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
694 return false;
695 }
696 MOZ_ASSERT(test->ifFalse() == test->getSuccessor(1));
697 test->replaceSuccessor(1, ifFalse);
698 }
699
700 return true;
701 }
702
703 MOZ_ASSERT(ins->isGoto());
704 ins->toGoto()->target()->removePredecessor(block);
705 block->discardLastIns();
706
707 MTest* test = MTest::New(alloc, value, ifTrue, ifFalse);
708 block->end(test);
709
710 if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
711 return false;
712 }
713 if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
714 return false;
715 }
716 return true;
717 }
718
static bool MaybeFoldConditionBlock(MIRGraph& graph,
                                    MBasicBlock* initialBlock) {
721 // Optimize the MIR graph to improve the code generated for conditional
722 // operations. A test like 'if (a ? b : c)' normally requires four blocks,
723 // with a phi for the intermediate value. This can be improved to use three
724 // blocks with no phi value, and if either b or c is constant,
725 // e.g. 'if (a ? b : 0)', then the block associated with that constant
726 // can be eliminated.
727
728 /*
729 * Look for a diamond pattern:
730 *
731 * initialBlock
732 * / \
733 * trueBranch falseBranch
734 * \ /
735 * phiBlock
736 * |
737 * testBlock
738 *
739 * Where phiBlock contains a single phi combining values pushed onto the
740 * stack by trueBranch and falseBranch, and testBlock contains a test on
741 * that phi. phiBlock and testBlock may be the same block; generated code
742 * will use different blocks if the (?:) op is in an inlined function.
743 */
744
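  // Match the pattern above first; return true (no change) as soon as any
  // piece of it is missing, and only mutate the graph once the whole diamond
  // has been recognized.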
745 MInstruction* ins = initialBlock->lastIns();
746 if (!ins->isTest()) {
747 return true;
748 }
749 MTest* initialTest = ins->toTest();
750
751 MBasicBlock* trueBranch = initialTest->ifTrue();
752 if (trueBranch->numPredecessors() != 1 || trueBranch->numSuccessors() != 1) {
753 return true;
754 }
755 MBasicBlock* falseBranch = initialTest->ifFalse();
756 if (falseBranch->numPredecessors() != 1 ||
757 falseBranch->numSuccessors() != 1) {
758 return true;
759 }
760 MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
761 if (phiBlock != falseBranch->getSuccessor(0)) {
762 return true;
763 }
764 if (phiBlock->numPredecessors() != 2) {
765 return true;
766 }
767
768 if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
769 falseBranch->isLoopBackedge()) {
770 return true;
771 }
772
773 MBasicBlock* testBlock = phiBlock;
774 if (testBlock->numSuccessors() == 1) {
775 if (testBlock->isLoopBackedge()) {
776 return true;
777 }
778 testBlock = testBlock->getSuccessor(0);
779 if (testBlock->numPredecessors() != 1) {
780 return true;
781 }
782 }
783
784 // Make sure the test block does not have any outgoing loop backedges.
785 if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
786 return false;
787 }
788
789 MPhi* phi;
790 MTest* finalTest;
791 if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
792 return true;
793 }
794
795 MDefinition* trueResult =
796 phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
797 MDefinition* falseResult =
798 phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
799
800 // OK, we found the desired pattern, now transform the graph.
801
802 // Remove the phi from phiBlock.
803 phiBlock->discardPhi(*phiBlock->phisBegin());
804
805 // If either trueBranch or falseBranch just computes a constant for the
806 // test, determine the block that branch will end up jumping to and eliminate
807 // the branch. Otherwise, change the end of the block to a test that jumps
808 // directly to successors of testBlock, rather than to testBlock itself.
809
810 MBasicBlock* trueTarget = trueBranch;
811 bool constBool;
812 if (BlockComputesConstant(trueBranch, trueResult, &constBool)) {
813 trueTarget = constBool ? finalTest->ifTrue() : finalTest->ifFalse();
814 phiBlock->removePredecessor(trueBranch);
815 graph.removeBlock(trueBranch);
816 } else if (initialTest->input() == trueResult) {
817 if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(),
818 testBlock)) {
819 return false;
820 }
821 } else {
822 if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
823 finalTest->ifTrue(), finalTest->ifFalse(),
824 testBlock)) {
825 return false;
826 }
827 }
828
829 MBasicBlock* falseTarget = falseBranch;
830 if (BlockComputesConstant(falseBranch, falseResult, &constBool)) {
831 falseTarget = constBool ? finalTest->ifTrue() : finalTest->ifFalse();
832 phiBlock->removePredecessor(falseBranch);
833 graph.removeBlock(falseBranch);
834 } else if (initialTest->input() == falseResult) {
835 if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(),
836 testBlock)) {
837 return false;
838 }
839 } else {
840 if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
841 finalTest->ifTrue(), finalTest->ifFalse(),
842 testBlock)) {
843 return false;
844 }
845 }
846
847 // Short circuit the initial test to skip any constant branch eliminated
848 // above.
849 if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
850 trueTarget, falseTarget, testBlock)) {
851 return false;
852 }
853
854 // Remove phiBlock, if different from testBlock.
855 if (phiBlock != testBlock) {
856 testBlock->removePredecessor(phiBlock);
857 graph.removeBlock(phiBlock);
858 }
859
860 // Remove testBlock itself.
861 finalTest->ifTrue()->removePredecessor(testBlock);
862 finalTest->ifFalse()->removePredecessor(testBlock);
863 graph.removeBlock(testBlock);
864
865 return true;
866 }
867
bool jit::FoldTests(MIRGraph& graph) {
869 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
870 block++) {
871 if (!MaybeFoldConditionBlock(graph, *block)) {
872 return false;
873 }
874 }
875 return true;
876 }
877
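// Fold away blocks containing only a goto: the predecessor's terminator is
// redirected straight at the successor. Only blocks with a single predecessor
// and single successor, no phis, and no outer resume point are candidates,
// and the successor must have a single predecessor as well.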
bool jit::FoldEmptyBlocks(MIRGraph& graph) {
879 for (MBasicBlockIterator iter(graph.begin()); iter != graph.end();) {
880 MBasicBlock* block = *iter;
881 iter++;
882
883 if (block->numPredecessors() != 1 || block->numSuccessors() != 1) {
884 continue;
885 }
886
887 if (!block->phisEmpty()) {
888 continue;
889 }
890
891 if (block->outerResumePoint()) {
892 continue;
893 }
894
895 if (*block->begin() != *block->rbegin()) {
896 continue;
897 }
898
899 MBasicBlock* succ = block->getSuccessor(0);
900 MBasicBlock* pred = block->getPredecessor(0);
901
902 if (succ->numPredecessors() != 1) {
903 continue;
904 }
905
906 size_t pos = pred->getSuccessorIndex(block);
907 pred->lastIns()->replaceSuccessor(pos, succ);
908
909 graph.removeBlock(block);
910
911 if (!succ->addPredecessorSameInputsAs(pred, block)) {
912 return false;
913 }
914 succ->removePredecessor(block);
915 }
916 return true;
917 }
918
static void EliminateTriviallyDeadResumePointOperands(MIRGraph& graph,
                                                       MResumePoint* rp) {
921 // If we will pop the top of the stack immediately after resuming,
922 // then don't preserve the top value in the resume point.
923 if (rp->mode() != MResumePoint::ResumeAt || JSOp(*rp->pc()) != JSOp::Pop) {
924 return;
925 }
926
927 size_t top = rp->stackDepth() - 1;
928 MOZ_ASSERT(!rp->isObservableOperand(top));
929
930 MDefinition* def = rp->getOperand(top);
931 if (def->isConstant()) {
932 return;
933 }
934
935 MConstant* constant = rp->block()->optimizedOutConstant(graph.alloc());
936 rp->replaceOperand(top, constant);
937 }
938
939 // Operands to a resume point which are dead at the point of the resume can be
940 // replaced with a magic value. This analysis supports limited detection of
941 // dead operands, pruning those which are defined in the resume point's basic
942 // block and have no uses outside the block or at points later than the resume
943 // point.
944 //
945 // This is intended to ensure that extra resume points within a basic block
946 // will not artificially extend the lifetimes of any SSA values. This could
947 // otherwise occur if the new resume point captured a value which is created
948 // between the old and new resume point and is dead at the new resume point.
bool jit::EliminateDeadResumePointOperands(MIRGenerator* mir, MIRGraph& graph) {
950 // If we are compiling try blocks, locals and arguments may be observable
951 // from catch or finally blocks (which Ion does not compile). For now just
952 // disable the pass in this case.
953 if (graph.hasTryBlock()) {
954 return true;
955 }
956
957 for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
958 block++) {
959 if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)")) {
960 return false;
961 }
962
963 if (MResumePoint* rp = block->entryResumePoint()) {
964 if (!graph.alloc().ensureBallast()) {
965 return false;
966 }
967 EliminateTriviallyDeadResumePointOperands(graph, rp);
968 }
969
970 // The logic below can get confused on infinite loops.
971 if (block->isLoopHeader() && block->backedge() == *block) {
972 continue;
973 }
974
975 for (MInstructionIterator ins = block->begin(); ins != block->end();
976 ins++) {
977 if (MResumePoint* rp = ins->resumePoint()) {
978 if (!graph.alloc().ensureBallast()) {
979 return false;
980 }
981 EliminateTriviallyDeadResumePointOperands(graph, rp);
982 }
983
984 // No benefit to replacing constant operands with other constants.
985 if (ins->isConstant()) {
986 continue;
987 }
988
989 // Scanning uses does not give us sufficient information to tell
990 // where instructions that are involved in box/unbox operations or
991 // parameter passing might be live. Rewriting uses of these terms
992 // in resume points may affect the interpreter's behavior. Rather
993 // than doing a more sophisticated analysis, just ignore these.
994 if (ins->isUnbox() || ins->isParameter() || ins->isBoxNonStrictThis()) {
995 continue;
996 }
997
998 // Early intermediate values captured by resume points, such as
999 // ArrayState and its allocation, may be legitimately dead in Ion code,
1000 // but are still needed if we bail out. They can recover on bailout.
1001 if (ins->isRecoveredOnBailout()) {
1002 MOZ_ASSERT(ins->canRecoverOnBailout());
1003 continue;
1004 }
1005
1006 // If the instruction's behavior has been constant folded into a
1007 // separate instruction, we can't determine precisely where the
1008 // instruction becomes dead and can't eliminate its uses.
1009 if (ins->isImplicitlyUsed()) {
1010 continue;
1011 }
1012
1013 // Check if this instruction's result is only used within the
1014 // current block, and keep track of its last use in a definition
1015 // (not resume point). This requires the instructions in the block
1016 // to be numbered, ensured by running this immediately after alias
1017 // analysis.
1018 uint32_t maxDefinition = 0;
1019 for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();
1020 uses++) {
1021 MNode* consumer = uses->consumer();
1022 if (consumer->isResumePoint()) {
          // If the instruction is captured by one of the resume points, then
          // it might be observed indirectly while the frame is live on the
          // stack, so it has to be computed.
1026 MResumePoint* resume = consumer->toResumePoint();
1027 if (resume->isObservableOperand(*uses)) {
1028 maxDefinition = UINT32_MAX;
1029 break;
1030 }
1031 continue;
1032 }
1033
1034 MDefinition* def = consumer->toDefinition();
1035 if (def->block() != *block || def->isBox() || def->isPhi()) {
1036 maxDefinition = UINT32_MAX;
1037 break;
1038 }
1039 maxDefinition = std::max(maxDefinition, def->id());
1040 }
1041 if (maxDefinition == UINT32_MAX) {
1042 continue;
1043 }
1044
1045 // Walk the uses a second time, removing any in resume points after
1046 // the last use in a definition.
1047 for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();) {
1048 MUse* use = *uses++;
1049 if (use->consumer()->isDefinition()) {
1050 continue;
1051 }
1052 MResumePoint* mrp = use->consumer()->toResumePoint();
1053 if (mrp->block() != *block || !mrp->instruction() ||
1054 mrp->instruction() == *ins ||
1055 mrp->instruction()->id() <= maxDefinition) {
1056 continue;
1057 }
1058
1059 if (!graph.alloc().ensureBallast()) {
1060 return false;
1061 }
1062
        // Store an optimized out magic value in place of all dead
        // resume point operands. Making any such substitution can in
        // general alter the interpreter's behavior, even though the
        // code is dead, as the interpreter will still execute opcodes
        // whose effects cannot be observed. If the magic value
        // were to flow to, say, a dead property access, the
        // interpreter could throw an exception; we avoid this problem
        // by removing dead operands before removing dead code.
1071 MConstant* constant =
1072 MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
1073 block->insertBefore(*(block->begin()), constant);
1074 use->replaceProducer(constant);
1075 }
1076 }
1077 }
1078
1079 return true;
1080 }
1081
1082 // Test whether |def| would be needed if it had no uses.
bool js::jit::DeadIfUnused(const MDefinition* def) {
1084 // Effectful instructions of course cannot be removed.
1085 if (def->isEffectful()) {
1086 return false;
1087 }
1088
1089 // Never eliminate guard instructions.
1090 if (def->isGuard()) {
1091 return false;
1092 }
1093
1094 // Required to be preserved, as the type guard related to this instruction
1095 // is part of the semantics of a transformation.
1096 if (def->isGuardRangeBailouts()) {
1097 return false;
1098 }
1099
  // Control instructions have no uses, but also shouldn't be optimized out.
1101 if (def->isControlInstruction()) {
1102 return false;
1103 }
1104
1105 // Used when lowering to generate the corresponding snapshots and aggregate
1106 // the list of recover instructions to be repeated.
1107 if (def->isInstruction() && def->toInstruction()->resumePoint()) {
1108 return false;
1109 }
1110
1111 return true;
1112 }
1113
1114 // Test whether |def| may be safely discarded, due to being dead or due to being
1115 // located in a basic block which has itself been marked for discarding.
bool js::jit::IsDiscardable(const MDefinition* def) {
1117 return !def->hasUses() && (DeadIfUnused(def) || def->block()->isMarked());
1118 }
1119
1120 // Instructions are useless if they are unused and have no side effects.
1121 // This pass eliminates useless instructions.
1122 // The graph itself is unchanged.
bool jit::EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph) {
1124 // Traverse in postorder so that we hit uses before definitions.
1125 // Traverse instruction list backwards for the same reason.
1126 for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
1127 block++) {
1128 if (mir->shouldCancel("Eliminate Dead Code (main loop)")) {
1129 return false;
1130 }
1131
1132 // Remove unused instructions.
1133 for (MInstructionReverseIterator iter = block->rbegin();
1134 iter != block->rend();) {
1135 MInstruction* inst = *iter++;
1136 if (js::jit::IsDiscardable(inst)) {
1137 block->discard(inst);
1138 }
1139 }
1140 }
1141
1142 return true;
1143 }
1144
static inline bool IsPhiObservable(MPhi* phi, Observability observe) {
1146 // If the phi has uses which are not reflected in SSA, then behavior in the
1147 // interpreter may be affected by removing the phi.
1148 if (phi->isImplicitlyUsed()) {
1149 return true;
1150 }
1151
1152 // Check for uses of this phi node outside of other phi nodes.
1153 // Note that, initially, we skip reading resume points, which we
1154 // don't count as actual uses. If the only uses are resume points,
1155 // then the SSA name is never consumed by the program. However,
1156 // after optimizations have been performed, it's possible that the
1157 // actual uses in the program have been (incorrectly) optimized
1158 // away, so we must be more conservative and consider resume
1159 // points as well.
1160 for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
1161 MNode* consumer = iter->consumer();
1162 if (consumer->isResumePoint()) {
1163 MResumePoint* resume = consumer->toResumePoint();
1164 if (observe == ConservativeObservability) {
1165 return true;
1166 }
1167 if (resume->isObservableOperand(*iter)) {
1168 return true;
1169 }
1170 } else {
1171 MDefinition* def = consumer->toDefinition();
1172 if (!def->isPhi()) {
1173 return true;
1174 }
1175 }
1176 }
1177
1178 return false;
1179 }
1180
1181 // Handles cases like:
1182 // x is phi(a, x) --> a
1183 // x is phi(a, a) --> a
static inline MDefinition* IsPhiRedundant(MPhi* phi) {
1185 MDefinition* first = phi->operandIfRedundant();
1186 if (first == nullptr) {
1187 return nullptr;
1188 }
1189
1190 // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
1191 if (phi->isImplicitlyUsed()) {
1192 first->setImplicitlyUsedUnchecked();
1193 }
1194
1195 return first;
1196 }
1197
bool jit::EliminatePhis(MIRGenerator* mir, MIRGraph& graph,
                        Observability observe) {
1200 // Eliminates redundant or unobservable phis from the graph. A
1201 // redundant phi is something like b = phi(a, a) or b = phi(a, b),
1202 // both of which can be replaced with a. An unobservable phi is
1203 // one that whose value is never used in the program.
  // one whose value is never used in the program.
1205 // Note that we must be careful not to eliminate phis representing
1206 // values that the interpreter will require later. When the graph
1207 // is first constructed, we can be more aggressive, because there
1208 // is a greater correspondence between the CFG and the bytecode.
1209 // After optimizations such as GVN have been performed, however,
1210 // the bytecode and CFG may not correspond as closely to one
1211 // another. In that case, we must be more conservative. The flag
1212 // |conservativeObservability| is used to indicate that eliminate
1213 // phis is being run after some optimizations have been performed,
1214 // and thus we should use more conservative rules about
1215 // observability. The particular danger is that we can optimize
1216 // away uses of a phi because we think they are not executable,
1217 // but the foundation for that assumption is false TI information
1218 // that will eventually be invalidated. Therefore, if
1219 // |conservativeObservability| is set, we will consider any use
1220 // from a resume point to be observable. Otherwise, we demand a
1221 // use from an actual instruction.
1222
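  // The pass proceeds in three phases, marked below: seed a worklist with the
  // observable phis, propagate liveness from them to their phi operands, then
  // sweep the phis that are still flagged as unused.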
1223 Vector<MPhi*, 16, SystemAllocPolicy> worklist;
1224
1225 // Add all observable phis to a worklist. We use the "in worklist" bit to
1226 // mean "this phi is live".
1227 for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
1228 block++) {
1229 MPhiIterator iter = block->phisBegin();
1230 while (iter != block->phisEnd()) {
1231 MPhi* phi = *iter++;
1232
1233 if (mir->shouldCancel("Eliminate Phis (populate loop)")) {
1234 return false;
1235 }
1236
      // Flag all phis as unused; only observable phis will be marked as used
      // when processed by the worklist below.
1239 phi->setUnused();
1240
1241 // If the phi is redundant, remove it here.
1242 if (MDefinition* redundant = IsPhiRedundant(phi)) {
1243 phi->justReplaceAllUsesWith(redundant);
1244 block->discardPhi(phi);
1245 continue;
1246 }
1247
1248 // Enqueue observable Phis.
1249 if (IsPhiObservable(phi, observe)) {
1250 phi->setInWorklist();
1251 if (!worklist.append(phi)) {
1252 return false;
1253 }
1254 }
1255 }
1256 }
1257
1258 // Iteratively mark all phis reachable from live phis.
1259 while (!worklist.empty()) {
1260 if (mir->shouldCancel("Eliminate Phis (worklist)")) {
1261 return false;
1262 }
1263
1264 MPhi* phi = worklist.popCopy();
1265 MOZ_ASSERT(phi->isUnused());
1266 phi->setNotInWorklist();
1267
1268 // The removal of Phis can produce newly redundant phis.
1269 if (MDefinition* redundant = IsPhiRedundant(phi)) {
1270 // Add to the worklist the used phis which are impacted.
1271 for (MUseDefIterator it(phi); it; it++) {
1272 if (it.def()->isPhi()) {
1273 MPhi* use = it.def()->toPhi();
1274 if (!use->isUnused()) {
1275 use->setUnusedUnchecked();
1276 use->setInWorklist();
1277 if (!worklist.append(use)) {
1278 return false;
1279 }
1280 }
1281 }
1282 }
1283 phi->justReplaceAllUsesWith(redundant);
1284 } else {
1285 // Otherwise flag them as used.
1286 phi->setNotUnused();
1287 }
1288
1289 // The current phi is/was used, so all its operands are used.
1290 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
1291 MDefinition* in = phi->getOperand(i);
1292 if (!in->isPhi() || !in->isUnused() || in->isInWorklist()) {
1293 continue;
1294 }
1295 in->setInWorklist();
1296 if (!worklist.append(in->toPhi())) {
1297 return false;
1298 }
1299 }
1300 }
1301
1302 // Sweep dead phis.
1303 for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
1304 block++) {
1305 MPhiIterator iter = block->phisBegin();
1306 while (iter != block->phisEnd()) {
1307 MPhi* phi = *iter++;
1308 if (phi->isUnused()) {
1309 if (!phi->optimizeOutAllUses(graph.alloc())) {
1310 return false;
1311 }
1312 block->discardPhi(phi);
1313 }
1314 }
1315 }
1316
1317 return true;
1318 }
1319
1320 namespace {
1321
1322 // The type analysis algorithm inserts conversions and box/unbox instructions
1323 // to make the IR graph well-typed for future passes.
1324 //
1325 // Phi adjustment: If a phi's inputs are all the same type, the phi is
1326 // specialized to return that type.
1327 //
1328 // Input adjustment: Each input is asked to apply conversion operations to its
1329 // inputs. This may include Box, Unbox, or other instruction-specific type
1330 // conversion operations.
1331 //
1332 class TypeAnalyzer {
1333 MIRGenerator* mir;
1334 MIRGraph& graph;
1335 Vector<MPhi*, 0, SystemAllocPolicy> phiWorklist_;
1336
  TempAllocator& alloc() const { return graph.alloc(); }
1338
  bool addPhiToWorklist(MPhi* phi) {
1340 if (phi->isInWorklist()) {
1341 return true;
1342 }
1343 if (!phiWorklist_.append(phi)) {
1344 return false;
1345 }
1346 phi->setInWorklist();
1347 return true;
1348 }
  MPhi* popPhi() {
1350 MPhi* phi = phiWorklist_.popCopy();
1351 phi->setNotInWorklist();
1352 return phi;
1353 }
1354
1355 [[nodiscard]] bool propagateAllPhiSpecializations();
1356
1357 bool respecialize(MPhi* phi, MIRType type);
1358 bool propagateSpecialization(MPhi* phi);
1359 bool specializePhis();
1360 bool specializeOsrOnlyPhis();
1361 void replaceRedundantPhi(MPhi* phi);
1362 bool adjustPhiInputs(MPhi* phi);
1363 bool adjustInputs(MDefinition* def);
1364 bool insertConversions();
1365
1366 bool checkFloatCoherency();
1367 bool graphContainsFloat32();
1368 bool markPhiConsumers();
1369 bool markPhiProducers();
1370 bool specializeValidFloatOps();
1371 bool tryEmitFloatOperations();
1372
1373 bool shouldSpecializeOsrPhis() const;
1374 MIRType guessPhiType(MPhi* phi) const;
1375
1376 public:
  TypeAnalyzer(MIRGenerator* mir, MIRGraph& graph) : mir(mir), graph(graph) {}
1378
1379 bool analyze();
1380 };
1381
1382 } /* anonymous namespace */
1383
bool TypeAnalyzer::shouldSpecializeOsrPhis() const {
1385 // [SMDOC] OSR Phi Type Specialization
1386 //
1387 // Without special handling for OSR phis, we end up with unspecialized phis
1388 // (MIRType::Value) in the loop (pre)header and other blocks, resulting in
1389 // unnecessary boxing and unboxing in the loop body.
1390 //
1391 // To fix this, phi type specialization needs special code to deal with the
1392 // OSR entry block. Recall that OSR results in the following basic block
1393 // structure:
1394 //
1395 // +------------------+ +-----------------+
1396 // | Code before loop | | OSR entry block |
1397 // +------------------+ +-----------------+
1398 // | |
1399 // | |
1400 // | +---------------+ |
1401 // +---------> | OSR preheader | <---------+
1402 // +---------------+
1403 // |
1404 // V
1405 // +---------------+
1406 // | Loop header |<-----+
1407 // +---------------+ |
1408 // | |
1409 // ... |
1410 // | |
1411 // +---------------+ |
1412 // | Loop backedge |------+
1413 // +---------------+
1414 //
1415 // OSR phi specialization happens in three steps:
1416 //
1417 // (1) Specialize phis but ignore MOsrValue phi inputs. In other words,
1418 // pretend the OSR entry block doesn't exist. See guessPhiType.
1419 //
1420 // (2) Once phi specialization is done, look at the types of loop header phis
1421 // and add these types to the corresponding preheader phis. This way, the
1422 // types of the preheader phis are based on the code before the loop and
1423 // the code in the loop body. These are exactly the types we expect for
1424 // the OSR Values. See the last part of TypeAnalyzer::specializePhis.
1425 //
1426 // (3) For type-specialized preheader phis, add guard/unbox instructions to
1427 // the OSR entry block to guard the incoming Value indeed has this type.
1428 // This happens in:
1429 //
1430 // * TypeAnalyzer::adjustPhiInputs: adds a fallible unbox for values that
1431 // can be unboxed.
1432 //
1433 // * TypeAnalyzer::replaceRedundantPhi: adds a type guard for values that
1434 // can't be unboxed (null/undefined/magic Values).
1435 if (!mir->graph().osrBlock()) {
1436 return false;
1437 }
1438
1439 return !mir->outerInfo().hadSpeculativePhiBailout();
1440 }
1441
1442 // Try to specialize this phi based on its non-cyclic inputs.
MIRType TypeAnalyzer::guessPhiType(MPhi* phi) const {
1444 #ifdef DEBUG
1445 // Check that different magic constants aren't flowing together. Ignore
1446 // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
1447 // away.
1448 MIRType magicType = MIRType::None;
1449 for (size_t i = 0; i < phi->numOperands(); i++) {
1450 MDefinition* in = phi->getOperand(i);
1451 if (in->type() == MIRType::MagicHole ||
1452 in->type() == MIRType::MagicIsConstructing) {
1453 if (magicType == MIRType::None) {
1454 magicType = in->type();
1455 }
1456 MOZ_ASSERT(magicType == in->type());
1457 }
1458 }
1459 #endif
1460
1461 MIRType type = MIRType::None;
1462 bool convertibleToFloat32 = false;
1463 bool hasOSRValueInput = false;
1464 DebugOnly<bool> hasSpecializableInput = false;
1465 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
1466 MDefinition* in = phi->getOperand(i);
1467 if (in->isPhi()) {
1468 hasSpecializableInput = true;
1469 if (!in->toPhi()->triedToSpecialize()) {
1470 continue;
1471 }
1472 if (in->type() == MIRType::None) {
1473 // The operand is a phi we tried to specialize, but we were
1474 // unable to guess its type. propagateSpecialization will
1475 // propagate the type to this phi when it becomes known.
1476 continue;
1477 }
1478 }
1479
1480 // See shouldSpecializeOsrPhis comment. This is the first step mentioned
1481 // there.
1482 if (shouldSpecializeOsrPhis() && in->isOsrValue()) {
1483 hasOSRValueInput = true;
1484 hasSpecializableInput = true;
1485 continue;
1486 }
1487
1488 if (type == MIRType::None) {
1489 type = in->type();
1490 if (in->canProduceFloat32() &&
1491 !mir->outerInfo().hadSpeculativePhiBailout()) {
1492 convertibleToFloat32 = true;
1493 }
1494 continue;
1495 }
1496
1497 if (type == in->type()) {
1498 convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
1499 } else {
1500 if (convertibleToFloat32 && in->type() == MIRType::Float32) {
1501 // If we only saw definitions that can be converted into Float32 before
1502 // and encounter a Float32 value, promote previous values to Float32
1503 type = MIRType::Float32;
1504 } else if (IsTypeRepresentableAsDouble(type) &&
1505 IsTypeRepresentableAsDouble(in->type())) {
1506 // Specialize phis with int32 and double operands as double.
1507 type = MIRType::Double;
1508 convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
1509 } else {
1510 return MIRType::Value;
1511 }
1512 }
1513 }
1514
1515 if (hasOSRValueInput && type == MIRType::Float32) {
1516 // TODO(post-Warp): simplify float32 handling in this function or (better)
1517 // make the float32 analysis a stand-alone optimization pass instead of
1518 // complicating type analysis. See bug 1655773.
1519 type = MIRType::Double;
1520 }
1521
1522 MOZ_ASSERT_IF(type == MIRType::None, hasSpecializableInput);
1523 return type;
1524 }
1525
bool TypeAnalyzer::respecialize(MPhi* phi, MIRType type) {
1527 if (phi->type() == type) {
1528 return true;
1529 }
1530 phi->specialize(type);
1531 return addPhiToWorklist(phi);
1532 }
1533
bool TypeAnalyzer::propagateSpecialization(MPhi* phi) {
1535 MOZ_ASSERT(phi->type() != MIRType::None);
1536
1537 // Verify that this specialization matches any phis depending on it.
1538 for (MUseDefIterator iter(phi); iter; iter++) {
1539 if (!iter.def()->isPhi()) {
1540 continue;
1541 }
1542 MPhi* use = iter.def()->toPhi();
1543 if (!use->triedToSpecialize()) {
1544 continue;
1545 }
1546 if (use->type() == MIRType::None) {
1547 // We tried to specialize this phi, but were unable to guess its
1548 // type. Now that we know the type of one of its operands, we can
1549 // specialize it.
1550 if (!respecialize(use, phi->type())) {
1551 return false;
1552 }
1553 continue;
1554 }
1555 if (use->type() != phi->type()) {
      // Specialize phis as Float32 when one of them is an Int32 phi that can
      // produce Float32 and the other is a Float32 phi.
1558 if ((use->type() == MIRType::Int32 && use->canProduceFloat32() &&
1559 phi->type() == MIRType::Float32) ||
1560 (phi->type() == MIRType::Int32 && phi->canProduceFloat32() &&
1561 use->type() == MIRType::Float32)) {
1562 if (!respecialize(use, MIRType::Float32)) {
1563 return false;
1564 }
1565 continue;
1566 }
1567
1568 // Specialize phis with int32 and double operands as double.
1569 if (IsTypeRepresentableAsDouble(use->type()) &&
1570 IsTypeRepresentableAsDouble(phi->type())) {
1571 if (!respecialize(use, MIRType::Double)) {
1572 return false;
1573 }
1574 continue;
1575 }
1576
1577 // This phi in our use chain can now no longer be specialized.
1578 if (!respecialize(use, MIRType::Value)) {
1579 return false;
1580 }
1581 }
1582 }
1583
1584 return true;
1585 }
1586
bool TypeAnalyzer::propagateAllPhiSpecializations() {
1588 while (!phiWorklist_.empty()) {
1589 if (mir->shouldCancel("Specialize Phis (worklist)")) {
1590 return false;
1591 }
1592
1593 MPhi* phi = popPhi();
1594 if (!propagateSpecialization(phi)) {
1595 return false;
1596 }
1597 }
1598
1599 return true;
1600 }
1601
1602 // If branch pruning removes the path from the entry block to the OSR
1603 // preheader, we may have phis (or chains of phis) with no operands
1604 // other than OsrValues. These phis will still have MIRType::None.
1605 // Since we don't have any information about them, we specialize them
1606 // as MIRType::Value.
bool TypeAnalyzer::specializeOsrOnlyPhis() {
1608 MOZ_ASSERT(graph.osrBlock());
1609 MOZ_ASSERT(graph.osrPreHeaderBlock()->numPredecessors() == 1);
1610
1611 for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
1612 block++) {
1613 if (mir->shouldCancel("Specialize osr-only phis (main loop)")) {
1614 return false;
1615 }
1616
1617 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
1618 if (mir->shouldCancel("Specialize osr-only phis (inner loop)")) {
1619 return false;
1620 }
1621
1622 if (phi->type() == MIRType::None) {
1623 phi->specialize(MIRType::Value);
1624 }
1625 }
1626 }
1627 return true;
1628 }
1629
bool TypeAnalyzer::specializePhis() {
1631 for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
1632 block++) {
1633 if (mir->shouldCancel("Specialize Phis (main loop)")) {
1634 return false;
1635 }
1636
1637 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
1638 if (mir->shouldCancel("Specialize Phis (inner loop)")) {
1639 return false;
1640 }
1641
1642 MIRType type = guessPhiType(*phi);
1643 phi->specialize(type);
1644 if (type == MIRType::None) {
1645 // We tried to guess the type but failed because all operands are
1646 // phis we still have to visit. Set the triedToSpecialize flag but
1647 // don't propagate the type to other phis, propagateSpecialization
1648 // will do that once we know the type of one of the operands.
1649 continue;
1650 }
1651 if (!propagateSpecialization(*phi)) {
1652 return false;
1653 }
1654 }
1655 }
1656
1657 if (!propagateAllPhiSpecializations()) {
1658 return false;
1659 }
1660
1661 if (shouldSpecializeOsrPhis()) {
1662 // See shouldSpecializeOsrPhis comment. This is the second step, propagating
1663 // loop header phi types to preheader phis.
1664 MBasicBlock* preHeader = graph.osrPreHeaderBlock();
1665 MBasicBlock* header = preHeader->getSingleSuccessor();
1666
1667 if (preHeader->numPredecessors() == 1) {
1668 MOZ_ASSERT(preHeader->getPredecessor(0) == graph.osrBlock());
1669 // Branch pruning has removed the path from the entry block
1670 // to the preheader. Specialize any phis with no non-osr inputs.
1671 if (!specializeOsrOnlyPhis()) {
1672 return false;
1673 }
1674 } else if (header->isLoopHeader()) {
1675 for (MPhiIterator phi(header->phisBegin()); phi != header->phisEnd();
1676 phi++) {
1677 MPhi* preHeaderPhi = phi->getOperand(0)->toPhi();
1678 MOZ_ASSERT(preHeaderPhi->block() == preHeader);
1679
1680 if (preHeaderPhi->type() == MIRType::Value) {
1681 // Already includes everything.
1682 continue;
1683 }
1684
1685 MIRType loopType = phi->type();
1686 if (!respecialize(preHeaderPhi, loopType)) {
1687 return false;
1688 }
1689 }
1690 if (!propagateAllPhiSpecializations()) {
1691 return false;
1692 }
1693 } else {
1694 // Edge case: there is no backedge in this loop. This can happen
1695 // if the header is a 'pending' loop header when control flow in
1696 // the loop body is terminated unconditionally, or if a block
1697 // that dominates the backedge unconditionally bails out. In
1698 // this case the header only has the preheader as predecessor
1699 // and we don't need to do anything.
1700 MOZ_ASSERT(header->numPredecessors() == 1);
1701 }
1702 }
1703
1704 MOZ_ASSERT(phiWorklist_.empty());
1705 return true;
1706 }
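// Rough illustration of the specialization above (not part of the compiled
// code; names are made up). Given a phi joining the two arms of a diamond:
//
//   block A: a = MConstant(Int32 1)
//   block B: b = MConstant(Double 1.5)
//   block C: p = MPhi(a, b)
//
// guessPhiType sees an Int32 and a Double operand, so |p| is specialized as
// MIRType::Double and adjustPhiInputs will later wrap |a| in an MToDouble.
// Had |b| been a String instead, the phi would have fallen back to
// MIRType::Value and every operand would simply be boxed.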
1707
1708 bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
1709 MIRType phiType = phi->type();
1710 MOZ_ASSERT(phiType != MIRType::None);
1711
1712 // If we specialized a type that's not Value, there are 3 cases:
1713 // 1. Every input is of that type.
1714 // 2. Every observed input is of that type (i.e., some inputs haven't been
1715 // executed yet).
1716 // 3. Inputs were doubles and int32s, and the phi was specialized to double.
1717 if (phiType != MIRType::Value) {
1718 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
1719 MDefinition* in = phi->getOperand(i);
1720 if (in->type() == phiType) {
1721 continue;
1722 }
1723
1724 if (!alloc().ensureBallast()) {
1725 return false;
1726 }
1727
1728 if (in->isBox() && in->toBox()->input()->type() == phiType) {
1729 phi->replaceOperand(i, in->toBox()->input());
1730 } else {
1731 MInstruction* replacement;
1732
1733 if (phiType == MIRType::Double && IsFloatType(in->type())) {
1734 // Convert int32 or float32 operands to double.
1735 replacement = MToDouble::New(alloc(), in);
1736 } else if (phiType == MIRType::Float32) {
1737 if (in->type() == MIRType::Int32 || in->type() == MIRType::Double) {
1738 replacement = MToFloat32::New(alloc(), in);
1739 } else {
1740 // See comment below
1741 if (in->type() != MIRType::Value) {
1742 MBox* box = MBox::New(alloc(), in);
1743 in->block()->insertBefore(in->block()->lastIns(), box);
1744 in = box;
1745 }
1746
1747 MUnbox* unbox =
1748 MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
1749 unbox->setBailoutKind(BailoutKind::SpeculativePhi);
1750 in->block()->insertBefore(in->block()->lastIns(), unbox);
1751 replacement = MToFloat32::New(alloc(), in);
1752 }
1753 } else {
1754 // If we know this branch will fail to convert to phiType,
1755 // insert a box that'll immediately fail in the fallible unbox
1756 // below.
1757 if (in->type() != MIRType::Value) {
1758 MBox* box = MBox::New(alloc(), in);
1759 in->block()->insertBefore(in->block()->lastIns(), box);
1760 in = box;
1761 }
1762
1763 // Be optimistic and insert unboxes when the operand is a
1764 // value.
1765 replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
1766 }
1767
1768 replacement->setBailoutKind(BailoutKind::SpeculativePhi);
1769 in->block()->insertBefore(in->block()->lastIns(), replacement);
1770 phi->replaceOperand(i, replacement);
1771 }
1772 }
1773
1774 return true;
1775 }
1776
1777 // Box every typed input.
1778 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
1779 MDefinition* in = phi->getOperand(i);
1780 if (in->type() == MIRType::Value) {
1781 continue;
1782 }
1783
1784 // The input is being explicitly unboxed, so sneak past and grab the
1785 // original box. Don't bother optimizing if magic values are involved.
1786 if (in->isUnbox()) {
1787 MDefinition* unboxInput = in->toUnbox()->input();
1788 if (!IsMagicType(unboxInput->type()) && phi->typeIncludes(unboxInput)) {
1789 in = in->toUnbox()->input();
1790 }
1791 }
1792
1793 if (in->type() != MIRType::Value) {
1794 if (!alloc().ensureBallast()) {
1795 return false;
1796 }
1797
1798 MBasicBlock* pred = phi->block()->getPredecessor(i);
1799 in = AlwaysBoxAt(alloc(), pred->lastIns(), in);
1800 }
1801
1802 phi->replaceOperand(i, in);
1803 }
1804
1805 return true;
1806 }
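// Sketch of the rewrites performed by adjustPhiInputs (illustrative only):
//
//   p = MPhi(Double)(int32Op, doubleOp)
//     -->  p = MPhi(Double)(MToDouble(int32Op), doubleOp)
//
//   q = MPhi(Object)(valueOp, objectOp)
//     -->  q = MPhi(Object)(MUnbox(valueOp, Object, Fallible), objectOp)
//
// The fallible unboxes carry BailoutKind::SpeculativePhi, so a value that
// does not match the speculated phi type triggers a bailout at runtime
// instead of flowing into a mistyped phi.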
1807
1808 bool TypeAnalyzer::adjustInputs(MDefinition* def) {
1809 // Definitions such as MPhi have no type policy.
1810 if (!def->isInstruction()) {
1811 return true;
1812 }
1813
1814 MInstruction* ins = def->toInstruction();
1815 const TypePolicy* policy = ins->typePolicy();
1816 if (policy && !policy->adjustInputs(alloc(), ins)) {
1817 return false;
1818 }
1819 return true;
1820 }
1821
1822 void TypeAnalyzer::replaceRedundantPhi(MPhi* phi) {
1823 MBasicBlock* block = phi->block();
1824 js::Value v;
1825 switch (phi->type()) {
1826 case MIRType::Undefined:
1827 v = UndefinedValue();
1828 break;
1829 case MIRType::Null:
1830 v = NullValue();
1831 break;
1832 case MIRType::MagicOptimizedOut:
1833 v = MagicValue(JS_OPTIMIZED_OUT);
1834 break;
1835 case MIRType::MagicUninitializedLexical:
1836 v = MagicValue(JS_UNINITIALIZED_LEXICAL);
1837 break;
1838 case MIRType::MagicIsConstructing:
1839 v = MagicValue(JS_IS_CONSTRUCTING);
1840 break;
1841 case MIRType::MagicHole:
1842 default:
1843 MOZ_CRASH("unexpected type");
1844 }
1845 MConstant* c = MConstant::New(alloc(), v);
1846 // The instruction pass will insert the box
1847 block->insertBefore(*(block->begin()), c);
1848 phi->justReplaceAllUsesWith(c);
1849
1850 if (shouldSpecializeOsrPhis()) {
1851 // See shouldSpecializeOsrPhis comment. This is part of the third step,
1852 // guard the incoming MOsrValue is of this type.
1853 for (uint32_t i = 0; i < phi->numOperands(); i++) {
1854 MDefinition* def = phi->getOperand(i);
1855 if (def->type() != phi->type()) {
1856 MOZ_ASSERT(def->isOsrValue() || def->isPhi());
1857 MOZ_ASSERT(def->type() == MIRType::Value);
1858 MGuardValue* guard = MGuardValue::New(alloc(), def, v);
1859 guard->setBailoutKind(BailoutKind::SpeculativePhi);
1860 def->block()->insertBefore(def->block()->lastIns(), guard);
1861 }
1862 }
1863 }
1864 }
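// Example of the replacement above (illustrative only): a phi whose type is
// MIRType::Undefined can only ever produce |undefined|, so
//
//   p = MPhi(Undefined)(op0, op1)
//
// is replaced by a single MConstant(undefined) at the top of the block and
// all uses of |p| are redirected to it. When OSR phis are being specialized,
// any operand that is still a boxed Value (typically an MOsrValue) also gets
// an MGuardValue so the assumption is actually checked at runtime.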
1865
1866 bool TypeAnalyzer::insertConversions() {
1867 // Instructions are processed in reverse postorder: all defs are
1868 // seen before uses. This ensures that output adjustment (which may rewrite
1869 // inputs of uses) does not conflict with input adjustment.
1870 for (ReversePostorderIterator block(graph.rpoBegin());
1871 block != graph.rpoEnd(); block++) {
1872 if (mir->shouldCancel("Insert Conversions")) {
1873 return false;
1874 }
1875
1876 for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
1877 iter != end;) {
1878 MPhi* phi = *iter++;
1879 if (IsNullOrUndefined(phi->type()) || IsMagicType(phi->type())) {
1880 // We can replace this phi with a constant.
1881 if (!alloc().ensureBallast()) {
1882 return false;
1883 }
1884 replaceRedundantPhi(phi);
1885 block->discardPhi(phi);
1886 } else {
1887 if (!adjustPhiInputs(phi)) {
1888 return false;
1889 }
1890 }
1891 }
1892
1893 // AdjustInputs can add/remove/mutate instructions before and after the
1894 // current instruction. Only increment the iterator after it is finished.
1895 for (MInstructionIterator iter(block->begin()); iter != block->end();
1896 iter++) {
1897 if (!alloc().ensureBallast()) {
1898 return false;
1899 }
1900
1901 if (!adjustInputs(*iter)) {
1902 return false;
1903 }
1904 }
1905 }
1906 return true;
1907 }
1908
1909 /* clang-format off */
1910 //
1911 // This function tries to emit Float32 specialized operations whenever it's possible.
1912 // MIR nodes are flagged as:
1913 // - Producers, when they can create Float32 that might need to be coerced into a Double.
1914 // Loads in Float32 arrays and conversions to Float32 are producers.
1915 // - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
1916 // Stores in Float32 arrays and conversions to Float32 are consumers.
1917 // - Float32 commutative, when using the Float32 instruction instead of the Double instruction
1918 // does not result in a compound loss of precision. This is the case for +, -, /, * with 2
1919 // operands, for instance. However, an addition with 3 operands is not commutative anymore,
1920 // so an intermediate coercion is needed.
1921 // Except for phis, all these flags are known after Ion building, so they cannot change during
1922 // the process.
1923 //
1924 // The idea behind the algorithm is easy: whenever we can prove that a commutative operation
1925 // has only producers as inputs and consumers as uses, we can specialize the operation as a
1926 // float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
1927 // if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
1928 //
1929 // Phis have a special status. Phis need to be flagged as producers or consumers as they can
1930 // be inputs or outputs of commutative instructions. Fortunately, producers and consumers
1931 // properties are such that we can deduce the property using all non-phi inputs first (which form
1932 // an initial phi graph) and then propagate all properties from one phi to another using a
1933 // fixed point algorithm. The algorithm is guaranteed to terminate as each iteration has fewer or as
1934 // many flagged phis as the previous iteration (so the worst steady state case is all phis being
1935 // flagged as false).
1936 //
1937 // In a nutshell, the algorithm applies three passes:
1938 // 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
1939 // all its non-phi inputs. Then each phi propagates its value to other phis. If after propagation,
1940 // the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
1941 // consumer if all of its uses are consumers.
1942 // 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
1943 // the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
1944 // 3 - Go through all commutative operations and ensure their inputs are all producers and their
1945 // uses are all consumers.
1946 //
1947 /* clang-format on */
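// A small example of the flags above (illustrative only). For code such as
//
//   f32[0] = f32[1] + f32[2];   // f32 is a Float32Array
//
// both loads are Float32 producers, the store is a Float32 consumer, and the
// addition is float32-commutative, so the add can be specialized as a Float32
// operation. If the same sum also flowed into a use that is not a Float32
// consumer, the add would remain a Double operation and the loads would be
// converted to double instead.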
1948 bool TypeAnalyzer::markPhiConsumers() {
1949 MOZ_ASSERT(phiWorklist_.empty());
1950
1951 // Iterate in postorder so worklist is initialized to RPO.
1952 for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
1953 ++block) {
1954 if (mir->shouldCancel(
1955 "Ensure Float32 commutativity - Consumer Phis - Initial state")) {
1956 return false;
1957 }
1958
1959 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
1960 MOZ_ASSERT(!phi->isInWorklist());
1961 bool canConsumeFloat32 = !phi->isImplicitlyUsed();
1962 for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
1963 MDefinition* usedef = use.def();
1964 canConsumeFloat32 &=
1965 usedef->isPhi() || usedef->canConsumeFloat32(use.use());
1966 }
1967 phi->setCanConsumeFloat32(canConsumeFloat32);
1968 if (canConsumeFloat32 && !addPhiToWorklist(*phi)) {
1969 return false;
1970 }
1971 }
1972 }
1973
1974 while (!phiWorklist_.empty()) {
1975 if (mir->shouldCancel(
1976 "Ensure Float32 commutativity - Consumer Phis - Fixed point")) {
1977 return false;
1978 }
1979
1980 MPhi* phi = popPhi();
1981 MOZ_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));
1982
1983 bool validConsumer = true;
1984 for (MUseDefIterator use(phi); use; use++) {
1985 MDefinition* def = use.def();
1986 if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
1987 validConsumer = false;
1988 break;
1989 }
1990 }
1991
1992 if (validConsumer) {
1993 continue;
1994 }
1995
1996 // Propagate invalidated phis
1997 phi->setCanConsumeFloat32(false);
1998 for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
1999 MDefinition* input = phi->getOperand(i);
2000 if (input->isPhi() && !input->isInWorklist() &&
2001 input->canConsumeFloat32(nullptr /* unused */)) {
2002 if (!addPhiToWorklist(input->toPhi())) {
2003 return false;
2004 }
2005 }
2006 }
2007 }
2008 return true;
2009 }
2010
2011 bool TypeAnalyzer::markPhiProducers() {
2012 MOZ_ASSERT(phiWorklist_.empty());
2013
2014 // Iterate in reverse postorder so worklist is initialized to PO.
2015 for (ReversePostorderIterator block(graph.rpoBegin());
2016 block != graph.rpoEnd(); ++block) {
2017 if (mir->shouldCancel(
2018 "Ensure Float32 commutativity - Producer Phis - initial state")) {
2019 return false;
2020 }
2021
2022 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
2023 MOZ_ASSERT(!phi->isInWorklist());
2024 bool canProduceFloat32 = true;
2025 for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e;
2026 ++i) {
2027 MDefinition* input = phi->getOperand(i);
2028 canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
2029 }
2030 phi->setCanProduceFloat32(canProduceFloat32);
2031 if (canProduceFloat32 && !addPhiToWorklist(*phi)) {
2032 return false;
2033 }
2034 }
2035 }
2036
2037 while (!phiWorklist_.empty()) {
2038 if (mir->shouldCancel(
2039 "Ensure Float32 commutativity - Producer Phis - Fixed point")) {
2040 return false;
2041 }
2042
2043 MPhi* phi = popPhi();
2044 MOZ_ASSERT(phi->canProduceFloat32());
2045
2046 bool validProducer = true;
2047 for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
2048 MDefinition* input = phi->getOperand(i);
2049 if (input->isPhi() && !input->canProduceFloat32()) {
2050 validProducer = false;
2051 break;
2052 }
2053 }
2054
2055 if (validProducer) {
2056 continue;
2057 }
2058
2059 // Propagate invalidated phis
2060 phi->setCanProduceFloat32(false);
2061 for (MUseDefIterator use(phi); use; use++) {
2062 MDefinition* def = use.def();
2063 if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32()) {
2064 if (!addPhiToWorklist(def->toPhi())) {
2065 return false;
2066 }
2067 }
2068 }
2069 }
2070 return true;
2071 }
2072
2073 bool TypeAnalyzer::specializeValidFloatOps() {
2074 for (ReversePostorderIterator block(graph.rpoBegin());
2075 block != graph.rpoEnd(); ++block) {
2076 if (mir->shouldCancel("Ensure Float32 commutativity - Instructions")) {
2077 return false;
2078 }
2079
2080 for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
2081 if (!ins->isFloat32Commutative()) {
2082 continue;
2083 }
2084
2085 if (ins->type() == MIRType::Float32) {
2086 continue;
2087 }
2088
2089 if (!alloc().ensureBallast()) {
2090 return false;
2091 }
2092
2093 // This call will try to specialize the instruction iff all uses are
2094 // consumers and all inputs are producers.
2095 ins->trySpecializeFloat32(alloc());
2096 }
2097 }
2098 return true;
2099 }
2100
2101 bool TypeAnalyzer::graphContainsFloat32() {
2102 for (ReversePostorderIterator block(graph.rpoBegin());
2103 block != graph.rpoEnd(); ++block) {
2104 for (MDefinitionIterator def(*block); def; def++) {
2105 if (mir->shouldCancel(
2106 "Ensure Float32 commutativity - Graph contains Float32")) {
2107 return false;
2108 }
2109
2110 if (def->type() == MIRType::Float32) {
2111 return true;
2112 }
2113 }
2114 }
2115 return false;
2116 }
2117
2118 bool TypeAnalyzer::tryEmitFloatOperations() {
2119 // Asm.js uses ahead-of-time type checks to specialize operations, so there
2120 // is no need to check them again at this point.
2121 if (mir->compilingWasm()) {
2122 return true;
2123 }
2124
2125 // Check ahead of time that there is at least one definition typed as Float32,
2126 // otherwise we don't need this pass.
2127 if (!graphContainsFloat32()) {
2128 return true;
2129 }
2130
2131 if (!markPhiConsumers()) {
2132 return false;
2133 }
2134 if (!markPhiProducers()) {
2135 return false;
2136 }
2137 if (!specializeValidFloatOps()) {
2138 return false;
2139 }
2140 return true;
2141 }
2142
2143 bool TypeAnalyzer::checkFloatCoherency() {
2144 #ifdef DEBUG
2145 // Asserts that all Float32 instructions are flowing into Float32 consumers or
2146 // specialized operations
2147 for (ReversePostorderIterator block(graph.rpoBegin());
2148 block != graph.rpoEnd(); ++block) {
2149 if (mir->shouldCancel("Check Float32 coherency")) {
2150 return false;
2151 }
2152
2153 for (MDefinitionIterator def(*block); def; def++) {
2154 if (def->type() != MIRType::Float32) {
2155 continue;
2156 }
2157
2158 for (MUseDefIterator use(*def); use; use++) {
2159 MDefinition* consumer = use.def();
2160 MOZ_ASSERT(consumer->isConsistentFloat32Use(use.use()));
2161 }
2162 }
2163 }
2164 #endif
2165 return true;
2166 }
2167
2168 bool TypeAnalyzer::analyze() {
2169 if (!tryEmitFloatOperations()) {
2170 return false;
2171 }
2172 if (!specializePhis()) {
2173 return false;
2174 }
2175 if (!insertConversions()) {
2176 return false;
2177 }
2178 if (!checkFloatCoherency()) {
2179 return false;
2180 }
2181 return true;
2182 }
2183
2184 bool jit::ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph) {
2185 TypeAnalyzer analyzer(mir, graph);
2186
2187 if (!analyzer.analyze()) {
2188 return false;
2189 }
2190
2191 return true;
2192 }
2193
2194 void jit::RenumberBlocks(MIRGraph& graph) {
2195 size_t id = 0;
2196 for (ReversePostorderIterator block(graph.rpoBegin());
2197 block != graph.rpoEnd(); block++) {
2198 block->setId(id++);
2199 }
2200 }
2201
2202 // A utility for code which deletes blocks. Renumber the remaining blocks,
2203 // recompute dominators, and optionally recompute AliasAnalysis dependencies.
2204 bool jit::AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph,
2205 bool updateAliasAnalysis,
2206 bool underValueNumberer) {
2207 // Renumber the blocks and clear out the old dominator info.
2208 size_t id = 0;
2209 for (ReversePostorderIterator i(graph.rpoBegin()), e(graph.rpoEnd()); i != e;
2210 ++i) {
2211 i->clearDominatorInfo();
2212 i->setId(id++);
2213 }
2214
2215 // Recompute dominator info.
2216 if (!BuildDominatorTree(graph)) {
2217 return false;
2218 }
2219
2220 // If needed, update alias analysis dependencies.
2221 if (updateAliasAnalysis) {
2222 TraceLoggerThread* logger = TraceLoggerForCurrentThread();
2223 AutoTraceLog log(logger, TraceLogger_AliasAnalysis);
2224
2225 if (!AliasAnalysis(mir, graph).analyze()) {
2226 return false;
2227 }
2228 }
2229
2230 AssertExtendedGraphCoherency(graph, underValueNumberer);
2231 return true;
2232 }
2233
2234 // Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
2235 // Alias analysis dependencies may be invalid after calling this function.
2236 bool jit::RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph,
2237 uint32_t numMarkedBlocks) {
2238 if (numMarkedBlocks == graph.numBlocks()) {
2239 // If all blocks are marked, no blocks need removal. Just clear the
2240 // marks. We'll still need to update the dominator tree below though,
2241 // since we may have removed edges even if we didn't remove any blocks.
2242 graph.unmarkBlocks();
2243 } else {
2244 // As we are going to remove edges and basic blocks, we have to mark
2245 // instructions which would be needed by baseline if we were to
2246 // bailout.
2247 for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
2248 MBasicBlock* block = *it++;
2249 if (block->isMarked()) {
2250 continue;
2251 }
2252
2253 FlagAllOperandsAsImplicitlyUsed(mir, block);
2254 }
2255
2256 // Find unmarked blocks and remove them.
2257 for (ReversePostorderIterator iter(graph.rpoBegin());
2258 iter != graph.rpoEnd();) {
2259 MBasicBlock* block = *iter++;
2260
2261 if (block->isMarked()) {
2262 block->unmark();
2263 continue;
2264 }
2265
2266 // The block is unreachable. Clear out the loop header flag, as
2267 // we're doing the sweep of a mark-and-sweep here, so we no longer
2268 // need to worry about whether an unmarked block is a loop or not.
2269 if (block->isLoopHeader()) {
2270 block->clearLoopHeader();
2271 }
2272
2273 for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
2274 block->getSuccessor(i)->removePredecessor(block);
2275 }
2276 graph.removeBlock(block);
2277 }
2278 }
2279
2280 // Renumber the blocks and update the dominator tree.
2281 return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
2282 }
2283
2284 // A Simple, Fast Dominance Algorithm by Cooper et al.
2285 // Modified to support empty intersections for OSR, and in RPO.
2286 static MBasicBlock* IntersectDominators(MBasicBlock* block1,
2287 MBasicBlock* block2) {
2288 MBasicBlock* finger1 = block1;
2289 MBasicBlock* finger2 = block2;
2290
2291 MOZ_ASSERT(finger1);
2292 MOZ_ASSERT(finger2);
2293
2294 // In the original paper, the block ID comparisons are on the postorder index.
2295 // This implementation iterates in RPO, so the comparisons are reversed.
2296
2297 // For this function to be called, the block must have multiple predecessors.
2298 // If a finger is then found to be self-dominating, it must therefore be
2299 // reachable from multiple roots through non-intersecting control flow.
2300 // nullptr is returned in this case, to denote an empty intersection.
2301
2302 while (finger1->id() != finger2->id()) {
2303 while (finger1->id() > finger2->id()) {
2304 MBasicBlock* idom = finger1->immediateDominator();
2305 if (idom == finger1) {
2306 return nullptr; // Empty intersection.
2307 }
2308 finger1 = idom;
2309 }
2310
2311 while (finger2->id() > finger1->id()) {
2312 MBasicBlock* idom = finger2->immediateDominator();
2313 if (idom == finger2) {
2314 return nullptr; // Empty intersection.
2315 }
2316 finger2 = idom;
2317 }
2318 }
2319 return finger1;
2320 }
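// Worked example (illustrative only), with blocks labeled by their RPO ids:
//
//        0 (entry)
//       /  \
//      1    2
//       \  /
//        3
//
// For block 3, IntersectDominators(1, 2) repeatedly advances the finger with
// the larger id to its immediate dominator: finger2 moves from 2 to 0, then
// finger1 moves from 1 to 0, and block 0 is returned as the common dominator.
// If block 3 were also reachable directly from a separate OSR root, one
// finger would hit a self-dominating block and nullptr would be returned,
// making block 3 self-dominate.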
2321
2322 void jit::ClearDominatorTree(MIRGraph& graph) {
2323 for (MBasicBlockIterator iter = graph.begin(); iter != graph.end(); iter++) {
2324 iter->clearDominatorInfo();
2325 }
2326 }
2327
2328 static void ComputeImmediateDominators(MIRGraph& graph) {
2329 // The default start block is a root and therefore only self-dominates.
2330 MBasicBlock* startBlock = graph.entryBlock();
2331 startBlock->setImmediateDominator(startBlock);
2332
2333 // Any OSR block is a root and therefore only self-dominates.
2334 MBasicBlock* osrBlock = graph.osrBlock();
2335 if (osrBlock) {
2336 osrBlock->setImmediateDominator(osrBlock);
2337 }
2338
2339 bool changed = true;
2340
2341 while (changed) {
2342 changed = false;
2343
2344 ReversePostorderIterator block = graph.rpoBegin();
2345
2346 // For each block in RPO, intersect all dominators.
2347 for (; block != graph.rpoEnd(); block++) {
2348 // If a node has once been found to have no exclusive dominator,
2349 // it will never have an exclusive dominator, so it may be skipped.
2350 if (block->immediateDominator() == *block) {
2351 continue;
2352 }
2353
2354 // A block with no predecessors is not reachable from any entry, so
2355 // it self-dominates.
2356 if (MOZ_UNLIKELY(block->numPredecessors() == 0)) {
2357 block->setImmediateDominator(*block);
2358 continue;
2359 }
2360
2361 MBasicBlock* newIdom = block->getPredecessor(0);
2362
2363 // Find the first common dominator.
2364 for (size_t i = 1; i < block->numPredecessors(); i++) {
2365 MBasicBlock* pred = block->getPredecessor(i);
2366 if (pred->immediateDominator() == nullptr) {
2367 continue;
2368 }
2369
2370 newIdom = IntersectDominators(pred, newIdom);
2371
2372 // If there is no common dominator, the block self-dominates.
2373 if (newIdom == nullptr) {
2374 block->setImmediateDominator(*block);
2375 changed = true;
2376 break;
2377 }
2378 }
2379
2380 if (newIdom && block->immediateDominator() != newIdom) {
2381 block->setImmediateDominator(newIdom);
2382 changed = true;
2383 }
2384 }
2385 }
2386
2387 #ifdef DEBUG
2388 // Assert that all blocks have dominator information.
2389 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
2390 block++) {
2391 MOZ_ASSERT(block->immediateDominator() != nullptr);
2392 }
2393 #endif
2394 }
2395
2396 bool jit::BuildDominatorTree(MIRGraph& graph) {
2397 MOZ_ASSERT(graph.canBuildDominators());
2398
2399 ComputeImmediateDominators(graph);
2400
2401 Vector<MBasicBlock*, 4, JitAllocPolicy> worklist(graph.alloc());
2402
2403 // Traversing through the graph in post-order means that every non-phi use
2404 // of a definition is visited before the def itself. Since a def
2405 // dominates its uses, by the time we reach a particular
2406 // block, we have processed all of its dominated children, so
2407 // block->numDominated() is accurate.
2408 for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
2409 MBasicBlock* child = *i;
2410 MBasicBlock* parent = child->immediateDominator();
2411
2412 // Dominance is defined such that blocks always dominate themselves.
2413 child->addNumDominated(1);
2414
2415 // If the block only self-dominates, it has no definite parent.
2416 // Add it to the worklist as a root for pre-order traversal.
2417 // This includes all roots. Order does not matter.
2418 if (child == parent) {
2419 if (!worklist.append(child)) {
2420 return false;
2421 }
2422 continue;
2423 }
2424
2425 if (!parent->addImmediatelyDominatedBlock(child)) {
2426 return false;
2427 }
2428
2429 parent->addNumDominated(child->numDominated());
2430 }
2431
2432 #ifdef DEBUG
2433 // If compiling with OSR, many blocks will self-dominate.
2434 // Without OSR, there is only one root block which dominates all.
2435 if (!graph.osrBlock()) {
2436 MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
2437 }
2438 #endif
2439 // Now, iterate through the dominator tree in pre-order and annotate every
2440 // block with its index in the traversal.
2441 size_t index = 0;
2442 while (!worklist.empty()) {
2443 MBasicBlock* block = worklist.popCopy();
2444 block->setDomIndex(index);
2445
2446 if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
2447 block->immediatelyDominatedBlocksEnd())) {
2448 return false;
2449 }
2450 index++;
2451 }
2452
2453 return true;
2454 }
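// Worked example (illustrative only): for a diamond entry -> {A, B} -> join,
// the post-order loop above visits A, B and join before entry. Each of them
// dominates only itself (numDominated() == 1), and entry accumulates
// 1 (itself) + 1 + 1 + 1 == 4, i.e. it dominates the whole graph. The
// worklist pass then walks the dominator tree in pre-order and assigns
// setDomIndex values starting from each root.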
2455
2456 bool jit::BuildPhiReverseMapping(MIRGraph& graph) {
2457 // Build a mapping such that given a basic block, whose successor has one or
2458 // more phis, we can find our specific input to that phi. To make this fast
2459 // mapping work we rely on a specific property of our structured control
2460 // flow graph: For a block with phis, its predecessors each have only one
2461 // successor with phis. Consider each case:
2462 // * Blocks with fewer than two predecessors cannot have phis.
2463 // * Breaks. A break always has exactly one successor, and the break
2464 // catch block has exactly one predecessor for each break, as
2465 // well as a final predecessor for the actual loop exit.
2466 // * Continues. A continue always has exactly one successor, and the
2467 // continue catch block has exactly one predecessor for each
2468 // continue, as well as a final predecessor for the actual
2469 // loop continuation. The continue itself has exactly one
2470 // successor.
2471 // * An if. Each branch has exactly one predecessor.
2472 // * A switch. Each branch has exactly one predecessor.
2473 // * Loop tail. A new block is always created for the exit, and if a
2474 // break statement is present, the exit block will forward
2475 // directly to the break block.
2476 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
2477 block++) {
2478 if (block->phisEmpty()) {
2479 continue;
2480 }
2481
2482 // Assert on the above.
2483 for (size_t j = 0; j < block->numPredecessors(); j++) {
2484 MBasicBlock* pred = block->getPredecessor(j);
2485
2486 #ifdef DEBUG
2487 size_t numSuccessorsWithPhis = 0;
2488 for (size_t k = 0; k < pred->numSuccessors(); k++) {
2489 MBasicBlock* successor = pred->getSuccessor(k);
2490 if (!successor->phisEmpty()) {
2491 numSuccessorsWithPhis++;
2492 }
2493 }
2494 MOZ_ASSERT(numSuccessorsWithPhis <= 1);
2495 #endif
2496
2497 pred->setSuccessorWithPhis(*block, j);
2498 }
2499 }
2500
2501 return true;
2502 }
2503
2504 #ifdef DEBUG
2505 static bool CheckSuccessorImpliesPredecessor(MBasicBlock* A, MBasicBlock* B) {
2506 // Assuming B = succ(A), verify A = pred(B).
2507 for (size_t i = 0; i < B->numPredecessors(); i++) {
2508 if (A == B->getPredecessor(i)) {
2509 return true;
2510 }
2511 }
2512 return false;
2513 }
2514
2515 static bool CheckPredecessorImpliesSuccessor(MBasicBlock* A, MBasicBlock* B) {
2516 // Assuming B = pred(A), verify A = succ(B).
2517 for (size_t i = 0; i < B->numSuccessors(); i++) {
2518 if (A == B->getSuccessor(i)) {
2519 return true;
2520 }
2521 }
2522 return false;
2523 }
2524
2525 // If you have issues with the usesBalance assertions, then define the macro
2526 // _DEBUG_CHECK_OPERANDS_USES_BALANCE to spew information on the error output.
2527 // This output can then be processed with the following awk script to filter and
2528 // highlight which checks are missing or if there is an unexpected operand /
2529 // use.
2530 //
2531 // define _DEBUG_CHECK_OPERANDS_USES_BALANCE 1
2532 /*
2533
2534 $ ./js 2>stderr.log
2535 $ gawk '
2536 /^==Check/ { context = ""; state = $2; }
2537 /^[a-z]/ { context = context "\n\t" $0; }
2538 /^==End/ {
2539 if (state == "Operand") {
2540 list[context] = list[context] - 1;
2541 } else if (state == "Use") {
2542 list[context] = list[context] + 1;
2543 }
2544 }
2545 END {
2546 for (ctx in list) {
2547 if (list[ctx] > 0) {
2548 print "Missing operand check", ctx, "\n"
2549 }
2550 if (list[ctx] < 0) {
2551 print "Missing use check", ctx, "\n"
2552 }
2553 };
2554 }' < stderr.log
2555
2556 */
2557
2558 static void CheckOperand(const MNode* consumer, const MUse* use,
2559 int32_t* usesBalance) {
2560 MOZ_ASSERT(use->hasProducer());
2561 MDefinition* producer = use->producer();
2562 MOZ_ASSERT(!producer->isDiscarded());
2563 MOZ_ASSERT(producer->block() != nullptr);
2564 MOZ_ASSERT(use->consumer() == consumer);
2565 # ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
2566 Fprinter print(stderr);
2567 print.printf("==Check Operand\n");
2568 use->producer()->dump(print);
2569 print.printf(" index: %zu\n", use->consumer()->indexOf(use));
2570 use->consumer()->dump(print);
2571 print.printf("==End\n");
2572 # endif
2573 --*usesBalance;
2574 }
2575
2576 static void CheckUse(const MDefinition* producer, const MUse* use,
2577 int32_t* usesBalance) {
2578 MOZ_ASSERT(!use->consumer()->block()->isDead());
2579 MOZ_ASSERT_IF(use->consumer()->isDefinition(),
2580 !use->consumer()->toDefinition()->isDiscarded());
2581 MOZ_ASSERT(use->consumer()->block() != nullptr);
2582 MOZ_ASSERT(use->consumer()->getOperand(use->index()) == producer);
2583 # ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
2584 Fprinter print(stderr);
2585 print.printf("==Check Use\n");
2586 use->producer()->dump(print);
2587 print.printf(" index: %zu\n", use->consumer()->indexOf(use));
2588 use->consumer()->dump(print);
2589 print.printf("==End\n");
2590 # endif
2591 ++*usesBalance;
2592 }
2593
2594 // To properly encode entry resume points, we have to ensure that all the
2595 // operands of the entry resume point are located before the safeInsertTop
2596 // location.
2597 static void AssertOperandsBeforeSafeInsertTop(MResumePoint* resume) {
2598 MBasicBlock* block = resume->block();
2599 if (block == block->graph().osrBlock()) {
2600 return;
2601 }
2602 MInstruction* stop = block->safeInsertTop();
2603 for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
2604 MDefinition* def = resume->getOperand(i);
2605 if (def->block() != block) {
2606 continue;
2607 }
2608 if (def->isPhi()) {
2609 continue;
2610 }
2611
2612 for (MInstructionIterator ins = block->begin(); true; ins++) {
2613 if (*ins == def) {
2614 break;
2615 }
2616 MOZ_ASSERT(
2617 *ins != stop,
2618 "Resume point operand located after the safeInsertTop location");
2619 }
2620 }
2621 }
2622 #endif // DEBUG
2623
2624 void jit::AssertBasicGraphCoherency(MIRGraph& graph, bool force) {
2625 #ifdef DEBUG
2626 if (!JitOptions.fullDebugChecks && !force) {
2627 return;
2628 }
2629
2630 MOZ_ASSERT(graph.entryBlock()->numPredecessors() == 0);
2631 MOZ_ASSERT(graph.entryBlock()->phisEmpty());
2632 MOZ_ASSERT(!graph.entryBlock()->unreachable());
2633
2634 if (MBasicBlock* osrBlock = graph.osrBlock()) {
2635 MOZ_ASSERT(osrBlock->numPredecessors() == 0);
2636 MOZ_ASSERT(osrBlock->phisEmpty());
2637 MOZ_ASSERT(osrBlock != graph.entryBlock());
2638 MOZ_ASSERT(!osrBlock->unreachable());
2639 }
2640
2641 if (MResumePoint* resumePoint = graph.entryResumePoint()) {
2642 MOZ_ASSERT(resumePoint->block() == graph.entryBlock());
2643 }
2644
2645 // Assert successor and predecessor list coherency.
2646 uint32_t count = 0;
2647 int32_t usesBalance = 0;
2648 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
2649 block++) {
2650 count++;
2651
2652 MOZ_ASSERT(&block->graph() == &graph);
2653 MOZ_ASSERT(!block->isDead());
2654 MOZ_ASSERT_IF(block->outerResumePoint() != nullptr,
2655 block->entryResumePoint() != nullptr);
2656
2657 for (size_t i = 0; i < block->numSuccessors(); i++) {
2658 MOZ_ASSERT(
2659 CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));
2660 }
2661
2662 for (size_t i = 0; i < block->numPredecessors(); i++) {
2663 MOZ_ASSERT(
2664 CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));
2665 }
2666
2667 if (MResumePoint* resume = block->entryResumePoint()) {
2668 MOZ_ASSERT(!resume->instruction());
2669 MOZ_ASSERT(resume->block() == *block);
2670 AssertOperandsBeforeSafeInsertTop(resume);
2671 }
2672 if (MResumePoint* resume = block->outerResumePoint()) {
2673 MOZ_ASSERT(!resume->instruction());
2674 MOZ_ASSERT(resume->block() == *block);
2675 }
2676 for (MResumePointIterator iter(block->resumePointsBegin());
2677 iter != block->resumePointsEnd(); iter++) {
2678 // We cannot yet assert that if there is no instruction then this is
2679 // the entry resume point because we are still storing resume points
2680 // in the InlinePropertyTable.
2681 MOZ_ASSERT_IF(iter->instruction(),
2682 iter->instruction()->block() == *block);
2683 for (uint32_t i = 0, e = iter->numOperands(); i < e; i++) {
2684 CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
2685 }
2686 }
2687 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
2688 MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
2689 MOZ_ASSERT(!phi->isRecoveredOnBailout());
2690 MOZ_ASSERT(phi->type() != MIRType::None);
2691 MOZ_ASSERT(phi->dependency() == nullptr);
2692 }
2693 for (MDefinitionIterator iter(*block); iter; iter++) {
2694 MOZ_ASSERT(iter->block() == *block);
2695 MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType::None);
2696 MOZ_ASSERT(!iter->isDiscarded());
2697 MOZ_ASSERT_IF(iter->isStart(),
2698 *block == graph.entryBlock() || *block == graph.osrBlock());
2699 MOZ_ASSERT_IF(iter->isParameter(),
2700 *block == graph.entryBlock() || *block == graph.osrBlock());
2701 MOZ_ASSERT_IF(iter->isOsrEntry(), *block == graph.osrBlock());
2702 MOZ_ASSERT_IF(iter->isOsrValue(), *block == graph.osrBlock());
2703
2704 // Assert that use chains are valid for this instruction.
2705 for (uint32_t i = 0, end = iter->numOperands(); i < end; i++) {
2706 CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
2707 }
2708 for (MUseIterator use(iter->usesBegin()); use != iter->usesEnd(); use++) {
2709 CheckUse(*iter, *use, &usesBalance);
2710 }
2711
2712 if (iter->isInstruction()) {
2713 if (MResumePoint* resume = iter->toInstruction()->resumePoint()) {
2714 MOZ_ASSERT(resume->instruction() == *iter);
2715 MOZ_ASSERT(resume->block() == *block);
2716 MOZ_ASSERT(resume->block()->entryResumePoint() != nullptr);
2717 }
2718 }
2719
2720 if (iter->isRecoveredOnBailout()) {
2721 MOZ_ASSERT(!iter->hasLiveDefUses());
2722 }
2723 }
2724
2725 // The control instruction is not visited by the MDefinitionIterator.
2726 MControlInstruction* control = block->lastIns();
2727 MOZ_ASSERT(control->block() == *block);
2728 MOZ_ASSERT(!control->hasUses());
2729 MOZ_ASSERT(control->type() == MIRType::None);
2730 MOZ_ASSERT(!control->isDiscarded());
2731 MOZ_ASSERT(!control->isRecoveredOnBailout());
2732 MOZ_ASSERT(control->resumePoint() == nullptr);
2733 for (uint32_t i = 0, end = control->numOperands(); i < end; i++) {
2734 CheckOperand(control, control->getUseFor(i), &usesBalance);
2735 }
2736 for (size_t i = 0; i < control->numSuccessors(); i++) {
2737 MOZ_ASSERT(control->getSuccessor(i));
2738 }
2739 }
2740
2741 // In case of issues, see the _DEBUG_CHECK_OPERANDS_USES_BALANCE macro above.
2742 MOZ_ASSERT(usesBalance <= 0, "More use checks than operand checks");
2743 MOZ_ASSERT(usesBalance >= 0, "More operand checks than use checks");
2744 MOZ_ASSERT(graph.numBlocks() == count);
2745 #endif
2746 }
2747
2748 #ifdef DEBUG
2749 static void AssertReversePostorder(MIRGraph& graph) {
2750 // Check that every block is visited after all its predecessors (except
2751 // backedges).
2752 for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd();
2753 ++iter) {
2754 MBasicBlock* block = *iter;
2755 MOZ_ASSERT(!block->isMarked());
2756
2757 for (size_t i = 0; i < block->numPredecessors(); i++) {
2758 MBasicBlock* pred = block->getPredecessor(i);
2759 if (!pred->isMarked()) {
2760 MOZ_ASSERT(pred->isLoopBackedge());
2761 MOZ_ASSERT(block->backedge() == pred);
2762 }
2763 }
2764
2765 block->mark();
2766 }
2767
2768 graph.unmarkBlocks();
2769 }
2770 #endif
2771
2772 #ifdef DEBUG
2773 static void AssertDominatorTree(MIRGraph& graph) {
2774 // Check dominators.
2775
2776 MOZ_ASSERT(graph.entryBlock()->immediateDominator() == graph.entryBlock());
2777 if (MBasicBlock* osrBlock = graph.osrBlock()) {
2778 MOZ_ASSERT(osrBlock->immediateDominator() == osrBlock);
2779 } else {
2780 MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
2781 }
2782
2783 size_t i = graph.numBlocks();
2784 size_t totalNumDominated = 0;
2785 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
2786 block++) {
2787 MOZ_ASSERT(block->dominates(*block));
2788
2789 MBasicBlock* idom = block->immediateDominator();
2790 MOZ_ASSERT(idom->dominates(*block));
2791 MOZ_ASSERT(idom == *block || idom->id() < block->id());
2792
2793 if (idom == *block) {
2794 totalNumDominated += block->numDominated();
2795 } else {
2796 bool foundInParent = false;
2797 for (size_t j = 0; j < idom->numImmediatelyDominatedBlocks(); j++) {
2798 if (idom->getImmediatelyDominatedBlock(j) == *block) {
2799 foundInParent = true;
2800 break;
2801 }
2802 }
2803 MOZ_ASSERT(foundInParent);
2804 }
2805
2806 size_t numDominated = 1;
2807 for (size_t j = 0; j < block->numImmediatelyDominatedBlocks(); j++) {
2808 MBasicBlock* dom = block->getImmediatelyDominatedBlock(j);
2809 MOZ_ASSERT(block->dominates(dom));
2810 MOZ_ASSERT(dom->id() > block->id());
2811 MOZ_ASSERT(dom->immediateDominator() == *block);
2812
2813 numDominated += dom->numDominated();
2814 }
2815 MOZ_ASSERT(block->numDominated() == numDominated);
2816 MOZ_ASSERT(block->numDominated() <= i);
2817 MOZ_ASSERT(block->numSuccessors() != 0 || block->numDominated() == 1);
2818 i--;
2819 }
2820 MOZ_ASSERT(i == 0);
2821 MOZ_ASSERT(totalNumDominated == graph.numBlocks());
2822 }
2823 #endif
2824
2825 void jit::AssertGraphCoherency(MIRGraph& graph, bool force) {
2826 #ifdef DEBUG
2827 if (!JitOptions.checkGraphConsistency) {
2828 return;
2829 }
2830 if (!JitOptions.fullDebugChecks && !force) {
2831 return;
2832 }
2833 AssertBasicGraphCoherency(graph, force);
2834 AssertReversePostorder(graph);
2835 #endif
2836 }
2837
2838 #ifdef DEBUG
2839 static bool IsResumableMIRType(MIRType type) {
2840 // see CodeGeneratorShared::encodeAllocation
2841 switch (type) {
2842 case MIRType::Undefined:
2843 case MIRType::Null:
2844 case MIRType::Boolean:
2845 case MIRType::Int32:
2846 case MIRType::Double:
2847 case MIRType::Float32:
2848 case MIRType::String:
2849 case MIRType::Symbol:
2850 case MIRType::BigInt:
2851 case MIRType::Object:
2852 case MIRType::Shape:
2853 case MIRType::MagicOptimizedOut:
2854 case MIRType::MagicUninitializedLexical:
2855 case MIRType::MagicIsConstructing:
2856 case MIRType::Value:
2857 case MIRType::Simd128:
2858 return true;
2859
2860 case MIRType::MagicHole:
2861 case MIRType::None:
2862 case MIRType::Slots:
2863 case MIRType::Elements:
2864 case MIRType::Pointer:
2865 case MIRType::Int64:
2866 case MIRType::RefOrNull:
2867 case MIRType::StackResults:
2868 case MIRType::IntPtr:
2869 return false;
2870 }
2871 MOZ_CRASH("Unknown MIRType.");
2872 }
2873
2874 static void AssertResumableOperands(MNode* node) {
2875 for (size_t i = 0, e = node->numOperands(); i < e; ++i) {
2876 MDefinition* op = node->getOperand(i);
2877 if (op->isRecoveredOnBailout()) {
2878 continue;
2879 }
2880 MOZ_ASSERT(IsResumableMIRType(op->type()),
2881 "Resume point cannot encode its operands");
2882 }
2883 }
2884
2885 static void AssertIfResumableInstruction(MDefinition* def) {
2886 if (!def->isRecoveredOnBailout()) {
2887 return;
2888 }
2889 AssertResumableOperands(def);
2890 }
2891
2892 static void AssertResumePointDominatedByOperands(MResumePoint* resume) {
2893 for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
2894 MDefinition* op = resume->getOperand(i);
2895 MOZ_ASSERT(op->block()->dominates(resume->block()),
2896 "Resume point is not dominated by its operands");
2897 }
2898 }
2899 #endif // DEBUG
2900
2901 void jit::AssertExtendedGraphCoherency(MIRGraph& graph, bool underValueNumberer,
2902 bool force) {
2903 // Checks the basic GraphCoherency but also other conditions that
2904 // do not hold immediately (such as the fact that critical edges
2905 // are split)
2906
2907 #ifdef DEBUG
2908 if (!JitOptions.checkGraphConsistency) {
2909 return;
2910 }
2911 if (!JitOptions.fullDebugChecks && !force) {
2912 return;
2913 }
2914
2915 AssertGraphCoherency(graph, force);
2916
2917 AssertDominatorTree(graph);
2918
2919 DebugOnly<uint32_t> idx = 0;
2920 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
2921 block++) {
2922 MOZ_ASSERT(block->id() == idx);
2923 ++idx;
2924
2925 // No critical edges:
2926 if (block->numSuccessors() > 1) {
2927 for (size_t i = 0; i < block->numSuccessors(); i++) {
2928 MOZ_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
2929 }
2930 }
2931
2932 if (block->isLoopHeader()) {
2933 if (underValueNumberer && block->numPredecessors() == 3) {
2934 // Fixup block.
2935 MOZ_ASSERT(block->getPredecessor(1)->numPredecessors() == 0);
2936 MOZ_ASSERT(graph.osrBlock(),
2937 "Fixup blocks should only exists if we have an osr block.");
2938 } else {
2939 MOZ_ASSERT(block->numPredecessors() == 2);
2940 }
2941 MBasicBlock* backedge = block->backedge();
2942 MOZ_ASSERT(backedge->id() >= block->id());
2943 MOZ_ASSERT(backedge->numSuccessors() == 1);
2944 MOZ_ASSERT(backedge->getSuccessor(0) == *block);
2945 }
2946
2947 if (!block->phisEmpty()) {
2948 for (size_t i = 0; i < block->numPredecessors(); i++) {
2949 MBasicBlock* pred = block->getPredecessor(i);
2950 MOZ_ASSERT(pred->successorWithPhis() == *block);
2951 MOZ_ASSERT(pred->positionInPhiSuccessor() == i);
2952 }
2953 }
2954
2955 uint32_t successorWithPhis = 0;
2956 for (size_t i = 0; i < block->numSuccessors(); i++) {
2957 if (!block->getSuccessor(i)->phisEmpty()) {
2958 successorWithPhis++;
2959 }
2960 }
2961
2962 MOZ_ASSERT(successorWithPhis <= 1);
2963 MOZ_ASSERT((successorWithPhis != 0) ==
2964 (block->successorWithPhis() != nullptr));
2965
2966 // Verify that phi operands dominate the corresponding CFG predecessor
2967 // edges.
2968 for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
2969 iter != end; ++iter) {
2970 MPhi* phi = *iter;
2971 for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
2972 MOZ_ASSERT(
2973 phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
2974 "Phi input is not dominated by its operand");
2975 }
2976 }
2977
2978 // Verify that instructions are dominated by their operands.
2979 for (MInstructionIterator iter(block->begin()), end(block->end());
2980 iter != end; ++iter) {
2981 MInstruction* ins = *iter;
2982 for (size_t i = 0, e = ins->numOperands(); i < e; ++i) {
2983 MDefinition* op = ins->getOperand(i);
2984 MBasicBlock* opBlock = op->block();
2985 MOZ_ASSERT(opBlock->dominates(*block),
2986 "Instruction is not dominated by its operands");
2987
2988 // If the operand is an instruction in the same block, check
2989 // that it comes first.
2990 if (opBlock == *block && !op->isPhi()) {
2991 MInstructionIterator opIter = block->begin(op->toInstruction());
2992 do {
2993 ++opIter;
2994 MOZ_ASSERT(opIter != block->end(),
2995 "Operand in same block as instruction does not precede");
2996 } while (*opIter != ins);
2997 }
2998 }
2999 AssertIfResumableInstruction(ins);
3000 if (MResumePoint* resume = ins->resumePoint()) {
3001 AssertResumePointDominatedByOperands(resume);
3002 AssertResumableOperands(resume);
3003 }
3004 }
3005
3006 // Verify that the block resume points are dominated by their operands.
3007 if (MResumePoint* resume = block->entryResumePoint()) {
3008 AssertResumePointDominatedByOperands(resume);
3009 AssertResumableOperands(resume);
3010 }
3011 if (MResumePoint* resume = block->outerResumePoint()) {
3012 AssertResumePointDominatedByOperands(resume);
3013 AssertResumableOperands(resume);
3014 }
3015 }
3016 #endif
3017 }
3018
3019 struct BoundsCheckInfo {
3020 MBoundsCheck* check;
3021 uint32_t validEnd;
3022 };
3023
3024 typedef HashMap<uint32_t, BoundsCheckInfo, DefaultHasher<uint32_t>,
3025 JitAllocPolicy>
3026 BoundsCheckMap;
3027
3028 // Compute a hash for bounds checks which ignores constant offsets in the index.
3029 static HashNumber BoundsCheckHashIgnoreOffset(MBoundsCheck* check) {
3030 SimpleLinearSum indexSum = ExtractLinearSum(check->index());
3031 uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
3032 uintptr_t length = uintptr_t(check->length());
3033 return index ^ length;
3034 }
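// Example (illustrative only): bounds checks for |a[i + 1]| and |a[i + 4]|
// against the same length both extract the term |i| from their index, so they
// hash to the same value here. FindDominatingBoundsCheck can therefore find
// the earlier check when looking up the later one, even though the constant
// offsets differ.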
3035
3036 static MBoundsCheck* FindDominatingBoundsCheck(BoundsCheckMap& checks,
3037 MBoundsCheck* check,
3038 size_t index) {
3039 // Since we are traversing the dominator tree in pre-order, when we
3040 // are looking at the |index|-th block, the next numDominated() blocks
3041 // we traverse are precisely the set of blocks that are dominated.
3042 //
3043 // So, a check recorded at traversal index |index| is visible in all blocks
3044 // whose traversal index is below |index + check->block()->numDominated()|
3045 // (stored as |validEnd|) and becomes invalid after that.
3046 HashNumber hash = BoundsCheckHashIgnoreOffset(check);
3047 BoundsCheckMap::Ptr p = checks.lookup(hash);
3048 if (!p || index >= p->value().validEnd) {
3049 // We didn't find a dominating bounds check.
3050 BoundsCheckInfo info;
3051 info.check = check;
3052 info.validEnd = index + check->block()->numDominated();
3053
3054 if (!checks.put(hash, info)) return nullptr;
3055
3056 return check;
3057 }
3058
3059 return p->value().check;
3060 }
3061
3062 static MathSpace ExtractMathSpace(MDefinition* ins) {
3063 MOZ_ASSERT(ins->isAdd() || ins->isSub());
3064 MBinaryArithInstruction* arith = nullptr;
3065 if (ins->isAdd()) {
3066 arith = ins->toAdd();
3067 } else {
3068 arith = ins->toSub();
3069 }
3070 switch (arith->truncateKind()) {
3071 case TruncateKind::NoTruncate:
3072 case TruncateKind::TruncateAfterBailouts:
3073 // TruncateAfterBailouts is considered as infinite space because the
3074 // LinearSum will effectively remove the bailout check.
3075 return MathSpace::Infinite;
3076 case TruncateKind::IndirectTruncate:
3077 case TruncateKind::Truncate:
3078 return MathSpace::Modulo;
3079 }
3080 MOZ_CRASH("Unknown TruncateKind");
3081 }
3082
3083 static bool MonotoneAdd(int32_t lhs, int32_t rhs) {
3084 return (lhs >= 0 && rhs >= 0) || (lhs <= 0 && rhs <= 0);
3085 }
3086
3087 static bool MonotoneSub(int32_t lhs, int32_t rhs) {
3088 return (lhs >= 0 && rhs <= 0) || (lhs <= 0 && rhs >= 0);
3089 }
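// Example (illustrative only): MonotoneAdd(3, 4) and MonotoneAdd(-3, -4) hold
// because both offsets push the value in the same direction, so folding them
// into one constant cannot hide an intermediate overflow. A mixed-sign pair
// such as MonotoneAdd(INT32_MAX, -1) is rejected: |x + INT32_MAX| may already
// overflow even though the combined offset looks harmless.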
3090
3091 // Extract a linear sum from ins, if possible (otherwise giving the
3092 // sum 'ins + 0').
3093 SimpleLinearSum jit::ExtractLinearSum(MDefinition* ins, MathSpace space,
3094 int32_t recursionDepth) {
3095 const int32_t SAFE_RECURSION_LIMIT = 100;
3096 if (recursionDepth > SAFE_RECURSION_LIMIT) {
3097 return SimpleLinearSum(ins, 0);
3098 }
3099
3100 // Unwrap Int32ToIntPtr. This instruction only changes the representation
3101 // (int32_t to intptr_t) without affecting the value.
3102 if (ins->isInt32ToIntPtr()) {
3103 ins = ins->toInt32ToIntPtr()->input();
3104 }
3105
3106 if (ins->isBeta()) {
3107 ins = ins->getOperand(0);
3108 }
3109
3110 MOZ_ASSERT(!ins->isInt32ToIntPtr());
3111
3112 if (ins->type() != MIRType::Int32) {
3113 return SimpleLinearSum(ins, 0);
3114 }
3115
3116 if (ins->isConstant()) {
3117 return SimpleLinearSum(nullptr, ins->toConstant()->toInt32());
3118 }
3119
3120 if (!ins->isAdd() && !ins->isSub()) {
3121 return SimpleLinearSum(ins, 0);
3122 }
3123
3124 // Only allow math which is in the same space.
3125 MathSpace insSpace = ExtractMathSpace(ins);
3126 if (space == MathSpace::Unknown) {
3127 space = insSpace;
3128 } else if (space != insSpace) {
3129 return SimpleLinearSum(ins, 0);
3130 }
3131 MOZ_ASSERT(space == MathSpace::Modulo || space == MathSpace::Infinite);
3132
3133 MDefinition* lhs = ins->getOperand(0);
3134 MDefinition* rhs = ins->getOperand(1);
3135 if (lhs->type() != MIRType::Int32 || rhs->type() != MIRType::Int32) {
3136 return SimpleLinearSum(ins, 0);
3137 }
3138
3139 // Extract linear sums of each operand.
3140 SimpleLinearSum lsum = ExtractLinearSum(lhs, space, recursionDepth + 1);
3141 SimpleLinearSum rsum = ExtractLinearSum(rhs, space, recursionDepth + 1);
3142
3143 // LinearSum only considers a single term operand; if both sides have
3144 // terms, then ignore the extracted linear sums.
3145 if (lsum.term && rsum.term) {
3146 return SimpleLinearSum(ins, 0);
3147 }
3148
3149 // Check if this is of the form <SUM> + n or n + <SUM>.
3150 if (ins->isAdd()) {
3151 int32_t constant;
3152 if (space == MathSpace::Modulo) {
3153 constant = uint32_t(lsum.constant) + uint32_t(rsum.constant);
3154 } else if (!SafeAdd(lsum.constant, rsum.constant, &constant) ||
3155 !MonotoneAdd(lsum.constant, rsum.constant)) {
3156 return SimpleLinearSum(ins, 0);
3157 }
3158 return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
3159 }
3160
3161 MOZ_ASSERT(ins->isSub());
3162 // Check if this is of the form <SUM> - n.
3163 if (lsum.term) {
3164 int32_t constant;
3165 if (space == MathSpace::Modulo) {
3166 constant = uint32_t(lsum.constant) - uint32_t(rsum.constant);
3167 } else if (!SafeSub(lsum.constant, rsum.constant, &constant) ||
3168 !MonotoneSub(lsum.constant, rsum.constant)) {
3169 return SimpleLinearSum(ins, 0);
3170 }
3171 return SimpleLinearSum(lsum.term, constant);
3172 }
3173
3174 // Ignore any of the form n - <SUM>.
3175 return SimpleLinearSum(ins, 0);
3176 }
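// Example (illustrative only): for MIR corresponding to |(i + 3) + 4| the
// recursion above produces lsum == {term: i, constant: 3} and
// rsum == {term: nullptr, constant: 4}, so the whole expression is summarized
// as SimpleLinearSum(i, 7). For |5 - i| the term sits on the right-hand side
// of the subtraction, so the function conservatively returns
// SimpleLinearSum(ins, 0).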
3177
3178 // Extract a linear inequality holding when a boolean test goes in the
3179 // specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
3180 bool jit::ExtractLinearInequality(MTest* test, BranchDirection direction,
3181 SimpleLinearSum* plhs, MDefinition** prhs,
3182 bool* plessEqual) {
3183 if (!test->getOperand(0)->isCompare()) {
3184 return false;
3185 }
3186
3187 MCompare* compare = test->getOperand(0)->toCompare();
3188
3189 MDefinition* lhs = compare->getOperand(0);
3190 MDefinition* rhs = compare->getOperand(1);
3191
3192 // TODO: optimize Compare_UInt32
3193 if (!compare->isInt32Comparison()) {
3194 return false;
3195 }
3196
3197 MOZ_ASSERT(lhs->type() == MIRType::Int32);
3198 MOZ_ASSERT(rhs->type() == MIRType::Int32);
3199
3200 JSOp jsop = compare->jsop();
3201 if (direction == FALSE_BRANCH) {
3202 jsop = NegateCompareOp(jsop);
3203 }
3204
3205 SimpleLinearSum lsum = ExtractLinearSum(lhs);
3206 SimpleLinearSum rsum = ExtractLinearSum(rhs);
3207
3208 if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant)) {
3209 return false;
3210 }
3211
3212 // Normalize operations to use <= or >=.
3213 switch (jsop) {
3214 case JSOp::Le:
3215 *plessEqual = true;
3216 break;
3217 case JSOp::Lt:
3218 /* x < y ==> x + 1 <= y */
3219 if (!SafeAdd(lsum.constant, 1, &lsum.constant)) {
3220 return false;
3221 }
3222 *plessEqual = true;
3223 break;
3224 case JSOp::Ge:
3225 *plessEqual = false;
3226 break;
3227 case JSOp::Gt:
3228 /* x > y ==> x - 1 >= y */
3229 if (!SafeSub(lsum.constant, 1, &lsum.constant)) {
3230 return false;
3231 }
3232 *plessEqual = false;
3233 break;
3234 default:
3235 return false;
3236 }
3237
3238 *plhs = lsum;
3239 *prhs = rsum.term;
3240
3241 return true;
3242 }
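// Example (illustrative only): for |if (i + 2 < n)| on the TRUE branch,
// lsum == {i, 2}, rsum == {n, 0} and jsop == JSOp::Lt, so after the
// |x < y ==> x + 1 <= y| normalization the extracted inequality is
// |i + 3 <= n| with *plessEqual == true. On the FALSE branch the comparison
// is negated to JSOp::Ge and the result is |i + 2 >= n|.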
3243
3244 static bool TryEliminateBoundsCheck(BoundsCheckMap& checks, size_t blockIndex,
3245 MBoundsCheck* dominated, bool* eliminated) {
3246 MOZ_ASSERT(!*eliminated);
3247
3248 // Replace all uses of the bounds check with the actual index.
3249 // This is (a) necessary, because we can coalesce two different
3250 // bounds checks and would otherwise use the wrong index and
3251 // (b) helps register allocation. Note that this is safe since
3252 // no other pass after bounds check elimination moves instructions.
3253 dominated->replaceAllUsesWith(dominated->index());
3254
3255 if (!dominated->isMovable()) {
3256 return true;
3257 }
3258
3259 if (!dominated->fallible()) {
3260 return true;
3261 }
3262
3263 MBoundsCheck* dominating =
3264 FindDominatingBoundsCheck(checks, dominated, blockIndex);
3265 if (!dominating) {
3266 return false;
3267 }
3268
3269 if (dominating == dominated) {
3270 // We didn't find a dominating bounds check.
3271 return true;
3272 }
3273
3274 // We found two bounds checks with the same hash number, but we still have
3275 // to make sure the lengths and index terms are equal.
3276 if (dominating->length() != dominated->length()) {
3277 return true;
3278 }
3279
3280 SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
3281 SimpleLinearSum sumB = ExtractLinearSum(dominated->index());
3282
3283 // Both terms should be nullptr or the same definition.
3284 if (sumA.term != sumB.term) {
3285 return true;
3286 }
3287
3288 // This bounds check is redundant.
3289 *eliminated = true;
3290
3291 // Normalize the ranges according to the constant offsets in the two indexes.
3292 int32_t minimumA, maximumA, minimumB, maximumB;
3293 if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
3294 !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
3295 !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
3296 !SafeAdd(sumB.constant, dominated->maximum(), &maximumB)) {
3297 return false;
3298 }
3299
3300 // Update the dominating check to cover both ranges, denormalizing the
3301 // result per the constant offset in the index.
3302 int32_t newMinimum, newMaximum;
3303 if (!SafeSub(std::min(minimumA, minimumB), sumA.constant, &newMinimum) ||
3304 !SafeSub(std::max(maximumA, maximumB), sumA.constant, &newMaximum)) {
3305 return false;
3306 }
3307
3308 dominating->setMinimum(newMinimum);
3309 dominating->setMaximum(newMaximum);
3310 dominating->setBailoutKind(BailoutKind::HoistBoundsCheck);
3311
3312 return true;
3313 }
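// Example (illustrative only): with a dominating check on |i + 1| and a
// dominated check on |i + 4| against the same length, both indexes extract
// the term |i|, so the dominated check is redundant. Its constant-offset
// range is folded into the dominating check's minimum/maximum, and the
// dominated instruction, whose uses were already redirected to its index, is
// discarded by the caller.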
3314
3315 // Eliminate checks which are redundant given each other or other instructions.
3316 //
3317 // A bounds check is considered redundant if it's dominated by another bounds
3318 // check with the same length and the indexes differ by only a constant amount.
3319 // In this case we eliminate the redundant bounds check and update the other one
3320 // to cover the ranges of both checks.
3321 //
3322 // Bounds checks are added to a hash map, and since the hash function ignores
3323 // differences in constant offset, this offers a fast way to find redundant
3324 // checks.
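// For example (illustrative): if a check of |i| against |len| dominates a
// check of |i + 2| against the same |len|, the second check is removed and
// the first check's [minimum, maximum] range is widened so that it still
// covers both index ranges.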
3325 bool jit::EliminateRedundantChecks(MIRGraph& graph) {
3326 BoundsCheckMap checks(graph.alloc());
3327
3328 // Stack for pre-order CFG traversal.
3329 Vector<MBasicBlock*, 1, JitAllocPolicy> worklist(graph.alloc());
3330
3331 // The index of the current block in the CFG traversal.
3332 size_t index = 0;
3333
3334 // Add all self-dominating blocks to the worklist.
3335 // This includes all roots. Order does not matter.
3336 for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
3337 MBasicBlock* block = *i;
3338 if (block->immediateDominator() == block) {
3339 if (!worklist.append(block)) {
3340 return false;
3341 }
3342 }
3343 }
3344
3345 MDefinitionVector eliminateList(graph.alloc());
3346
3347 // Starting from each self-dominating block, traverse the CFG in pre-order.
3348 while (!worklist.empty()) {
3349 MBasicBlock* block = worklist.popCopy();
3350
3351     // Add all immediately dominated blocks to the worklist.
3352 if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
3353 block->immediatelyDominatedBlocksEnd())) {
3354 return false;
3355 }
3356
3357 for (MDefinitionIterator iter(block); iter;) {
3358 MDefinition* def = *iter++;
3359
3360 if (!def->isBoundsCheck()) {
3361 continue;
3362 }
3363
3364 bool eliminated = false;
3365 if (!TryEliminateBoundsCheck(checks, index, def->toBoundsCheck(),
3366 &eliminated)) {
3367 return false;
3368 }
3369
3370 if (eliminated) {
3371 block->discardDef(def);
3372 }
3373 }
3374 index++;
3375 }
3376
3377 MOZ_ASSERT(index == graph.numBlocks());
3378
3379 for (size_t i = 0; i < eliminateList.length(); i++) {
3380 MDefinition* def = eliminateList[i];
3381 def->block()->discardDef(def);
3382 }
3383
3384 return true;
3385 }
3386
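// Returns whether an explicit MKeepAliveObject is needed to keep the owner
// object of |slotsOrElements| alive up to |use|. No keep-alive is needed when
// |use| is in the same block and only the whitelisted opcodes below appear
// between the two instructions.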
3387 static bool NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use) {
3388 MOZ_ASSERT(slotsOrElements->type() == MIRType::Elements ||
3389 slotsOrElements->type() == MIRType::Slots);
3390
3391 if (slotsOrElements->block() != use->block()) {
3392 return true;
3393 }
3394
3395 MBasicBlock* block = use->block();
3396 MInstructionIterator iter(block->begin(slotsOrElements));
3397 MOZ_ASSERT(*iter == slotsOrElements);
3398 ++iter;
3399
3400 while (true) {
3401 if (*iter == use) {
3402 return false;
3403 }
3404
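    // None of the opcodes below can trigger a GC here, so the owner object
    // stays alive without an explicit keep-alive.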
3405 switch (iter->op()) {
3406 case MDefinition::Opcode::Nop:
3407 case MDefinition::Opcode::Constant:
3408 case MDefinition::Opcode::KeepAliveObject:
3409 case MDefinition::Opcode::Unbox:
3410 case MDefinition::Opcode::LoadDynamicSlot:
3411 case MDefinition::Opcode::StoreDynamicSlot:
3412 case MDefinition::Opcode::LoadFixedSlot:
3413 case MDefinition::Opcode::StoreFixedSlot:
3414 case MDefinition::Opcode::LoadElement:
3415 case MDefinition::Opcode::LoadElementAndUnbox:
3416 case MDefinition::Opcode::StoreElement:
3417 case MDefinition::Opcode::StoreHoleValueElement:
3418 case MDefinition::Opcode::InitializedLength:
3419 case MDefinition::Opcode::ArrayLength:
3420 case MDefinition::Opcode::BoundsCheck:
3421 case MDefinition::Opcode::GuardElementNotHole:
3422 case MDefinition::Opcode::SpectreMaskIndex:
3423 iter++;
3424 break;
3425 default:
3426 return true;
3427 }
3428 }
3429
3430 MOZ_CRASH("Unreachable");
3431 }
3432
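// Insert MKeepAliveObject instructions to keep the object owning a Slots or
// Elements pointer alive until the pointer's last use, since the raw pointer
// itself does not keep the owner object (and thus its memory) alive.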
3433 bool jit::AddKeepAliveInstructions(MIRGraph& graph) {
3434 for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
3435 MBasicBlock* block = *i;
3436
3437 for (MInstructionIterator insIter(block->begin()); insIter != block->end();
3438 insIter++) {
3439 MInstruction* ins = *insIter;
3440 if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots) {
3441 continue;
3442 }
3443
3444 MDefinition* ownerObject;
3445 switch (ins->op()) {
3446 case MDefinition::Opcode::Elements:
3447 case MDefinition::Opcode::ArrayBufferViewElements:
3448 MOZ_ASSERT(ins->numOperands() == 1);
3449 ownerObject = ins->getOperand(0);
3450 break;
3451 case MDefinition::Opcode::Slots:
3452 ownerObject = ins->toSlots()->object();
3453 break;
3454 default:
3455 MOZ_CRASH("Unexpected op");
3456 }
3457
3458 MOZ_ASSERT(ownerObject->type() == MIRType::Object);
3459
3460 if (ownerObject->isConstant()) {
3461 // Constants are kept alive by other pointers, for instance
3462 // ImmGCPtr in JIT code.
3463 continue;
3464 }
3465
3466 for (MUseDefIterator uses(ins); uses; uses++) {
3467 MInstruction* use = uses.def()->toInstruction();
3468
3469 if (use->isStoreElementHole()) {
3470 // StoreElementHole has an explicit object operand. If GVN
3471 // is disabled, we can get different unbox instructions with
3472 // the same object as input, so we check for that case.
3473 MOZ_ASSERT_IF(!use->toStoreElementHole()->object()->isUnbox() &&
3474 !ownerObject->isUnbox(),
3475 use->toStoreElementHole()->object() == ownerObject);
3476 continue;
3477 }
3478
3479 if (use->isInArray()) {
3480 // See StoreElementHole case above.
3481 MOZ_ASSERT_IF(
3482 !use->toInArray()->object()->isUnbox() && !ownerObject->isUnbox(),
3483 use->toInArray()->object() == ownerObject);
3484 continue;
3485 }
3486
3487 if (!NeedsKeepAlive(ins, use)) {
3488 continue;
3489 }
3490
3491 if (!graph.alloc().ensureBallast()) {
3492 return false;
3493 }
3494 MKeepAliveObject* keepAlive =
3495 MKeepAliveObject::New(graph.alloc(), ownerObject);
3496 use->block()->insertAfter(use, keepAlive);
3497 }
3498 }
3499 }
3500
3501 return true;
3502 }
3503
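// Multiply the sum by |scale|: every term's scale and the constant are
// multiplied, failing (returning false) on int32 overflow.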
3504 bool LinearSum::multiply(int32_t scale) {
3505 for (size_t i = 0; i < terms_.length(); i++) {
3506 if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale)) {
3507 return false;
3508 }
3509 }
3510 return SafeMul(scale, constant_, &constant_);
3511 }
3512
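// Divide the sum by |scale|. This only succeeds when every term's scale and
// the constant are exactly divisible by |scale|.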
3513 bool LinearSum::divide(uint32_t scale) {
3514 MOZ_ASSERT(scale > 0);
3515
3516 for (size_t i = 0; i < terms_.length(); i++) {
3517 if (terms_[i].scale % scale != 0) {
3518 return false;
3519 }
3520 }
3521 if (constant_ % scale != 0) {
3522 return false;
3523 }
3524
3525 for (size_t i = 0; i < terms_.length(); i++) {
3526 terms_[i].scale /= scale;
3527 }
3528 constant_ /= scale;
3529
3530 return true;
3531 }
3532
3533 bool LinearSum::add(const LinearSum& other, int32_t scale /* = 1 */) {
3534 for (size_t i = 0; i < other.terms_.length(); i++) {
3535 int32_t newScale = scale;
3536 if (!SafeMul(scale, other.terms_[i].scale, &newScale)) {
3537 return false;
3538 }
3539 if (!add(other.terms_[i].term, newScale)) {
3540 return false;
3541 }
3542 }
3543 int32_t newConstant = scale;
3544 if (!SafeMul(scale, other.constant_, &newConstant)) {
3545 return false;
3546 }
3547 return add(newConstant);
3548 }
3549
3550 bool LinearSum::add(SimpleLinearSum other, int32_t scale) {
3551 if (other.term && !add(other.term, scale)) {
3552 return false;
3553 }
3554
3555 int32_t constant;
3556 if (!SafeMul(other.constant, scale, &constant)) {
3557 return false;
3558 }
3559
3560 return add(constant);
3561 }
3562
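// Add |scale * term| to the sum. Constant terms are folded into the constant
// part; otherwise the scale is merged into an existing term for the same
// definition (removing the term if its combined scale becomes zero), or a new
// term is appended.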
3563 bool LinearSum::add(MDefinition* term, int32_t scale) {
3564 MOZ_ASSERT(term);
3565
3566 if (scale == 0) {
3567 return true;
3568 }
3569
3570 if (MConstant* termConst = term->maybeConstantValue()) {
3571 int32_t constant = termConst->toInt32();
3572 if (!SafeMul(constant, scale, &constant)) {
3573 return false;
3574 }
3575 return add(constant);
3576 }
3577
3578 for (size_t i = 0; i < terms_.length(); i++) {
3579 if (term == terms_[i].term) {
3580 if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale)) {
3581 return false;
3582 }
3583 if (terms_[i].scale == 0) {
3584 terms_[i] = terms_.back();
3585 terms_.popBack();
3586 }
3587 return true;
3588 }
3589 }
3590
3591 AutoEnterOOMUnsafeRegion oomUnsafe;
3592 if (!terms_.append(LinearTerm(term, scale))) {
3593 oomUnsafe.crash("LinearSum::add");
3594 }
3595
3596 return true;
3597 }
3598
3599 bool LinearSum::add(int32_t constant) {
3600 return SafeAdd(constant, constant_, &constant_);
3601 }
3602
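// Print the sum in a compact form; e.g. terms {2*#5, -1*#7} with constant -3
// print as "2*#5-#7-3", where #N is the id of the term's definition.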
3603 void LinearSum::dump(GenericPrinter& out) const {
3604 for (size_t i = 0; i < terms_.length(); i++) {
3605 int32_t scale = terms_[i].scale;
3606 int32_t id = terms_[i].term->id();
3607 MOZ_ASSERT(scale);
3608 if (scale > 0) {
3609 if (i) {
3610 out.printf("+");
3611 }
3612 if (scale == 1) {
3613 out.printf("#%d", id);
3614 } else {
3615 out.printf("%d*#%d", scale, id);
3616 }
3617 } else if (scale == -1) {
3618 out.printf("-#%d", id);
3619 } else {
3620 out.printf("%d*#%d", scale, id);
3621 }
3622 }
3623 if (constant_ > 0) {
3624 out.printf("+%d", constant_);
3625 } else if (constant_ < 0) {
3626 out.printf("%d", constant_);
3627 }
3628 }
3629
3630 void LinearSum::dump() const {
3631 Fprinter out(stderr);
3632 dump(out);
3633 out.finish();
3634 }
3635
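// Emit MIR at the end of |block| computing the variable part of |sum|. Only
// the terms are materialized; the constant is not added and is left to the
// caller. Returns a constant zero if the sum has no terms.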
3636 MDefinition* jit::ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block,
3637 const LinearSum& sum,
3638 BailoutKind bailoutKind) {
3639 MDefinition* def = nullptr;
3640
3641 for (size_t i = 0; i < sum.numTerms(); i++) {
3642 LinearTerm term = sum.term(i);
3643 MOZ_ASSERT(!term.term->isConstant());
3644 if (term.scale == 1) {
3645 if (def) {
3646 def = MAdd::New(alloc, def, term.term, MIRType::Int32);
3647 def->setBailoutKind(bailoutKind);
3648 block->insertAtEnd(def->toInstruction());
3649 def->computeRange(alloc);
3650 } else {
3651 def = term.term;
3652 }
3653 } else if (term.scale == -1) {
3654 if (!def) {
3655 def = MConstant::New(alloc, Int32Value(0));
3656 block->insertAtEnd(def->toInstruction());
3657 def->computeRange(alloc);
3658 }
3659 def = MSub::New(alloc, def, term.term, MIRType::Int32);
3660 def->setBailoutKind(bailoutKind);
3661 block->insertAtEnd(def->toInstruction());
3662 def->computeRange(alloc);
3663 } else {
3664 MOZ_ASSERT(term.scale != 0);
3665 MConstant* factor = MConstant::New(alloc, Int32Value(term.scale));
3666 block->insertAtEnd(factor);
3667 MMul* mul = MMul::New(alloc, term.term, factor, MIRType::Int32);
3668 mul->setBailoutKind(bailoutKind);
3669 block->insertAtEnd(mul);
3670 mul->computeRange(alloc);
3671 if (def) {
3672 def = MAdd::New(alloc, def, mul, MIRType::Int32);
3673 def->setBailoutKind(bailoutKind);
3674 block->insertAtEnd(def->toInstruction());
3675 def->computeRange(alloc);
3676 } else {
3677 def = mul;
3678 }
3679 }
3680 }
3681
3682 if (!def) {
3683 def = MConstant::New(alloc, Int32Value(0));
3684 block->insertAtEnd(def->toInstruction());
3685 def->computeRange(alloc);
3686 }
3687
3688 return def;
3689 }
3690
3691 // Mark all the blocks that are in the loop with the given header.
3692 // Returns the number of blocks marked. Sets *canOsr to true if the loop is
3693 // reachable from both the normal entry and the OSR entry.
3694 size_t jit::MarkLoopBlocks(MIRGraph& graph, MBasicBlock* header, bool* canOsr) {
3695 #ifdef DEBUG
3696 for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
3697 i != e; ++i) {
3698 MOZ_ASSERT(!i->isMarked(), "Some blocks already marked");
3699 }
3700 #endif
3701
3702 MBasicBlock* osrBlock = graph.osrBlock();
3703 *canOsr = false;
3704
3705 // The blocks are in RPO; start at the loop backedge, which marks the bottom
3706 // of the loop, and walk up until we get to the header. Loops may be
3707 // discontiguous, so we trace predecessors to determine which blocks are
3708 // actually part of the loop. The backedge is always part of the loop, and
3709 // so are its predecessors, transitively, up to the loop header or an OSR
3710 // entry.
3711 MBasicBlock* backedge = header->backedge();
3712 backedge->mark();
3713 size_t numMarked = 1;
3714 for (PostorderIterator i = graph.poBegin(backedge);; ++i) {
3715 MOZ_ASSERT(
3716 i != graph.poEnd(),
3717 "Reached the end of the graph while searching for the loop header");
3718 MBasicBlock* block = *i;
3719 // If we've reached the loop header, we're done.
3720 if (block == header) {
3721 break;
3722 }
3723 // A block not marked by the time we reach it is not in the loop.
3724 if (!block->isMarked()) {
3725 continue;
3726 }
3727
3728 // This block is in the loop; trace to its predecessors.
3729 for (size_t p = 0, e = block->numPredecessors(); p != e; ++p) {
3730 MBasicBlock* pred = block->getPredecessor(p);
3731 if (pred->isMarked()) {
3732 continue;
3733 }
3734
3735 // Blocks dominated by the OSR entry are not part of the loop
3736 // (unless they aren't reachable from the normal entry).
3737 if (osrBlock && pred != header && osrBlock->dominates(pred) &&
3738 !osrBlock->dominates(header)) {
3739 *canOsr = true;
3740 continue;
3741 }
3742
3743 MOZ_ASSERT(pred->id() >= header->id() && pred->id() <= backedge->id(),
3744 "Loop block not between loop header and loop backedge");
3745
3746 pred->mark();
3747 ++numMarked;
3748
3749 // A nested loop may not exit back to the enclosing loop at its
3750 // bottom. If we just marked its header, then the whole nested loop
3751 // is part of the enclosing loop.
3752 if (pred->isLoopHeader()) {
3753 MBasicBlock* innerBackedge = pred->backedge();
3754 if (!innerBackedge->isMarked()) {
3755 // Mark its backedge so that we add all of its blocks to the
3756 // outer loop as we walk upwards.
3757 innerBackedge->mark();
3758 ++numMarked;
3759
3760 // If the nested loop is not contiguous, we may have already
3761 // passed its backedge. If this happens, back up.
3762 if (innerBackedge->id() > block->id()) {
3763 i = graph.poBegin(innerBackedge);
3764 --i;
3765 }
3766 }
3767 }
3768 }
3769 }
3770
3771 // If there's no path connecting the header to the backedge, then this isn't
3772 // actually a loop. This can happen when the code starts with a loop but GVN
3773 // folds some branches away.
3774 if (!header->isMarked()) {
3775 jit::UnmarkLoopBlocks(graph, header);
3776 return 0;
3777 }
3778
3779 return numMarked;
3780 }
3781
3782 // Unmark all the blocks that are in the loop with the given header.
3783 void jit::UnmarkLoopBlocks(MIRGraph& graph, MBasicBlock* header) {
3784 MBasicBlock* backedge = header->backedge();
3785 for (ReversePostorderIterator i = graph.rpoBegin(header);; ++i) {
3786 MOZ_ASSERT(i != graph.rpoEnd(),
3787 "Reached the end of the graph while searching for the backedge");
3788 MBasicBlock* block = *i;
3789 if (block->isMarked()) {
3790 block->unmark();
3791 if (block == backedge) {
3792 break;
3793 }
3794 }
3795 }
3796
3797 #ifdef DEBUG
3798 for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
3799 i != e; ++i) {
3800 MOZ_ASSERT(!i->isMarked(), "Not all blocks got unmarked");
3801 }
3802 #endif
3803 }
3804
3805 bool jit::FoldLoadsWithUnbox(MIRGenerator* mir, MIRGraph& graph) {
3806 // This pass folds MLoadFixedSlot, MLoadDynamicSlot, MLoadElement instructions
3807 // followed by MUnbox into a single instruction. For LoadElement this allows
3808 // us to fuse the hole check with the type check for the unbox.
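  // For example (illustrative), a |LoadFixedSlot| whose only non-resume-point
  // use is an |Unbox| to Int32 is replaced by a single |LoadFixedSlotAndUnbox|
  // producing an Int32.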
3809
3810 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
3811 block++) {
3812 if (mir->shouldCancel("FoldLoadsWithUnbox")) {
3813 return false;
3814 }
3815
3816 for (MInstructionIterator insIter(block->begin());
3817 insIter != block->end();) {
3818 MInstruction* ins = *insIter;
3819 insIter++;
3820
3821 // We're only interested in loads producing a Value.
3822 if (!ins->isLoadFixedSlot() && !ins->isLoadDynamicSlot() &&
3823 !ins->isLoadElement()) {
3824 continue;
3825 }
3826 if (ins->type() != MIRType::Value) {
3827 continue;
3828 }
3829
3830 MInstruction* load = ins;
3831
3832 // Ensure there's a single def-use (ignoring resume points) and it's an
3833 // unbox.
3834 MDefinition* defUse = load->maybeSingleDefUse();
3835 if (!defUse) {
3836 continue;
3837 }
3838 if (!defUse->isUnbox()) {
3839 continue;
3840 }
3841
3842 // For now require the load and unbox to be in the same block. This isn't
3843 // strictly necessary but it's the common case and could prevent bailouts
3844 // when moving the unbox before a loop.
3845 MUnbox* unbox = defUse->toUnbox();
3846 if (unbox->block() != *block) {
3847 continue;
3848 }
3849
3850 MOZ_ASSERT(!IsMagicType(unbox->type()));
3851
3852 // If this is a LoadElement that needs a hole check, we only support
3853 // folding it with a fallible unbox so that we can eliminate the hole
3854 // check.
3855 if (load->isLoadElement() && load->toLoadElement()->needsHoleCheck() &&
3856 !unbox->fallible()) {
3857 continue;
3858 }
3859
3860 // Combine the load and unbox into a single MIR instruction.
3861 if (!graph.alloc().ensureBallast()) {
3862 return false;
3863 }
3864
3865 MIRType type = unbox->type();
3866 MUnbox::Mode mode = unbox->mode();
3867
3868 MInstruction* replacement;
3869 switch (load->op()) {
3870 case MDefinition::Opcode::LoadFixedSlot: {
3871 auto* loadIns = load->toLoadFixedSlot();
3872 replacement = MLoadFixedSlotAndUnbox::New(
3873 graph.alloc(), loadIns->object(), loadIns->slot(), mode, type);
3874 break;
3875 }
3876 case MDefinition::Opcode::LoadDynamicSlot: {
3877 auto* loadIns = load->toLoadDynamicSlot();
3878 replacement = MLoadDynamicSlotAndUnbox::New(
3879 graph.alloc(), loadIns->slots(), loadIns->slot(), mode, type);
3880 break;
3881 }
3882 case MDefinition::Opcode::LoadElement: {
3883 auto* loadIns = load->toLoadElement();
3884 MOZ_ASSERT_IF(loadIns->needsHoleCheck(), unbox->fallible());
3885 replacement = MLoadElementAndUnbox::New(
3886 graph.alloc(), loadIns->elements(), loadIns->index(), mode, type);
3887 break;
3888 }
3889 default:
3890 MOZ_CRASH("Unexpected instruction");
3891 }
3892
3893 block->insertBefore(load, replacement);
3894 unbox->replaceAllUsesWith(replacement);
3895 load->replaceAllUsesWith(replacement);
3896
3897 if (*insIter == unbox) {
3898 insIter++;
3899 }
3900 block->discard(unbox);
3901 block->discard(load);
3902 }
3903 }
3904
3905 return true;
3906 }
3907
3908 // Reorder the blocks in the loop starting at the given header to be contiguous.
3909 static void MakeLoopContiguous(MIRGraph& graph, MBasicBlock* header,
3910 size_t numMarked) {
3911 MBasicBlock* backedge = header->backedge();
3912
3913 MOZ_ASSERT(header->isMarked(), "Loop header is not part of loop");
3914 MOZ_ASSERT(backedge->isMarked(), "Loop backedge is not part of loop");
3915
3916 // If there are any blocks between the loop header and the loop backedge
3917 // that are not part of the loop, prepare to move them to the end. We keep
3918 // them in order, which preserves RPO.
3919 ReversePostorderIterator insertIter = graph.rpoBegin(backedge);
3920 insertIter++;
3921 MBasicBlock* insertPt = *insertIter;
3922
3923 // Visit all the blocks from the loop header to the loop backedge.
3924 size_t headerId = header->id();
3925 size_t inLoopId = headerId;
3926 size_t notInLoopId = inLoopId + numMarked;
3927 ReversePostorderIterator i = graph.rpoBegin(header);
3928 for (;;) {
3929 MBasicBlock* block = *i++;
3930 MOZ_ASSERT(block->id() >= header->id() && block->id() <= backedge->id(),
3931 "Loop backedge should be last block in loop");
3932
3933 if (block->isMarked()) {
3934 // This block is in the loop.
3935 block->unmark();
3936 block->setId(inLoopId++);
3937 // If we've reached the loop backedge, we're done!
3938 if (block == backedge) {
3939 break;
3940 }
3941 } else {
3942 // This block is not in the loop. Move it to the end.
3943 graph.moveBlockBefore(insertPt, block);
3944 block->setId(notInLoopId++);
3945 }
3946 }
3947 MOZ_ASSERT(header->id() == headerId, "Loop header id changed");
3948 MOZ_ASSERT(inLoopId == headerId + numMarked,
3949 "Wrong number of blocks kept in loop");
3950 MOZ_ASSERT(notInLoopId == (insertIter != graph.rpoEnd() ? insertPt->id()
3951 : graph.numBlocks()),
3952 "Wrong number of blocks moved out of loop");
3953 }
3954
3955 // Reorder the blocks in the graph so that loops are contiguous.
3956 bool jit::MakeLoopsContiguous(MIRGraph& graph) {
3957 // Visit all loop headers (in any order).
3958 for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
3959 MBasicBlock* header = *i;
3960 if (!header->isLoopHeader()) {
3961 continue;
3962 }
3963
3964 // Mark all blocks that are actually part of the loop.
3965 bool canOsr;
3966 size_t numMarked = MarkLoopBlocks(graph, header, &canOsr);
3967
3968     // If this turned out not to be an actual loop, don't try to optimize it.
3969 if (numMarked == 0) {
3970 continue;
3971 }
3972
3973 // If there's an OSR block entering the loop in the middle, it's tricky,
3974 // so don't try to handle it, for now.
3975 if (canOsr) {
3976 UnmarkLoopBlocks(graph, header);
3977 continue;
3978 }
3979
3980 // Move all blocks between header and backedge that aren't marked to
3981 // the end of the loop, making the loop itself contiguous.
3982 MakeLoopContiguous(graph, header, numMarked);
3983 }
3984
3985 return true;
3986 }
3987
3988 #ifdef JS_JITSPEW
3989 static void DumpDefinition(GenericPrinter& out, MDefinition* def,
3990 size_t depth) {
3991 out.printf("%u:", def->id());
3992 if (def->isConstant()) {
3993 def->printOpcode(out);
3994 } else {
3995 MDefinition::PrintOpcodeName(out, def->op());
3996 }
3997
3998 if (depth == 0) {
3999 return;
4000 }
4001
4002 for (size_t i = 0; i < def->numOperands(); i++) {
4003 out.printf(" (");
4004 DumpDefinition(out, def->getOperand(i), depth - 1);
4005 out.printf(")");
4006 }
4007 }
4008 #endif
4009
4010 void jit::DumpMIRExpressions(MIRGraph& graph, const CompileInfo& info,
4011 const char* phase) {
4012 #ifdef JS_JITSPEW
4013 if (!JitSpewEnabled(JitSpew_MIRExpressions)) {
4014 return;
4015 }
4016
4017 Fprinter& out = JitSpewPrinter();
4018 out.printf("===== %s =====\n", phase);
4019
4020 size_t depth = 2;
4021 bool isFirstBlock = true;
4022 for (ReversePostorderIterator block(graph.rpoBegin());
4023 block != graph.rpoEnd(); block++) {
4024 if (isFirstBlock) {
4025 isFirstBlock = false;
4026 } else {
4027 out.printf(" --\n");
4028 }
4029 for (MInstructionIterator iter(block->begin()), end(block->end());
4030 iter != end; iter++) {
4031 out.printf(" ");
4032 DumpDefinition(out, *iter, depth);
4033 out.printf("\n");
4034 }
4035 }
4036
4037 if (info.compilingWasm()) {
4038 out.printf("===== end wasm MIR dump =====\n");
4039 } else {
4040 out.printf("===== %s:%u =====\n", info.filename(), info.lineno());
4041 }
4042 #endif
4043 }
4044