/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/RangeAnalysis.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/Ion.h"
#include "jit/IonAnalysis.h"
#include "jit/JitSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "vm/ArgumentsObject.h"
#include "vm/TypedArrayObject.h"

#include "vm/BytecodeUtil-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using JS::ToInt32;
using mozilla::Abs;
using mozilla::CountLeadingZeroes32;
using mozilla::ExponentComponent;
using mozilla::FloorLog2;
using mozilla::IsInfinite;
using mozilla::IsNaN;
using mozilla::IsNegativeZero;
using mozilla::NegativeInfinity;
using mozilla::NumberEqualsInt32;
using mozilla::PositiveInfinity;
using mozilla::Swap;

// This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
//
// We associate a range object with each SSA name, and the ranges are consulted
// in order to determine whether overflow is possible for arithmetic
// computations.
//
// An important source of range information that requires care to take
// advantage of is conditional control flow. Consider the code below:
//
// if (x < 0) {
//   y = x + 2000000000;
// } else {
//   if (x < 1000000000) {
//     y = x * 2;
//   } else {
//     y = x - 3000000000;
//   }
// }
//
// The arithmetic operations in this code cannot overflow, but it is not
// sufficient to simply associate each name with a range, since the information
// differs between basic blocks. The traditional dataflow approach would be
// to associate ranges with (name, basic block) pairs. This solution is not
// satisfying, since we lose the benefit of SSA form: in SSA form, each
// definition has a unique name, so there is no need to track information about
// the control flow of the program.
//
// The approach used here is to add a new form of pseudo operation called a
// beta node, which associates range information with a value. These beta
// instructions take one argument and additionally have an auxiliary constant
// range associated with them. Operationally, beta nodes are just copies, but
// the invariant expressed by beta node copies is that the output will fall
// inside the range given by the beta node.  Gough and Klaeren refer to SSA
// extended with these beta nodes as XSA form. The following shows the example
// code transformed into XSA form:
//
// if (x < 0) {
//   x1 = Beta(x, [INT_MIN, -1]);
//   y1 = x1 + 2000000000;
// } else {
//   x2 = Beta(x, [0, INT_MAX]);
//   if (x2 < 1000000000) {
//     x3 = Beta(x2, [INT_MIN, 999999999]);
//     y2 = x3*2;
//   } else {
//     x4 = Beta(x2, [1000000000, INT_MAX]);
//     y3 = x4 - 3000000000;
//   }
//   y4 = Phi(y2, y3);
// }
// y = Phi(y1, y4);
//
// We insert beta nodes for the purposes of range analysis (they might also be
// usefully used for other forms of bounds check elimination) and remove them
// after range analysis is performed. The remaining compiler phases do not ever
// encounter beta nodes.
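//
// For example (illustrative arithmetic only; the exact ranges are computed by
// the code below): in the branch guarded by (x < 0) above, the beta node pins
// x1 to [INT_MIN, -1], so y1 = x1 + 2000000000 is known to lie in
// [INT_MIN + 2000000000, 1999999999] = [-147483648, 1999999999], which fits in
// an int32 and therefore needs no overflow check.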

static bool IsDominatedUse(MBasicBlock* block, MUse* use) {
  MNode* n = use->consumer();
  bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();

  if (isPhi) {
    MPhi* phi = n->toDefinition()->toPhi();
    return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
  }

  return block->dominates(n->block());
}

static inline void SpewRange(MDefinition* def) {
#ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None &&
      def->range()) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    def->printName(out);
    out.printf(" has range ");
    def->range()->dump(out);
  }
#endif
}

static inline void SpewTruncate(MDefinition* def,
                                MDefinition::TruncateKind kind,
                                bool shouldClone) {
#ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_Range)) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    out.printf("truncating ");
    def->printName(out);
    out.printf(" (kind: %s, clone: %d)\n",
               MDefinition::TruncateKindString(kind), shouldClone);
  }
#endif
}

TempAllocator& RangeAnalysis::alloc() const { return graph_.alloc(); }

void RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig,
                                             MDefinition* dom,
                                             MBasicBlock* block) {
  for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd();) {
    MUse* use = *i++;
    if (use->consumer() != dom && IsDominatedUse(block, use))
      use->replaceProducer(dom);
  }
}

bool RangeAnalysis::addBetaNodes() {
  JitSpew(JitSpew_Range, "Adding beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    JitSpew(JitSpew_Range, "Looking at block %d", block->id());

    BranchDirection branch_dir;
    MTest* test = block->immediateDominatorBranch(&branch_dir);

    if (!test || !test->getOperand(0)->isCompare()) continue;

    MCompare* compare = test->getOperand(0)->toCompare();

    if (!compare->isNumericComparison()) continue;

    // TODO: support unsigned comparisons
    if (compare->compareType() == MCompare::Compare_UInt32) continue;

    MDefinition* left = compare->getOperand(0);
    MDefinition* right = compare->getOperand(1);
    double bound;
    double conservativeLower = NegativeInfinity<double>();
    double conservativeUpper = PositiveInfinity<double>();
    MDefinition* val = nullptr;

    JSOp jsop = compare->jsop();

    if (branch_dir == FALSE_BRANCH) {
      jsop = NegateCompareOp(jsop);
      conservativeLower = GenericNaN();
      conservativeUpper = GenericNaN();
    }

    MConstant* leftConst = left->maybeConstantValue();
    MConstant* rightConst = right->maybeConstantValue();
    if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
      bound = leftConst->numberToDouble();
      val = right;
      jsop = ReverseCompareOp(jsop);
    } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
      bound = rightConst->numberToDouble();
      val = left;
    } else if (left->type() == MIRType::Int32 &&
               right->type() == MIRType::Int32) {
      MDefinition* smaller = nullptr;
      MDefinition* greater = nullptr;
      if (jsop == JSOP_LT) {
        smaller = left;
        greater = right;
      } else if (jsop == JSOP_GT) {
        smaller = right;
        greater = left;
      }
      if (smaller && greater) {
        if (!alloc().ensureBallast()) return false;

        MBeta* beta;
        beta = MBeta::New(
            alloc(), smaller,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(smaller, beta, block);
        JitSpew(JitSpew_Range, "Adding beta node for smaller %d",
                smaller->id());
        beta = MBeta::New(
            alloc(), greater,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(greater, beta, block);
        JitSpew(JitSpew_Range, "Adding beta node for greater %d",
                greater->id());
      }
      continue;
    } else {
      continue;
    }

    // At this point, one of the operands of the compare is a constant, and
    // val is the other operand.
    MOZ_ASSERT(val);

    Range comp;
    switch (jsop) {
      case JSOP_LE:
        comp.setDouble(conservativeLower, bound);
        break;
      case JSOP_LT:
        // For integers, if x < c, the upper bound of x is c-1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeSub(intbound, 1, &intbound))
            bound = intbound;
        }
        comp.setDouble(conservativeLower, bound);

        // Negative zero is not less than zero.
        if (bound == 0) comp.refineToExcludeNegativeZero();
        break;
      case JSOP_GE:
        comp.setDouble(bound, conservativeUpper);
        break;
      case JSOP_GT:
        // For integers, if x > c, the lower bound of x is c+1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeAdd(intbound, 1, &intbound))
            bound = intbound;
        }
        comp.setDouble(bound, conservativeUpper);

        // Negative zero is not greater than zero.
        if (bound == 0) comp.refineToExcludeNegativeZero();
        break;
      case JSOP_STRICTEQ:
        // A strict comparison can test for things other than numeric value.
        if (!compare->isNumericComparison()) continue;
        // Otherwise fall through to handle JSOP_STRICTEQ the same as JSOP_EQ.
        MOZ_FALLTHROUGH;
      case JSOP_EQ:
        comp.setDouble(bound, bound);
        break;
      case JSOP_STRICTNE:
        // A strict comparison can test for things other than numeric value.
        if (!compare->isNumericComparison()) continue;
        // Otherwise fall through to handle JSOP_STRICTNE the same as JSOP_NE.
        MOZ_FALLTHROUGH;
      case JSOP_NE:
        // Negative zero is not not-equal to zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
          break;
        }
        continue;  // well, we could have
                   // [-\inf, bound-1] U [bound+1, \inf] but we only use
                   // contiguous ranges.
      default:
        continue;
    }

    if (JitSpewEnabled(JitSpew_Range)) {
      JitSpewHeader(JitSpew_Range);
      Fprinter& out = JitSpewPrinter();
      out.printf("Adding beta node for %d with range ", val->id());
      comp.dump(out);
    }

    if (!alloc().ensureBallast()) return false;

    MBeta* beta = MBeta::New(alloc(), val, new (alloc()) Range(comp));
    block->insertBefore(*block->begin(), beta);
    replaceDominatedUsesWith(val, beta, block);
  }

  return true;
}

bool RangeAnalysis::removeBetaNodes() {
  JitSpew(JitSpew_Range, "Removing beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    for (MDefinitionIterator iter(*i); iter;) {
      MDefinition* def = *iter++;
      if (def->isBeta()) {
        MDefinition* op = def->getOperand(0);
        JitSpew(JitSpew_Range, "Removing beta node %d for %d", def->id(),
                op->id());
        def->justReplaceAllUsesWith(op);
        block->discardDef(def);
      } else {
        // We only place Beta nodes at the beginning of basic
        // blocks, so if we see something else, we can move on
        // to the next block.
        break;
      }
    }
  }
  return true;
}

void SymbolicBound::dump(GenericPrinter& out) const {
  if (loop) out.printf("[loop] ");
  sum.dump(out);
}

void SymbolicBound::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

// Test whether the given range's exponent tells us anything that its lower
// and upper bound values don't.
static bool IsExponentInteresting(const Range* r) {
  // If it lacks either a lower or upper bound, the exponent is interesting.
  if (!r->hasInt32Bounds()) return true;

  // Otherwise if there's no fractional part, the lower and upper bounds,
  // which are integers, are perfectly precise.
  if (!r->canHaveFractionalPart()) return false;

  // Otherwise, if the bounds are conservatively rounded across a power-of-two
  // boundary, the exponent may imply a tighter range.
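  // (Illustrative: F[0,2] with max_exponent_ 0 means the value is actually
  // < pow(2, 0+1) = 2, tighter than the rounded integer upper bound of 2;
  // FloorLog2(2) == 1 > 0, so the exponent is interesting.)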
  return FloorLog2(Max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
}

void Range::dump(GenericPrinter& out) const {
  assertInvariants();

  // Floating-point or Integer subset.
  if (canHaveFractionalPart_)
    out.printf("F");
  else
    out.printf("I");

  out.printf("[");

  if (!hasInt32LowerBound_)
    out.printf("?");
  else
    out.printf("%d", lower_);
  if (symbolicLower_) {
    out.printf(" {");
    symbolicLower_->dump(out);
    out.printf("}");
  }

  out.printf(", ");

  if (!hasInt32UpperBound_)
    out.printf("?");
  else
    out.printf("%d", upper_);
  if (symbolicUpper_) {
    out.printf(" {");
    symbolicUpper_->dump(out);
    out.printf("}");
  }

  out.printf("]");

  bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
  bool includesNegativeInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
  bool includesPositiveInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
  bool includesNegativeZero = canBeNegativeZero_;

  if (includesNaN || includesNegativeInfinity || includesPositiveInfinity ||
      includesNegativeZero) {
    out.printf(" (");
    bool first = true;
    if (includesNaN) {
      if (first)
        first = false;
      else
        out.printf(" ");
      out.printf("U NaN");
    }
    if (includesNegativeInfinity) {
      if (first)
        first = false;
      else
        out.printf(" ");
      out.printf("U -Infinity");
    }
    if (includesPositiveInfinity) {
      if (first)
        first = false;
      else
        out.printf(" ");
      out.printf("U Infinity");
    }
    if (includesNegativeZero) {
      if (first)
        first = false;
      else
        out.printf(" ");
      out.printf("U -0");
    }
    out.printf(")");
  }
  if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this))
    out.printf(" (< pow(2, %d+1))", max_exponent_);
}

void Range::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

Range* Range::intersect(TempAllocator& alloc, const Range* lhs,
                        const Range* rhs, bool* emptyRange) {
  *emptyRange = false;

  if (!lhs && !rhs) return nullptr;

  if (!lhs) return new (alloc) Range(*rhs);
  if (!rhs) return new (alloc) Range(*lhs);

  int32_t newLower = Max(lhs->lower_, rhs->lower_);
  int32_t newUpper = Min(lhs->upper_, rhs->upper_);

  // If upper < lower, then we have conflicting constraints. Consider:
  //
  // if (x < 0) {
  //   if (x > 0) {
  //     [Some code.]
  //   }
  // }
  //
  // In this case, the block is unreachable.
  if (newUpper < newLower) {
    // If both ranges can be NaN, the result can still be NaN.
    if (!lhs->canBeNaN() || !rhs->canBeNaN()) *emptyRange = true;
    return nullptr;
  }

  bool newHasInt32LowerBound =
      lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ && rhs->canBeNegativeZero_);

  uint16_t newExponent = Min(lhs->max_exponent_, rhs->max_exponent_);

  // NaN is a special value which is neither greater than infinity nor less
  // than negative infinity. When we intersect two ranges like [?, 0] and
  // [0, ?], we can end up thinking we have both a lower and upper bound, even
  // though NaN is still possible. In this case, just be conservative, since
  // any case where we can have NaN is not especially interesting.
  if (newHasInt32LowerBound && newHasInt32UpperBound &&
      newExponent == IncludesInfinityAndNaN)
    return nullptr;

  // If one of the ranges has a fractional part and the other doesn't, it's
  // possible that we will have computed a newExponent that's more precise
  // than our newLower and newUpper. This is unusual, so we handle it here
  // instead of in optimize().
  //
  // For example, consider the range F[0,1.5]. Range analysis represents the
  // lower and upper bound as integers, so we'd actually have
  // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
  // more precise upper bound than the integer upper bound.
  //
  // When intersecting such a range with an integer range, the fractional part
  // of the range is dropped. The max exponent of 0 remains valid, so the
  // upper bound needs to be adjusted to 1.
  //
  // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
  // the naive intersection is I[2,2], but since the max exponent tells us
  // that the value is always less than 2, the intersection is actually empty.
  if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
      (lhs->canHaveFractionalPart() && newHasInt32LowerBound &&
       newHasInt32UpperBound && newLower == newUpper)) {
    refineInt32BoundsByExponent(newExponent, &newLower, &newHasInt32LowerBound,
                                &newUpper, &newHasInt32UpperBound);

    // If we're intersecting two ranges that don't overlap, this could also
    // push the bounds past each other, since the actual intersection is
    // the empty set.
    if (newLower > newUpper) {
      *emptyRange = true;
      return nullptr;
    }
  }

  return new (alloc)
      Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
            newCanHaveFractionalPart, newMayIncludeNegativeZero, newExponent);
}

void Range::unionWith(const Range* other) {
  int32_t newLower = Min(lower_, other->lower_);
  int32_t newUpper = Max(upper_, other->upper_);

  bool newHasInt32LowerBound =
      hasInt32LowerBound_ && other->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      hasInt32UpperBound_ && other->hasInt32UpperBound_;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      canHaveFractionalPart_ || other->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(canBeNegativeZero_ || other->canBeNegativeZero_);

  uint16_t newExponent = Max(max_exponent_, other->max_exponent_);

  rawInitialize(newLower, newHasInt32LowerBound, newUpper,
                newHasInt32UpperBound, newCanHaveFractionalPart,
                newMayIncludeNegativeZero, newExponent);
}

Range::Range(const MDefinition* def)
    : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
  if (const Range* other = def->range()) {
    // The instruction has range information; use it.
    *this = *other;

    // Simulate the effect of converting the value to its type.
    // Note: we cannot clamp here, since ranges aren't allowed to shrink
    // and truncation can increase the range again. So we use wrapAround to
    // mimic a possible truncation.
    switch (def->type()) {
      case MIRType::Int32:
        // MToNumberInt32 cannot truncate. So we can safely clamp.
        if (def->isToNumberInt32())
          clampToInt32();
        else
          wrapAroundToInt32();
        break;
      case MIRType::Boolean:
        wrapAroundToBoolean();
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        break;
    }
  } else {
    // Otherwise just use type information. We can trust the type here
    // because we don't care what value the instruction actually produces,
    // but what value we might get after we get past the bailouts.
    switch (def->type()) {
      case MIRType::Int32:
        setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
        break;
      case MIRType::Boolean:
        setInt32(0, 1);
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        setUnknown();
        break;
    }
  }

  // As a special case, MUrsh is permitted to claim a result type of
  // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
  // bailouts. If range analysis hasn't ruled out values in
  // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
  // use as either a uint32 or an int32.
  if (!hasInt32UpperBound() && def->isUrsh() &&
      def->toUrsh()->bailoutsDisabled() && def->type() != MIRType::Int64) {
    lower_ = INT32_MIN;
  }

  assertInvariants();
}

static uint16_t ExponentImpliedByDouble(double d) {
  // Handle the special values.
  if (IsNaN(d)) return Range::IncludesInfinityAndNaN;
  if (IsInfinite(d)) return Range::IncludesInfinity;

  // Otherwise take the exponent part and clamp it at zero, since the Range
  // class doesn't track fractional ranges.
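  // (Illustrative: 5.0 == 1.25 * 2^2, so ExponentComponent(5.0) is 2,
  // recording that the value is < pow(2, 2+1) = 8; for fractional values
  // like 0.25 the negative exponent is clamped to 0.)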
  return uint16_t(Max(int_fast16_t(0), ExponentComponent(d)));
}

void Range::setDouble(double l, double h) {
  MOZ_ASSERT(!(l > h));

  // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
  if (l >= INT32_MIN && l <= INT32_MAX) {
    lower_ = int32_t(::floor(l));
    hasInt32LowerBound_ = true;
  } else if (l >= INT32_MAX) {
    lower_ = INT32_MAX;
    hasInt32LowerBound_ = true;
  } else {
    lower_ = INT32_MIN;
    hasInt32LowerBound_ = false;
  }
  if (h >= INT32_MIN && h <= INT32_MAX) {
    upper_ = int32_t(::ceil(h));
    hasInt32UpperBound_ = true;
  } else if (h <= INT32_MIN) {
    upper_ = INT32_MIN;
    hasInt32UpperBound_ = true;
  } else {
    upper_ = INT32_MAX;
    hasInt32UpperBound_ = false;
  }

  // Infer max_exponent_.
  uint16_t lExp = ExponentImpliedByDouble(l);
  uint16_t hExp = ExponentImpliedByDouble(h);
  max_exponent_ = Max(lExp, hExp);

  canHaveFractionalPart_ = ExcludesFractionalParts;
  canBeNegativeZero_ = ExcludesNegativeZero;

  // Infer the canHaveFractionalPart_ setting. We can have a
  // fractional part if the range crosses through the neighborhood of zero. We
  // won't have a fractional value if the value is always beyond the point at
  // which double precision can't represent fractional values.
  uint16_t minExp = Min(lExp, hExp);
  bool includesNegative = IsNaN(l) || l < 0;
  bool includesPositive = IsNaN(h) || h > 0;
  bool crossesZero = includesNegative && includesPositive;
  if (crossesZero || minExp < MaxTruncatableExponent)
    canHaveFractionalPart_ = IncludesFractionalParts;

  // Infer the canBeNegativeZero_ setting. We can have a negative zero if
  // either bound is zero.
  if (!(l > 0) && !(h < 0)) canBeNegativeZero_ = IncludesNegativeZero;

  optimize();
}

void Range::setDoubleSingleton(double d) {
  setDouble(d, d);

  // The above setDouble call is for comparisons, and treats negative zero
  // as equal to zero. We're aiming for a minimum range, so we can clear the
  // negative zero flag if the value isn't actually negative zero.
  if (!IsNegativeZero(d)) canBeNegativeZero_ = ExcludesNegativeZero;

  assertInvariants();
}

static inline bool MissingAnyInt32Bounds(const Range* lhs, const Range* rhs) {
  return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
}

Range* Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  int64_t l = (int64_t)lhs->lower_ + (int64_t)rhs->lower_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound())
    l = NoInt32LowerBound;

  int64_t h = (int64_t)lhs->upper_ + (int64_t)rhs->upper_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound())
    h = NoInt32UpperBound;

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
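  // (Illustrative: if both |lhs| and |rhs| are < 2^30, their sum is < 2^31,
  // so bumping the larger exponent by one keeps the bound sound.)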
  uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) ++e;

  // Infinity + -Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
    e = Range::IncludesInfinityAndNaN;

  return new (alloc) Range(
      l, h,
      FractionalPartFlag(lhs->canHaveFractionalPart() ||
                         rhs->canHaveFractionalPart()),
      NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeNegativeZero()),
      e);
}

Range* Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  int64_t l = (int64_t)lhs->lower_ - (int64_t)rhs->upper_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound())
    l = NoInt32LowerBound;

  int64_t h = (int64_t)lhs->upper_ - (int64_t)rhs->lower_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound())
    h = NoInt32UpperBound;

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) ++e;

  // Infinity - Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
    e = Range::IncludesInfinityAndNaN;

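  // (Illustrative: the result can only be -0 when lhs can be -0 and rhs can
  // be zero, since -0 - 0 == -0 but -0 - (-0) == +0.)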
  return new (alloc)
      Range(l, h,
            FractionalPartFlag(lhs->canHaveFractionalPart() ||
                               rhs->canHaveFractionalPart()),
            NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeZero()), e);
}

Range* Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // If both numbers can be negative, the result can be negative in the whole
  // range.
  if (lhs->lower() < 0 && rhs->lower() < 0)
    return Range::NewInt32Range(alloc, INT32_MIN,
                                Max(lhs->upper(), rhs->upper()));

  // At most one of the numbers can be negative, so:
  // - the result can't be negative, and
  // - the upper bound is the minimum of the two upper bounds,
  int32_t lower = 0;
  int32_t upper = Min(lhs->upper(), rhs->upper());

  // EXCEPT when one operand can be negative, because a negative value such as
  // -1 preserves all bits of the other operand, e.g.
  // -1 & 5 = 5
  if (lhs->lower() < 0) upper = rhs->upper();
  if (rhs->lower() < 0) upper = lhs->upper();

  return Range::NewInt32Range(alloc, lower, upper);
}

Range* Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  // When one operand is always 0 or always -1, it's a special case where we
  // can compute a fully precise result. Handling these up front also
  // protects the code below from calling CountLeadingZeroes32 with a zero
  // operand or from shifting an int32_t by 32.
  if (lhs->lower() == lhs->upper()) {
    if (lhs->lower() == 0) return new (alloc) Range(*rhs);
    if (lhs->lower() == -1) return new (alloc) Range(*lhs);
  }
  if (rhs->lower() == rhs->upper()) {
    if (rhs->lower() == 0) return new (alloc) Range(*lhs);
    if (rhs->lower() == -1) return new (alloc) Range(*rhs);
  }

  // The code below uses CountLeadingZeroes32, which has undefined behavior
  // if its operand is 0. We rely on the code above to protect it.
  MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
  MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
  MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
  MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);

  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;

  if (lhs->lower() >= 0 && rhs->lower() >= 0) {
    // Both operands are non-negative, so the result won't be less than either.
    lower = Max(lhs->lower(), rhs->lower());
    // The result will have leading zeros where both operands have leading
    // zeros. CountLeadingZeroes32 of a non-negative int32 will be at least 1
    // to account for the sign bit.
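    // (Illustrative: with lhs->upper() == 5 (29 leading zeros) and
    // rhs->upper() == 12 (28 leading zeros), upper becomes
    // UINT32_MAX >> 28 == 15, and indeed any OR of values <= 5 and <= 12
    // is <= 15.)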
    upper = int32_t(UINT32_MAX >> Min(CountLeadingZeroes32(lhs->upper()),
                                      CountLeadingZeroes32(rhs->upper())));
  } else {
    // The result will have leading ones where either operand has leading ones.
    if (lhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
      lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
    if (rhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
      lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

Range* Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  int32_t lhsLower = lhs->lower();
  int32_t lhsUpper = lhs->upper();
  int32_t rhsLower = rhs->lower();
  int32_t rhsUpper = rhs->upper();
  bool invertAfter = false;

  // If either operand is negative, bitwise-negate it, and arrange to negate
  // the result; ~((~x)^y) == x^y. If both are negative the negations on the
  // result cancel each other out; effectively this is (~x)^(~y) == x^y.
  // These transformations reduce the number of cases we have to handle below.
  if (lhsUpper < 0) {
    lhsLower = ~lhsLower;
    lhsUpper = ~lhsUpper;
    Swap(lhsLower, lhsUpper);
    invertAfter = !invertAfter;
  }
  if (rhsUpper < 0) {
    rhsLower = ~rhsLower;
    rhsUpper = ~rhsUpper;
    Swap(rhsLower, rhsUpper);
    invertAfter = !invertAfter;
  }

  // Handle cases where lhs or rhs is always zero specially, because they're
  // easy cases where we can be perfectly precise, and because it protects the
  // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
  // undefined behavior.
  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;
  if (lhsLower == 0 && lhsUpper == 0) {
    upper = rhsUpper;
    lower = rhsLower;
  } else if (rhsLower == 0 && rhsUpper == 0) {
    upper = lhsUpper;
    lower = lhsLower;
  } else if (lhsLower >= 0 && rhsLower >= 0) {
    // Both operands are non-negative. The result will be non-negative.
    lower = 0;
    // To compute the upper value, take each operand's upper value and
    // set all bits that don't correspond to leading zero bits in the
    // other to one. For each one, this gives an upper bound for the
    // result, so we can take the minimum between the two.
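    // (Illustrative: lhsUpper == 5 and rhsUpper == 3 give
    // Min(3 | (UINT32_MAX >> 29), 5 | (UINT32_MAX >> 30)) == Min(7, 7) == 7,
    // and 4 ^ 3 == 7 shows the bound is attainable.)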
    unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
    unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
    upper = Min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
  }

  // If we bitwise-negated one (but not both) of the operands above, apply the
  // bitwise-negate to the result, completing ~((~x)^y) == x^y.
  if (invertAfter) {
    lower = ~lower;
    upper = ~upper;
    Swap(lower, upper);
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

Range* Range::not_(TempAllocator& alloc, const Range* op) {
  MOZ_ASSERT(op->isInt32());
  return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
}

Range* Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);

  NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(
      (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
      (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));

  uint16_t exponent;
  if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
    // Two finite values.
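    // (Illustrative, assuming numBits() == max_exponent_ + 1: two operands
    // each below pow(2, 3) have numBits() 3, and their product is below
    // pow(2, 6), so the result exponent 3 + 3 - 1 == 5 encodes
    // "< pow(2, 5+1)".)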
    exponent = lhs->numBits() + rhs->numBits() - 1;
    if (exponent > Range::MaxFiniteExponent) exponent = Range::IncludesInfinity;
  } else if (!lhs->canBeNaN() && !rhs->canBeNaN() &&
             !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
             !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) {
    // Two values that multiplied together won't produce a NaN.
    exponent = Range::IncludesInfinity;
  } else {
    // Could be anything.
    exponent = Range::IncludesInfinityAndNaN;
  }

  if (MissingAnyInt32Bounds(lhs, rhs))
    return new (alloc)
        Range(NoInt32LowerBound, NoInt32UpperBound, newCanHaveFractionalPart,
              newMayIncludeNegativeZero, exponent);
  int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
  int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
  int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
  int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
  return new (alloc)
      Range(Min(Min(a, b), Min(c, d)), Max(Max(a, b), Max(c, d)),
            newCanHaveFractionalPart, newMayIncludeNegativeZero, exponent);
}

Range* Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;

  // If the shift doesn't lose bits or shift bits into the sign bit, we
  // can simply compute the correct range by shifting.
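  // (Illustrative: with lhs == [1, 5] and shift == 2, no bits are lost, so
  // the result range is simply [4, 20].)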
  if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) ==
          lhs->lower() &&
      (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) ==
          lhs->upper()) {
    return Range::NewInt32Range(alloc, uint32_t(lhs->lower()) << shift,
                                uint32_t(lhs->upper()) << shift);
  }

  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range* Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;
  return Range::NewInt32Range(alloc, lhs->lower() >> shift,
                              lhs->upper() >> shift);
}

Range* Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());

  int32_t shift = c & 0x1f;

  // If the value is always non-negative or always negative, we can simply
  // compute the correct range by shifting.
  if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
    return Range::NewUInt32Range(alloc, uint32_t(lhs->lower()) >> shift,
                                 uint32_t(lhs->upper()) >> shift);
  }

  // Otherwise return the most general range after the shift.
  return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}

Range* Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range* Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // Canonicalize the shift range to 0 to 31.
  int32_t shiftLower = rhs->lower();
  int32_t shiftUpper = rhs->upper();
  if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
    shiftLower = 0;
    shiftUpper = 31;
  } else {
    shiftLower &= 0x1f;
    shiftUpper &= 0x1f;
    if (shiftLower > shiftUpper) {
      shiftLower = 0;
      shiftUpper = 31;
    }
  }
  MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);

  // The lhs bounds are signed, thus the minimum is either the lower bound
  // shifted by the smallest shift if negative, or the lower bound shifted by
  // the biggest shift otherwise. And the opposite for the maximum.
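  // (Illustrative: with lhs == [-8, 12] and shift range [1, 3], the minimum is
  // -8 >> 1 == -4 and the maximum is 12 >> 1 == 6; every other shift of either
  // bound stays inside [-4, 6].)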
  int32_t lhsLower = lhs->lower();
  int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
  int32_t lhsUpper = lhs->upper();
  int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;

  return Range::NewInt32Range(alloc, min, max);
}

Range* Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  return Range::NewUInt32Range(
      alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}

Range* Range::abs(TempAllocator& alloc, const Range* op) {
  int32_t l = op->lower_;
  int32_t u = op->upper_;
  FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;

  // Abs never produces a negative zero.
  NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;

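  // (Illustrative: for op == [-5, 3] the new lower bound is
  // Max(Max(0, -5), -3) == 0 and the new upper bound is
  // Max(Max(0, 3), 5) == 5, i.e. abs([-5, 3]) == [0, 5]; the INT32_MIN checks
  // below guard against overflow when negating.)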
  return new (alloc)
      Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u), true,
            Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
            op->hasInt32Bounds() && l != INT32_MIN, canHaveFractionalPart,
            canBeNegativeZero, op->max_exponent_);
}

Range* Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) return nullptr;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  return new (alloc) Range(Min(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                           Min(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           Max(lhs->max_exponent_, rhs->max_exponent_));
}

Range* Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) return nullptr;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  return new (alloc) Range(Max(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                           Max(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           Max(lhs->max_exponent_, rhs->max_exponent_));
}

Range* Range::floor(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  // Decrement the lower bound of the copied range if op has a fractional part
  // and the lower bound is a defined Int32. We also avoid decrementing when op
  // has a fractional part but lower_ >= JSVAL_INT_MAX.
  if (op->canHaveFractionalPart() && op->hasInt32LowerBound())
    copy->setLowerInit(int64_t(copy->lower_) - 1);

  // Also refine max_exponent_, because floor may have decremented the int
  // value. If we have int32-defined bounds, just deduce it using those bounds.
  // But if we don't have those, the value's max_exponent_ may have changed.
  // Because we want to maintain an overestimation, we increment it when we
  // can.
  if (copy->hasInt32Bounds())
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  else if (copy->max_exponent_ < MaxFiniteExponent)
    copy->max_exponent_++;

  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}

Range* Range::ceil(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);

  // We need to refine max_exponent_ because ceil may have incremented the int
  // value. If we have int32 bounds defined, just deduce it using the
  // defined bounds. Else we can just increment its value, as we are looking to
  // maintain an overestimation.
  if (copy->hasInt32Bounds())
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  else if (copy->max_exponent_ < MaxFiniteExponent)
    copy->max_exponent_++;

  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}

Range* Range::sign(TempAllocator& alloc, const Range* op) {
  if (op->canBeNaN()) return nullptr;

  return new (alloc)
      Range(Max(Min(op->lower_, 1), -1), Max(Min(op->upper_, 1), -1),
            Range::ExcludesFractionalParts,
            NegativeZeroFlag(op->canBeNegativeZero()), 0);
}

Range* Range::NaNToZero(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  if (copy->canBeNaN()) {
    copy->max_exponent_ = Range::IncludesInfinity;
    if (!copy->canBeZero()) {
      Range zero;
      zero.setDoubleSingleton(0);
      copy->unionWith(&zero);
    }
  }
  copy->refineToExcludeNegativeZero();
  return copy;
}

bool Range::negativeZeroMul(const Range* lhs, const Range* rhs) {
  // The result can only be negative zero if both sides are finite and they
  // have differing signs.
  return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
         (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
}

bool Range::update(const Range* other) {
  bool changed = lower_ != other->lower_ ||
                 hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
                 upper_ != other->upper_ ||
                 hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
                 canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
                 canBeNegativeZero_ != other->canBeNegativeZero_ ||
                 max_exponent_ != other->max_exponent_;
  if (changed) {
    lower_ = other->lower_;
    hasInt32LowerBound_ = other->hasInt32LowerBound_;
    upper_ = other->upper_;
    hasInt32UpperBound_ = other->hasInt32UpperBound_;
    canHaveFractionalPart_ = other->canHaveFractionalPart_;
    canBeNegativeZero_ = other->canBeNegativeZero_;
    max_exponent_ = other->max_exponent_;
    assertInvariants();
  }

  return changed;
}

///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////

void MPhi::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) return;

  Range* range = nullptr;
  for (size_t i = 0, e = numOperands(); i < e; i++) {
    if (getOperand(i)->block()->unreachable()) {
      JitSpew(JitSpew_Range, "Ignoring unreachable input %d",
              getOperand(i)->id());
      continue;
    }

    // Peek at the pre-bailout range so we can take a short-cut; if any of
    // the operands has an unknown range, this phi has an unknown range.
    if (!getOperand(i)->range()) return;

    Range input(getOperand(i));

    if (range)
      range->unionWith(&input);
    else
      range = new (alloc) Range(input);
  }

  setRange(range);
}

void MBeta::computeRange(TempAllocator& alloc) {
  bool emptyRange = false;

  Range opRange(getOperand(0));
  Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
  if (emptyRange) {
    JitSpew(JitSpew_Range, "Marking block for inst %d unreachable", id());
    block()->setUnreachableUnchecked();
  } else {
    setRange(range);
  }
}

void MConstant::computeRange(TempAllocator& alloc) {
  if (isTypeRepresentableAsDouble()) {
    double d = numberToDouble();
    setRange(Range::NewDoubleSingletonRange(alloc, d));
  } else if (type() == MIRType::Boolean) {
    bool b = toBoolean();
    setRange(Range::NewInt32Range(alloc, b, b));
  }
}

void MCharCodeAt::computeRange(TempAllocator& alloc) {
  // ECMA 262 says that the integer will be non-negative and at most 65535.
  setRange(Range::NewInt32Range(alloc, 0, 65535));
}

void MClampToUint8::computeRange(TempAllocator& alloc) {
  setRange(Range::NewUInt32Range(alloc, 0, 255));
}

void MBitAnd::computeRange(TempAllocator& alloc) {
  if (specialization_ == MIRType::Int64) return;

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::and_(alloc, &left, &right));
}

void MBitOr::computeRange(TempAllocator& alloc) {
  if (specialization_ == MIRType::Int64) return;

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::or_(alloc, &left, &right));
}

void MBitXor::computeRange(TempAllocator& alloc) {
  if (specialization_ == MIRType::Int64) return;

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::xor_(alloc, &left, &right));
}

void MBitNot::computeRange(TempAllocator& alloc) {
  Range op(getOperand(0));
  op.wrapAroundToInt32();

  setRange(Range::not_(alloc, &op));
}

void MLsh::computeRange(TempAllocator& alloc) {
  if (specialization_ == MIRType::Int64) return;

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::lsh(alloc, &left, c));
    return;
  }

  right.wrapAroundToShiftCount();
  setRange(Range::lsh(alloc, &left, &right));
}

void MRsh::computeRange(TempAllocator& alloc) {
  if (specialization_ == MIRType::Int64) return;

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::rsh(alloc, &left, c));
    return;
  }

  right.wrapAroundToShiftCount();
  setRange(Range::rsh(alloc, &left, &right));
}

void MUrsh::computeRange(TempAllocator& alloc) {
  if (specialization_ == MIRType::Int64) return;

  Range left(getOperand(0));
  Range right(getOperand(1));

  // ursh can be thought of as converting its left operand to uint32, or it
  // can be thought of as converting its left operand to int32, and then
  // reinterpreting the int32 bits as a uint32 value. Both approaches yield
  // the same result. Since we lack support for full uint32 ranges, we use
  // the second interpretation, though it does cause us to be conservative.
  left.wrapAroundToInt32();
  right.wrapAroundToShiftCount();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::ursh(alloc, &left, c));
  } else {
    setRange(Range::ursh(alloc, &left, &right));
  }

  MOZ_ASSERT(range()->lower() >= 0);
}

void MAbs::computeRange(TempAllocator& alloc) {
  if (specialization_ != MIRType::Int32 && specialization_ != MIRType::Double)
    return;

  Range other(getOperand(0));
  Range* next = Range::abs(alloc, &other);
  if (implicitTruncate_) next->wrapAroundToInt32();
  setRange(next);
}

void MFloor::computeRange(TempAllocator& alloc) {
  Range other(getOperand(0));
  setRange(Range::floor(alloc, &other));
}

void MCeil::computeRange(TempAllocator& alloc) {
  Range other(getOperand(0));
  setRange(Range::ceil(alloc, &other));
}

void MClz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) return;
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MCtz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) return;
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MPopcnt::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) return;
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MMinMax::computeRange(TempAllocator& alloc) {
  if (specialization_ != MIRType::Int32 && specialization_ != MIRType::Double)
    return;

  Range left(getOperand(0));
  Range right(getOperand(1));
  setRange(isMax() ? Range::max(alloc, &left, &right)
                   : Range::min(alloc, &left, &right));
}

void MAdd::computeRange(TempAllocator& alloc) {
  if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    return;
  Range left(getOperand(0));
  Range right(getOperand(1));
  Range* next = Range::add(alloc, &left, &right);
  if (isTruncated()) next->wrapAroundToInt32();
  setRange(next);
}

void MSub::computeRange(TempAllocator& alloc) {
  if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    return;
  Range left(getOperand(0));
  Range right(getOperand(1));
  Range* next = Range::sub(alloc, &left, &right);
  if (isTruncated()) next->wrapAroundToInt32();
  setRange(next);
}

void MMul::computeRange(TempAllocator& alloc) {
  if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    return;
  Range left(getOperand(0));
  Range right(getOperand(1));
  if (canBeNegativeZero())
    canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
  Range* next = Range::mul(alloc, &left, &right);
  if (!next->canBeNegativeZero()) canBeNegativeZero_ = false;
  // Truncated multiplications could overflow in both directions
  if (isTruncated()) next->wrapAroundToInt32();
  setRange(next);
}

void MMod::computeRange(TempAllocator& alloc) {
  if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    return;
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) return;

  // If RHS can be zero, the result can be NaN.
  if (rhs.lower() <= 0 && rhs.upper() >= 0) return;

  // If both operands are non-negative integers, we can optimize this to an
  // unsigned mod.
  if (specialization() == MIRType::Int32 && rhs.lower() > 0) {
    bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
                      rhs.canHaveFractionalPart();
    // It is not possible to check that lhs.lower() >= 0, since the range
    // of a ursh with rhs a 0 constant is wrapped around the int32 range in
    // Range::Range(). However, IsUint32Type() will only return true for
    // nodes that lie in the range [0, UINT32_MAX].
    bool hasUint32s =
        IsUint32Type(getOperand(0)) &&
        getOperand(1)->type() == MIRType::Int32 &&
        (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
    if (!hasDoubles || hasUint32s) unsigned_ = true;
  }

  // For unsigned mod, we have to convert both operands to unsigned.
  // Note that we handled the case of a zero rhs above.
  if (unsigned_) {
    // The result of an unsigned mod will never be unsigned-greater than
    // either operand.
    uint32_t lhsBound = Max<uint32_t>(lhs.lower(), lhs.upper());
    uint32_t rhsBound = Max<uint32_t>(rhs.lower(), rhs.upper());

    // If either range crosses through -1 as a signed value, it could be
    // the maximum unsigned value when interpreted as unsigned. If the range
    // doesn't include -1, then the simple max value we computed above is
    // correct.
    if (lhs.lower() <= -1 && lhs.upper() >= -1) lhsBound = UINT32_MAX;
    if (rhs.lower() <= -1 && rhs.upper() >= -1) rhsBound = UINT32_MAX;

    // The result will never be equal to the rhs, and we shouldn't have
    // any rounding to worry about.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    --rhsBound;

    // This gives us two upper bounds, so we can take the best one.
    setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound)));
    return;
  }

  // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
  // First, the absolute value of the result will always be less than the
  // absolute value of rhs. (And if rhs is zero, the result is NaN).
  int64_t a = Abs<int64_t>(rhs.lower());
  int64_t b = Abs<int64_t>(rhs.upper());
  if (a == 0 && b == 0) return;
  int64_t rhsAbsBound = Max(a, b);

  // If the value is known to be integer, less-than abs(rhs) is equivalent
  // to less-than-or-equal abs(rhs)-1. This is important for being able to
  // say that the result of x%256 is an 8-bit unsigned number.
  if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
    --rhsAbsBound;

  // Next, the absolute value of the result will never be greater than the
  // absolute value of lhs.
  int64_t lhsAbsBound =
      Max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));

  // This gives us two upper bounds, so we can take the best one.
1463   int64_t absBound = Min(lhsAbsBound, rhsAbsBound);
1464 
1465   // Now consider the sign of the result.
1466   // If lhs is non-negative, the result will be non-negative.
1467   // If lhs is non-positive, the result will be non-positive.
1468   int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
1469   int64_t upper = lhs.upper() <= 0 ? 0 : absBound;
1470 
1471   Range::FractionalPartFlag newCanHaveFractionalPart =
1472       Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
1473                                 rhs.canHaveFractionalPart());
1474 
1475   // If the lhs can have the sign bit set and we can return a zero, it'll be a
1476   // negative zero.
1477   Range::NegativeZeroFlag newMayIncludeNegativeZero =
1478       Range::NegativeZeroFlag(lhs.canHaveSignBitSet());
1479 
1480   setRange(new (alloc) Range(lower, upper, newCanHaveFractionalPart,
1481                              newMayIncludeNegativeZero,
1482                              Min(lhs.exponent(), rhs.exponent())));
1483 }
1484 
1485 void MDiv::computeRange(TempAllocator& alloc) {
1486   if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
1487     return;
1488   Range lhs(getOperand(0));
1489   Range rhs(getOperand(1));
1490 
1491   // If either operand is a NaN, the result is NaN. This also conservatively
1492   // handles Infinity cases.
1493   if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) return;
1494 
1495   // Something simple for now: When dividing by a positive rhs, the result
1496   // won't be further from zero than lhs.
1497   if (lhs.lower() >= 0 && rhs.lower() >= 1) {
1498     setRange(new (alloc) Range(0, lhs.upper(), Range::IncludesFractionalParts,
1499                                Range::IncludesNegativeZero, lhs.exponent()));
1500   } else if (unsigned_ && rhs.lower() >= 1) {
1501     // We shouldn't set the unsigned flag if the inputs can have
1502     // fractional parts.
1503     MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
1504     // We shouldn't set the unsigned flag if the inputs can be
1505     // negative zero.
1506     MOZ_ASSERT(!lhs.canBeNegativeZero() && !rhs.canBeNegativeZero());
1507     // Unsigned division by a non-zero rhs will return a uint32 value.
1508     setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
1509   }
1510 }
1511 
1512 void MSqrt::computeRange(TempAllocator& alloc) {
1513   Range input(getOperand(0));
1514 
1515   // If either operand is a NaN, the result is NaN. This also conservatively
1516   // handles Infinity cases.
1517   if (!input.hasInt32Bounds()) return;
1518 
1519   // Sqrt of a negative non-zero value is NaN.
1520   if (input.lower() < 0) return;
1521 
1522   // Something simple for now: When taking the sqrt of a positive value, the
1523   // result won't be further from zero than the input.
1524   // And, sqrt of an integer may have a fractional part.
1525   setRange(new (alloc) Range(0, input.upper(), Range::IncludesFractionalParts,
1526                              input.canBeNegativeZero(), input.exponent()));
1527 }
1528 
1529 void MToDouble::computeRange(TempAllocator& alloc) {
1530   setRange(new (alloc) Range(getOperand(0)));
1531 }
1532 
1533 void MToFloat32::computeRange(TempAllocator& alloc) {}
1534 
1535 void MTruncateToInt32::computeRange(TempAllocator& alloc) {
1536   Range* output = new (alloc) Range(getOperand(0));
1537   output->wrapAroundToInt32();
1538   setRange(output);
1539 }
1540 
1541 void MToNumberInt32::computeRange(TempAllocator& alloc) {
1542   // No clamping since this computes the range *before* bailouts.
1543   setRange(new (alloc) Range(getOperand(0)));
1544 }
1545 
1546 void MLimitedTruncate::computeRange(TempAllocator& alloc) {
1547   Range* output = new (alloc) Range(input());
1548   setRange(output);
1549 }
1550 
1551 void MFilterTypeSet::computeRange(TempAllocator& alloc) {
1552   setRange(new (alloc) Range(getOperand(0)));
1553 }
1554 
1555 static Range* GetTypedArrayRange(TempAllocator& alloc, Scalar::Type type) {
1556   switch (type) {
1557     case Scalar::Uint8Clamped:
1558     case Scalar::Uint8:
1559       return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
1560     case Scalar::Uint16:
1561       return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
1562     case Scalar::Uint32:
1563       return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
1564 
1565     case Scalar::Int8:
1566       return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
1567     case Scalar::Int16:
1568       return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
1569     case Scalar::Int32:
1570       return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
1571 
1572     case Scalar::Int64:
1573     case Scalar::Float32:
1574     case Scalar::Float64:
1575     case Scalar::Float32x4:
1576     case Scalar::Int8x16:
1577     case Scalar::Int16x8:
1578     case Scalar::Int32x4:
1579     case Scalar::MaxTypedArrayViewType:
1580       break;
1581   }
1582   return nullptr;
1583 }
1584 
1585 void MLoadUnboxedScalar::computeRange(TempAllocator& alloc) {
1586   // We have an Int32 type and if this is a UInt32 load it may produce a value
1587   // outside of our range, but we have a bailout to handle those cases.
1588   setRange(GetTypedArrayRange(alloc, readType()));
1589 }
1590 
1591 void MArrayLength::computeRange(TempAllocator& alloc) {
1592   // Array lengths can go up to UINT32_MAX, but we only create MArrayLength
1593   // nodes when the value is known to be int32 (see the
1594   // OBJECT_FLAG_LENGTH_OVERFLOW flag).
1595   setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1596 }
1597 
1598 void MInitializedLength::computeRange(TempAllocator& alloc) {
1599   setRange(
1600       Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT));
1601 }
1602 
1603 void MTypedArrayLength::computeRange(TempAllocator& alloc) {
1604   setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1605 }
1606 
1607 void MStringLength::computeRange(TempAllocator& alloc) {
1608   static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
1609                 "NewUInt32Range requires a uint32 value");
1610   setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
1611 }
1612 
1613 void MArgumentsLength::computeRange(TempAllocator& alloc) {
1614   // This is a conservative upper bound on what |TooManyActualArguments|
1615   // checks.  If exceeded, Ion will not be entered in the first place.
1616   static_assert(ARGS_LENGTH_MAX <= UINT32_MAX,
1617                 "NewUInt32Range requires a uint32 value");
1618   setRange(Range::NewUInt32Range(alloc, 0, ARGS_LENGTH_MAX));
1619 }
1620 
1621 void MBoundsCheck::computeRange(TempAllocator& alloc) {
1622   // Just transfer the incoming index range to the output. The length() is
1623   // also interesting, but it is handled as a bailout check, and we're
1624   // computing a pre-bailout range here.
1625   setRange(new (alloc) Range(index()));
1626 }
1627 
1628 void MSpectreMaskIndex::computeRange(TempAllocator& alloc) {
1629   // Just transfer the incoming index range to the output for now.
1630   setRange(new (alloc) Range(index()));
1631 }
1632 
1633 void MArrayPush::computeRange(TempAllocator& alloc) {
1634   // MArrayPush returns the new array length.
1635   setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
1636 }
1637 
1638 void MMathFunction::computeRange(TempAllocator& alloc) {
1639   Range opRange(getOperand(0));
1640   switch (function()) {
1641     case Sin:
1642     case Cos:
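      // For a finite, non-NaN input, sin and cos always produce a value in
      // [-1, 1].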
1643       if (!opRange.canBeInfiniteOrNaN())
1644         setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
1645       break;
1646     case Sign:
1647       setRange(Range::sign(alloc, &opRange));
1648       break;
1649     default:
1650       break;
1651   }
1652 }
1653 
1654 void MRandom::computeRange(TempAllocator& alloc) {
1655   Range* r = Range::NewDoubleRange(alloc, 0.0, 1.0);
1656 
1657   // Random never returns negative zero.
1658   r->refineToExcludeNegativeZero();
1659 
1660   setRange(r);
1661 }
1662 
1663 void MNaNToZero::computeRange(TempAllocator& alloc) {
1664   Range other(input());
1665   setRange(Range::NaNToZero(alloc, &other));
1666 }
1667 
1668 ///////////////////////////////////////////////////////////////////////////////
1669 // Range Analysis
1670 ///////////////////////////////////////////////////////////////////////////////
1671 
1672 bool RangeAnalysis::analyzeLoop(MBasicBlock* header) {
1673   MOZ_ASSERT(header->hasUniqueBackedge());
1674 
1675   // Try to compute an upper bound on the number of times the loop backedge
1676   // will be taken. Look for tests that dominate the backedge and which have
1677   // an edge leaving the loop body.
1678   MBasicBlock* backedge = header->backedge();
1679 
1680   // Ignore trivial infinite loops.
1681   if (backedge == header) return true;
1682 
1683   bool canOsr;
1684   size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);
1685 
1686   // Ignore broken loops.
1687   if (numBlocks == 0) return true;
1688 
1689   LoopIterationBound* iterationBound = nullptr;
1690 
1691   MBasicBlock* block = backedge;
1692   do {
1693     BranchDirection direction;
1694     MTest* branch = block->immediateDominatorBranch(&direction);
1695 
1696     if (block == block->immediateDominator()) break;
1697 
1698     block = block->immediateDominator();
1699 
1700     if (branch) {
1701       direction = NegateBranchDirection(direction);
1702       MBasicBlock* otherBlock = branch->branchSuccessor(direction);
1703       if (!otherBlock->isMarked()) {
1704         if (!alloc().ensureBallast()) return false;
1705         iterationBound = analyzeLoopIterationCount(header, branch, direction);
1706         if (iterationBound) break;
1707       }
1708     }
1709   } while (block != header);
1710 
1711   if (!iterationBound) {
1712     UnmarkLoopBlocks(graph_, header);
1713     return true;
1714   }
1715 
1716   if (!loopIterationBounds.append(iterationBound)) return false;
1717 
1718 #ifdef DEBUG
1719   if (JitSpewEnabled(JitSpew_Range)) {
1720     Sprinter sp(GetJitContext()->cx);
1721     if (!sp.init()) return false;
1722     iterationBound->boundSum.dump(sp);
1723     JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
1724             sp.string());
1725   }
1726 #endif
1727 
1728   // Try to compute symbolic bounds for the phi nodes at the head of this
1729   // loop, expressed in terms of the iteration bound just computed.
1730 
1731   for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd();
1732        iter++)
1733     analyzeLoopPhi(iterationBound, *iter);
1734 
1735   if (!mir->compilingWasm()) {
1736     // Try to hoist any bounds checks from the loop using symbolic bounds.
1737 
1738     Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());
1739 
1740     for (ReversePostorderIterator iter(graph_.rpoBegin(header));
1741          iter != graph_.rpoEnd(); iter++) {
1742       MBasicBlock* block = *iter;
1743       if (!block->isMarked()) continue;
1744 
1745       for (MDefinitionIterator iter(block); iter; iter++) {
1746         MDefinition* def = *iter;
1747         if (def->isBoundsCheck() && def->isMovable()) {
1748           if (!alloc().ensureBallast()) return false;
1749           if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
1750             if (!hoistedChecks.append(def->toBoundsCheck())) return false;
1751           }
1752         }
1753       }
1754     }
1755 
1756     // Note: replace all uses of the original bounds check with the
1757     // actual index. This is usually done during bounds check elimination,
1758     // but in this case it's safe to do it here since the load/store is
1759     // definitely not loop-invariant, so we will never move it before
1760     // one of the bounds checks we just added.
1761     for (size_t i = 0; i < hoistedChecks.length(); i++) {
1762       MBoundsCheck* ins = hoistedChecks[i];
1763       ins->replaceAllUsesWith(ins->index());
1764       ins->block()->discard(ins);
1765     }
1766   }
1767 
1768   UnmarkLoopBlocks(graph_, header);
1769   return true;
1770 }
1771 
1772 // Unbox beta nodes in order to hoist instructions properly, and not be limited
1773 // by the beta nodes which are added after each branch.
1774 static inline MDefinition* DefinitionOrBetaInputDefinition(MDefinition* ins) {
1775   while (ins->isBeta()) ins = ins->toBeta()->input();
1776   return ins;
1777 }
1778 
1779 LoopIterationBound* RangeAnalysis::analyzeLoopIterationCount(
1780     MBasicBlock* header, MTest* test, BranchDirection direction) {
1781   SimpleLinearSum lhs(nullptr, 0);
1782   MDefinition* rhs;
1783   bool lessEqual;
1784   if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
1785     return nullptr;
1786 
1787   // Ensure the rhs is a loop invariant term.
1788   if (rhs && rhs->block()->isMarked()) {
1789     if (lhs.term && lhs.term->block()->isMarked()) return nullptr;
1790     MDefinition* temp = lhs.term;
1791     lhs.term = rhs;
1792     rhs = temp;
1793     if (!SafeSub(0, lhs.constant, &lhs.constant)) return nullptr;
1794     lessEqual = !lessEqual;
1795   }
1796 
1797   MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());
1798 
1799   // Ensure the lhs is a phi node from the start of the loop body.
1800   if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
1801     return nullptr;
1802 
1803   // Check that the value of the lhs changes by a constant amount with each
1804   // loop iteration. This requires that the lhs be written in every loop
1805   // iteration with a value that is a constant difference from its value at
1806   // the start of the iteration.
1807 
1808   if (lhs.term->toPhi()->numOperands() != 2) return nullptr;
1809 
1810   // The first operand of the phi should be the lhs' value at the start of
1811   // the first executed iteration, and not a value written which could
1812   // replace the second operand below during the middle of execution.
1813   MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
1814   if (lhsInitial->block()->isMarked()) return nullptr;
1815 
1816   // The second operand of the phi should be a value written by an add/sub
1817   // in every loop iteration, i.e. in a block which dominates the backedge.
1818   MDefinition* lhsWrite = DefinitionOrBetaInputDefinition(
1819       lhs.term->toPhi()->getLoopBackedgeOperand());
1820   if (!lhsWrite->isAdd() && !lhsWrite->isSub()) return nullptr;
1821   if (!lhsWrite->block()->isMarked()) return nullptr;
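  // Walk up the dominator tree from the backedge to check that the add/sub
  // dominates the backedge, i.e. that it is executed on every iteration.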
1822   MBasicBlock* bb = header->backedge();
1823   for (; bb != lhsWrite->block() && bb != header;
1824        bb = bb->immediateDominator()) {
1825   }
1826   if (bb != lhsWrite->block()) return nullptr;
1827 
1828   SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);
1829 
1830   // Check that the value of the lhs at the backedge is of the form
1831   // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
1832   // of the iteration, and not that written to lhs in a previous iteration,
1833   // as such a previous value could not appear directly in the addition:
1834   // it could not be stored in lhs as the lhs add/sub executes in every
1835   // iteration, and if it were stored in another variable its use here would
1836   // be as an operand to a phi node for that variable.
1837   if (lhsModified.term != lhs.term) return nullptr;
1838 
1839   LinearSum iterationBound(alloc());
1840   LinearSum currentIteration(alloc());
1841 
1842   if (lhsModified.constant == 1 && !lessEqual) {
1843     // The value of lhs is 'initial(lhs) + iterCount' and this will end
1844     // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
1845     // on the number of backedges executed is:
1846     //
1847     // initial(lhs) + iterCount + lhsN == rhs
1848     // iterCount == rhs - initial(lhs) - lhsN
1849 
1850     if (rhs) {
1851       if (!iterationBound.add(rhs, 1)) return nullptr;
1852     }
1853     if (!iterationBound.add(lhsInitial, -1)) return nullptr;
1854 
1855     int32_t lhsConstant;
1856     if (!SafeSub(0, lhs.constant, &lhsConstant)) return nullptr;
1857     if (!iterationBound.add(lhsConstant)) return nullptr;
1858 
1859     if (!currentIteration.add(lhs.term, 1)) return nullptr;
1860     if (!currentIteration.add(lhsInitial, -1)) return nullptr;
1861   } else if (lhsModified.constant == -1 && lessEqual) {
1862     // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
1863     // case, an upper bound on the number of backedges executed is:
1864     //
1865     // initial(lhs) - iterCount + lhsN == rhs
1866     // iterCount == initial(lhs) - rhs + lhsN
1867 
1868     if (!iterationBound.add(lhsInitial, 1)) return nullptr;
1869     if (rhs) {
1870       if (!iterationBound.add(rhs, -1)) return nullptr;
1871     }
1872     if (!iterationBound.add(lhs.constant)) return nullptr;
1873 
1874     if (!currentIteration.add(lhsInitial, 1)) return nullptr;
1875     if (!currentIteration.add(lhs.term, -1)) return nullptr;
1876   } else {
1877     return nullptr;
1878   }
1879 
1880   return new (alloc())
1881       LoopIterationBound(header, test, iterationBound, currentIteration);
1882 }
1883 
1884 void RangeAnalysis::analyzeLoopPhi(LoopIterationBound* loopBound, MPhi* phi) {
1885   // Given a bound on the number of backedges taken, compute an upper and
1886   // lower bound for a phi node that may change by a constant amount each
1887   // iteration. Unlike for the case when computing the iteration bound
1888   // itself, the phi does not need to change the same amount every iteration,
1889   // but is required to change at most N and be either nondecreasing or
1890   // nonincreasing.
1891 
1892   MOZ_ASSERT(phi->numOperands() == 2);
1893 
1894   MDefinition* initial = phi->getLoopPredecessorOperand();
1895   if (initial->block()->isMarked()) return;
1896 
1897   SimpleLinearSum modified =
1898       ExtractLinearSum(phi->getLoopBackedgeOperand(), MathSpace::Infinite);
1899 
1900   if (modified.term != phi || modified.constant == 0) return;
1901 
1902   if (!phi->range()) phi->setRange(new (alloc()) Range(phi));
1903 
1904   LinearSum initialSum(alloc());
1905   if (!initialSum.add(initial, 1)) return;
1906 
1907   // The phi may change by N each iteration, and is either nondecreasing or
1908   // nonincreasing. initial(phi) is either a lower or upper bound for the
1909   // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
1910   // at all points within the loop, provided that loopBound >= 0.
1911   //
1912   // We are more interested, however, in the bound for phi at points
1913   // dominated by the loop bound's test; if the test dominates e.g. a bounds
1914   // check we want to hoist from the loop, using the value of the phi at the
1915   // head of the loop for this will usually be too imprecise to hoist the
1916   // check. These points will execute only if the backedge executes at least
1917   // one more time (as the test passed and the test dominates the backedge),
1918   // so we know both that loopBound >= 1 and that the phi's value has changed
1919   // at most loopBound - 1 times. Thus, another upper or lower bound for the
1920   // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
1921   // ensure that loopBound >= 0.
1922 
1923   LinearSum limitSum(loopBound->boundSum);
1924   if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
1925     return;
1926 
1927   int32_t negativeConstant;
1928   if (!SafeSub(0, modified.constant, &negativeConstant) ||
1929       !limitSum.add(negativeConstant))
1930     return;
1931 
1932   Range* initRange = initial->range();
1933   if (modified.constant > 0) {
1934     if (initRange && initRange->hasInt32LowerBound())
1935       phi->range()->refineLower(initRange->lower());
1936     phi->range()->setSymbolicLower(
1937         SymbolicBound::New(alloc(), nullptr, initialSum));
1938     phi->range()->setSymbolicUpper(
1939         SymbolicBound::New(alloc(), loopBound, limitSum));
1940   } else {
1941     if (initRange && initRange->hasInt32UpperBound())
1942       phi->range()->refineUpper(initRange->upper());
1943     phi->range()->setSymbolicUpper(
1944         SymbolicBound::New(alloc(), nullptr, initialSum));
1945     phi->range()->setSymbolicLower(
1946         SymbolicBound::New(alloc(), loopBound, limitSum));
1947   }
1948 
1949   JitSpew(JitSpew_Range, "added symbolic range on %d", phi->id());
1950   SpewRange(phi);
1951 }
1952 
1953 // Whether bound is valid at the specified bounds check instruction in a loop,
1954 // and may be used to hoist ins.
1955 static inline bool SymbolicBoundIsValid(MBasicBlock* header, MBoundsCheck* ins,
1956                                         const SymbolicBound* bound) {
1957   if (!bound->loop) return true;
1958   if (ins->block() == header) return false;
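  // Walk up the dominator tree to check whether the loop bound's test block
  // dominates the bounds check.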
1959   MBasicBlock* bb = ins->block()->immediateDominator();
1960   while (bb != header && bb != bound->loop->test->block())
1961     bb = bb->immediateDominator();
1962   return bb == bound->loop->test->block();
1963 }
1964 
1965 bool RangeAnalysis::tryHoistBoundsCheck(MBasicBlock* header,
1966                                         MBoundsCheck* ins) {
1967   // The bounds check's length must be loop invariant.
1968   MDefinition* length = DefinitionOrBetaInputDefinition(ins->length());
1969   if (length->block()->isMarked()) return false;
1970 
1971   // The bounds check's index should not be loop invariant (else we would
1972   // already have hoisted it during LICM).
1973   SimpleLinearSum index = ExtractLinearSum(ins->index());
1974   if (!index.term || !index.term->block()->isMarked()) return false;
1975 
1976   // Check for a symbolic lower and upper bound on the index. If either
1977   // condition depends on an iteration bound for the loop, only hoist if
1978   // the bounds check is dominated by the iteration bound's test.
1979   if (!index.term->range()) return false;
1980   const SymbolicBound* lower = index.term->range()->symbolicLower();
1981   if (!lower || !SymbolicBoundIsValid(header, ins, lower)) return false;
1982   const SymbolicBound* upper = index.term->range()->symbolicUpper();
1983   if (!upper || !SymbolicBoundIsValid(header, ins, upper)) return false;
1984 
1985   MBasicBlock* preLoop = header->loopPredecessor();
1986   MOZ_ASSERT(!preLoop->isMarked());
1987 
1988   MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum);
1989   if (!lowerTerm) return false;
1990 
1991   MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum);
1992   if (!upperTerm) return false;
1993 
1994   // We are checking that index + indexConstant >= 0, and know that
1995   // index >= lowerTerm + lowerConstant. Thus, check that:
1996   //
1997   // lowerTerm + lowerConstant + indexConstant >= 0
1998   // lowerTerm >= -lowerConstant - indexConstant
1999 
2000   int32_t lowerConstant = 0;
2001   if (!SafeSub(lowerConstant, index.constant, &lowerConstant)) return false;
2002   if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant))
2003     return false;
2004 
2005   // We are checking that index < boundsLength, and know that
2006   // index <= upperTerm + upperConstant. Thus, check that:
2007   //
2008   // upperTerm + upperConstant < boundsLength
2009 
2010   int32_t upperConstant = index.constant;
2011   if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant))
2012     return false;
2013 
2014   // Hoist the loop invariant lower bounds checks.
2015   MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
2016   lowerCheck->setMinimum(lowerConstant);
2017   lowerCheck->computeRange(alloc());
2018   lowerCheck->collectRangeInfoPreTrunc();
2019   preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
2020 
2021   // Hoist the loop invariant upper bounds checks.
2022   if (upperTerm != length || upperConstant >= 0) {
2023     MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
2024     upperCheck->setMinimum(upperConstant);
2025     upperCheck->setMaximum(upperConstant);
2026     upperCheck->computeRange(alloc());
2027     upperCheck->collectRangeInfoPreTrunc();
2028     preLoop->insertBefore(preLoop->lastIns(), upperCheck);
2029   }
2030 
2031   return true;
2032 }
2033 
2034 bool RangeAnalysis::analyze() {
2035   JitSpew(JitSpew_Range, "Doing range propagation");
2036 
2037   for (ReversePostorderIterator iter(graph_.rpoBegin());
2038        iter != graph_.rpoEnd(); iter++) {
2039     MBasicBlock* block = *iter;
2040     // No blocks are supposed to be unreachable, except when we have an OSR
2041     // block, in which case the Value Numbering phase adds fixup blocks which
2042     // are unreachable.
2043     MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());
2044 
2045     // If the block's immediate dominator is unreachable, the block is
2046     // unreachable. Iterating in RPO, we'll always see the immediate
2047     // dominator before the block.
2048     if (block->immediateDominator()->unreachable()) {
2049       block->setUnreachableUnchecked();
2050       continue;
2051     }
2052 
2053     for (MDefinitionIterator iter(block); iter; iter++) {
2054       MDefinition* def = *iter;
2055       if (!alloc().ensureBallast()) return false;
2056 
2057       def->computeRange(alloc());
2058       JitSpew(JitSpew_Range, "computing range on %d", def->id());
2059       SpewRange(def);
2060     }
2061 
2062     // Beta node range analysis may have marked this block unreachable. If
2063     // so, it's no longer interesting to continue processing it.
2064     if (block->unreachable()) continue;
2065 
2066     if (block->isLoopHeader()) {
2067       if (!analyzeLoop(block)) return false;
2068     }
2069 
2070     // First pass at collecting range info - while the beta nodes are still
2071     // around and before truncation.
2072     for (MInstructionIterator iter(block->begin()); iter != block->end();
2073          iter++)
2074       iter->collectRangeInfoPreTrunc();
2075   }
2076 
2077   return true;
2078 }
2079 
2080 bool RangeAnalysis::addRangeAssertions() {
2081   if (!JitOptions.checkRangeAnalysis) return true;
2082 
2083   // Check the computed range for this instruction, if the option is set. Note
2084   // that this code is quite invasive; it adds numerous additional
2085   // instructions for each MInstruction with a computed range, and it uses
2086   // registers, so it also affects register allocation.
2087   for (ReversePostorderIterator iter(graph_.rpoBegin());
2088        iter != graph_.rpoEnd(); iter++) {
2089     MBasicBlock* block = *iter;
2090 
2091     // Do not add assertions in unreachable blocks.
2092     if (block->unreachable()) continue;
2093 
2094     for (MDefinitionIterator iter(block); iter; iter++) {
2095       MDefinition* ins = *iter;
2096 
2097       // Perform range checking for all numeric and numeric-like types.
2098       if (!IsNumberType(ins->type()) && ins->type() != MIRType::Boolean &&
2099           ins->type() != MIRType::Value) {
2100         continue;
2101       }
2102 
2103       // MIsNoIter is fused with the MTest that follows it and emitted as
2104       // LIsNoIterAndBranch. Skip it to avoid complicating MIsNoIter
2105       // lowering.
2106       if (ins->isIsNoIter()) continue;
2107 
2108       Range r(ins);
2109 
2110       MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());
2111 
2112       // Don't insert assertions if there's nothing interesting to assert.
2113       if (r.isUnknown() ||
2114           (ins->type() == MIRType::Int32 && r.isUnknownInt32()))
2115         continue;
2116 
2117       // Don't add a use to an instruction that is recovered on bailout.
2118       if (ins->isRecoveredOnBailout()) continue;
2119 
2120       if (!alloc().ensureBallast()) return false;
2121       MAssertRange* guard =
2122           MAssertRange::New(alloc(), ins, new (alloc()) Range(r));
2123 
2124       // Beta nodes and interrupt checks are required to be located at the
2125       // beginnings of basic blocks, so we must insert range assertions
2126       // after any such instructions.
2127       MInstruction* insertAt = nullptr;
2128       if (block->graph().osrBlock() == block)
2129         insertAt = ins->toInstruction();
2130       else
2131         insertAt = block->safeInsertTop(ins);
2132 
2133       if (insertAt == *iter)
2134         block->insertAfter(insertAt, guard);
2135       else
2136         block->insertBefore(insertAt, guard);
2137     }
2138   }
2139 
2140   return true;
2141 }
2142 
2143 ///////////////////////////////////////////////////////////////////////////////
2144 // Range based Truncation
2145 ///////////////////////////////////////////////////////////////////////////////
2146 
2147 void Range::clampToInt32() {
2148   if (isInt32()) return;
2149   int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
2150   int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
2151   setInt32(l, h);
2152 }
2153 
2154 void Range::wrapAroundToInt32() {
2155   if (!hasInt32Bounds()) {
2156     setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
2157   } else if (canHaveFractionalPart()) {
2158     // Clearing the fractional field may provide an opportunity to refine
2159     // lower_ or upper_.
2160     canHaveFractionalPart_ = ExcludesFractionalParts;
2161     canBeNegativeZero_ = ExcludesNegativeZero;
2162     refineInt32BoundsByExponent(max_exponent_, &lower_, &hasInt32LowerBound_,
2163                                 &upper_, &hasInt32UpperBound_);
2164 
2165     assertInvariants();
2166   } else {
2167     // If nothing else, we can clear the negative zero flag.
2168     canBeNegativeZero_ = ExcludesNegativeZero;
2169   }
2170   MOZ_ASSERT(isInt32());
2171 }
2172 
2173 void Range::wrapAroundToShiftCount() {
2174   wrapAroundToInt32();
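  // Shift counts only use the low five bits of the rhs, so any range that is
  // not already within [0, 31] collapses to the full [0, 31].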
2175   if (lower() < 0 || upper() >= 32) setInt32(0, 31);
2176 }
2177 
2178 void Range::wrapAroundToBoolean() {
2179   wrapAroundToInt32();
2180   if (!isBoolean()) setInt32(0, 1);
2181   MOZ_ASSERT(isBoolean());
2182 }
2183 
2184 bool MDefinition::needTruncation(TruncateKind kind) {
2185   // No procedure defined for truncating this instruction.
2186   return false;
2187 }
2188 
2189 void MDefinition::truncate() {
2190   MOZ_CRASH("No procedure defined for truncating this instruction.");
2191 }
2192 
2193 bool MConstant::needTruncation(TruncateKind kind) {
2194   return IsFloatingPointType(type());
2195 }
2196 
2197 void MConstant::truncate() {
2198   MOZ_ASSERT(needTruncation(Truncate));
2199 
2200   // Truncate the double to int, since all uses truncate it.
2201   int32_t res = ToInt32(numberToDouble());
2202   payload_.asBits = 0;
2203   payload_.i32 = res;
2204   setResultType(MIRType::Int32);
2205   if (range()) range()->setInt32(res, res);
2206 }
2207 
2208 bool MPhi::needTruncation(TruncateKind kind) {
2209   if (type() == MIRType::Double || type() == MIRType::Int32) {
2210     truncateKind_ = kind;
2211     return true;
2212   }
2213 
2214   return false;
2215 }
2216 
2217 void MPhi::truncate() {
2218   setResultType(MIRType::Int32);
2219   if (truncateKind_ >= IndirectTruncate && range())
2220     range()->wrapAroundToInt32();
2221 }
2222 
2223 bool MAdd::needTruncation(TruncateKind kind) {
2224   // Remember analysis, needed for fallible checks.
2225   setTruncateKind(kind);
2226 
2227   return type() == MIRType::Double || type() == MIRType::Int32;
2228 }
2229 
2230 void MAdd::truncate() {
2231   MOZ_ASSERT(needTruncation(truncateKind()));
2232   specialization_ = MIRType::Int32;
2233   setResultType(MIRType::Int32);
2234   if (truncateKind() >= IndirectTruncate && range())
2235     range()->wrapAroundToInt32();
2236 }
2237 
2238 bool MSub::needTruncation(TruncateKind kind) {
2239   // Remember analysis, needed for fallible checks.
2240   setTruncateKind(kind);
2241 
2242   return type() == MIRType::Double || type() == MIRType::Int32;
2243 }
2244 
2245 void MSub::truncate() {
2246   MOZ_ASSERT(needTruncation(truncateKind()));
2247   specialization_ = MIRType::Int32;
2248   setResultType(MIRType::Int32);
2249   if (truncateKind() >= IndirectTruncate && range())
2250     range()->wrapAroundToInt32();
2251 }
2252 
2253 bool MMul::needTruncation(TruncateKind kind) {
2254   // Remember analysis, needed for fallible checks.
2255   setTruncateKind(kind);
2256 
2257   return type() == MIRType::Double || type() == MIRType::Int32;
2258 }
2259 
2260 void MMul::truncate() {
2261   MOZ_ASSERT(needTruncation(truncateKind()));
2262   specialization_ = MIRType::Int32;
2263   setResultType(MIRType::Int32);
2264   if (truncateKind() >= IndirectTruncate) {
2265     setCanBeNegativeZero(false);
2266     if (range()) range()->wrapAroundToInt32();
2267   }
2268 }
2269 
2270 bool MDiv::needTruncation(TruncateKind kind) {
2271   // Remember analysis, needed for fallible checks.
2272   setTruncateKind(kind);
2273 
2274   return type() == MIRType::Double || type() == MIRType::Int32;
2275 }
2276 
2277 void MDiv::truncate() {
2278   MOZ_ASSERT(needTruncation(truncateKind()));
2279   specialization_ = MIRType::Int32;
2280   setResultType(MIRType::Int32);
2281 
2282   // Divisions where the lhs and rhs are unsigned and the result is
2283   // truncated can be lowered more efficiently.
2284   if (unsignedOperands()) {
2285     replaceWithUnsignedOperands();
2286     unsigned_ = true;
2287   }
2288 }
2289 
2290 bool MMod::needTruncation(TruncateKind kind) {
2291   // Remember analysis, needed for fallible checks.
2292   setTruncateKind(kind);
2293 
2294   return type() == MIRType::Double || type() == MIRType::Int32;
2295 }
2296 
2297 void MMod::truncate() {
2298   // As for division, handle unsigned modulus with a truncated result.
2299   MOZ_ASSERT(needTruncation(truncateKind()));
2300   specialization_ = MIRType::Int32;
2301   setResultType(MIRType::Int32);
2302 
2303   if (unsignedOperands()) {
2304     replaceWithUnsignedOperands();
2305     unsigned_ = true;
2306   }
2307 }
2308 
2309 bool MToDouble::needTruncation(TruncateKind kind) {
2310   MOZ_ASSERT(type() == MIRType::Double);
2311   setTruncateKind(kind);
2312 
2313   return true;
2314 }
2315 
2316 void MToDouble::truncate() {
2317   MOZ_ASSERT(needTruncation(truncateKind()));
2318 
2319   // We use the return type to flag that this MToDouble should be replaced by
2320   // a MTruncateToInt32 when modifying the graph.
2321   setResultType(MIRType::Int32);
2322   if (truncateKind() >= IndirectTruncate) {
2323     if (range()) range()->wrapAroundToInt32();
2324   }
2325 }
2326 
2327 bool MLimitedTruncate::needTruncation(TruncateKind kind) {
2328   setTruncateKind(kind);
2329   setResultType(MIRType::Int32);
2330   if (kind >= IndirectTruncate && range()) range()->wrapAroundToInt32();
2331   return false;
2332 }
2333 
2334 bool MCompare::needTruncation(TruncateKind kind) {
2335   // If we're compiling wasm, don't try to optimize the comparison type, as
2336   // the code presumably is already using the type it wants. Also, wasm
2337   // doesn't support bailouts, so we wouldn't be able to rely on
2338   // TruncateAfterBailouts to convert our inputs.
2339   if (block()->info().compilingWasm()) return false;
2340 
2341   if (!isDoubleComparison()) return false;
2342 
2343   // If both operands are naturally in the int32 range, we can convert from
2344   // a double comparison to being an int32 comparison.
2345   if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32()) return false;
2346 
2347   return true;
2348 }
2349 
2350 void MCompare::truncate() {
2351   compareType_ = Compare_Int32;
2352 
2353   // Truncating the operands won't change their value because we don't force a
2354   // truncation, but it will change their type, which we need because we
2355   // now expect integer inputs.
2356   truncateOperands_ = true;
2357 }
2358 
2359 MDefinition::TruncateKind MDefinition::operandTruncateKind(size_t index) const {
2360   // Generic routine: We don't know anything.
2361   return NoTruncate;
2362 }
2363 
2364 MDefinition::TruncateKind MPhi::operandTruncateKind(size_t index) const {
2365   // The truncation applied to a phi is effectively applied to the phi's
2366   // operands.
2367   return truncateKind_;
2368 }
2369 
2370 MDefinition::TruncateKind MTruncateToInt32::operandTruncateKind(
2371     size_t index) const {
2372   // This operator is an explicit truncate to int32.
2373   return Truncate;
2374 }
2375 
2376 MDefinition::TruncateKind MBinaryBitwiseInstruction::operandTruncateKind(
2377     size_t index) const {
2378   // The bitwise operators truncate to int32.
2379   return Truncate;
2380 }
2381 
2382 MDefinition::TruncateKind MLimitedTruncate::operandTruncateKind(
2383     size_t index) const {
2384   return Min(truncateKind(), truncateLimit_);
2385 }
2386 
2387 MDefinition::TruncateKind MAdd::operandTruncateKind(size_t index) const {
2388   // This operator is doing some arithmetic. If its result is truncated,
2389   // it's an indirect truncate for its operands.
2390   return Min(truncateKind(), IndirectTruncate);
2391 }
2392 
2393 MDefinition::TruncateKind MSub::operandTruncateKind(size_t index) const {
2394   // See the comment in MAdd::operandTruncateKind.
2395   return Min(truncateKind(), IndirectTruncate);
2396 }
2397 
2398 MDefinition::TruncateKind MMul::operandTruncateKind(size_t index) const {
2399   // See the comment in MAdd::operandTruncateKind.
2400   return Min(truncateKind(), IndirectTruncate);
2401 }
2402 
2403 MDefinition::TruncateKind MToDouble::operandTruncateKind(size_t index) const {
2404   // MToDouble propagates its truncate kind to its operand.
2405   return truncateKind();
2406 }
2407 
2408 MDefinition::TruncateKind MStoreUnboxedScalar::operandTruncateKind(
2409     size_t index) const {
2410   // Some receiver objects, such as typed arrays, will truncate out of range
2411   // integer inputs.
2412   return (truncateInput() && index == 2 && isIntegerWrite()) ? Truncate
2413                                                              : NoTruncate;
2414 }
2415 
2416 MDefinition::TruncateKind MStoreTypedArrayElementHole::operandTruncateKind(
2417     size_t index) const {
2418   // An integer store truncates the stored value.
2419   return index == 3 && isIntegerWrite() ? Truncate : NoTruncate;
2420 }
2421 
2422 MDefinition::TruncateKind MDiv::operandTruncateKind(size_t index) const {
2423   return Min(truncateKind(), TruncateAfterBailouts);
2424 }
2425 
2426 MDefinition::TruncateKind MMod::operandTruncateKind(size_t index) const {
2427   return Min(truncateKind(), TruncateAfterBailouts);
2428 }
2429 
2430 MDefinition::TruncateKind MCompare::operandTruncateKind(size_t index) const {
2431   // If we're doing an int32 comparison on operands which were previously
2432   // floating-point, convert them!
2433   MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
2434   return truncateOperands_ ? TruncateAfterBailouts : NoTruncate;
2435 }
2436 
2437 static bool TruncateTest(TempAllocator& alloc, MTest* test) {
2438   // If all possible inputs to the test are either int32 or boolean,
2439   // convert those inputs to int32 so that an int32 test can be performed.
2440 
2441   if (test->input()->type() != MIRType::Value) return true;
2442 
2443   if (!test->input()->isPhi() || !test->input()->hasOneDefUse() ||
2444       test->input()->isImplicitlyUsed())
2445     return true;
2446 
2447   MPhi* phi = test->input()->toPhi();
2448   for (size_t i = 0; i < phi->numOperands(); i++) {
2449     MDefinition* def = phi->getOperand(i);
2450     if (!def->isBox()) return true;
2451     MDefinition* inner = def->getOperand(0);
2452     if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32)
2453       return true;
2454   }
2455 
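  // Every operand of the phi is a boxed int32 or boolean; unbox each one,
  // converting booleans to int32 where necessary.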
2456   for (size_t i = 0; i < phi->numOperands(); i++) {
2457     MDefinition* inner = phi->getOperand(i)->getOperand(0);
2458     if (inner->type() != MIRType::Int32) {
2459       if (!alloc.ensureBallast()) return false;
2460       MBasicBlock* block = inner->block();
2461       inner = MToNumberInt32::New(alloc, inner);
2462       block->insertBefore(block->lastIns(), inner->toInstruction());
2463     }
2464     MOZ_ASSERT(inner->type() == MIRType::Int32);
2465     phi->replaceOperand(i, inner);
2466   }
2467 
2468   phi->setResultType(MIRType::Int32);
2469   return true;
2470 }
2471 
2472 // Truncating an instruction's result is an optimization which implies
2473 // knowing all uses of an instruction.  This implies that if one of
2474 // the uses got removed, then Range Analysis is not allowed to do
2475 // any modification which can change the result, especially if the
2476 // result can be observed.
2477 //
2478 // This corner case can easily be understood with UCE examples, but it
2479 // might also happen with type inference assumptions.  Note: type
2480 // inference assumptions act as implicit branches where other types might
2481 // be flowing in.
2482 static bool CloneForDeadBranches(TempAllocator& alloc,
2483                                  MInstruction* candidate) {
2484   // Compare returns a boolean so it doesn't have to be recovered on bailout
2485   // because the output would remain correct.
2486   if (candidate->isCompare()) return true;
2487 
2488   MOZ_ASSERT(candidate->canClone());
2489   if (!alloc.ensureBallast()) return false;
2490 
2491   MDefinitionVector operands(alloc);
2492   size_t end = candidate->numOperands();
2493   if (!operands.reserve(end)) return false;
2494   for (size_t i = 0; i < end; ++i)
2495     operands.infallibleAppend(candidate->getOperand(i));
2496 
2497   MInstruction* clone = candidate->clone(alloc, operands);
2498   clone->setRange(nullptr);
2499 
2500   // Set UseRemoved flag on the cloned instruction in order to chain recover
2501   // instruction for the bailout path.
2502   clone->setUseRemovedUnchecked();
2503 
2504   candidate->block()->insertBefore(candidate, clone);
2505 
2506   if (!candidate->maybeConstantValue()) {
2507     MOZ_ASSERT(clone->canRecoverOnBailout());
2508     clone->setRecoveredOnBailout();
2509   }
2510 
2511   // Replace the candidate by its recovered on bailout clone within recovered
2512   // instructions and resume points operands.
2513   for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd();) {
2514     MUse* use = *i++;
2515     MNode* ins = use->consumer();
2516     if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout())
2517       continue;
2518 
2519     use->replaceProducer(clone);
2520   }
2521 
2522   return true;
2523 }
2524 
2525 // Examine all the users of |candidate| and determine the most aggressive
2526 // truncate kind that satisfies all of them.
2527 static MDefinition::TruncateKind ComputeRequestedTruncateKind(
2528     MDefinition* candidate, bool* shouldClone) {
2529   bool isCapturedResult =
2530       false;  // Check if used by a recovered instruction or a resume point.
2531   bool isObservableResult =
2532       false;  // Check if it can be read from another frame.
2533   bool isRecoverableResult = true;  // Check if it can safely be reconstructed.
2534   bool hasUseRemoved = candidate->isUseRemoved();
2535 
2536   MDefinition::TruncateKind kind = MDefinition::Truncate;
2537   for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd();
2538        use++) {
2539     if (use->consumer()->isResumePoint()) {
2540       // Truncation is a destructive optimization, as such, we need to pay
2541       // attention to removed branches and prevent
2542       // destructive optimizations if we have no alternative. (see
2543       // UseRemoved flag)
2544       isCapturedResult = true;
2545       isObservableResult =
2546           isObservableResult ||
2547           use->consumer()->toResumePoint()->isObservableOperand(*use);
2548       isRecoverableResult =
2549           isRecoverableResult &&
2550           use->consumer()->toResumePoint()->isRecoverableOperand(*use);
2551       continue;
2552     }
2553 
2554     MDefinition* consumer = use->consumer()->toDefinition();
2555     if (consumer->isRecoveredOnBailout()) {
2556       isCapturedResult = true;
2557       hasUseRemoved = hasUseRemoved || consumer->isUseRemoved();
2558       continue;
2559     }
2560 
2561     MDefinition::TruncateKind consumerKind =
2562         consumer->operandTruncateKind(consumer->indexOf(*use));
2563     kind = Min(kind, consumerKind);
2564     if (kind == MDefinition::NoTruncate) break;
2565   }
2566 
2567   // We cannot do full truncation on guarded instructions.
2568   if (candidate->isGuard() || candidate->isGuardRangeBailouts())
2569     kind = Min(kind, MDefinition::TruncateAfterBailouts);
2570 
2571   // If the value naturally produces an int32 value (before bailout checks)
2572   // that needs no conversion, we don't have to worry about resume points
2573   // seeing truncated values.
2574   bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
2575 
2576   // If the instruction is explicitly truncated (not indirectly) by all its
2577   // uses and if it has no removed uses, then we can safely encode its
2578   // truncated result as part of the resume point operands.  This is safe,
2579   // because even if we resume with a truncated double, the next baseline
2580   // instruction operating on this instruction is going to be a no-op.
2581   //
2582   // Note that if the result can be observed from another frame, then this
2583   // optimization is not safe.
2584   bool safeToConvert =
2585       kind == MDefinition::Truncate && !hasUseRemoved && !isObservableResult;
2586 
2587   // If the candidate instruction appears as operand of a resume point or a
2588   // recover instruction, and we have to truncate its result, then we might
2589   // have to either recover the result during the bailout, or avoid the
2590   // truncation.
2591   if (isCapturedResult && needsConversion && !safeToConvert) {
2592     // If the result can be recovered from all the resume points (not needed
2593     // for iterating over the inlined frames), and this instruction can be
2594     // recovered on bailout, then we can clone it and use the cloned
2595     // instruction to encode the recover instruction.  Otherwise, we should
2596     // keep the original result and bailout if the value is not in the int32
2597     // range.
2598     if (!JitOptions.disableRecoverIns && isRecoverableResult &&
2599         candidate->canRecoverOnBailout())
2600       *shouldClone = true;
2601     else
2602       kind = Min(kind, MDefinition::TruncateAfterBailouts);
2603   }
2604 
2605   return kind;
2606 }
2607 
2608 static MDefinition::TruncateKind ComputeTruncateKind(MDefinition* candidate,
2609                                                      bool* shouldClone) {
2610   // Compare operations might coerce their inputs to int32 if the ranges are
2611   // correct.  So we do not need to check if all uses are coerced.
2612   if (candidate->isCompare()) return MDefinition::TruncateAfterBailouts;
2613 
2614   // Set the truncated flag if range analysis ensures that it has no
2615   // rounding errors and no fractional part. Note that we can't use
2616   // the MDefinition Range constructor, because we need to know if
2617   // the value will have rounding errors before any bailout checks.
2618   const Range* r = candidate->range();
2619   bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
2620 
2621   // Special case integer division and modulo: a/b can be infinite, and a%b
2622   // can be NaN but cannot actually have rounding errors induced by truncation.
2623   if ((candidate->isDiv() || candidate->isMod()) &&
2624       static_cast<const MBinaryArithInstruction*>(candidate)
2625               ->specialization() == MIRType::Int32) {
2626     canHaveRoundingErrors = false;
2627   }
2628 
2629   if (canHaveRoundingErrors) return MDefinition::NoTruncate;
2630 
2631   // Ensure all observable uses are truncated.
2632   return ComputeRequestedTruncateKind(candidate, shouldClone);
2633 }
2634 
2635 static void RemoveTruncatesOnOutput(MDefinition* truncated) {
2636   // Compare returns a boolean so it doesn't have any output truncates.
2637   if (truncated->isCompare()) return;
2638 
2639   MOZ_ASSERT(truncated->type() == MIRType::Int32);
2640   MOZ_ASSERT(Range(truncated).isInt32());
2641 
2642   for (MUseDefIterator use(truncated); use; use++) {
2643     MDefinition* def = use.def();
2644     if (!def->isTruncateToInt32() && !def->isToNumberInt32()) continue;
2645 
2646     def->replaceAllUsesWith(truncated);
2647   }
2648 }
2649 
2650 static void AdjustTruncatedInputs(TempAllocator& alloc,
2651                                   MDefinition* truncated) {
2652   MBasicBlock* block = truncated->block();
2653   for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
2654     MDefinition::TruncateKind kind = truncated->operandTruncateKind(i);
2655     if (kind == MDefinition::NoTruncate) continue;
2656 
2657     MDefinition* input = truncated->getOperand(i);
2658     if (input->type() == MIRType::Int32) continue;
2659 
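    // If the operand is an int32 that was merely converted to double, reuse
    // the original int32 value; otherwise insert an explicit conversion.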
2660     if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
2661       truncated->replaceOperand(i, input->getOperand(0));
2662     } else {
2663       MInstruction* op;
2664       if (kind == MDefinition::TruncateAfterBailouts)
2665         op = MToNumberInt32::New(alloc, truncated->getOperand(i));
2666       else
2667         op = MTruncateToInt32::New(alloc, truncated->getOperand(i));
2668 
2669       if (truncated->isPhi()) {
2670         MBasicBlock* pred = block->getPredecessor(i);
2671         pred->insertBefore(pred->lastIns(), op);
2672       } else {
2673         block->insertBefore(truncated->toInstruction(), op);
2674       }
2675       truncated->replaceOperand(i, op);
2676     }
2677   }
2678 
2679   if (truncated->isToDouble()) {
2680     truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
2681     block->discard(truncated->toToDouble());
2682   }
2683 }
2684 
2685 // Iterate backward over all instructions and attempt to truncate each
2686 // operation that satisfies the following predicates: it has been
2687 // analyzed by range analysis, its range has no rounding errors, and all
2688 // of its uses truncate the result.
2689 //
2690 // If the truncation of an operation is successful, the instruction is
2691 // queued so that the graph can later be updated to restore type correctness
2692 // by converting the operands that need to be truncated.
2693 //
2694 // We iterate backward because it is likely that a truncated operation truncates
2695 // some of its operands.
2696 bool RangeAnalysis::truncate() {
2697   JitSpew(JitSpew_Range, "Do range-based truncation (backward loop)");
2698 
2699   // Automatic truncation is disabled for wasm because the truncation logic
2700   // is based on IonMonkey, which assumes that we can bail out if the
2701   // truncation logic fails. As wasm code has no bailout mechanism, it is
2702   // safer to avoid any automatic truncations.
2703   MOZ_ASSERT(!mir->compilingWasm());
2704 
2705   Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
2706 
2707   for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd();
2708        block++) {
2709     for (MInstructionReverseIterator iter(block->rbegin());
2710          iter != block->rend(); iter++) {
2711       if (iter->isRecoveredOnBailout()) continue;
2712 
2713       if (iter->type() == MIRType::None) {
2714         if (iter->isTest()) {
2715           if (!TruncateTest(alloc(), iter->toTest())) return false;
2716         }
2717         continue;
2718       }
2719 
2720       // Remember all bitop instructions for folding after range analysis.
2721       switch (iter->op()) {
2722         case MDefinition::Opcode::BitAnd:
2723         case MDefinition::Opcode::BitOr:
2724         case MDefinition::Opcode::BitXor:
2725         case MDefinition::Opcode::Lsh:
2726         case MDefinition::Opcode::Rsh:
2727         case MDefinition::Opcode::Ursh:
2728           if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter)))
2729             return false;
2730           break;
2731         default:;
2732       }
2733 
2734       bool shouldClone = false;
2735       MDefinition::TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
2736       if (kind == MDefinition::NoTruncate) continue;
2737 
2738       // Range Analysis is sometimes eager to do optimizations, even if we
2739       // are not able to truncate an instruction. In such cases, we
2740       // speculatively compile the instruction to an int32 instruction
2741       // while adding a guard. This is what is implied by
2742       // TruncateAfterBailouts.
2743       //
2744       // If we already experienced an overflow bailout while executing
2745       // code within the current JSScript, we no longer attempt to make
2746       // this kind of eager optimization.
2747       if (kind <= MDefinition::TruncateAfterBailouts &&
2748           block->info().hadOverflowBailout())
2749         continue;
2750 
2751       // Truncate this instruction if possible.
2752       if (!iter->needTruncation(kind)) continue;
2753 
2754       SpewTruncate(*iter, kind, shouldClone);
2755 
2756       // If needed, clone the current instruction to keep it around for the
2757       // bailout path.  This gives us the ability to truncate instructions
2758       // even after the removal of branches.
2759       if (shouldClone && !CloneForDeadBranches(alloc(), *iter)) return false;
2760 
2761       iter->truncate();
2762 
2763       // Delay updates of inputs/outputs to avoid creating nodes which
2764       // would be removed by the truncation of subsequent operations.
2765       iter->setInWorklist();
2766       if (!worklist.append(*iter)) return false;
2767     }
2768     for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
2769          iter != end; ++iter) {
2770       bool shouldClone = false;
2771       MDefinition::TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
2772       if (kind == MDefinition::NoTruncate) continue;
2773 
2774       // Truncate this phi if possible.
2775       if (shouldClone || !iter->needTruncation(kind)) continue;
2776 
2777       SpewTruncate(*iter, kind, shouldClone);
2778 
2779       iter->truncate();
2780 
2781       // Delay updates of inputs/outputs to avoid creating nodes which
2782       // would be removed by the truncation of subsequent operations.
2783       iter->setInWorklist();
2784       if (!worklist.append(*iter)) return false;
2785     }
2786   }
2787 
2788   // Update inputs/outputs of truncated instructions.
2789   JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
2790   while (!worklist.empty()) {
2791     if (!alloc().ensureBallast()) return false;
2792     MDefinition* def = worklist.popCopy();
2793     def->setNotInWorklist();
2794     RemoveTruncatesOnOutput(def);
2795     AdjustTruncatedInputs(alloc(), def);
2796   }
2797 
2798   return true;
2799 }
2800 
2801 bool RangeAnalysis::removeUnnecessaryBitops() {
2802   // Note: This operation changes the semantics of the program in a way which
2803   // only works with Int32. Recover instructions added by the Sink phase
2804   // expect the MIR graph to still have valid flow, as if these were double
2805   // operations instead of Int32 operations. Thus, this phase should be
2806   // executed after the Sink phase, and before DCE.
2807 
2808   // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
2809   // input. This is done after range analysis rather than during GVN as the
2810   // presence of the bitop can change which instructions are truncated.
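  // For example, if range analysis proved that x is an int32 in [0, 255], then
  // both `x | 0` and `x & 0xffff` fold to x here.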
2811   for (size_t i = 0; i < bitops.length(); i++) {
2812     MBinaryBitwiseInstruction* ins = bitops[i];
2813     if (ins->isRecoveredOnBailout()) continue;
2814 
2815     MDefinition* folded = ins->foldUnnecessaryBitop();
2816     if (folded != ins) {
2817       ins->replaceAllLiveUsesWith(folded);
2818       ins->setRecoveredOnBailout();
2819     }
2820   }
2821 
2822   bitops.clear();
2823   return true;
2824 }
2825 
2826 ///////////////////////////////////////////////////////////////////////////////
2827 // Collect Range information of operands
2828 ///////////////////////////////////////////////////////////////////////////////
2829 
2830 void MInArray::collectRangeInfoPreTrunc() {
2831   Range indexRange(index());
2832   if (indexRange.isFiniteNonNegative()) needsNegativeIntCheck_ = false;
2833 }
2834 
2835 void MLoadElementHole::collectRangeInfoPreTrunc() {
2836   Range indexRange(index());
2837   if (indexRange.isFiniteNonNegative()) {
2838     needsNegativeIntCheck_ = false;
2839     setNotGuard();
2840   }
2841 }
2842 
2843 void MClz::collectRangeInfoPreTrunc() {
2844   Range inputRange(input());
2845   if (!inputRange.canBeZero()) operandIsNeverZero_ = true;
2846 }
2847 
2848 void MCtz::collectRangeInfoPreTrunc() {
2849   Range inputRange(input());
2850   if (!inputRange.canBeZero()) operandIsNeverZero_ = true;
2851 }
2852 
2853 void MDiv::collectRangeInfoPreTrunc() {
2854   Range lhsRange(lhs());
2855   Range rhsRange(rhs());
2856 
2857   // Test if the dividend is non-negative.
2858   if (lhsRange.isFiniteNonNegative()) canBeNegativeDividend_ = false;
2859 
2860   // Try removing divide by zero check.
2861   if (!rhsRange.canBeZero()) canBeDivideByZero_ = false;
2862 
2863   // If lhsRange does not contain INT32_MIN in its range,
2864   // negative overflow check can be skipped.
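  // (The only int32 division that overflows is INT32_MIN / -1, whose
  // mathematical result 2147483648 does not fit in an int32.)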
2865   if (!lhsRange.contains(INT32_MIN)) canBeNegativeOverflow_ = false;
2866 
2867   // Likewise if rhsRange does not contain -1.
2868   if (!rhsRange.contains(-1)) canBeNegativeOverflow_ = false;
2869 
2870   // If lhsRange does not contain zero,
2871   // the negative zero check can be skipped.
2872   if (!lhsRange.canBeZero()) canBeNegativeZero_ = false;
2873 
2874   // If rhsRange >= 0, the negative zero check can be skipped.
2875   if (rhsRange.isFiniteNonNegative()) canBeNegativeZero_ = false;
2876 }
2877 
2878 void MMul::collectRangeInfoPreTrunc() {
2879   Range lhsRange(lhs());
2880   Range rhsRange(rhs());
2881 
2882   // If lhsRange contains only positive values, we can skip the negative zero check.
2883   if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero())
2884     setCanBeNegativeZero(false);
2885 
2886   // Likewise for rhsRange.
2887   if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero())
2888     setCanBeNegativeZero(false);
2889 
2890   // If rhsRange and lhsRange contain only non-negative integers,
2891   // we can skip the negative zero check.
2892   if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative())
2893     setCanBeNegativeZero(false);
2894 
2895   // If both rhsRange and lhsRange are negative, the negative zero check can also be skipped.
2896   if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative())
2897     setCanBeNegativeZero(false);
2898 }
2899 
2900 void MMod::collectRangeInfoPreTrunc() {
2901   Range lhsRange(lhs());
2902   Range rhsRange(rhs());
2903   if (lhsRange.isFiniteNonNegative()) canBeNegativeDividend_ = false;
2904   if (!rhsRange.canBeZero()) canBeDivideByZero_ = false;
2905 }
2906 
2907 void MToNumberInt32::collectRangeInfoPreTrunc() {
2908   Range inputRange(input());
2909   if (!inputRange.canBeNegativeZero()) canBeNegativeZero_ = false;
2910 }
2911 
2912 void MBoundsCheck::collectRangeInfoPreTrunc() {
2913   Range indexRange(index());
2914   Range lengthRange(length());
2915   if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound())
2916     return;
2917   if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN()) return;
2918 
2919   int64_t indexLower = indexRange.lower();
2920   int64_t indexUpper = indexRange.upper();
2921   int64_t lengthLower = lengthRange.lower();
2922   int64_t min = minimum();
2923   int64_t max = maximum();
2924 
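  // The comparison is done in 64-bit arithmetic so that adding the constant
  // minimum()/maximum() offsets to the index bounds cannot itself overflow;
  // e.g. an index upper bound of INT32_MAX with max == 1 must not wrap around
  // and incorrectly satisfy the check.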
2925   if (indexLower + min >= 0 && indexUpper + max < lengthLower)
2926     fallible_ = false;
2927 }
2928 
2929 void MBoundsCheckLower::collectRangeInfoPreTrunc() {
2930   Range indexRange(index());
2931   if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_)
2932     fallible_ = false;
2933 }
2934 
2935 void MCompare::collectRangeInfoPreTrunc() {
2936   if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN())
2937     operandsAreNeverNaN_ = true;
2938 }
2939 
2940 void MNot::collectRangeInfoPreTrunc() {
2941   if (!Range(input()).canBeNaN()) operandIsNeverNaN_ = true;
2942 }
2943 
2944 void MPowHalf::collectRangeInfoPreTrunc() {
2945   Range inputRange(input());
2946   if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound())
2947     operandIsNeverNegativeInfinity_ = true;
2948   if (!inputRange.canBeNegativeZero()) operandIsNeverNegativeZero_ = true;
2949   if (!inputRange.canBeNaN()) operandIsNeverNaN_ = true;
2950 }
2951 
2952 void MUrsh::collectRangeInfoPreTrunc() {
2953   if (specialization_ == MIRType::Int64) return;
2954 
2955   Range lhsRange(lhs()), rhsRange(rhs());
2956 
2957   // As in MUrsh::computeRange(), convert the inputs.
2958   lhsRange.wrapAroundToInt32();
2959   rhsRange.wrapAroundToShiftCount();
2960 
2961   // If the most significant bit of our result is always going to be zero,
2962   // we can optimize by disabling bailout checks for enforcing an int32 range.
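  // If lhs >= 0, the sign bit is already clear before the shift; if the shift
  // count is at least 1, a zero is shifted in from the left. Either way the
  // result fits in [0, INT32_MAX] and is a valid int32 without any bailout.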
2963   if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1) bailoutsDisabled_ = true;
2964 }
2965 
2966 static bool DoesMaskMatchRange(int32_t mask, Range& range) {
2967   // Check that the range is non-negative, because a bitand such as
2968   // `(-3) & 0xff` can't be eliminated.
2969   if (range.lower() >= 0) {
2970     MOZ_ASSERT(range.isInt32());
2971     // Check that the mask value has all bits set given the range upper bound.
2972     // Note that the upper bound does not have to be exactly the mask value. For
2973     // example, consider `x & 0xfff` where `x` is a uint8. That expression can
2974     // still be optimized to `x`.
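    // E.g. for the uint8 case: upper() == 255 gives bits == 8 and
    // maskNeeded == 0xff; since (0xfff & 0xff) == 0xff, the mask matches.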
2975     int bits = 1 + FloorLog2(range.upper());
2976     uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
2977     if ((mask & maskNeeded) == maskNeeded) return true;
2978   }
2979 
2980   return false;
2981 }
2982 
2983 void MBinaryBitwiseInstruction::collectRangeInfoPreTrunc() {
2984   Range lhsRange(lhs());
2985   Range rhsRange(rhs());
2986 
2987   if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
2988       DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange)) {
2989     maskMatchesRightRange = true;
2990   }
2991 
2992   if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
2993       DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange)) {
2994     maskMatchesLeftRange = true;
2995   }
2996 }
2997 
2998 void MNaNToZero::collectRangeInfoPreTrunc() {
2999   Range inputRange(input());
3000 
3001   if (!inputRange.canBeNaN()) operandIsNeverNaN_ = true;
3002   if (!inputRange.canBeNegativeZero()) operandIsNeverNegativeZero_ = true;
3003 }
3004 
3005 bool RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode) {
3006   *shouldRemoveDeadCode = false;
3007 
3008   for (ReversePostorderIterator iter(graph_.rpoBegin());
3009        iter != graph_.rpoEnd(); iter++) {
3010     MBasicBlock* block = *iter;
3011 
3012     if (!block->unreachable()) continue;
3013 
3014     // Filter out unreachable fake entries.
3015     if (block->numPredecessors() == 0) {
3016       // Ignore fixup blocks added by the Value Numbering phase, in order
3017       // to keep the dominator tree as-is when we have OSR blocks which are
3018       // no longer reachable from the main entry point of the graph.
3019       MOZ_ASSERT(graph_.osrBlock());
3020       continue;
3021     }
3022 
3023     MControlInstruction* cond = block->getPredecessor(0)->lastIns();
3024     if (!cond->isTest()) continue;
3025 
3026     // Replace the condition of the test control instruction with a constant
3027     // chosen based on which of the successors has the unreachable flag that
3028     // is added by MBeta::computeRange on its own block.
3029     MTest* test = cond->toTest();
3030     MDefinition* condition = test->input();
3031 
3032     // If the false-branch is unreachable, then the test condition must be true.
3033     // If the true-branch is unreachable, then the test condition must be false.
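    // For example, if range analysis proved that the guarded condition `x < 0`
    // can never hold, the true-successor is marked unreachable and the test's
    // condition is replaced by `false`, letting dead code elimination remove
    // the branch.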
3034     MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
3035     bool value = block == test->ifFalse();
3036     MConstant* constant =
3037         MConstant::New(alloc().fallible(), BooleanValue(value));
3038     if (!constant) return false;
3039 
3040     condition->setGuardRangeBailoutsUnchecked();
3041 
3042     test->block()->insertBefore(test, constant);
3043 
3044     test->replaceOperand(0, constant);
3045     JitSpew(JitSpew_Range,
3046             "Update condition of %d to reflect unreachable branches.",
3047             test->id());
3048 
3049     *shouldRemoveDeadCode = true;
3050   }
3051 
3052   return tryRemovingGuards();
3053 }
3054 
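// Try to drop the GuardRangeBailouts flag where it is not needed: if an
// instruction would be dead when unused and its MIRType already implies its
// computed range, the guard is removed and the flag is instead propagated to
// the operands which feed the guarded condition.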
3055 bool RangeAnalysis::tryRemovingGuards() {
3056   MDefinitionVector guards(alloc());
3057 
3058   for (ReversePostorderIterator block = graph_.rpoBegin();
3059        block != graph_.rpoEnd(); block++) {
3060     for (MDefinitionIterator iter(*block); iter; iter++) {
3061       if (!iter->isGuardRangeBailouts()) continue;
3062 
3063       iter->setInWorklist();
3064       if (!guards.append(*iter)) return false;
3065     }
3066   }
3067 
3068   // Flag all fallible instructions which were indirectly used in the
3069   // computation of the condition, such that we do not ignore
3070   // bailout-paths which are used to shrink the input range of the
3071   // operands of the condition.
3072   for (size_t i = 0; i < guards.length(); i++) {
3073     MDefinition* guard = guards[i];
3074 
3075     // If this ins is a guard even without the guardRangeBailouts flag,
3076     // there is no point in trying to remove the guardRangeBailouts check.
3077     guard->setNotGuardRangeBailouts();
3078     if (!DeadIfUnused(guard)) {
3079       guard->setGuardRangeBailouts();
3080       continue;
3081     }
3082     guard->setGuardRangeBailouts();
3083 
3084     if (!guard->isPhi()) {
3085       if (!guard->range()) continue;
3086 
3087       // Filter the range of the instruction based on its MIRType.
3088       Range typeFilteredRange(guard);
3089 
3090       // If the output range is updated by adding the inner range,
3091       // then the MIRType acts as an effectful filter. As we do not know
3092       // whether this filtered range might change the result of the
3093       // previous comparison, we have to keep this instruction as a guard,
3094       // because it has to bail out in order to restrict the range to its
3095       // MIRType.
3096       if (typeFilteredRange.update(guard->range())) continue;
3097     }
3098 
3099     guard->setNotGuardRangeBailouts();
3100 
3101     // Propagate the guard to its operands.
3102     for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
3103       MDefinition* operand = guard->getOperand(op);
3104 
3105       // Already marked.
3106       if (operand->isInWorklist()) continue;
3107 
3108       MOZ_ASSERT(!operand->isGuardRangeBailouts());
3109 
3110       operand->setInWorklist();
3111       operand->setGuardRangeBailouts();
3112       if (!guards.append(operand)) return false;
3113     }
3114   }
3115 
3116   for (size_t i = 0; i < guards.length(); i++) {
3117     MDefinition* guard = guards[i];
3118     guard->setNotInWorklist();
3119   }
3120 
3121   return true;
3122 }
3123