/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/OptimizationTracking.h"

#include "mozilla/SizePrintfMacros.h"

#include "jsprf.h"

#include "ds/Sort.h"
#include "jit/IonBuilder.h"
#include "jit/JitcodeMap.h"
#include "jit/JitSpewer.h"
#include "js/TrackedOptimizationInfo.h"

#include "vm/ObjectGroup-inl.h"
#include "vm/TypeInference-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::Maybe;
using mozilla::Some;
using mozilla::Nothing;

using JS::TrackedStrategy;
using JS::TrackedOutcome;
using JS::TrackedTypeSite;
using JS::ForEachTrackedOptimizationAttemptOp;
using JS::ForEachTrackedOptimizationTypeInfoOp;

bool
TrackedOptimizations::trackTypeInfo(OptimizationTypeInfo&& ty)
{
    return types_.append(mozilla::Move(ty));
}

bool
TrackedOptimizations::trackAttempt(TrackedStrategy strategy)
{
    OptimizationAttempt attempt(strategy, TrackedOutcome::GenericFailure);
    currentAttempt_ = attempts_.length();
    return attempts_.append(attempt);
}

void
TrackedOptimizations::amendAttempt(uint32_t index)
{
    currentAttempt_ = index;
}

void
TrackedOptimizations::trackOutcome(TrackedOutcome outcome)
{
    attempts_[currentAttempt_].setOutcome(outcome);
}

void
TrackedOptimizations::trackSuccess()
{
    attempts_[currentAttempt_].setOutcome(TrackedOutcome::GenericSuccess);
}

template <class Vec>
static bool
VectorContentsMatch(const Vec* xs, const Vec* ys)
{
    if (xs->length() != ys->length())
        return false;
    for (auto x = xs->begin(), y = ys->begin(); x != xs->end(); x++, y++) {
        MOZ_ASSERT(y != ys->end());
        if (*x != *y)
            return false;
    }
    return true;
}

bool
TrackedOptimizations::matchTypes(const TempOptimizationTypeInfoVector& other) const
{
    return VectorContentsMatch(&types_, &other);
}

bool
TrackedOptimizations::matchAttempts(const TempOptimizationAttemptsVector& other) const
{
    return VectorContentsMatch(&attempts_, &other);
}

JS_PUBLIC_API(const char*)
JS::TrackedStrategyString(TrackedStrategy strategy)
{
    switch (strategy) {
#define STRATEGY_CASE(name)                       \
      case TrackedStrategy::name:                 \
        return #name;
      TRACKED_STRATEGY_LIST(STRATEGY_CASE)
#undef STRATEGY_CASE

      default:
        MOZ_CRASH("bad strategy");
    }
}

JS_PUBLIC_API(const char*)
JS::TrackedOutcomeString(TrackedOutcome outcome)
{
    switch (outcome) {
#define OUTCOME_CASE(name)                        \
      case TrackedOutcome::name:                  \
        return #name;
      TRACKED_OUTCOME_LIST(OUTCOME_CASE)
#undef OUTCOME_CASE

      default:
        MOZ_CRASH("bad outcome");
    }
}

JS_PUBLIC_API(const char*)
JS::TrackedTypeSiteString(TrackedTypeSite site)
{
    switch (site) {
#define TYPESITE_CASE(name)                       \
      case TrackedTypeSite::name:                 \
        return #name;
      TRACKED_TYPESITE_LIST(TYPESITE_CASE)
#undef TYPESITE_CASE

      default:
        MOZ_CRASH("bad type site");
    }
}

void
SpewTempOptimizationTypeInfoVector(const TempOptimizationTypeInfoVector* types,
                                   const char* indent = nullptr)
{
#ifdef JS_JITSPEW
    for (const OptimizationTypeInfo* t = types->begin(); t != types->end(); t++) {
        JitSpewStart(JitSpew_OptimizationTracking, "   %s%s of type %s, type set",
                     indent ? indent : "",
                     TrackedTypeSiteString(t->site()), StringFromMIRType(t->mirType()));
        for (uint32_t i = 0; i < t->types().length(); i++)
            JitSpewCont(JitSpew_OptimizationTracking, " %s", TypeSet::TypeString(t->types()[i]));
        JitSpewFin(JitSpew_OptimizationTracking);
    }
#endif
}

void
SpewTempOptimizationAttemptsVector(const TempOptimizationAttemptsVector* attempts,
                                   const char* indent = nullptr)
{
#ifdef JS_JITSPEW
    for (const OptimizationAttempt* a = attempts->begin(); a != attempts->end(); a++) {
        JitSpew(JitSpew_OptimizationTracking, "   %s%s: %s", indent ? indent : "",
                TrackedStrategyString(a->strategy()), TrackedOutcomeString(a->outcome()));
    }
#endif
}

void
TrackedOptimizations::spew() const
{
#ifdef JS_JITSPEW
    SpewTempOptimizationTypeInfoVector(&types_);
    SpewTempOptimizationAttemptsVector(&attempts_);
#endif
}

bool
OptimizationTypeInfo::trackTypeSet(TemporaryTypeSet* typeSet)
{
    if (!typeSet)
        return true;
    return typeSet->enumerateTypes(&types_);
}

bool
OptimizationTypeInfo::trackType(TypeSet::Type type)
{
    return types_.append(type);
}

bool
OptimizationTypeInfo::operator ==(const OptimizationTypeInfo& other) const
{
    return site_ == other.site_ && mirType_ == other.mirType_ &&
           VectorContentsMatch(&types_, &other.types_);
}

bool
OptimizationTypeInfo::operator !=(const OptimizationTypeInfo& other) const
{
    return !(*this == other);
}

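// CombineHash folds one hash value into a running hash. The mixing steps
// below follow the per-input round of Bob Jenkins' one-at-a-time hash;
// UniqueTrackedOptimizations::Key::hash applies the matching final
// avalanche after folding in its two vectors.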
static inline HashNumber
CombineHash(HashNumber h, HashNumber n)
{
    h += n;
    h += (h << 10);
    h ^= (h >> 6);
    return h;
}

static inline HashNumber
HashType(TypeSet::Type ty)
{
    if (ty.isObjectUnchecked())
        return PointerHasher<TypeSet::ObjectKey*, 3>::hash(ty.objectKey());
    return HashNumber(ty.raw());
}

static HashNumber
HashTypeList(const TempTypeList& types)
{
    HashNumber h = 0;
    for (uint32_t i = 0; i < types.length(); i++)
        h = CombineHash(h, HashType(types[i]));
    return h;
}

HashNumber
OptimizationTypeInfo::hash() const
{
    return ((HashNumber(site_) << 24) + (HashNumber(mirType_) << 16)) ^ HashTypeList(types_);
}

template <class Vec>
static HashNumber
HashVectorContents(const Vec* xs, HashNumber h)
{
    for (auto x = xs->begin(); x != xs->end(); x++)
        h = CombineHash(h, x->hash());
    return h;
}

/* static */ HashNumber
UniqueTrackedOptimizations::Key::hash(const Lookup& lookup)
{
    HashNumber h = HashVectorContents(lookup.types, 0);
    h = HashVectorContents(lookup.attempts, h);
    h += (h << 3);
    h ^= (h >> 11);
    h += (h << 15);
    return h;
}

/* static */ bool
UniqueTrackedOptimizations::Key::match(const Key& key, const Lookup& lookup)
{
    return VectorContentsMatch(key.attempts, lookup.attempts) &&
           VectorContentsMatch(key.types, lookup.types);
}

bool
UniqueTrackedOptimizations::add(const TrackedOptimizations* optimizations)
{
    MOZ_ASSERT(!sorted());
    Key key;
    key.types = &optimizations->types_;
    key.attempts = &optimizations->attempts_;
    AttemptsMap::AddPtr p = map_.lookupForAdd(key);
    if (p) {
        p->value().frequency++;
        return true;
    }
    Entry entry;
    entry.index = UINT8_MAX;
    entry.frequency = 1;
    return map_.add(p, key, entry);
}

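// Comparator for the MergeSort in sortByFrequency. Reporting
// "b.frequency <= a.frequency" as less-or-equal yields a descending sort,
// so the most frequently seen optimization vectors come first and receive
// the smallest indices.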
struct FrequencyComparator
{
    bool operator()(const UniqueTrackedOptimizations::SortEntry& a,
                    const UniqueTrackedOptimizations::SortEntry& b,
                    bool* lessOrEqualp)
    {
        *lessOrEqualp = b.frequency <= a.frequency;
        return true;
    }
};

bool
UniqueTrackedOptimizations::sortByFrequency(JSContext* cx)
{
    MOZ_ASSERT(!sorted());

    JitSpew(JitSpew_OptimizationTracking, "=> Sorting unique optimizations by frequency");

    // Sort by frequency.
    Vector<SortEntry> entries(cx);
    for (AttemptsMap::Range r = map_.all(); !r.empty(); r.popFront()) {
        SortEntry entry;
        entry.types = r.front().key().types;
        entry.attempts = r.front().key().attempts;
        entry.frequency = r.front().value().frequency;
        if (!entries.append(entry))
            return false;
    }

    // The compact table stores indices as uint8_t. In practice each script
    // has fewer unique optimization attempts than UINT8_MAX.
    if (entries.length() >= UINT8_MAX - 1)
        return false;

    Vector<SortEntry> scratch(cx);
    if (!scratch.resize(entries.length()))
        return false;

    FrequencyComparator comparator;
    MOZ_ALWAYS_TRUE(MergeSort(entries.begin(), entries.length(), scratch.begin(), comparator));

    // Update map entries' indices.
    for (size_t i = 0; i < entries.length(); i++) {
        Key key;
        key.types = entries[i].types;
        key.attempts = entries[i].attempts;
        AttemptsMap::Ptr p = map_.lookup(key);
        MOZ_ASSERT(p);
        p->value().index = sorted_.length();

        JitSpew(JitSpew_OptimizationTracking, "   Entry %u has frequency %u",
                sorted_.length(), p->value().frequency);

        if (!sorted_.append(entries[i]))
            return false;
    }

    return true;
}

uint8_t
UniqueTrackedOptimizations::indexOf(const TrackedOptimizations* optimizations) const
{
    MOZ_ASSERT(sorted());
    Key key;
    key.types = &optimizations->types_;
    key.attempts = &optimizations->attempts_;
    AttemptsMap::Ptr p = map_.lookup(key);
    MOZ_ASSERT(p);
    MOZ_ASSERT(p->value().index != UINT8_MAX);
    return p->value().index;
}

// Assigns each unique tracked type an index; outputs a compact list.
class jit::UniqueTrackedTypes
{
  public:
    struct TypeHasher
    {
        typedef TypeSet::Type Lookup;

        static HashNumber hash(const Lookup& ty) { return HashType(ty); }
        static bool match(const TypeSet::Type& ty1, const TypeSet::Type& ty2) { return ty1 == ty2; }
    };

  private:
    // Map of unique TypeSet::Types to indices.
    typedef HashMap<TypeSet::Type, uint8_t, TypeHasher> TypesMap;
    TypesMap map_;

    Vector<TypeSet::Type, 1> list_;

  public:
    explicit UniqueTrackedTypes(JSContext* cx)
      : map_(cx),
        list_(cx)
    { }

    bool init() { return map_.init(); }
    bool getIndexOf(JSContext* cx, TypeSet::Type ty, uint8_t* indexp);

    uint32_t count() const { MOZ_ASSERT(map_.count() == list_.length()); return list_.length(); }
    bool enumerate(TypeSet::TypeList* types) const;
};

bool
UniqueTrackedTypes::getIndexOf(JSContext* cx, TypeSet::Type ty, uint8_t* indexp)
{
    TypesMap::AddPtr p = map_.lookupForAdd(ty);
    if (p) {
        *indexp = p->value();
        return true;
    }

    // Store indices as uint8_t. In practice each script has fewer than
    // UINT8_MAX unique observed types.
    if (count() >= UINT8_MAX)
        return false;

    uint8_t index = (uint8_t) count();
    if (!map_.add(p, ty, index))
        return false;
    if (!list_.append(ty))
        return false;
    *indexp = index;
    return true;
}

bool
UniqueTrackedTypes::enumerate(TypeSet::TypeList* types) const
{
    return types->append(list_.begin(), list_.end());
}

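// A region payload, as produced by WriteRun below and consumed by
// unpackHeader and RangeIterator::readNext (a sketch; multi-byte fields
// use the variable-width CompactBuffer encoding):
//
//   [startOffset: unsigned]              header: native range covered
//   [endOffset: unsigned]                by the whole run
//   [endOffset: unsigned][index: byte]   first range; not delta-encoded
//   [packed startDelta/length/index]...  remaining ranges; see ReadDelta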
void
IonTrackedOptimizationsRegion::unpackHeader()
{
    CompactBufferReader reader(start_, end_);
    startOffset_ = reader.readUnsigned();
    endOffset_ = reader.readUnsigned();
    rangesStart_ = reader.currentPosition();
    MOZ_ASSERT(startOffset_ < endOffset_);
}

void
IonTrackedOptimizationsRegion::RangeIterator::readNext(uint32_t* startOffset, uint32_t* endOffset,
                                                       uint8_t* index)
{
    MOZ_ASSERT(more());

    CompactBufferReader reader(cur_, end_);

    // The very first entry isn't delta-encoded.
    if (cur_ == start_) {
        *startOffset = firstStartOffset_;
        *endOffset = prevEndOffset_ = reader.readUnsigned();
        *index = reader.readByte();
        cur_ = reader.currentPosition();
        MOZ_ASSERT(cur_ <= end_);
        return;
    }

    // Otherwise, read a delta.
    uint32_t startDelta, length;
    ReadDelta(reader, &startDelta, &length, index);
    *startOffset = prevEndOffset_ + startDelta;
    *endOffset = prevEndOffset_ = *startOffset + length;
    cur_ = reader.currentPosition();
    MOZ_ASSERT(cur_ <= end_);
}

Maybe<uint8_t>
JitcodeGlobalEntry::IonEntry::trackedOptimizationIndexAtAddr(JSRuntime* rt, void* ptr,
                                                             uint32_t* entryOffsetOut)
{
    MOZ_ASSERT(hasTrackedOptimizations());
    MOZ_ASSERT(containsPointer(ptr));
    uint32_t ptrOffset = ((uint8_t*) ptr) - ((uint8_t*) nativeStartAddr());
    Maybe<IonTrackedOptimizationsRegion> region = optsRegionTable_->findRegion(ptrOffset);
    if (region.isNothing())
        return Nothing();
    return region->findIndex(ptrOffset, entryOffsetOut);
}

void
JitcodeGlobalEntry::IonEntry::forEachOptimizationAttempt(JSRuntime* rt, uint8_t index,
                                                         ForEachTrackedOptimizationAttemptOp& op)
{
    trackedOptimizationAttempts(index).forEach(op);
}

void
JitcodeGlobalEntry::IonEntry::forEachOptimizationTypeInfo(JSRuntime* rt, uint8_t index,
                                    IonTrackedOptimizationsTypeInfo::ForEachOpAdapter& op)
{
    trackedOptimizationTypeInfo(index).forEach(op, allTrackedTypes());
}

void
IonTrackedOptimizationsAttempts::forEach(ForEachTrackedOptimizationAttemptOp& op)
{
    CompactBufferReader reader(start_, end_);
    const uint8_t* cur = start_;
    while (cur != end_) {
        TrackedStrategy strategy = TrackedStrategy(reader.readUnsigned());
        TrackedOutcome outcome = TrackedOutcome(reader.readUnsigned());
        MOZ_ASSERT(strategy < TrackedStrategy::Count);
        MOZ_ASSERT(outcome < TrackedOutcome::Count);
        op(strategy, outcome);
        cur = reader.currentPosition();
        MOZ_ASSERT(cur <= end_);
    }
}

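// Each entry decoded below mirrors what OptimizationTypeInfo::writeCompact
// emits (a sketch):
//
//   [site: unsigned][mirType: unsigned][length: unsigned]
//   [index into allTypes: byte] x length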
void
IonTrackedOptimizationsTypeInfo::forEach(ForEachOp& op, const IonTrackedTypeVector* allTypes)
{
    CompactBufferReader reader(start_, end_);
    const uint8_t* cur = start_;
    while (cur != end_) {
        TrackedTypeSite site = JS::TrackedTypeSite(reader.readUnsigned());
        MOZ_ASSERT(site < JS::TrackedTypeSite::Count);
        MIRType mirType = MIRType(reader.readUnsigned());
        uint32_t length = reader.readUnsigned();
        for (uint32_t i = 0; i < length; i++)
            op.readType((*allTypes)[reader.readByte()]);
        op(site, mirType);
        cur = reader.currentPosition();
        MOZ_ASSERT(cur <= end_);
    }
}

Maybe<uint8_t>
IonTrackedOptimizationsRegion::findIndex(uint32_t offset, uint32_t* entryOffsetOut) const
{
    if (offset <= startOffset_ || offset > endOffset_)
        return Nothing();

    // Linear search through the run.
    RangeIterator iter = ranges();
    while (iter.more()) {
        uint32_t startOffset, endOffset;
        uint8_t index;
        iter.readNext(&startOffset, &endOffset, &index);
        if (startOffset < offset && offset <= endOffset) {
            *entryOffsetOut = endOffset;
            return Some(index);
        }
    }
    return Nothing();
}

Maybe<IonTrackedOptimizationsRegion>
IonTrackedOptimizationsRegionTable::findRegion(uint32_t offset) const
{
    // For two contiguous regions, e.g., [i, j] and [j, k], an offset exactly
    // at j will be associated with [i, j] instead of [j, k]. An offset
    // exactly at j is often a return address from a younger frame, in which
    // case the next region, despite starting at j, has not yet logically
    // started execution.

    static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
    uint32_t regions = numEntries();
    MOZ_ASSERT(regions > 0);

    // For small numbers of regions, do linear search.
    if (regions <= LINEAR_SEARCH_THRESHOLD) {
        for (uint32_t i = 0; i < regions; i++) {
            IonTrackedOptimizationsRegion region = entry(i);
            if (region.startOffset() < offset && offset <= region.endOffset()) {
                return Some(entry(i));
            }
        }
        return Nothing();
    }

    // Otherwise, do binary search.
    uint32_t i = 0;
    while (regions > 1) {
        uint32_t step = regions / 2;
        uint32_t mid = i + step;
        IonTrackedOptimizationsRegion region = entry(mid);

        if (offset <= region.startOffset()) {
            // Entry is below mid.
            regions = step;
        } else if (offset > region.endOffset()) {
            // Entry is above mid.
            i = mid;
            regions -= step;
        } else {
            // Entry is in mid.
            return Some(entry(mid));
        }
    }
    return Nothing();
}

/* static */ uint32_t
IonTrackedOptimizationsRegion::ExpectedRunLength(const NativeToTrackedOptimizations* start,
                                                 const NativeToTrackedOptimizations* end)
{
    MOZ_ASSERT(start < end);

    // A run always has at least 1 entry, which is not delta-encoded.
    uint32_t runLength = 1;
    uint32_t prevEndOffset = start->endOffset.offset();

    for (const NativeToTrackedOptimizations* entry = start + 1; entry != end; entry++) {
        uint32_t startOffset = entry->startOffset.offset();
        uint32_t endOffset = entry->endOffset.offset();
        uint32_t startDelta = startOffset - prevEndOffset;
        uint32_t length = endOffset - startOffset;

        if (!IsDeltaEncodeable(startDelta, length))
            break;

        runLength++;
        if (runLength == MAX_RUN_LENGTH)
            break;

        prevEndOffset = endOffset;
    }

    return runLength;
}

void
OptimizationAttempt::writeCompact(CompactBufferWriter& writer) const
{
    writer.writeUnsigned((uint32_t) strategy_);
    writer.writeUnsigned((uint32_t) outcome_);
}

bool
OptimizationTypeInfo::writeCompact(JSContext* cx, CompactBufferWriter& writer,
                                   UniqueTrackedTypes& uniqueTypes) const
{
    writer.writeUnsigned((uint32_t) site_);
    writer.writeUnsigned((uint32_t) mirType_);
    writer.writeUnsigned(types_.length());
    for (uint32_t i = 0; i < types_.length(); i++) {
        uint8_t index;
        if (!uniqueTypes.getIndexOf(cx, types_[i], &index))
            return false;
        writer.writeByte(index);
    }
    return true;
}

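// A worked example of the 2-byte encoding handled first below, assuming
// the ENC1_* shifts match the layout comment (7-bit startDelta at bit 9,
// 6-bit length at bit 3, 2-bit index at bit 1, tag bit 0 clear):
//
//   startDelta=2, length=1, index=1
//   val = (2 << 9) | (1 << 3) | (1 << 1) = 0x040a
//
// WriteDelta emits this little-endian as the bytes 0x0a 0x04; ReadDelta
// reassembles 0x040a and unpacks the same three fields.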
/* static */ void
IonTrackedOptimizationsRegion::ReadDelta(CompactBufferReader& reader,
                                         uint32_t* startDelta, uint32_t* length,
                                         uint8_t* index)
{
    // 2 bytes
    // SSSS-SSSL LLLL-LII0
    const uint32_t firstByte = reader.readByte();
    const uint32_t secondByte = reader.readByte();
    if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8;
        *startDelta = encVal >> ENC1_START_DELTA_SHIFT;
        *length = (encVal >> ENC1_LENGTH_SHIFT) & ENC1_LENGTH_MAX;
        *index = (encVal >> ENC1_INDEX_SHIFT) & ENC1_INDEX_MAX;
        MOZ_ASSERT(*length != 0);
        return;
    }

    // 3 bytes
    // SSSS-SSSS SSSS-LLLL LLII-II01
    const uint32_t thirdByte = reader.readByte();
    if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
        *startDelta = encVal >> ENC2_START_DELTA_SHIFT;
        *length = (encVal >> ENC2_LENGTH_SHIFT) & ENC2_LENGTH_MAX;
        *index = (encVal >> ENC2_INDEX_SHIFT) & ENC2_INDEX_MAX;
        MOZ_ASSERT(*length != 0);
        return;
    }

    // 4 bytes
    // SSSS-SSSS SSSL-LLLL LLLL-LIII IIII-I011
    const uint32_t fourthByte = reader.readByte();
    if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
        *startDelta = encVal >> ENC3_START_DELTA_SHIFT;
        *length = (encVal >> ENC3_LENGTH_SHIFT) & ENC3_LENGTH_MAX;
        *index = (encVal >> ENC3_INDEX_SHIFT) & ENC3_INDEX_MAX;
        MOZ_ASSERT(*length != 0);
        return;
    }

    // 5 bytes
    // SSSS-SSSS SSSS-SSSL LLLL-LLLL LLLL-LIII IIII-I111
    MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
    uint64_t fifthByte = reader.readByte();
    uint64_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24 |
                      fifthByte << 32;
    *startDelta = encVal >> ENC4_START_DELTA_SHIFT;
    *length = (encVal >> ENC4_LENGTH_SHIFT) & ENC4_LENGTH_MAX;
    *index = (encVal >> ENC4_INDEX_SHIFT) & ENC4_INDEX_MAX;
    MOZ_ASSERT(*length != 0);
}

/* static */ void
IonTrackedOptimizationsRegion::WriteDelta(CompactBufferWriter& writer,
                                          uint32_t startDelta, uint32_t length,
                                          uint8_t index)
{
    // 2 bytes
    // SSSS-SSSL LLLL-LII0
    if (startDelta <= ENC1_START_DELTA_MAX &&
        length <= ENC1_LENGTH_MAX &&
        index <= ENC1_INDEX_MAX)
    {
        uint16_t val = ENC1_MASK_VAL |
                       (startDelta << ENC1_START_DELTA_SHIFT) |
                       (length << ENC1_LENGTH_SHIFT) |
                       (index << ENC1_INDEX_SHIFT);
        writer.writeByte(val & 0xff);
        writer.writeByte((val >> 8) & 0xff);
        return;
    }

    // 3 bytes
    // SSSS-SSSS SSSS-LLLL LLII-II01
    if (startDelta <= ENC2_START_DELTA_MAX &&
        length <= ENC2_LENGTH_MAX &&
        index <= ENC2_INDEX_MAX)
    {
        uint32_t val = ENC2_MASK_VAL |
                       (startDelta << ENC2_START_DELTA_SHIFT) |
                       (length << ENC2_LENGTH_SHIFT) |
                       (index << ENC2_INDEX_SHIFT);
        writer.writeByte(val & 0xff);
        writer.writeByte((val >> 8) & 0xff);
        writer.writeByte((val >> 16) & 0xff);
        return;
    }

    // 4 bytes
    // SSSS-SSSS SSSL-LLLL LLLL-LIII IIII-I011
    if (startDelta <= ENC3_START_DELTA_MAX &&
        length <= ENC3_LENGTH_MAX)
    {
        // index always fits because it's a uint8_t; change this if
        // ENC3_INDEX_MAX changes.
        MOZ_ASSERT(ENC3_INDEX_MAX == UINT8_MAX);
        uint32_t val = ENC3_MASK_VAL |
                       (startDelta << ENC3_START_DELTA_SHIFT) |
                       (length << ENC3_LENGTH_SHIFT) |
                       (index << ENC3_INDEX_SHIFT);
        writer.writeByte(val & 0xff);
        writer.writeByte((val >> 8) & 0xff);
        writer.writeByte((val >> 16) & 0xff);
        writer.writeByte((val >> 24) & 0xff);
        return;
    }

    // 5 bytes
    // SSSS-SSSS SSSS-SSSL LLLL-LLLL LLLL-LIII IIII-I111
    if (startDelta <= ENC4_START_DELTA_MAX &&
        length <= ENC4_LENGTH_MAX)
    {
        // index always fits because it's a uint8_t; change this if
        // ENC4_INDEX_MAX changes.
        MOZ_ASSERT(ENC4_INDEX_MAX == UINT8_MAX);
        uint64_t val = ENC4_MASK_VAL |
                       (((uint64_t) startDelta) << ENC4_START_DELTA_SHIFT) |
                       (((uint64_t) length) << ENC4_LENGTH_SHIFT) |
                       (((uint64_t) index) << ENC4_INDEX_SHIFT);
        writer.writeByte(val & 0xff);
        writer.writeByte((val >> 8) & 0xff);
        writer.writeByte((val >> 16) & 0xff);
        writer.writeByte((val >> 24) & 0xff);
        writer.writeByte((val >> 32) & 0xff);
        return;
    }

    MOZ_CRASH("startDelta,length,index triple too large to encode.");
}

/* static */ bool
IonTrackedOptimizationsRegion::WriteRun(CompactBufferWriter& writer,
                                        const NativeToTrackedOptimizations* start,
                                        const NativeToTrackedOptimizations* end,
                                        const UniqueTrackedOptimizations& unique)
{
    // Write the header, which is the range that this whole run encompasses.
    JitSpew(JitSpew_OptimizationTracking, "     Header: [%u, %u]",
            start->startOffset.offset(), (end - 1)->endOffset.offset());
    writer.writeUnsigned(start->startOffset.offset());
    writer.writeUnsigned((end - 1)->endOffset.offset());

    // Write the first entry of the run, which is not delta-encoded.
    JitSpew(JitSpew_OptimizationTracking,
            "     [%6u, %6u]                        vector %3u, offset %4u",
            start->startOffset.offset(), start->endOffset.offset(),
            unique.indexOf(start->optimizations), writer.length());
    uint32_t prevEndOffset = start->endOffset.offset();
    writer.writeUnsigned(prevEndOffset);
    writer.writeByte(unique.indexOf(start->optimizations));

    // Delta encode the run.
    for (const NativeToTrackedOptimizations* entry = start + 1; entry != end; entry++) {
        uint32_t startOffset = entry->startOffset.offset();
        uint32_t endOffset = entry->endOffset.offset();

        uint32_t startDelta = startOffset - prevEndOffset;
        uint32_t length = endOffset - startOffset;
        uint8_t index = unique.indexOf(entry->optimizations);

        JitSpew(JitSpew_OptimizationTracking,
                "     [%6u, %6u] delta [+%5u, +%5u] vector %3u, offset %4u",
                startOffset, endOffset, startDelta, length, index, writer.length());

        WriteDelta(writer, startDelta, length, index);

        prevEndOffset = endOffset;
    }

    if (writer.oom())
        return false;

    return true;
}

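// The table written below is laid out as follows (a sketch). Entries are
// "reverse offsets": the distance from the unpadded table start back to
// each payload, i.e., tableOffset - padding - payloadOffset.
//
//   [0-3 zero bytes to reach 4-byte alignment]
//   [padding: uint32][numEntries: uint32]
//   [reverse offset: uint32] x numEntries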
static bool
WriteOffsetsTable(CompactBufferWriter& writer, const Vector<uint32_t, 16>& offsets,
                  uint32_t* tableOffsetp)
{
    // 4-byte align for the uint32s.
    uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
    if (padding == sizeof(uint32_t))
        padding = 0;
    JitSpew(JitSpew_OptimizationTracking, "   Padding %u byte%s",
            padding, padding == 1 ? "" : "s");
    for (uint32_t i = 0; i < padding; i++)
        writer.writeByte(0);

    // Record the start of the table to compute reverse offsets for entries.
    uint32_t tableOffset = writer.length();

    // Write how many bytes were padded and numEntries.
    writer.writeNativeEndianUint32_t(padding);
    writer.writeNativeEndianUint32_t(offsets.length());

    // Write entry offset table.
    for (size_t i = 0; i < offsets.length(); i++) {
        JitSpew(JitSpew_OptimizationTracking, "   Entry %u reverse offset %u",
                i, tableOffset - padding - offsets[i]);
        writer.writeNativeEndianUint32_t(tableOffset - padding - offsets[i]);
    }

    if (writer.oom())
        return false;

    *tableOffsetp = tableOffset;
    return true;
}

static JSFunction*
MaybeConstructorFromType(TypeSet::Type ty)
{
    if (ty.isUnknown() || ty.isAnyObject() || !ty.isGroup())
        return nullptr;
    ObjectGroup* obj = ty.group();
    TypeNewScript* newScript = obj->newScript();
    if (!newScript && obj->maybeUnboxedLayout())
        newScript = obj->unboxedLayout().newScript();
    return newScript ? newScript->function() : nullptr;
}

static void
SpewConstructor(TypeSet::Type ty, JSFunction* constructor)
{
#ifdef JS_JITSPEW
    if (!constructor->isInterpreted()) {
        JitSpew(JitSpew_OptimizationTracking, "   Unique type %s has native constructor",
                TypeSet::TypeString(ty));
        return;
    }

    char buf[512];
    if (constructor->displayAtom())
        PutEscapedString(buf, 512, constructor->displayAtom(), 0);
    else
        JS_snprintf(buf, mozilla::ArrayLength(buf), "??");

    const char* filename;
    size_t lineno;
    if (constructor->hasScript()) {
        filename = constructor->nonLazyScript()->filename();
        lineno = constructor->nonLazyScript()->lineno();
    } else {
        filename = constructor->lazyScript()->filename();
        lineno = constructor->lazyScript()->lineno();
    }

    JitSpew(JitSpew_OptimizationTracking, "   Unique type %s has constructor %s (%s:%" PRIuSIZE ")",
            TypeSet::TypeString(ty), buf, filename, lineno);
#endif
}

static void
SpewAllocationSite(TypeSet::Type ty, JSScript* script, uint32_t offset)
{
#ifdef JS_JITSPEW
    JitSpew(JitSpew_OptimizationTracking, "   Unique type %s has alloc site %s:%u",
            TypeSet::TypeString(ty), script->filename(),
            PCToLineNumber(script, script->offsetToPC(offset)));
#endif
}

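// The complete tracked-optimizations blob written by the function below
// is laid out roughly as (a sketch):
//
//   [region run payloads][region offsets table]
//   [type info payloads][types offsets table]
//   [attempts payloads][optimizations offsets table]
//
// The three table offsets and the region count are reported through the
// out-params so that callers can index into the blob later.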
bool
jit::WriteIonTrackedOptimizationsTable(JSContext* cx, CompactBufferWriter& writer,
                                       const NativeToTrackedOptimizations* start,
                                       const NativeToTrackedOptimizations* end,
                                       const UniqueTrackedOptimizations& unique,
                                       uint32_t* numRegions,
                                       uint32_t* regionTableOffsetp,
                                       uint32_t* typesTableOffsetp,
                                       uint32_t* optimizationTableOffsetp,
                                       IonTrackedTypeVector* allTypes)
{
    MOZ_ASSERT(unique.sorted());

#ifdef JS_JITSPEW
    // Spew training data, which may be fed into a script to determine a good
    // encoding strategy.
    if (JitSpewEnabled(JitSpew_OptimizationTracking)) {
        JitSpewStart(JitSpew_OptimizationTracking, "=> Training data: ");
        for (const NativeToTrackedOptimizations* entry = start; entry != end; entry++) {
            JitSpewCont(JitSpew_OptimizationTracking, "%u,%u,%u ",
                        entry->startOffset.offset(), entry->endOffset.offset(),
                        unique.indexOf(entry->optimizations));
        }
        JitSpewFin(JitSpew_OptimizationTracking);
    }
#endif

    Vector<uint32_t, 16> offsets(cx);
    const NativeToTrackedOptimizations* entry = start;

    // Write out region payloads, partitioned into runs.
    JitSpew(JitSpew_Profiling, "=> Writing regions");
    while (entry != end) {
        uint32_t runLength = IonTrackedOptimizationsRegion::ExpectedRunLength(entry, end);
        JitSpew(JitSpew_OptimizationTracking, "   Run at entry %u, length %u, offset %u",
                entry - start, runLength, writer.length());

        if (!offsets.append(writer.length()))
            return false;

        if (!IonTrackedOptimizationsRegion::WriteRun(writer, entry, entry + runLength, unique))
            return false;

        entry += runLength;
    }

    // Write out the table indexing into the payloads. 4-byte align for the uint32s.
    if (!WriteOffsetsTable(writer, offsets, regionTableOffsetp))
        return false;

    *numRegions = offsets.length();

    // Clear offsets so that it may be reused below for the unique
    // optimizations table.
    offsets.clear();

    const UniqueTrackedOptimizations::SortedVector& vec = unique.sortedVector();
    JitSpew(JitSpew_OptimizationTracking, "=> Writing unique optimizations table with %u entr%s",
            vec.length(), vec.length() == 1 ? "y" : "ies");

    // Write out type info payloads.
    UniqueTrackedTypes uniqueTypes(cx);
    if (!uniqueTypes.init())
        return false;

    for (const UniqueTrackedOptimizations::SortEntry* p = vec.begin(); p != vec.end(); p++) {
        const TempOptimizationTypeInfoVector* v = p->types;
        JitSpew(JitSpew_OptimizationTracking, "   Type info entry %u of length %u, offset %u",
                p - vec.begin(), v->length(), writer.length());
        SpewTempOptimizationTypeInfoVector(v, "  ");

        if (!offsets.append(writer.length()))
            return false;

        for (const OptimizationTypeInfo* t = v->begin(); t != v->end(); t++) {
            if (!t->writeCompact(cx, writer, uniqueTypes))
                return false;
        }
    }

    // Enumerate the unique types, and pull out any 'new' script constructor
    // functions and allocation site information. We do this during linking
    // instead of during profiling to avoid touching compartment tables during
    // profiling. Additionally, TypeNewScript is subject to GC in the
    // meantime.
    TypeSet::TypeList uniqueTypeList;
    if (!uniqueTypes.enumerate(&uniqueTypeList))
        return false;
    for (uint32_t i = 0; i < uniqueTypeList.length(); i++) {
        TypeSet::Type ty = uniqueTypeList[i];
        if (JSFunction* constructor = MaybeConstructorFromType(ty)) {
            if (!allTypes->append(IonTrackedTypeWithAddendum(ty, constructor)))
                return false;
            SpewConstructor(ty, constructor);
        } else {
            JSScript* script;
            uint32_t offset;
            if (!ty.isUnknown() && !ty.isAnyObject() && ty.isGroup() &&
                ObjectGroup::findAllocationSite(cx, ty.group(), &script, &offset))
            {
                if (!allTypes->append(IonTrackedTypeWithAddendum(ty, script, offset)))
                    return false;
                SpewAllocationSite(ty, script, offset);
            } else {
                if (!allTypes->append(IonTrackedTypeWithAddendum(ty)))
                    return false;
            }
        }
    }

    if (!WriteOffsetsTable(writer, offsets, typesTableOffsetp))
        return false;
    offsets.clear();

    // Write out attempts payloads.
    for (const UniqueTrackedOptimizations::SortEntry* p = vec.begin(); p != vec.end(); p++) {
        const TempOptimizationAttemptsVector* v = p->attempts;
        JitSpew(JitSpew_OptimizationTracking, "   Attempts entry %u of length %u, offset %u",
                p - vec.begin(), v->length(), writer.length());
        SpewTempOptimizationAttemptsVector(v, "  ");

        if (!offsets.append(writer.length()))
            return false;

        for (const OptimizationAttempt* a = v->begin(); a != v->end(); a++)
            a->writeCompact(writer);
    }

    return WriteOffsetsTable(writer, offsets, optimizationTableOffsetp);
}


BytecodeSite*
IonBuilder::maybeTrackedOptimizationSite(jsbytecode* pc)
{
    // BytecodeSites that track optimizations need to be 1-1 with the pc
    // when optimization tracking is enabled, so that all MIR generated by
    // a single pc are tracked at one place, even across basic blocks.
    //
    // Alternatively, we could make all BytecodeSites 1-1 with the pc, but
    // there is no real need as optimization tracking is a toggled
    // feature.
    //
    // Since sites that track optimizations should be sparse, just do a
    // reverse linear search, as we're most likely advancing in pc.
    MOZ_ASSERT(isOptimizationTrackingEnabled());
    for (size_t i = trackedOptimizationSites_.length(); i != 0; i--) {
        BytecodeSite* site = trackedOptimizationSites_[i - 1];
        if (site->pc() == pc) {
            MOZ_ASSERT(site->tree() == info().inlineScriptTree());
            return site;
        }
    }
    return nullptr;
}

void
IonBuilder::startTrackingOptimizations()
{
    if (isOptimizationTrackingEnabled()) {
        BytecodeSite* site = maybeTrackedOptimizationSite(current->trackedSite()->pc());

        if (!site) {
            site = current->trackedSite();
            site->setOptimizations(new(alloc()) TrackedOptimizations(alloc()));
            // OOMs are handled as if optimization tracking were turned off.
            if (!trackedOptimizationSites_.append(site))
                site = nullptr;
        } else {
            // The same bytecode may be visited multiple times (see
            // restartLoop). Only the last time matters, so clear any previous
            // tracked optimizations.
            site->optimizations()->clear();
        }

        if (site)
            current->updateTrackedSite(site);
    }
}

void
IonBuilder::trackTypeInfoUnchecked(TrackedTypeSite kind, MIRType mirType,
                                   TemporaryTypeSet* typeSet)
{
    BytecodeSite* site = current->trackedSite();
    // OOMs are handled as if optimization tracking were turned off.
    OptimizationTypeInfo typeInfo(alloc(), kind, mirType);
    if (!typeInfo.trackTypeSet(typeSet)) {
        site->setOptimizations(nullptr);
        return;
    }
    if (!site->optimizations()->trackTypeInfo(mozilla::Move(typeInfo)))
        site->setOptimizations(nullptr);
}

void
IonBuilder::trackTypeInfoUnchecked(TrackedTypeSite kind, JSObject* obj)
{
    BytecodeSite* site = current->trackedSite();
    // OOMs are handled as if optimization tracking were turned off.
    OptimizationTypeInfo typeInfo(alloc(), kind, MIRType_Object);
    if (!typeInfo.trackType(TypeSet::ObjectType(obj)))
        return;
    if (!site->optimizations()->trackTypeInfo(mozilla::Move(typeInfo)))
        site->setOptimizations(nullptr);
}

void
IonBuilder::trackTypeInfoUnchecked(CallInfo& callInfo)
{
    MDefinition* thisArg = callInfo.thisArg();
    trackTypeInfoUnchecked(TrackedTypeSite::Call_This, thisArg->type(), thisArg->resultTypeSet());

    for (uint32_t i = 0; i < callInfo.argc(); i++) {
        MDefinition* arg = callInfo.getArg(i);
        trackTypeInfoUnchecked(TrackedTypeSite::Call_Arg, arg->type(), arg->resultTypeSet());
    }

    TemporaryTypeSet* returnTypes = getInlineReturnTypeSet();
    trackTypeInfoUnchecked(TrackedTypeSite::Call_Return, returnTypes->getKnownMIRType(),
                           returnTypes);
}

void
IonBuilder::trackOptimizationAttemptUnchecked(TrackedStrategy strategy)
{
    BytecodeSite* site = current->trackedSite();
    // OOMs are handled as if optimization tracking were turned off.
    if (!site->optimizations()->trackAttempt(strategy))
        site->setOptimizations(nullptr);
}

void
IonBuilder::amendOptimizationAttemptUnchecked(uint32_t index)
{
    const BytecodeSite* site = current->trackedSite();
    site->optimizations()->amendAttempt(index);
}

void
IonBuilder::trackOptimizationOutcomeUnchecked(TrackedOutcome outcome)
{
    const BytecodeSite* site = current->trackedSite();
    site->optimizations()->trackOutcome(outcome);
}

void
IonBuilder::trackOptimizationSuccessUnchecked()
{
    const BytecodeSite* site = current->trackedSite();
    site->optimizations()->trackSuccess();
}

void
IonBuilder::trackInlineSuccessUnchecked(InliningStatus status)
{
    if (status == InliningStatus_Inlined)
        trackOptimizationOutcome(TrackedOutcome::Inlined);
}

static void
InterpretedFunctionFilenameAndLineNumber(JSFunction* fun, const char** filename,
                                         Maybe<unsigned>* lineno)
{
    if (fun->hasScript()) {
        *filename = fun->nonLazyScript()->maybeForwardedScriptSource()->filename();
        *lineno = Some((unsigned) fun->nonLazyScript()->lineno());
    } else if (fun->lazyScriptOrNull()) {
        *filename = fun->lazyScript()->maybeForwardedScriptSource()->filename();
        *lineno = Some((unsigned) fun->lazyScript()->lineno());
    } else {
        *filename = "(self-hosted builtin)";
        *lineno = Nothing();
    }
}

static JSFunction*
FunctionFromTrackedType(const IonTrackedTypeWithAddendum& tracked)
{
    if (tracked.hasConstructor())
        return tracked.constructor;

    TypeSet::Type ty = tracked.type;

    if (ty.isSingleton()) {
        JSObject* obj = ty.singleton();
        return obj->is<JSFunction>() ? &obj->as<JSFunction>() : nullptr;
    }

    return ty.group()->maybeInterpretedFunction();
}

void
IonTrackedOptimizationsTypeInfo::ForEachOpAdapter::readType(const IonTrackedTypeWithAddendum& tracked)
{
    TypeSet::Type ty = tracked.type;

    if (ty.isPrimitive() || ty.isUnknown() || ty.isAnyObject()) {
        op_.readType("primitive", TypeSet::NonObjectTypeString(ty), nullptr, Nothing());
        return;
    }

    char buf[512];
    const uint32_t bufsize = mozilla::ArrayLength(buf);

    if (JSFunction* fun = FunctionFromTrackedType(tracked)) {
        // The displayAtom is useful for identifying both native and
        // interpreted functions.
        char* name = nullptr;
        if (fun->displayAtom()) {
            PutEscapedString(buf, bufsize, fun->displayAtom(), 0);
            name = buf;
        }

        if (fun->isNative()) {
            //
            // Try printing out the displayAtom of the native function and the
            // absolute address of the native function pointer.
            //
            // Note that this address is not usable without knowing the
            // starting address at which our shared library is loaded. Shared
            // library information is exposed by the profiler. If this address
            // needs to be symbolicated manually (e.g., when it is gotten via
            // debug spewing of all optimization information), it needs to be
            // converted to an offset from the beginning of the shared library
            // for use with utilities like `addr2line` on Linux and `atos` on
            // OS X. Converting to an offset may be done via dladdr():
            //
            //   void* addr = JS_FUNC_TO_DATA_PTR(void*, fun->native());
            //   uintptr_t offset;
            //   Dl_info info;
            //   if (dladdr(addr, &info) != 0)
            //       offset = uintptr_t(addr) - uintptr_t(info.dli_fbase);
            //
            char locationBuf[20];
            if (!name) {
                uintptr_t addr = JS_FUNC_TO_DATA_PTR(uintptr_t, fun->native());
                JS_snprintf(locationBuf, mozilla::ArrayLength(locationBuf), "%llx",
                            (unsigned long long) addr);
            }
            op_.readType("native", name, name ? nullptr : locationBuf, Nothing());
            return;
        }

        const char* filename;
        Maybe<unsigned> lineno;
        InterpretedFunctionFilenameAndLineNumber(fun, &filename, &lineno);
        op_.readType(tracked.constructor ? "constructor" : "function",
                     name, filename, lineno);
        return;
    }

    const char* className = ty.objectKey()->clasp()->name;
    JS_snprintf(buf, bufsize, "[object %s]", className);

    if (tracked.hasAllocationSite()) {
        JSScript* script = tracked.script;
        op_.readType("alloc site", buf,
                     script->maybeForwardedScriptSource()->filename(),
                     Some(PCToLineNumber(script, script->offsetToPC(tracked.offset))));
        return;
    }

    if (ty.isGroup()) {
        op_.readType("prototype", buf, nullptr, Nothing());
        return;
    }

    op_.readType("singleton", buf, nullptr, Nothing());
}

void
IonTrackedOptimizationsTypeInfo::ForEachOpAdapter::operator()(JS::TrackedTypeSite site,
                                                              MIRType mirType)
{
    op_(site, StringFromMIRType(mirType));
}

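// A minimal consumer sketch (hypothetical; assumes a FrameHandle obtained
// via JS::ForEachProfiledFrame and the accessors used below):
//
//   struct SpewAttemptsOp : public JS::ForEachTrackedOptimizationAttemptOp
//   {
//       void operator()(JS::TrackedStrategy s, JS::TrackedOutcome o) override {
//           printf("%s -> %s\n", JS::TrackedStrategyString(s),
//                  JS::TrackedOutcomeString(o));
//       }
//   };
//
//   JSScript* script;
//   jsbytecode* pc;
//   SpewAttemptsOp op;
//   frame.forEachOptimizationAttempt(op, &script, &pc);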
typedef JS::ForEachProfiledFrameOp::FrameHandle FrameHandle;

void
FrameHandle::updateHasTrackedOptimizations()
{
    // All inlined frames will have the same optimization information by
    // virtue of sharing the JitcodeGlobalEntry, but such information is
    // only interpretable on the youngest frame.
    if (depth() != 0)
        return;
    if (!entry_.hasTrackedOptimizations())
        return;

    uint32_t entryOffset;
    optsIndex_ = entry_.trackedOptimizationIndexAtAddr(rt_, addr_, &entryOffset);
    if (optsIndex_.isSome())
        canonicalAddr_ = (void*)(((uint8_t*) entry_.nativeStartAddr()) + entryOffset);
}

void
FrameHandle::forEachOptimizationAttempt(ForEachTrackedOptimizationAttemptOp& op,
                                        JSScript** scriptOut, jsbytecode** pcOut) const
{
    MOZ_ASSERT(optsIndex_.isSome());
    entry_.forEachOptimizationAttempt(rt_, *optsIndex_, op);
    entry_.youngestFrameLocationAtAddr(rt_, addr_, scriptOut, pcOut);
}

void
FrameHandle::forEachOptimizationTypeInfo(ForEachTrackedOptimizationTypeInfoOp& op) const
{
    MOZ_ASSERT(optsIndex_.isSome());
    IonTrackedOptimizationsTypeInfo::ForEachOpAdapter adapter(op);
    entry_.forEachOptimizationTypeInfo(rt_, *optsIndex_, adapter);
}