1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/OptimizationTracking.h"
8
9 #include "mozilla/SizePrintfMacros.h"
10
11 #include "jsprf.h"
12
13 #include "ds/Sort.h"
14 #include "jit/IonBuilder.h"
15 #include "jit/JitcodeMap.h"
16 #include "jit/JitSpewer.h"
17 #include "js/TrackedOptimizationInfo.h"
18
19 #include "vm/ObjectGroup-inl.h"
20 #include "vm/TypeInference-inl.h"
21
22 using namespace js;
23 using namespace js::jit;
24
25 using mozilla::Maybe;
26 using mozilla::Some;
27 using mozilla::Nothing;
28
29 using JS::TrackedStrategy;
30 using JS::TrackedOutcome;
31 using JS::TrackedTypeSite;
32 using JS::ForEachTrackedOptimizationAttemptOp;
33 using JS::ForEachTrackedOptimizationTypeInfoOp;
34
// Record a type-info entry observed for this tracked operation.
// Returns false on OOM (vector append failure).
bool
TrackedOptimizations::trackTypeInfo(OptimizationTypeInfo&& ty)
{
    return types_.append(mozilla::Move(ty));
}
40
// Begin tracking a new optimization attempt with the given strategy.
// The outcome starts as GenericFailure and is amended later via
// trackOutcome/trackSuccess. Returns false on OOM.
bool
TrackedOptimizations::trackAttempt(TrackedStrategy strategy)
{
    OptimizationAttempt attempt(strategy, TrackedOutcome::GenericFailure);
    // Remember the slot so subsequent trackOutcome calls target this attempt.
    currentAttempt_ = attempts_.length();
    return attempts_.append(attempt);
}
48
// Redirect outcome updates to a previously recorded attempt at |index|.
void
TrackedOptimizations::amendAttempt(uint32_t index)
{
    currentAttempt_ = index;
}
54
// Set the outcome of the current (most recently tracked or amended) attempt.
void
TrackedOptimizations::trackOutcome(TrackedOutcome outcome)
{
    attempts_[currentAttempt_].setOutcome(outcome);
}
60
// Mark the current attempt as having succeeded.
void
TrackedOptimizations::trackSuccess()
{
    attempts_[currentAttempt_].setOutcome(TrackedOutcome::GenericSuccess);
}
66
// Element-wise equality of two vectors: equal lengths and equal elements,
// compared in order with the element type's operator!=.
template <class Vec>
static bool
VectorContentsMatch(const Vec* xs, const Vec* ys)
{
    if (xs->length() != ys->length())
        return false;
    for (size_t i = 0; i < xs->length(); i++) {
        if ((*xs)[i] != (*ys)[i])
            return false;
    }
    return true;
}
80
// Compare this instance's tracked type info against |other| element-wise.
bool
TrackedOptimizations::matchTypes(const TempOptimizationTypeInfoVector& other) const
{
    return VectorContentsMatch(&types_, &other);
}
86
// Compare this instance's tracked attempts against |other| element-wise.
bool
TrackedOptimizations::matchAttempts(const TempOptimizationAttemptsVector& other) const
{
    return VectorContentsMatch(&attempts_, &other);
}
92
// Map a TrackedStrategy enumerator to its name, generated from the X-macro
// list so the strings stay in sync with the enum.
JS_PUBLIC_API(const char*)
JS::TrackedStrategyString(TrackedStrategy strategy)
{
    switch (strategy) {
#define STRATEGY_CASE(name)                       \
      case TrackedStrategy::name:                 \
        return #name;
    TRACKED_STRATEGY_LIST(STRATEGY_CASE)
#undef STRATEGY_CASE

      default:
        MOZ_CRASH("bad strategy");
    }
}
107
// Map a TrackedOutcome enumerator to its name, generated from the X-macro
// list so the strings stay in sync with the enum.
JS_PUBLIC_API(const char*)
JS::TrackedOutcomeString(TrackedOutcome outcome)
{
    switch (outcome) {
#define OUTCOME_CASE(name)                        \
      case TrackedOutcome::name:                  \
        return #name;
    TRACKED_OUTCOME_LIST(OUTCOME_CASE)
#undef OUTCOME_CASE

      default:
        MOZ_CRASH("bad outcome");
    }
}
122
// Map a TrackedTypeSite enumerator to its name, generated from the X-macro
// list so the strings stay in sync with the enum.
JS_PUBLIC_API(const char*)
JS::TrackedTypeSiteString(TrackedTypeSite site)
{
    switch (site) {
#define TYPESITE_CASE(name)                       \
      case TrackedTypeSite::name:                 \
        return #name;
    TRACKED_TYPESITE_LIST(TYPESITE_CASE)
#undef TYPESITE_CASE

      default:
        MOZ_CRASH("bad type site");
    }
}
137
// Spew one line per type-info entry: its site, MIR type, and every type in
// its type set. |indent| is prepended to each line when non-null. Compiles
// to a no-op without JS_JITSPEW.
void
SpewTempOptimizationTypeInfoVector(const TempOptimizationTypeInfoVector* types,
                                   const char* indent = nullptr)
{
#ifdef JS_JITSPEW
    for (const OptimizationTypeInfo* t = types->begin(); t != types->end(); t++) {
        JitSpewStart(JitSpew_OptimizationTracking, "   %s%s of type %s, type set",
                     indent ? indent : "",
                     TrackedTypeSiteString(t->site()), StringFromMIRType(t->mirType()));
        for (uint32_t i = 0; i < t->types().length(); i++)
            JitSpewCont(JitSpew_OptimizationTracking, " %s", TypeSet::TypeString(t->types()[i]));
        JitSpewFin(JitSpew_OptimizationTracking);
    }
#endif
}
153
// Spew one "strategy: outcome" line per attempt. |indent| is prepended to
// each line when non-null. Compiles to a no-op without JS_JITSPEW.
void
SpewTempOptimizationAttemptsVector(const TempOptimizationAttemptsVector* attempts,
                                   const char* indent = nullptr)
{
#ifdef JS_JITSPEW
    for (const OptimizationAttempt* a = attempts->begin(); a != attempts->end(); a++) {
        JitSpew(JitSpew_OptimizationTracking, "   %s%s: %s", indent ? indent : "",
                TrackedStrategyString(a->strategy()), TrackedOutcomeString(a->outcome()));
    }
#endif
}
165
// Debug-spew both the tracked type info and the tracked attempts.
void
TrackedOptimizations::spew() const
{
#ifdef JS_JITSPEW
    SpewTempOptimizationTypeInfoVector(&types_);
    SpewTempOptimizationAttemptsVector(&attempts_);
#endif
}
174
// Record every type in |typeSet| into this entry's type list. A null
// type set is deliberately treated as success (nothing to record).
// Returns false on OOM.
bool
OptimizationTypeInfo::trackTypeSet(TemporaryTypeSet* typeSet)
{
    if (!typeSet)
        return true;
    return typeSet->enumerateTypes(&types_);
}
182
// Record a single type into this entry's type list. Returns false on OOM.
bool
OptimizationTypeInfo::trackType(TypeSet::Type type)
{
    return types_.append(type);
}
188
// Two entries are equal when site, MIR type, and the full ordered type
// list all match.
bool
OptimizationTypeInfo::operator ==(const OptimizationTypeInfo& other) const
{
    return site_ == other.site_ && mirType_ == other.mirType_ &&
           VectorContentsMatch(&types_, &other.types_);
}
195
// Inequality is defined as the negation of operator==.
bool
OptimizationTypeInfo::operator !=(const OptimizationTypeInfo& other) const
{
    return !(*this == other);
}
201
// Fold |n| into accumulator |h|; one mixing step in the style of Bob
// Jenkins' one-at-a-time hash (add, shift-add, shift-xor).
static inline HashNumber
CombineHash(HashNumber h, HashNumber n)
{
    h += n;
    h += (h << 10);
    h ^= (h >> 6);
    return h;
}
210
// Hash a TypeSet::Type. Object types are hashed by their ObjectKey pointer
// (identity); primitive types hash their raw tag value directly.
static inline HashNumber
HashType(TypeSet::Type ty)
{
    if (ty.isObjectUnchecked())
        return PointerHasher<TypeSet::ObjectKey*, 3>::hash(ty.objectKey());
    return HashNumber(ty.raw());
}
218
219 static HashNumber
HashTypeList(const TempTypeList & types)220 HashTypeList(const TempTypeList& types)
221 {
222 HashNumber h = 0;
223 for (uint32_t i = 0; i < types.length(); i++)
224 h = CombineHash(h, HashType(types[i]));
225 return h;
226 }
227
// Hash site and MIR type into disjoint high bit ranges, then mix in the
// type list hash.
HashNumber
OptimizationTypeInfo::hash() const
{
    return ((HashNumber(site_) << 24) + (HashNumber(mirType_) << 16)) ^ HashTypeList(types_);
}
233
234 template <class Vec>
235 static HashNumber
HashVectorContents(const Vec * xs,HashNumber h)236 HashVectorContents(const Vec* xs, HashNumber h)
237 {
238 for (auto x = xs->begin(); x != xs->end(); x++)
239 h = CombineHash(h, x->hash());
240 return h;
241 }
242
// Hash a lookup key over both its type and attempt vectors, then apply a
// final avalanche (shift-add/xor/add) to spread the bits.
/* static */ HashNumber
UniqueTrackedOptimizations::Key::hash(const Lookup& lookup)
{
    HashNumber h = HashVectorContents(lookup.types, 0);
    h = HashVectorContents(lookup.attempts, h);
    h += (h << 3);
    h ^= (h >> 11);
    h += (h << 15);
    return h;
}
253
254 /* static */ bool
match(const Key & key,const Lookup & lookup)255 UniqueTrackedOptimizations::Key::match(const Key& key, const Lookup& lookup)
256 {
257 return VectorContentsMatch(key.attempts, lookup.attempts) &&
258 VectorContentsMatch(key.types, lookup.types);
259 }
260
// Register one script-level TrackedOptimizations instance. Duplicate
// (types, attempts) pairs share a map entry and bump its frequency; new
// pairs get index UINT8_MAX as a "not yet assigned" sentinel until
// sortByFrequency() runs. Returns false on OOM.
bool
UniqueTrackedOptimizations::add(const TrackedOptimizations* optimizations)
{
    MOZ_ASSERT(!sorted());
    Key key;
    key.types = &optimizations->types_;
    key.attempts = &optimizations->attempts_;
    AttemptsMap::AddPtr p = map_.lookupForAdd(key);
    if (p) {
        p->value().frequency++;
        return true;
    }
    Entry entry;
    entry.index = UINT8_MAX;
    entry.frequency = 1;
    return map_.add(p, key, entry);
}
278
// MergeSort comparator. Note the reversed operands: |b <= a| orders
// entries by *descending* frequency, so the most frequent entries get the
// smallest indices. The bool return is MergeSort's fallibility signal and
// is always true here.
struct FrequencyComparator
{
    bool operator()(const UniqueTrackedOptimizations::SortEntry& a,
                    const UniqueTrackedOptimizations::SortEntry& b,
                    bool* lessOrEqualp)
    {
        *lessOrEqualp = b.frequency <= a.frequency;
        return true;
    }
};
289
// Flatten the attempts map into sorted_, ordered by descending frequency,
// and write each entry's resulting index back into the map. After this,
// indexOf() may be used. Returns false on OOM or if there are too many
// unique entries to index with a uint8_t.
bool
UniqueTrackedOptimizations::sortByFrequency(JSContext* cx)
{
    MOZ_ASSERT(!sorted());

    JitSpew(JitSpew_OptimizationTracking, "=> Sorting unique optimizations by frequency");

    // Sort by frequency.
    Vector<SortEntry> entries(cx);
    for (AttemptsMap::Range r = map_.all(); !r.empty(); r.popFront()) {
        SortEntry entry;
        entry.types = r.front().key().types;
        entry.attempts = r.front().key().attempts;
        entry.frequency = r.front().value().frequency;
        if (!entries.append(entry))
            return false;
    }

    // The compact table stores indices as a max of uint8_t. In practice each
    // script has fewer unique optimization attempts than UINT8_MAX.
    if (entries.length() >= UINT8_MAX - 1)
        return false;

    // MergeSort needs scratch space the same size as the input.
    Vector<SortEntry> scratch(cx);
    if (!scratch.resize(entries.length()))
        return false;

    FrequencyComparator comparator;
    MOZ_ALWAYS_TRUE(MergeSort(entries.begin(), entries.length(), scratch.begin(), comparator));

    // Update map entries' indices.
    for (size_t i = 0; i < entries.length(); i++) {
        Key key;
        key.types = entries[i].types;
        key.attempts = entries[i].attempts;
        AttemptsMap::Ptr p = map_.lookup(key);
        MOZ_ASSERT(p);
        p->value().index = sorted_.length();

        JitSpew(JitSpew_OptimizationTracking, "   Entry %" PRIuSIZE " has frequency %" PRIu32,
                sorted_.length(), p->value().frequency);

        if (!sorted_.append(entries[i]))
            return false;
    }

    return true;
}
338
// Look up the compact-table index assigned to |optimizations| by
// sortByFrequency(); must only be called after sorting. The UINT8_MAX
// sentinel assert checks the index was actually assigned.
uint8_t
UniqueTrackedOptimizations::indexOf(const TrackedOptimizations* optimizations) const
{
    MOZ_ASSERT(sorted());
    Key key;
    key.types = &optimizations->types_;
    key.attempts = &optimizations->attempts_;
    AttemptsMap::Ptr p = map_.lookup(key);
    MOZ_ASSERT(p);
    MOZ_ASSERT(p->value().index != UINT8_MAX);
    return p->value().index;
}
351
// Assigns each unique tracked type an index; outputs a compact list.
class jit::UniqueTrackedTypes
{
  public:
    // Hash policy: types hash by HashType and compare with operator==.
    struct TypeHasher
    {
        typedef TypeSet::Type Lookup;

        static HashNumber hash(const Lookup& ty) { return HashType(ty); }
        static bool match(const TypeSet::Type& ty1, const TypeSet::Type& ty2) { return ty1 == ty2; }
    };

  private:
    // Map of unique TypeSet::Types to indices.
    typedef HashMap<TypeSet::Type, uint8_t, TypeHasher> TypesMap;
    TypesMap map_;

    // Types in insertion order; list_[i] is the type assigned index i.
    Vector<TypeSet::Type, 1> list_;

  public:
    explicit UniqueTrackedTypes(JSContext* cx)
      : map_(cx),
        list_(cx)
    { }

    bool init() { return map_.init(); }
    bool getIndexOf(JSContext* cx, TypeSet::Type ty, uint8_t* indexp);

    // Map and list are kept in lockstep, so either size is the count.
    uint32_t count() const { MOZ_ASSERT(map_.count() == list_.length()); return list_.length(); }
    bool enumerate(TypeSet::TypeList* types) const;
};
383
// Return in |*indexp| the index for |ty|, assigning a fresh one (and
// appending to the list) on first sight. Fails if the map/list mutation
// OOMs or if more than UINT8_MAX unique types are seen.
bool
UniqueTrackedTypes::getIndexOf(JSContext* cx, TypeSet::Type ty, uint8_t* indexp)
{
    TypesMap::AddPtr p = map_.lookupForAdd(ty);
    if (p) {
        *indexp = p->value();
        return true;
    }

    // Store indices as max of uint8_t. In practice each script has fewer than
    // UINT8_MAX of unique observed types.
    if (count() >= UINT8_MAX)
        return false;

    uint8_t index = (uint8_t) count();
    if (!map_.add(p, ty, index))
        return false;
    if (!list_.append(ty))
        return false;
    *indexp = index;
    return true;
}
406
// Append all unique types, in index order, to |types|. Returns false on OOM.
bool
UniqueTrackedTypes::enumerate(TypeSet::TypeList* types) const
{
    return types->append(list_.begin(), list_.end());
}
412
// Decode the region header (the [startOffset, endOffset] range this run
// covers) and remember where the delta-encoded ranges begin.
void
IonTrackedOptimizationsRegion::unpackHeader()
{
    CompactBufferReader reader(start_, end_);
    startOffset_ = reader.readUnsigned();
    endOffset_ = reader.readUnsigned();
    rangesStart_ = reader.currentPosition();
    MOZ_ASSERT(startOffset_ < endOffset_);
}
422
// Decode the next (startOffset, endOffset, index) triple in the run,
// advancing the iterator. Mirrors the encoding in WriteRun: the first
// entry stores its end offset directly; later entries are deltas relative
// to the previous entry's end offset.
void
IonTrackedOptimizationsRegion::RangeIterator::readNext(uint32_t* startOffset, uint32_t* endOffset,
                                                       uint8_t* index)
{
    MOZ_ASSERT(more());

    CompactBufferReader reader(cur_, end_);

    // The very first entry isn't delta-encoded.
    if (cur_ == start_) {
        *startOffset = firstStartOffset_;
        *endOffset = prevEndOffset_ = reader.readUnsigned();
        *index = reader.readByte();
        cur_ = reader.currentPosition();
        MOZ_ASSERT(cur_ <= end_);
        return;
    }

    // Otherwise, read a delta.
    uint32_t startDelta, length;
    ReadDelta(reader, &startDelta, &length, index);
    *startOffset = prevEndOffset_ + startDelta;
    *endOffset = prevEndOffset_ = *startOffset + length;
    cur_ = reader.currentPosition();
    MOZ_ASSERT(cur_ <= end_);
}
449
// Map a native code address inside this entry to its tracked-optimizations
// index, if any. Also returns (via |entryOffsetOut|) the end offset of the
// matched range. Returns Nothing() when the address falls outside every
// tracked region.
Maybe<uint8_t>
JitcodeGlobalEntry::IonEntry::trackedOptimizationIndexAtAddr(JSRuntime *rt, void* ptr,
                                                             uint32_t* entryOffsetOut)
{
    MOZ_ASSERT(hasTrackedOptimizations());
    MOZ_ASSERT(containsPointer(ptr));
    uint32_t ptrOffset = ((uint8_t*) ptr) - ((uint8_t*) nativeStartAddr());
    Maybe<IonTrackedOptimizationsRegion> region = optsRegionTable_->findRegion(ptrOffset);
    if (region.isNothing())
        return Nothing();
    return region->findIndex(ptrOffset, entryOffsetOut);
}
462
// Invoke |op| for each attempt recorded under compact-table entry |index|.
void
JitcodeGlobalEntry::IonEntry::forEachOptimizationAttempt(JSRuntime *rt, uint8_t index,
                                                         ForEachTrackedOptimizationAttemptOp& op)
{
    trackedOptimizationAttempts(index).forEach(op);
}
469
// Invoke |op| for each type-info record under compact-table entry |index|,
// resolving type references through this entry's allTrackedTypes() list.
void
JitcodeGlobalEntry::IonEntry::forEachOptimizationTypeInfo(JSRuntime *rt, uint8_t index,
                                                          IonTrackedOptimizationsTypeInfo::ForEachOpAdapter& op)
{
    trackedOptimizationTypeInfo(index).forEach(op, allTrackedTypes());
}
476
// Decode the compact attempts payload, invoking |op| with each
// (strategy, outcome) pair until the buffer is exhausted.
void
IonTrackedOptimizationsAttempts::forEach(ForEachTrackedOptimizationAttemptOp& op)
{
    CompactBufferReader reader(start_, end_);
    const uint8_t* cur = start_;
    while (cur != end_) {
        TrackedStrategy strategy = TrackedStrategy(reader.readUnsigned());
        TrackedOutcome outcome = TrackedOutcome(reader.readUnsigned());
        MOZ_ASSERT(strategy < TrackedStrategy::Count);
        MOZ_ASSERT(outcome < TrackedOutcome::Count);
        op(strategy, outcome);
        cur = reader.currentPosition();
        MOZ_ASSERT(cur <= end_);
    }
}
492
// Decode the compact type-info payload. For each record, the referenced
// types (stored as byte indices into |allTypes|) are fed to op.readType()
// first, then op(site, mirType) closes the record — mirroring the layout
// written by OptimizationTypeInfo::writeCompact.
void
IonTrackedOptimizationsTypeInfo::forEach(ForEachOp& op, const IonTrackedTypeVector* allTypes)
{
    CompactBufferReader reader(start_, end_);
    const uint8_t* cur = start_;
    while (cur != end_) {
        TrackedTypeSite site = JS::TrackedTypeSite(reader.readUnsigned());
        MOZ_ASSERT(site < JS::TrackedTypeSite::Count);
        MIRType mirType = MIRType(reader.readUnsigned());
        uint32_t length = reader.readUnsigned();
        for (uint32_t i = 0; i < length; i++)
            op.readType((*allTypes)[reader.readByte()]);
        op(site, mirType);
        cur = reader.currentPosition();
        MOZ_ASSERT(cur <= end_);
    }
}
510
// Find the compact-table index whose range contains |offset|, using the
// half-open (start, end] convention (see findRegion for rationale), and
// report the matched range's end offset via |entryOffsetOut|.
Maybe<uint8_t>
IonTrackedOptimizationsRegion::findIndex(uint32_t offset, uint32_t* entryOffsetOut) const
{
    // Fast reject when the offset is outside this whole region.
    if (offset <= startOffset_ || offset > endOffset_)
        return Nothing();

    // Linear search through the run.
    RangeIterator iter = ranges();
    while (iter.more()) {
        uint32_t startOffset, endOffset;
        uint8_t index;
        iter.readNext(&startOffset, &endOffset, &index);
        if (startOffset < offset && offset <= endOffset) {
            *entryOffsetOut = endOffset;
            return Some(index);
        }
    }
    return Nothing();
}
530
531 Maybe<IonTrackedOptimizationsRegion>
findRegion(uint32_t offset) const532 IonTrackedOptimizationsRegionTable::findRegion(uint32_t offset) const
533 {
534 // For two contiguous regions, e.g., [i, j] and [j, k], an offset exactly
535 // at j will be associated with [i, j] instead of [j, k]. An offset
536 // exactly at j is often a return address from a younger frame, which case
537 // the next region, despite starting at j, has not yet logically started
538 // execution.
539
540 static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
541 uint32_t regions = numEntries();
542 MOZ_ASSERT(regions > 0);
543
544 // For small numbers of regions, do linear search.
545 if (regions <= LINEAR_SEARCH_THRESHOLD) {
546 for (uint32_t i = 0; i < regions; i++) {
547 IonTrackedOptimizationsRegion region = entry(i);
548 if (region.startOffset() < offset && offset <= region.endOffset()) {
549 return Some(entry(i));
550 }
551 }
552 return Nothing();
553 }
554
555 // Otherwise, do binary search.
556 uint32_t i = 0;
557 while (regions > 1) {
558 uint32_t step = regions / 2;
559 uint32_t mid = i + step;
560 IonTrackedOptimizationsRegion region = entry(mid);
561
562 if (offset <= region.startOffset()) {
563 // Entry is below mid.
564 regions = step;
565 } else if (offset > region.endOffset()) {
566 // Entry is above mid.
567 i = mid;
568 regions -= step;
569 } else {
570 // Entry is in mid.
571 return Some(entry(i));
572 }
573 }
574 return Nothing();
575 }
576
// Compute how many consecutive entries starting at |start| can share one
// run: the run ends at the first entry whose (startDelta, length) pair does
// not fit any delta encoding, or at MAX_RUN_LENGTH entries.
/* static */ uint32_t
IonTrackedOptimizationsRegion::ExpectedRunLength(const NativeToTrackedOptimizations* start,
                                                 const NativeToTrackedOptimizations* end)
{
    MOZ_ASSERT(start < end);

    // A run always has at least 1 entry, which is not delta encoded.
    uint32_t runLength = 1;
    uint32_t prevEndOffset = start->endOffset.offset();

    for (const NativeToTrackedOptimizations* entry = start + 1; entry != end; entry++) {
        uint32_t startOffset = entry->startOffset.offset();
        uint32_t endOffset = entry->endOffset.offset();
        // Deltas are relative to the previous entry's end offset.
        uint32_t startDelta = startOffset - prevEndOffset;
        uint32_t length = endOffset - startOffset;

        if (!IsDeltaEncodeable(startDelta, length))
            break;

        runLength++;
        if (runLength == MAX_RUN_LENGTH)
            break;

        prevEndOffset = endOffset;
    }

    return runLength;
}
605
// Serialize this attempt as two variable-length unsigned ints:
// strategy then outcome. Read back by IonTrackedOptimizationsAttempts::forEach.
void
OptimizationAttempt::writeCompact(CompactBufferWriter& writer) const
{
    writer.writeUnsigned((uint32_t) strategy_);
    writer.writeUnsigned((uint32_t) outcome_);
}
612
// Serialize this entry as: site, MIR type, type count, then one byte index
// per type (indices interned through |uniqueTypes|). Read back by
// IonTrackedOptimizationsTypeInfo::forEach. Returns false on OOM or if the
// unique-type table overflows.
bool
OptimizationTypeInfo::writeCompact(JSContext* cx, CompactBufferWriter& writer,
                                   UniqueTrackedTypes& uniqueTypes) const
{
    writer.writeUnsigned((uint32_t) site_);
    writer.writeUnsigned((uint32_t) mirType_);
    writer.writeUnsigned(types_.length());
    for (uint32_t i = 0; i < types_.length(); i++) {
        uint8_t index;
        if (!uniqueTypes.getIndexOf(cx, types_[i], &index))
            return false;
        writer.writeByte(index);
    }
    return true;
}
628
629 /* static */ void
ReadDelta(CompactBufferReader & reader,uint32_t * startDelta,uint32_t * length,uint8_t * index)630 IonTrackedOptimizationsRegion::ReadDelta(CompactBufferReader& reader,
631 uint32_t* startDelta, uint32_t* length,
632 uint8_t* index)
633 {
634 // 2 bytes
635 // SSSS-SSSL LLLL-LII0
636 const uint32_t firstByte = reader.readByte();
637 const uint32_t secondByte = reader.readByte();
638 if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
639 uint32_t encVal = firstByte | secondByte << 8;
640 *startDelta = encVal >> ENC1_START_DELTA_SHIFT;
641 *length = (encVal >> ENC1_LENGTH_SHIFT) & ENC1_LENGTH_MAX;
642 *index = (encVal >> ENC1_INDEX_SHIFT) & ENC1_INDEX_MAX;
643 MOZ_ASSERT(length != 0);
644 return;
645 }
646
647 // 3 bytes
648 // SSSS-SSSS SSSS-LLLL LLII-II01
649 const uint32_t thirdByte = reader.readByte();
650 if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
651 uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
652 *startDelta = encVal >> ENC2_START_DELTA_SHIFT;
653 *length = (encVal >> ENC2_LENGTH_SHIFT) & ENC2_LENGTH_MAX;
654 *index = (encVal >> ENC2_INDEX_SHIFT) & ENC2_INDEX_MAX;
655 MOZ_ASSERT(length != 0);
656 return;
657 }
658
659 // 4 bytes
660 // SSSS-SSSS SSSL-LLLL LLLL-LIII IIII-I011
661 const uint32_t fourthByte = reader.readByte();
662 if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
663 uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
664 *startDelta = encVal >> ENC3_START_DELTA_SHIFT;
665 *length = (encVal >> ENC3_LENGTH_SHIFT) & ENC3_LENGTH_MAX;
666 *index = (encVal >> ENC3_INDEX_SHIFT) & ENC3_INDEX_MAX;
667 MOZ_ASSERT(length != 0);
668 return;
669 }
670
671 // 5 bytes
672 // SSSS-SSSS SSSS-SSSL LLLL-LLLL LLLL-LIII IIII-I111
673 MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
674 uint64_t fifthByte = reader.readByte();
675 uint64_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24 |
676 fifthByte << 32;
677 *startDelta = encVal >> ENC4_START_DELTA_SHIFT;
678 *length = (encVal >> ENC4_LENGTH_SHIFT) & ENC4_LENGTH_MAX;
679 *index = (encVal >> ENC4_INDEX_SHIFT) & ENC4_INDEX_MAX;
680 MOZ_ASSERT(length != 0);
681 }
682
// Encode a (startDelta, length, index) triple, choosing the smallest of
// four encodings (2 to 5 bytes) that fits all three fields. Bytes are
// emitted little-endian; the low bits of the first byte tag the encoding
// so ReadDelta can dispatch on them.
/* static */ void
IonTrackedOptimizationsRegion::WriteDelta(CompactBufferWriter& writer,
                                          uint32_t startDelta, uint32_t length,
                                          uint8_t index)
{
    // 2 bytes
    // SSSS-SSSL LLLL-LII0
    if (startDelta <= ENC1_START_DELTA_MAX &&
        length <= ENC1_LENGTH_MAX &&
        index <= ENC1_INDEX_MAX)
    {
        uint16_t val = ENC1_MASK_VAL |
                       (startDelta << ENC1_START_DELTA_SHIFT) |
                       (length << ENC1_LENGTH_SHIFT) |
                       (index << ENC1_INDEX_SHIFT);
        writer.writeByte(val & 0xff);
        writer.writeByte((val >> 8) & 0xff);
        return;
    }

    // 3 bytes
    // SSSS-SSSS SSSS-LLLL LLII-II01
    if (startDelta <= ENC2_START_DELTA_MAX &&
        length <= ENC2_LENGTH_MAX &&
        index <= ENC2_INDEX_MAX)
    {
        uint32_t val = ENC2_MASK_VAL |
                       (startDelta << ENC2_START_DELTA_SHIFT) |
                       (length << ENC2_LENGTH_SHIFT) |
                       (index << ENC2_INDEX_SHIFT);
        writer.writeByte(val & 0xff);
        writer.writeByte((val >> 8) & 0xff);
        writer.writeByte((val >> 16) & 0xff);
        return;
    }

    // 4 bytes
    // SSSS-SSSS SSSL-LLLL LLLL-LIII IIII-I011
    if (startDelta <= ENC3_START_DELTA_MAX &&
        length <= ENC3_LENGTH_MAX)
    {
        // index always fits because it's an uint8_t; change this if
        // ENC3_INDEX_MAX changes.
        MOZ_ASSERT(ENC3_INDEX_MAX == UINT8_MAX);
        uint32_t val = ENC3_MASK_VAL |
                       (startDelta << ENC3_START_DELTA_SHIFT) |
                       (length << ENC3_LENGTH_SHIFT) |
                       (index << ENC3_INDEX_SHIFT);
        writer.writeByte(val & 0xff);
        writer.writeByte((val >> 8) & 0xff);
        writer.writeByte((val >> 16) & 0xff);
        writer.writeByte((val >> 24) & 0xff);
        return;
    }

    // 5 bytes
    // SSSS-SSSS SSSS-SSSL LLLL-LLLL LLLL-LIII IIII-I111
    if (startDelta <= ENC4_START_DELTA_MAX &&
        length <= ENC4_LENGTH_MAX)
    {
        // index always fits because it's an uint8_t; change this if
        // ENC4_INDEX_MAX changes.
        MOZ_ASSERT(ENC4_INDEX_MAX == UINT8_MAX);
        // Widen to 64 bits before shifting so fields above bit 31 survive.
        uint64_t val = ENC4_MASK_VAL |
                       (((uint64_t) startDelta) << ENC4_START_DELTA_SHIFT) |
                       (((uint64_t) length) << ENC4_LENGTH_SHIFT) |
                       (((uint64_t) index) << ENC4_INDEX_SHIFT);
        writer.writeByte(val & 0xff);
        writer.writeByte((val >> 8) & 0xff);
        writer.writeByte((val >> 16) & 0xff);
        writer.writeByte((val >> 24) & 0xff);
        writer.writeByte((val >> 32) & 0xff);
        return;
    }

    MOZ_CRASH("startDelta,length,index triple too large to encode.");
}
760
// Serialize one run of entries [start, end): a header with the covered
// native range, a non-delta first entry, then delta-encoded entries
// (deltas relative to the previous end offset). Returns false on writer OOM.
/* static */ bool
IonTrackedOptimizationsRegion::WriteRun(CompactBufferWriter& writer,
                                        const NativeToTrackedOptimizations* start,
                                        const NativeToTrackedOptimizations* end,
                                        const UniqueTrackedOptimizations& unique)
{
    // Write the header, which is the range that this whole run encompasses.
    JitSpew(JitSpew_OptimizationTracking, "     Header: [%" PRIuSIZE ", %" PRIuSIZE "]",
            start->startOffset.offset(), (end - 1)->endOffset.offset());
    writer.writeUnsigned(start->startOffset.offset());
    writer.writeUnsigned((end - 1)->endOffset.offset());

    // Write the first entry of the run, which is not delta-encoded.
    JitSpew(JitSpew_OptimizationTracking,
            "     [%6" PRIuSIZE ", %6" PRIuSIZE "]                vector %3u, offset %4" PRIuSIZE,
            start->startOffset.offset(), start->endOffset.offset(),
            unique.indexOf(start->optimizations), writer.length());
    uint32_t prevEndOffset = start->endOffset.offset();
    writer.writeUnsigned(prevEndOffset);
    writer.writeByte(unique.indexOf(start->optimizations));

    // Delta encode the run.
    for (const NativeToTrackedOptimizations* entry = start + 1; entry != end; entry++) {
        uint32_t startOffset = entry->startOffset.offset();
        uint32_t endOffset = entry->endOffset.offset();

        uint32_t startDelta = startOffset - prevEndOffset;
        uint32_t length = endOffset - startOffset;
        uint8_t index = unique.indexOf(entry->optimizations);

        JitSpew(JitSpew_OptimizationTracking,
                "     [%6u, %6u] delta [+%5u, +%5u] vector %3u, offset %4" PRIuSIZE,
                startOffset, endOffset, startDelta, length, index, writer.length());

        WriteDelta(writer, startDelta, length, index);

        prevEndOffset = endOffset;
    }

    if (writer.oom())
        return false;

    return true;
}
805
// Append a 4-byte-aligned offsets table: padding count, entry count, then
// one reverse offset (distance back from the table to each payload) per
// entry. |*tableOffsetp| receives the table's position in the buffer.
// Returns false on writer OOM.
static bool
WriteOffsetsTable(CompactBufferWriter& writer, const Vector<uint32_t, 16>& offsets,
                  uint32_t* tableOffsetp)
{
    // 4-byte align for the uint32s.
    uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
    if (padding == sizeof(uint32_t))
        padding = 0;
    JitSpew(JitSpew_OptimizationTracking, "   Padding %u byte%s",
            padding, padding == 1 ? "" : "s");
    for (uint32_t i = 0; i < padding; i++)
        writer.writeByte(0);

    // Record the start of the table to compute reverse offsets for entries.
    uint32_t tableOffset = writer.length();

    // Write how many bytes were padded and numEntries.
    writer.writeNativeEndianUint32_t(padding);
    writer.writeNativeEndianUint32_t(offsets.length());

    // Write entry offset table.
    for (size_t i = 0; i < offsets.length(); i++) {
        JitSpew(JitSpew_OptimizationTracking, "   Entry %" PRIuSIZE " reverse offset %u",
                i, tableOffset - padding - offsets[i]);
        writer.writeNativeEndianUint32_t(tableOffset - padding - offsets[i]);
    }

    if (writer.oom())
        return false;

    *tableOffsetp = tableOffset;
    return true;
}
839
// Return the 'new' script constructor function for a group type, or
// nullptr when the type is unknown/any-object/non-group or the group has
// no TypeNewScript (checking the unboxed layout as a fallback).
static JSFunction*
MaybeConstructorFromType(TypeSet::Type ty)
{
    if (ty.isUnknown() || ty.isAnyObject() || !ty.isGroup())
        return nullptr;
    ObjectGroup* obj = ty.group();
    TypeNewScript* newScript = obj->newScript();
    if (!newScript && obj->maybeUnboxedLayout())
        newScript = obj->unboxedLayout().newScript();
    return newScript ? newScript->function() : nullptr;
}
851
// Report the filename and line number of an interpreted function, trying
// the non-lazy script first, then the lazy script; self-hosted builtins
// get a placeholder name and no line number.
static void
InterpretedFunctionFilenameAndLineNumber(JSFunction* fun, const char** filename,
                                         Maybe<unsigned>* lineno)
{
    if (fun->hasScript()) {
        *filename = fun->nonLazyScript()->maybeForwardedScriptSource()->filename();
        *lineno = Some((unsigned) fun->nonLazyScript()->lineno());
    } else if (fun->lazyScriptOrNull()) {
        *filename = fun->lazyScript()->maybeForwardedScriptSource()->filename();
        *lineno = Some((unsigned) fun->lazyScript()->lineno());
    } else {
        *filename = "(self-hosted builtin)";
        *lineno = Nothing();
    }
}
867
// Debug-spew the constructor associated with a unique tracked type: its
// display name (escaped, or "??" when anonymous) and source location.
// No-op without JS_JITSPEW.
static void
SpewConstructor(TypeSet::Type ty, JSFunction* constructor)
{
#ifdef JS_JITSPEW
    if (!constructor->isInterpreted()) {
        JitSpew(JitSpew_OptimizationTracking, "   Unique type %s has native constructor",
                TypeSet::TypeString(ty));
        return;
    }

    char buf[512];
    if (constructor->displayAtom())
        PutEscapedString(buf, 512, constructor->displayAtom(), 0);
    else
        snprintf(buf, mozilla::ArrayLength(buf), "??");

    const char* filename;
    Maybe<unsigned> lineno;
    InterpretedFunctionFilenameAndLineNumber(constructor, &filename, &lineno);

    // Line 0 is spewed when the function has no line number available.
    JitSpew(JitSpew_OptimizationTracking, "   Unique type %s has constructor %s (%s:%u)",
            TypeSet::TypeString(ty), buf, filename, lineno.isSome() ? *lineno : 0);
#endif
}
892
// Debug-spew the script location (filename:line) where a unique tracked
// type's object was allocated. No-op without JS_JITSPEW.
static void
SpewAllocationSite(TypeSet::Type ty, JSScript* script, uint32_t offset)
{
#ifdef JS_JITSPEW
    JitSpew(JitSpew_OptimizationTracking, "   Unique type %s has alloc site %s:%u",
            TypeSet::TypeString(ty), script->filename(),
            PCToLineNumber(script, script->offsetToPC(offset)));
#endif
}
902
// Serialize all optimization tracking data for a compiled script into
// |writer|. Three payload sections are emitted, in order:
//
//   1. Region payloads: runs of (nativeStartOffset, nativeEndOffset,
//      uniqueOptimizationsIndex) entries covering [start, end).
//   2. Type info payloads: one entry per unique optimization, encoding its
//      tracked type information.
//   3. Attempts payloads: one entry per unique optimization, encoding its
//      strategy/outcome attempts.
//
// Each section is followed by a 4-byte-aligned offsets table; the table
// positions are returned through |regionTableOffsetp|, |typesTableOffsetp|
// and |optimizationTableOffsetp|, and the region count through |numRegions|.
// Every distinct TypeSet::Type seen while writing type info payloads is
// appended to |allTypes|, with 'new' script constructor or allocation-site
// addenda resolved here (at link time) rather than during profiling.
//
// Returns false on OOM.
bool
jit::WriteIonTrackedOptimizationsTable(JSContext* cx, CompactBufferWriter& writer,
                                       const NativeToTrackedOptimizations* start,
                                       const NativeToTrackedOptimizations* end,
                                       const UniqueTrackedOptimizations& unique,
                                       uint32_t* numRegions,
                                       uint32_t* regionTableOffsetp,
                                       uint32_t* typesTableOffsetp,
                                       uint32_t* optimizationTableOffsetp,
                                       IonTrackedTypeVector* allTypes)
{
    // Region entries index into the sorted unique-optimizations vector, so
    // it must be frozen (sorted) before serialization starts.
    MOZ_ASSERT(unique.sorted());

#ifdef JS_JITSPEW
    // Spew training data, which may be fed into a script to determine a good
    // encoding strategy.
    if (JitSpewEnabled(JitSpew_OptimizationTracking)) {
        JitSpewStart(JitSpew_OptimizationTracking, "=> Training data: ");
        for (const NativeToTrackedOptimizations* entry = start; entry != end; entry++) {
            JitSpewCont(JitSpew_OptimizationTracking, "%" PRIuSIZE ",%" PRIuSIZE ",%u ",
                        entry->startOffset.offset(), entry->endOffset.offset(),
                        unique.indexOf(entry->optimizations));
        }
        JitSpewFin(JitSpew_OptimizationTracking);
    }
#endif

    // |offsets| collects the writer position of each payload; it is reused
    // for all three offsets tables below.
    Vector<uint32_t, 16> offsets(cx);
    const NativeToTrackedOptimizations* entry = start;

    // Write out region payloads, partitioned into runs.
    // NOTE(review): this spews on the Profiling channel while the rest of
    // this function uses OptimizationTracking — confirm that is intended.
    JitSpew(JitSpew_Profiling, "=> Writing regions");
    while (entry != end) {
        uint32_t runLength = IonTrackedOptimizationsRegion::ExpectedRunLength(entry, end);
        JitSpew(JitSpew_OptimizationTracking,
                "  Run at entry %" PRIuSIZE ", length %" PRIu32 ", offset %" PRIuSIZE,
                size_t(entry - start), runLength, writer.length());

        if (!offsets.append(writer.length()))
            return false;

        if (!IonTrackedOptimizationsRegion::WriteRun(writer, entry, entry + runLength, unique))
            return false;

        entry += runLength;
    }

    // Write out the table indexing into the payloads. 4-byte align for the uint32s.
    if (!WriteOffsetsTable(writer, offsets, regionTableOffsetp))
        return false;

    *numRegions = offsets.length();

    // Clear offsets so that it may be reused below for the unique
    // optimizations table.
    offsets.clear();

    const UniqueTrackedOptimizations::SortedVector& vec = unique.sortedVector();
    JitSpew(JitSpew_OptimizationTracking, "=> Writing unique optimizations table with %" PRIuSIZE " entr%s",
            vec.length(), vec.length() == 1 ? "y" : "ies");

    // Write out type info payloads. |uniqueTypes| deduplicates the
    // TypeSet::Types referenced by the compact encodings.
    UniqueTrackedTypes uniqueTypes(cx);
    if (!uniqueTypes.init())
        return false;

    for (const UniqueTrackedOptimizations::SortEntry* p = vec.begin(); p != vec.end(); p++) {
        const TempOptimizationTypeInfoVector* v = p->types;
        JitSpew(JitSpew_OptimizationTracking,
                "   Type info entry %" PRIuSIZE " of length %" PRIuSIZE ", offset %" PRIuSIZE,
                size_t(p - vec.begin()), v->length(), writer.length());
        SpewTempOptimizationTypeInfoVector(v, "  ");

        if (!offsets.append(writer.length()))
            return false;

        for (const OptimizationTypeInfo* t = v->begin(); t != v->end(); t++) {
            if (!t->writeCompact(cx, writer, uniqueTypes))
                return false;
        }
    }

    // Enumerate the unique types, and pull out any 'new' script constructor
    // functions and allocation site information. We do this during linking
    // instead of during profiling to avoid touching compartment tables during
    // profiling. Additionally, TypeNewScript is subject to GC in the
    // meantime.
    TypeSet::TypeList uniqueTypeList;
    if (!uniqueTypes.enumerate(&uniqueTypeList))
        return false;
    for (uint32_t i = 0; i < uniqueTypeList.length(); i++) {
        TypeSet::Type ty = uniqueTypeList[i];
        if (JSFunction* constructor = MaybeConstructorFromType(ty)) {
            if (!allTypes->append(IonTrackedTypeWithAddendum(ty, constructor)))
                return false;
            SpewConstructor(ty, constructor);
        } else {
            JSScript* script;
            uint32_t offset;
            // Only group types can have a recorded allocation site; types
            // without one get a plain (addendum-free) entry.
            if (!ty.isUnknown() && !ty.isAnyObject() && ty.isGroup() &&
                ObjectGroup::findAllocationSite(cx, ty.group(), &script, &offset))
            {
                if (!allTypes->append(IonTrackedTypeWithAddendum(ty, script, offset)))
                    return false;
                SpewAllocationSite(ty, script, offset);
            } else {
                if (!allTypes->append(IonTrackedTypeWithAddendum(ty)))
                    return false;
            }
        }
    }

    if (!WriteOffsetsTable(writer, offsets, typesTableOffsetp))
        return false;
    offsets.clear();

    // Write out attempts payloads.
    for (const UniqueTrackedOptimizations::SortEntry* p = vec.begin(); p != vec.end(); p++) {
        const TempOptimizationAttemptsVector* v = p->attempts;
        JitSpew(JitSpew_OptimizationTracking,
                "   Attempts entry %" PRIuSIZE " of length %" PRIuSIZE ", offset %" PRIuSIZE,
                size_t(p - vec.begin()), v->length(), writer.length());
        SpewTempOptimizationAttemptsVector(v, "  ");

        if (!offsets.append(writer.length()))
            return false;

        for (const OptimizationAttempt* a = v->begin(); a != v->end(); a++)
            a->writeCompact(writer);
    }

    return WriteOffsetsTable(writer, offsets, optimizationTableOffsetp);
}
1036
1037
1038 BytecodeSite*
maybeTrackedOptimizationSite(jsbytecode * pc)1039 IonBuilder::maybeTrackedOptimizationSite(jsbytecode* pc)
1040 {
1041 // BytecodeSites that track optimizations need to be 1-1 with the pc
1042 // when optimization tracking is enabled, so that all MIR generated by
1043 // a single pc are tracked at one place, even across basic blocks.
1044 //
1045 // Alternatively, we could make all BytecodeSites 1-1 with the pc, but
1046 // there is no real need as optimization tracking is a toggled
1047 // feature.
1048 //
1049 // Since sites that track optimizations should be sparse, just do a
1050 // reverse linear search, as we're most likely advancing in pc.
1051 MOZ_ASSERT(isOptimizationTrackingEnabled());
1052 for (size_t i = trackedOptimizationSites_.length(); i != 0; i--) {
1053 BytecodeSite* site = trackedOptimizationSites_[i - 1];
1054 if (site->pc() == pc) {
1055 MOZ_ASSERT(site->tree() == info().inlineScriptTree());
1056 return site;
1057 }
1058 }
1059 return nullptr;
1060 }
1061
1062 void
startTrackingOptimizations()1063 IonBuilder::startTrackingOptimizations()
1064 {
1065 if (isOptimizationTrackingEnabled()) {
1066 BytecodeSite* site = maybeTrackedOptimizationSite(current->trackedSite()->pc());
1067
1068 if (!site) {
1069 site = current->trackedSite();
1070 site->setOptimizations(new(alloc()) TrackedOptimizations(alloc()));
1071 // OOMs are handled as if optimization tracking were turned off.
1072 if (!trackedOptimizationSites_.append(site))
1073 site = nullptr;
1074 } else if (site->hasOptimizations()) {
1075 // The same bytecode may be visited multiple times (see
1076 // restartLoop). Only the last time matters, so clear any previous
1077 // tracked optimizations.
1078 site->optimizations()->clear();
1079 }
1080
1081 // The case of !site->hasOptimizations() means we had an OOM when
1082 // previously attempting to track optimizations. Leave
1083 // site->optimizations_ nullptr to leave optimization tracking off.
1084
1085 if (site)
1086 current->updateTrackedSite(site);
1087 }
1088 }
1089
1090 void
trackTypeInfoUnchecked(TrackedTypeSite kind,MIRType mirType,TemporaryTypeSet * typeSet)1091 IonBuilder::trackTypeInfoUnchecked(TrackedTypeSite kind, MIRType mirType,
1092 TemporaryTypeSet* typeSet)
1093 {
1094 BytecodeSite* site = current->trackedSite();
1095 // OOMs are handled as if optimization tracking were turned off.
1096 OptimizationTypeInfo typeInfo(alloc(), kind, mirType);
1097 if (!typeInfo.trackTypeSet(typeSet)) {
1098 site->setOptimizations(nullptr);
1099 return;
1100 }
1101 if (!site->optimizations()->trackTypeInfo(mozilla::Move(typeInfo)))
1102 site->setOptimizations(nullptr);
1103 }
1104
1105 void
trackTypeInfoUnchecked(TrackedTypeSite kind,JSObject * obj)1106 IonBuilder::trackTypeInfoUnchecked(TrackedTypeSite kind, JSObject* obj)
1107 {
1108 BytecodeSite* site = current->trackedSite();
1109 // OOMs are handled as if optimization tracking were turned off.
1110 OptimizationTypeInfo typeInfo(alloc(), kind, MIRType::Object);
1111 if (!typeInfo.trackType(TypeSet::ObjectType(obj)))
1112 return;
1113 if (!site->optimizations()->trackTypeInfo(mozilla::Move(typeInfo)))
1114 site->setOptimizations(nullptr);
1115 }
1116
1117 void
trackTypeInfoUnchecked(CallInfo & callInfo)1118 IonBuilder::trackTypeInfoUnchecked(CallInfo& callInfo)
1119 {
1120 MDefinition* thisArg = callInfo.thisArg();
1121 trackTypeInfoUnchecked(TrackedTypeSite::Call_This, thisArg->type(), thisArg->resultTypeSet());
1122
1123 for (uint32_t i = 0; i < callInfo.argc(); i++) {
1124 MDefinition* arg = callInfo.getArg(i);
1125 trackTypeInfoUnchecked(TrackedTypeSite::Call_Arg, arg->type(), arg->resultTypeSet());
1126 }
1127
1128 TemporaryTypeSet* returnTypes = getInlineReturnTypeSet();
1129 trackTypeInfoUnchecked(TrackedTypeSite::Call_Return, returnTypes->getKnownMIRType(),
1130 returnTypes);
1131 }
1132
1133 void
trackOptimizationAttemptUnchecked(TrackedStrategy strategy)1134 IonBuilder::trackOptimizationAttemptUnchecked(TrackedStrategy strategy)
1135 {
1136 BytecodeSite* site = current->trackedSite();
1137 // OOMs are handled as if optimization tracking were turned off.
1138 if (!site->optimizations()->trackAttempt(strategy))
1139 site->setOptimizations(nullptr);
1140 }
1141
1142 void
amendOptimizationAttemptUnchecked(uint32_t index)1143 IonBuilder::amendOptimizationAttemptUnchecked(uint32_t index)
1144 {
1145 const BytecodeSite* site = current->trackedSite();
1146 site->optimizations()->amendAttempt(index);
1147 }
1148
1149 void
trackOptimizationOutcomeUnchecked(TrackedOutcome outcome)1150 IonBuilder::trackOptimizationOutcomeUnchecked(TrackedOutcome outcome)
1151 {
1152 const BytecodeSite* site = current->trackedSite();
1153 site->optimizations()->trackOutcome(outcome);
1154 }
1155
1156 void
trackOptimizationSuccessUnchecked()1157 IonBuilder::trackOptimizationSuccessUnchecked()
1158 {
1159 const BytecodeSite* site = current->trackedSite();
1160 site->optimizations()->trackSuccess();
1161 }
1162
1163 void
trackInlineSuccessUnchecked(InliningStatus status)1164 IonBuilder::trackInlineSuccessUnchecked(InliningStatus status)
1165 {
1166 if (status == InliningStatus_Inlined)
1167 trackOptimizationOutcome(TrackedOutcome::Inlined);
1168 }
1169
1170 static JSFunction*
FunctionFromTrackedType(const IonTrackedTypeWithAddendum & tracked)1171 FunctionFromTrackedType(const IonTrackedTypeWithAddendum& tracked)
1172 {
1173 if (tracked.hasConstructor())
1174 return tracked.constructor;
1175
1176 TypeSet::Type ty = tracked.type;
1177
1178 if (ty.isSingleton()) {
1179 JSObject* obj = ty.singleton();
1180 return obj->is<JSFunction>() ? &obj->as<JSFunction>() : nullptr;
1181 }
1182
1183 return ty.group()->maybeInterpretedFunction();
1184 }
1185
// Translate one tracked type entry into a (keyword, name, location, line)
// tuple and forward it to the wrapped op's readType callback.
void
IonTrackedOptimizationsTypeInfo::ForEachOpAdapter::readType(const IonTrackedTypeWithAddendum& tracked)
{
    TypeSet::Type ty = tracked.type;

    // Primitives (and the unknown/anyobject sentinels) have a stable
    // non-object string representation and no location.
    if (ty.isPrimitive() || ty.isUnknown() || ty.isAnyObject()) {
        op_.readType("primitive", TypeSet::NonObjectTypeString(ty), nullptr, Nothing());
        return;
    }

    // Scratch buffer for the names built below; it is passed to op_ by
    // pointer, so it only needs to outlive the readType call.
    char buf[512];
    const uint32_t bufsize = mozilla::ArrayLength(buf);

    if (JSFunction* fun = FunctionFromTrackedType(tracked)) {
        // The displayAtom is useful for identifying both native and
        // interpreted functions.
        char* name = nullptr;
        if (fun->displayAtom()) {
            PutEscapedString(buf, bufsize, fun->displayAtom(), 0);
            name = buf;
        }

        if (fun->isNative()) {
            //
            // Try printing out the displayAtom of the native function and the
            // absolute address of the native function pointer.
            //
            // Note that this address is not usable without knowing the
            // starting address at which our shared library is loaded. Shared
            // library information is exposed by the profiler. If this address
            // needs to be symbolicated manually (e.g., when it is gotten via
            // debug spewing of all optimization information), it needs to be
            // converted to an offset from the beginning of the shared library
            // for use with utilities like `addr2line` on Linux and `atos` on
            // OS X. Converting to an offset may be done via dladdr():
            //
            //   void* addr = JS_FUNC_TO_DATA_PTR(void*, fun->native());
            //   uintptr_t offset;
            //   Dl_info info;
            //   if (dladdr(addr, &info) != 0)
            //       offset = uintptr_t(addr) - uintptr_t(info.dli_fbase);
            //
            char locationBuf[20];
            if (!name) {
                // No display name: fall back to the raw native pointer.
                uintptr_t addr = JS_FUNC_TO_DATA_PTR(uintptr_t, fun->native());
                snprintf(locationBuf, mozilla::ArrayLength(locationBuf), "%" PRIxPTR, addr);
            }
            op_.readType("native", name, name ? nullptr : locationBuf, Nothing());
            return;
        }

        // Interpreted function: report its source location.
        const char* filename;
        Maybe<unsigned> lineno;
        InterpretedFunctionFilenameAndLineNumber(fun, &filename, &lineno);
        op_.readType(tracked.constructor ? "constructor" : "function",
                     name, filename, lineno);
        return;
    }

    // Non-function object types are reported by their class name.
    const char* className = ty.objectKey()->clasp()->name;
    snprintf(buf, bufsize, "[object %s]", className);

    if (tracked.hasAllocationSite()) {
        // A recorded allocation site tells where objects of this group were
        // created.
        JSScript* script = tracked.script;
        op_.readType("alloc site", buf,
                     script->maybeForwardedScriptSource()->filename(),
                     Some(PCToLineNumber(script, script->offsetToPC(tracked.offset))));
        return;
    }

    if (ty.isGroup()) {
        op_.readType("prototype", buf, nullptr, Nothing());
        return;
    }

    op_.readType("singleton", buf, nullptr, Nothing());
}
1263
1264 void
operator ()(JS::TrackedTypeSite site,MIRType mirType)1265 IonTrackedOptimizationsTypeInfo::ForEachOpAdapter::operator()(JS::TrackedTypeSite site,
1266 MIRType mirType)
1267 {
1268 op_(site, StringFromMIRType(mirType));
1269 }
1270
1271 typedef JS::ForEachProfiledFrameOp::FrameHandle FrameHandle;
1272
1273 void
updateHasTrackedOptimizations()1274 FrameHandle::updateHasTrackedOptimizations()
1275 {
1276 // All inlined frames will have the same optimization information by
1277 // virtue of sharing the JitcodeGlobalEntry, but such information is
1278 // only interpretable on the youngest frame.
1279 if (depth() != 0)
1280 return;
1281 if (!entry_.hasTrackedOptimizations())
1282 return;
1283
1284 uint32_t entryOffset;
1285 optsIndex_ = entry_.trackedOptimizationIndexAtAddr(rt_, addr_, &entryOffset);
1286 if (optsIndex_.isSome())
1287 canonicalAddr_ = (void*)(((uint8_t*) entry_.nativeStartAddr()) + entryOffset);
1288 }
1289
// Invoke |op| for each tracked optimization attempt at this frame's
// address, and report the frame's script and pc via |scriptOut|/|pcOut|.
// Only valid after updateHasTrackedOptimizations() has set optsIndex_.
JS_PUBLIC_API(void)
FrameHandle::forEachOptimizationAttempt(ForEachTrackedOptimizationAttemptOp& op,
                                        JSScript** scriptOut, jsbytecode** pcOut) const
{
    MOZ_ASSERT(optsIndex_.isSome());
    entry_.forEachOptimizationAttempt(rt_, *optsIndex_, op);
    entry_.youngestFrameLocationAtAddr(rt_, addr_, scriptOut, pcOut);
}
1298
// Invoke |op| for each tracked optimization type-info entry at this
// frame's address, adapting the public op to the internal iteration
// interface. Only valid after updateHasTrackedOptimizations() has set
// optsIndex_.
JS_PUBLIC_API(void)
FrameHandle::forEachOptimizationTypeInfo(ForEachTrackedOptimizationTypeInfoOp& op) const
{
    MOZ_ASSERT(optsIndex_.isSome());
    IonTrackedOptimizationsTypeInfo::ForEachOpAdapter adapter(op);
    entry_.forEachOptimizationTypeInfo(rt_, *optsIndex_, adapter);
}
1306