/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/JitcodeMap.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Sprintf.h"

#include "gc/Marking.h"
#include "gc/Statistics.h"
#include "jit/BaselineJIT.h"
#include "jit/JitRealm.h"
#include "jit/JitSpewer.h"
#include "js/Vector.h"
#include "vm/GeckoProfiler.h"

#include "vm/GeckoProfiler-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/TypeInference-inl.h"

using mozilla::Maybe;

namespace js {
namespace jit {

static inline JitcodeRegionEntry RegionAtAddr(
    const JitcodeGlobalEntry::IonEntry& entry, void* ptr, uint32_t* ptrOffset) {
  MOZ_ASSERT(entry.containsPointer(ptr));
  *ptrOffset = reinterpret_cast<uint8_t*>(ptr) -
               reinterpret_cast<uint8_t*>(entry.nativeStartAddr());

  uint32_t regionIdx = entry.regionTable()->findRegionEntry(*ptrOffset);
  MOZ_ASSERT(regionIdx < entry.regionTable()->numRegions());

  return entry.regionTable()->regionEntry(regionIdx);
}

void* JitcodeGlobalEntry::IonEntry::canonicalNativeAddrFor(void* ptr) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  return (void*)(((uint8_t*)nativeStartAddr()) + region.nativeOffset());
}

bool JitcodeGlobalEntry::IonEntry::callStackAtAddr(
    void* ptr, BytecodeLocationVector& results, uint32_t* depth) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  *depth = region.scriptDepth();

  JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  bool first = true;
  while (locationIter.hasMore()) {
    uint32_t scriptIdx, pcOffset;
    locationIter.readNext(&scriptIdx, &pcOffset);
    // For the first entry pushed (innermost frame), the pcOffset is obtained
    // from the delta-run encodings.
    if (first) {
      pcOffset = region.findPcOffset(ptrOffset, pcOffset);
      first = false;
    }
    JSScript* script = getScript(scriptIdx);
    jsbytecode* pc = script->offsetToPC(pcOffset);
    if (!results.append(BytecodeLocation(script, pc))) {
      return false;
    }
  }

  return true;
}

uint32_t JitcodeGlobalEntry::IonEntry::callStackAtAddr(
    void* ptr, const char** results, uint32_t maxResults) const {
  MOZ_ASSERT(maxResults >= 1);

  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);

  JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  uint32_t count = 0;
  while (locationIter.hasMore()) {
    uint32_t scriptIdx, pcOffset;

    locationIter.readNext(&scriptIdx, &pcOffset);
    MOZ_ASSERT(getStr(scriptIdx));

    results[count++] = getStr(scriptIdx);
    if (count >= maxResults) {
      break;
    }
  }

  return count;
}

uint64_t JitcodeGlobalEntry::IonEntry::lookupRealmID(void* ptr) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  uint32_t scriptIdx, pcOffset;
  locationIter.readNext(&scriptIdx, &pcOffset);

  JSScript* script = getScript(scriptIdx);
  return script->realm()->creationOptions().profilerRealmID();
}

void JitcodeGlobalEntry::IonEntry::destroy() {
  // The region table is stored at the tail of the compacted data,
  // which means the start of the region table is a pointer to
  // the _middle_ of the memory space allocated for it.
  //
  // When freeing it, obtain the payload start pointer first.
  if (regionTable_) {
    js_free((void*)(regionTable_->payloadStart()));
  }
  regionTable_ = nullptr;

  // Free the scriptList strs.
  for (uint32_t i = 0; i < scriptList_->size; i++) {
    js_free(scriptList_->pairs[i].str);
    scriptList_->pairs[i].str = nullptr;
  }

  // Free the script list
  js_free(scriptList_);
  scriptList_ = nullptr;
}
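
// Illustrative sketch (not normative) of the compacted data freed above. Per
// the comments here and in WriteIonTable below, the allocation looks roughly
// like:
//
//   payloadStart()                         regionTable_
//   |                                      |
//   v                                      v
//   [run 0][run 1]...[run N-1][pad to 4B][numRegions][backOffsets 0..N-1]
//
// so the free must start from payloadStart(), not from regionTable_ itself.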

void* JitcodeGlobalEntry::BaselineEntry::canonicalNativeAddrFor(
    void* ptr) const {
  // TODO: We can't yet normalize Baseline addresses until we unify
  // BaselineScript's PCMappingEntries with JitcodeGlobalTable.
  return ptr;
}

bool JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(
    void* ptr, BytecodeLocationVector& results, uint32_t* depth) const {
  MOZ_ASSERT(containsPointer(ptr));
  MOZ_ASSERT(script_->hasBaselineScript());

  uint8_t* addr = reinterpret_cast<uint8_t*>(ptr);
  jsbytecode* pc =
      script_->baselineScript()->approximatePcForNativeAddress(script_, addr);
  if (!results.append(BytecodeLocation(script_, pc))) {
    return false;
  }

  *depth = 1;

  return true;
}

uint32_t JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(
    void* ptr, const char** results, uint32_t maxResults) const {
  MOZ_ASSERT(containsPointer(ptr));
  MOZ_ASSERT(maxResults >= 1);

  results[0] = str();
  return 1;
}

uint64_t JitcodeGlobalEntry::BaselineEntry::lookupRealmID() const {
  return script_->realm()->creationOptions().profilerRealmID();
}

void JitcodeGlobalEntry::BaselineEntry::destroy() {
  if (!str_) {
    return;
  }
  js_free((void*)str_);
  str_ = nullptr;
}

void* JitcodeGlobalEntry::BaselineInterpreterEntry::canonicalNativeAddrFor(
    void* ptr) const {
  return ptr;
}

bool JitcodeGlobalEntry::BaselineInterpreterEntry::callStackAtAddr(
    void* ptr, BytecodeLocationVector& results, uint32_t* depth) const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}

uint32_t JitcodeGlobalEntry::BaselineInterpreterEntry::callStackAtAddr(
    void* ptr, const char** results, uint32_t maxResults) const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}

uint64_t JitcodeGlobalEntry::BaselineInterpreterEntry::lookupRealmID() const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}

static int ComparePointers(const void* a, const void* b) {
  const uint8_t* a_ptr = reinterpret_cast<const uint8_t*>(a);
  const uint8_t* b_ptr = reinterpret_cast<const uint8_t*>(b);
  if (a_ptr < b_ptr) {
    return -1;
  }
  if (a_ptr > b_ptr) {
    return 1;
  }
  return 0;
}
/* static */
int JitcodeGlobalEntry::compare(const JitcodeGlobalEntry& ent1,
                                const JitcodeGlobalEntry& ent2) {
  // At most one of the two entries may be a query.
  MOZ_ASSERT(!(ent1.isQuery() && ent2.isQuery()));

  // Ensure no overlaps for non-query lookups.
  MOZ_ASSERT_IF(!ent1.isQuery() && !ent2.isQuery(), !ent1.overlapsWith(ent2));

  // For two non-query entries, just compare the start addresses.
  if (!ent1.isQuery() && !ent2.isQuery()) {
    return ComparePointers(ent1.nativeStartAddr(), ent2.nativeStartAddr());
  }

  void* ptr = ent1.isQuery() ? ent1.nativeStartAddr() : ent2.nativeStartAddr();
  const JitcodeGlobalEntry& ent = ent1.isQuery() ? ent2 : ent1;
  int flip = ent1.isQuery() ? 1 : -1;

  if (ent.startsBelowPointer(ptr)) {
    if (ent.endsAbovePointer(ptr)) {
      return 0;
    }

    // query ptr > entry
    return flip * 1;
  }

  // query ptr < entry
  return flip * -1;
}
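
// Worked example (illustrative): for an entry E covering the native range
// [0x1000, 0x1040) and a query Q made from ptr = 0x1010, E starts below the
// pointer and ends above it, so compare() returns 0: a skiplist search treats
// a query as "equal to" the entry whose code range contains it. A ptr at or
// past the end of E compares greater than E, and a ptr below its start
// compares less.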

JitcodeGlobalTable::Enum::Enum(JitcodeGlobalTable& table, JSRuntime* rt)
    : Range(table), rt_(rt), next_(cur_ ? cur_->tower_->next(0) : nullptr) {
  for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
    prevTower_[level] = nullptr;
  }
}

void JitcodeGlobalTable::Enum::popFront() {
  MOZ_ASSERT(!empty());

  // Did not remove current entry; advance prevTower_.
  if (cur_ != table_.freeEntries_) {
    for (int level = cur_->tower_->height() - 1; level >= 0; level--) {
      JitcodeGlobalEntry* prevTowerEntry = prevTower_[level];

      if (prevTowerEntry) {
        if (prevTowerEntry->tower_->next(level) == cur_) {
          prevTower_[level] = cur_;
        }
      } else {
        prevTower_[level] = table_.startTower_[level];
      }
    }
  }

  cur_ = next_;
  if (!empty()) {
    next_ = cur_->tower_->next(0);
  }
}

void JitcodeGlobalTable::Enum::removeFront() {
  MOZ_ASSERT(!empty());
  table_.releaseEntry(*cur_, prevTower_, rt_);
}

const JitcodeGlobalEntry* JitcodeGlobalTable::lookupForSampler(
    void* ptr, JSRuntime* rt, uint64_t samplePosInBuffer) {
  JitcodeGlobalEntry* entry = lookupInternal(ptr);
  if (!entry) {
    return nullptr;
  }

  entry->setSamplePositionInBuffer(samplePosInBuffer);

  // JitcodeGlobalEntries are marked at the end of the mark phase. A read
  // barrier is not needed. Any JS frames sampled during the sweep phase of
  // the GC must be on stack, and on-stack frames must already be marked at
  // the beginning of the sweep phase. It's not possible to assert this here
  // as we may be off main thread when called from the gecko profiler.

  return entry;
}

JitcodeGlobalEntry* JitcodeGlobalTable::lookupInternal(void* ptr) {
  JitcodeGlobalEntry query = JitcodeGlobalEntry::MakeQuery(ptr);
  JitcodeGlobalEntry* searchTower[JitcodeSkiplistTower::MAX_HEIGHT];
  searchInternal(query, searchTower);

  if (searchTower[0] == nullptr) {
    // Check startTower
    if (startTower_[0] == nullptr) {
      return nullptr;
    }

    int cmp = startTower_[0]->compareTo(query);
    MOZ_ASSERT(cmp >= 0);
    return (cmp == 0) ? startTower_[0] : nullptr;
  }

  JitcodeGlobalEntry* bottom = searchTower[0];
  MOZ_ASSERT(bottom->compareTo(query) < 0);

  JitcodeGlobalEntry* bottomNext = bottom->tower_->next(0);
  if (bottomNext == nullptr) {
    return nullptr;
  }

  int cmp = bottomNext->compareTo(query);
  MOZ_ASSERT(cmp >= 0);
  return (cmp == 0) ? bottomNext : nullptr;
}
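
// Note on the invariant above: searchInternal leaves searchTower[0] pointing
// at the last entry that sorts strictly below the query (or null if there is
// none), so the only candidate that can contain the query pointer is its
// immediate successor at level 0; the query comparison returns 0 exactly for
// the containing entry.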

bool JitcodeGlobalTable::addEntry(const JitcodeGlobalEntry& entry) {
  MOZ_ASSERT(entry.isIon() || entry.isBaseline() ||
             entry.isBaselineInterpreter() || entry.isDummy());

  JitcodeGlobalEntry* searchTower[JitcodeSkiplistTower::MAX_HEIGHT];
  searchInternal(entry, searchTower);

  // Allocate a new entry and tower.
  JitcodeSkiplistTower* newTower = allocateTower(generateTowerHeight());
  if (!newTower) {
    return false;
  }

  JitcodeGlobalEntry* newEntry = allocateEntry();
  if (!newEntry) {
    return false;
  }

  *newEntry = entry;
  newEntry->tower_ = newTower;

  // Suppress profiler sampling while skiplist is being mutated.
  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

  // Link up entry with forward entries taken from tower.
  for (int level = newTower->height() - 1; level >= 0; level--) {
    JitcodeGlobalEntry* searchTowerEntry = searchTower[level];
    if (searchTowerEntry) {
      MOZ_ASSERT(searchTowerEntry->compareTo(*newEntry) < 0);
      JitcodeGlobalEntry* searchTowerNextEntry =
          searchTowerEntry->tower_->next(level);

      MOZ_ASSERT_IF(searchTowerNextEntry,
                    searchTowerNextEntry->compareTo(*newEntry) > 0);

      newTower->setNext(level, searchTowerNextEntry);
      searchTowerEntry->tower_->setNext(level, newEntry);
    } else {
      newTower->setNext(level, startTower_[level]);
      startTower_[level] = newEntry;
    }
  }
  skiplistSize_++;
  // verifySkiplist(); - disabled for release.

  return true;
}

void JitcodeGlobalTable::removeEntry(JitcodeGlobalEntry& entry,
                                     JitcodeGlobalEntry** prevTower) {
  MOZ_ASSERT(!TlsContext.get()->isProfilerSamplingEnabled());

  // Unlink query entry.
  for (int level = entry.tower_->height() - 1; level >= 0; level--) {
    JitcodeGlobalEntry* prevTowerEntry = prevTower[level];
    if (prevTowerEntry) {
      MOZ_ASSERT(prevTowerEntry->tower_->next(level) == &entry);
      prevTowerEntry->tower_->setNext(level, entry.tower_->next(level));
    } else {
      startTower_[level] = entry.tower_->next(level);
    }
  }
  skiplistSize_--;
  // verifySkiplist(); - disabled for release.

  // Entry has been unlinked.
  entry.destroy();
  entry.tower_->addToFreeList(&(freeTowers_[entry.tower_->height() - 1]));
  entry.tower_ = nullptr;
  entry = JitcodeGlobalEntry();
  entry.addToFreeList(&freeEntries_);
}

void JitcodeGlobalTable::releaseEntry(JitcodeGlobalEntry& entry,
                                      JitcodeGlobalEntry** prevTower,
                                      JSRuntime* rt) {
#ifdef DEBUG
  Maybe<uint64_t> rangeStart = rt->profilerSampleBufferRangeStart();
  MOZ_ASSERT_IF(rangeStart, !entry.isSampled(*rangeStart));
#endif
  removeEntry(entry, prevTower);
}

void JitcodeGlobalTable::searchInternal(const JitcodeGlobalEntry& query,
                                        JitcodeGlobalEntry** towerOut) {
  JitcodeGlobalEntry* cur = nullptr;
  for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
    JitcodeGlobalEntry* entry = searchAtHeight(level, cur, query);
    MOZ_ASSERT_IF(entry == nullptr, cur == nullptr);
    towerOut[level] = entry;
    cur = entry;
  }

  // Validate the resulting tower.
#ifdef DEBUG
  for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
    if (towerOut[level] == nullptr) {
      // If we got NULL for a given level, then we should have gotten NULL
      // for the level above as well.
      MOZ_ASSERT_IF(unsigned(level) < (JitcodeSkiplistTower::MAX_HEIGHT - 1),
                    towerOut[level + 1] == nullptr);
      continue;
    }

    JitcodeGlobalEntry* cur = towerOut[level];

    // Non-null result at a given level must sort < query.
    MOZ_ASSERT(cur->compareTo(query) < 0);

    // The entry must have a tower height that accommodates level.
    if (!cur->tower_->next(level)) {
      continue;
    }

    JitcodeGlobalEntry* next = cur->tower_->next(level);

    // Next entry must have a tower height that accommodates level.
    MOZ_ASSERT(unsigned(level) < next->tower_->height());

    // Next entry must sort >= query.
    MOZ_ASSERT(next->compareTo(query) >= 0);
  }
#endif  // DEBUG
}

JitcodeGlobalEntry* JitcodeGlobalTable::searchAtHeight(
    unsigned level, JitcodeGlobalEntry* start,
    const JitcodeGlobalEntry& query) {
  JitcodeGlobalEntry* cur = start;

  // If starting with nullptr, use the start tower.
  if (start == nullptr) {
    cur = startTower_[level];
    if (cur == nullptr || cur->compareTo(query) >= 0) {
      return nullptr;
    }
  }

  // Keep skipping at |level| until we reach an entry < query whose
  // successor is an entry >= query.
  for (;;) {
    JitcodeGlobalEntry* next = cur->tower_->next(level);
    if (next == nullptr || next->compareTo(query) >= 0) {
      return cur;
    }

    cur = next;
  }
}

unsigned JitcodeGlobalTable::generateTowerHeight() {
  // Implementation taken from Hars L. and Petruska G.,
  // "Pseudorandom Recursions: Small and Fast Pseudorandom Number Generators
  //  for Embedded Applications."
  rand_ ^= mozilla::RotateLeft(rand_, 5) ^ mozilla::RotateLeft(rand_, 24);
  rand_ += 0x37798849;

  // Return 1 + number of low-bit zeros in the new random value, capped at
  // MAX_HEIGHT.
  unsigned result = 0;
  for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT - 1; i++) {
    if ((rand_ >> i) & 0x1) {
      break;
    }
    result++;
  }
  return result + 1;
}
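
// Illustrative note on the distribution (assuming the low bits of rand_ are
// uniform): the height is 1 with probability 1/2 (lowest bit set), 2 with
// probability 1/4, 3 with probability 1/8, and so on, capped at MAX_HEIGHT.
// For example, rand_ ending in binary ...0100 yields height 3. This geometric
// distribution is what gives the skiplist its expected O(log n) search cost.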

JitcodeSkiplistTower* JitcodeGlobalTable::allocateTower(unsigned height) {
  MOZ_ASSERT(height >= 1);
  JitcodeSkiplistTower* tower =
      JitcodeSkiplistTower::PopFromFreeList(&freeTowers_[height - 1]);
  if (tower) {
    return tower;
  }

  size_t size = JitcodeSkiplistTower::CalculateSize(height);
  tower = (JitcodeSkiplistTower*)alloc_.alloc(size);
  if (!tower) {
    return nullptr;
  }

  return new (tower) JitcodeSkiplistTower(height);
}

JitcodeGlobalEntry* JitcodeGlobalTable::allocateEntry() {
  JitcodeGlobalEntry* entry =
      JitcodeGlobalEntry::PopFromFreeList(&freeEntries_);
  if (entry) {
    return entry;
  }

  return alloc_.new_<JitcodeGlobalEntry>();
}

#ifdef DEBUG
void JitcodeGlobalTable::verifySkiplist() {
  JitcodeGlobalEntry* curTower[JitcodeSkiplistTower::MAX_HEIGHT];
  for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++) {
    curTower[i] = startTower_[i];
  }

  uint32_t count = 0;
  JitcodeGlobalEntry* curEntry = startTower_[0];
  while (curEntry) {
    count++;
    unsigned curHeight = curEntry->tower_->height();
    MOZ_ASSERT(curHeight >= 1);

    for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++) {
      if (i < curHeight) {
        MOZ_ASSERT(curTower[i] == curEntry);
        JitcodeGlobalEntry* nextEntry = curEntry->tower_->next(i);
        MOZ_ASSERT_IF(nextEntry, curEntry->compareTo(*nextEntry) < 0);
        curTower[i] = nextEntry;
      } else {
        MOZ_ASSERT_IF(curTower[i], curTower[i]->compareTo(*curEntry) > 0);
      }
    }
    curEntry = curEntry->tower_->next(0);
  }

  MOZ_ASSERT(count == skiplistSize_);
}
#endif  // DEBUG

void JitcodeGlobalTable::setAllEntriesAsExpired() {
  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
  for (Range r(*this); !r.empty(); r.popFront()) {
    auto entry = r.front();
    entry->setAsExpired();
  }
}

struct Unconditionally {
  template <typename T>
  static bool ShouldTrace(JSRuntime* rt, T* thingp) {
    return true;
  }
};

struct IfUnmarked {
  template <typename T>
  static bool ShouldTrace(JSRuntime* rt, T* thingp) {
    return !IsMarkedUnbarriered(rt, thingp);
  }
};

template <>
bool IfUnmarked::ShouldTrace<TypeSet::Type>(JSRuntime* rt,
                                            TypeSet::Type* type) {
  return !TypeSet::IsTypeMarked(rt, type);
}

bool JitcodeGlobalTable::markIteratively(GCMarker* marker) {
  // JitcodeGlobalTable must keep entries that are in the sampler buffer
  // alive. This conditionality is akin to holding the entries weakly.
  //
  // If this table were marked at the beginning of the mark phase, then
  // sampling would require a read barrier for sampling in between
  // incremental GC slices. However, invoking read barriers from the sampler
  // is wildly unsafe. The sampler may run at any time, including during GC
  // itself.
  //
  // Instead, JitcodeGlobalTable is marked at the beginning of the sweep
  // phase, along with weak references. The key assumption is the
  // following. At the beginning of the sweep phase, any JS frames that the
  // sampler may put in its buffer that are not already there at the
  // beginning of the mark phase must have already been marked, as either 1)
  // the frame was on-stack at the beginning of the sweep phase, or 2) the
  // frame was pushed between incremental sweep slices. Frames of case 1)
  // are already marked. Frames of case 2) must have been reachable to have
  // been newly pushed, and thus are already marked.
  //
  // The approach above obviates the need for read barriers. The assumption
  // above is checked in JitcodeGlobalTable::lookupForSampler.

  MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());

  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

  // If the profiler is off, rangeStart will be Nothing() and all entries are
  // considered to be expired.
  Maybe<uint64_t> rangeStart =
      marker->runtime()->profilerSampleBufferRangeStart();

  bool markedAny = false;
  for (Range r(*this); !r.empty(); r.popFront()) {
    JitcodeGlobalEntry* entry = r.front();

    // If an entry is not sampled, reset its buffer position to the invalid
    // position, and conditionally mark the rest of the entry if its
    // JitCode is not already marked. This conditional marking ensures
    // that so long as the JitCode *may* be sampled, we keep any
    // information that may be handed out to the sampler, like tracked
    // types used by optimizations and scripts used for pc to line number
    // mapping, alive as well.
    if (!rangeStart || !entry->isSampled(*rangeStart)) {
      entry->setAsExpired();
      if (!entry->baseEntry().isJitcodeMarkedFromAnyThread(marker->runtime())) {
        continue;
      }
    }

    // The table is runtime-wide. Not all zones may be participating in
    // the GC.
    if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
      continue;
    }

    markedAny |= entry->trace<IfUnmarked>(marker);
  }

  return markedAny;
}

void JitcodeGlobalTable::traceWeak(JSRuntime* rt, JSTracer* trc) {
  AutoSuppressProfilerSampling suppressSampling(rt->mainContextFromOwnThread());
  for (Enum e(*this, rt); !e.empty(); e.popFront()) {
    JitcodeGlobalEntry* entry = e.front();

    if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
      continue;
    }

    if (!TraceManuallyBarrieredWeakEdge(
            trc, &entry->baseEntry().jitcode_,
            "JitcodeGlobalTable::JitcodeGlobalEntry::jitcode_")) {
      e.removeFront();
    } else {
      entry->sweepChildren(rt);
    }
  }
}

template <class ShouldTraceProvider>
bool JitcodeGlobalEntry::BaseEntry::traceJitcode(JSTracer* trc) {
  if (ShouldTraceProvider::ShouldTrace(trc->runtime(), &jitcode_)) {
    TraceManuallyBarrieredEdge(trc, &jitcode_,
                               "jitcodeglobaltable-baseentry-jitcode");
    return true;
  }
  return false;
}

bool JitcodeGlobalEntry::BaseEntry::isJitcodeMarkedFromAnyThread(
    JSRuntime* rt) {
  return IsMarkedUnbarriered(rt, &jitcode_);
}

template <class ShouldTraceProvider>
bool JitcodeGlobalEntry::BaselineEntry::trace(JSTracer* trc) {
  if (ShouldTraceProvider::ShouldTrace(trc->runtime(), &script_)) {
    TraceManuallyBarrieredEdge(trc, &script_,
                               "jitcodeglobaltable-baselineentry-script");
    return true;
  }
  return false;
}

void JitcodeGlobalEntry::BaselineEntry::sweepChildren() {
  MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&script_));
}

bool JitcodeGlobalEntry::BaselineEntry::isMarkedFromAnyThread(JSRuntime* rt) {
  return IsMarkedUnbarriered(rt, &script_);
}

template <class ShouldTraceProvider>
bool JitcodeGlobalEntry::IonEntry::trace(JSTracer* trc) {
  bool tracedAny = false;

  JSRuntime* rt = trc->runtime();
  for (unsigned i = 0; i < numScripts(); i++) {
    if (ShouldTraceProvider::ShouldTrace(rt,
                                         &sizedScriptList()->pairs[i].script)) {
      TraceManuallyBarrieredEdge(trc, &sizedScriptList()->pairs[i].script,
                                 "jitcodeglobaltable-ionentry-script");
      tracedAny = true;
    }
  }

  return tracedAny;
}

void JitcodeGlobalEntry::IonEntry::sweepChildren() {
  for (unsigned i = 0; i < numScripts(); i++) {
    MOZ_ALWAYS_FALSE(
        IsAboutToBeFinalizedUnbarriered(&sizedScriptList()->pairs[i].script));
  }
}

bool JitcodeGlobalEntry::IonEntry::isMarkedFromAnyThread(JSRuntime* rt) {
  for (unsigned i = 0; i < numScripts(); i++) {
    if (!IsMarkedUnbarriered(rt, &sizedScriptList()->pairs[i].script)) {
      return false;
    }
  }

  return true;
}

/* static */
void JitcodeRegionEntry::WriteHead(CompactBufferWriter& writer,
                                   uint32_t nativeOffset, uint8_t scriptDepth) {
  writer.writeUnsigned(nativeOffset);
  writer.writeByte(scriptDepth);
}

/* static */
void JitcodeRegionEntry::ReadHead(CompactBufferReader& reader,
                                  uint32_t* nativeOffset,
                                  uint8_t* scriptDepth) {
  *nativeOffset = reader.readUnsigned();
  *scriptDepth = reader.readByte();
}

/* static */
void JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter& writer,
                                       uint32_t scriptIdx, uint32_t pcOffset) {
  writer.writeUnsigned(scriptIdx);
  writer.writeUnsigned(pcOffset);
}

/* static */
void JitcodeRegionEntry::ReadScriptPc(CompactBufferReader& reader,
                                      uint32_t* scriptIdx, uint32_t* pcOffset) {
  *scriptIdx = reader.readUnsigned();
  *pcOffset = reader.readUnsigned();
}

/* static */
void JitcodeRegionEntry::WriteDelta(CompactBufferWriter& writer,
                                    uint32_t nativeDelta, int32_t pcDelta) {
  if (pcDelta >= 0) {
    // 1 and 2-byte formats possible.

    //  NNNN-BBB0
    if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
      uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
                       (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
      writer.writeByte(encVal);
      return;
    }

    //  NNNN-NNNN BBBB-BB01
    if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
      uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
                        (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
      writer.writeByte(encVal & 0xff);
      writer.writeByte((encVal >> 8) & 0xff);
      return;
    }
  }

  //  NNNN-NNNN NNNB-BBBB BBBB-B011
  if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
      nativeDelta <= ENC3_NATIVE_DELTA_MAX) {
    uint32_t encVal =
        ENC3_MASK_VAL |
        ((uint32_t(pcDelta) << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
        (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
    writer.writeByte(encVal & 0xff);
    writer.writeByte((encVal >> 8) & 0xff);
    writer.writeByte((encVal >> 16) & 0xff);
    return;
  }

  //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
  if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
      nativeDelta <= ENC4_NATIVE_DELTA_MAX) {
    uint32_t encVal =
        ENC4_MASK_VAL |
        ((uint32_t(pcDelta) << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
        (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
    writer.writeByte(encVal & 0xff);
    writer.writeByte((encVal >> 8) & 0xff);
    writer.writeByte((encVal >> 16) & 0xff);
    writer.writeByte((encVal >> 24) & 0xff);
    return;
  }

  // Should never get here.
  MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
}
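
// Worked example for the 1-byte format above (layout NNNN-BBB0, where the low
// tag bit is 0, the pc delta sits in bits 1..3, and the native delta in bits
// 4..7, as the layout implies): nativeDelta = 5 and pcDelta = 2 pack as
//   (5 << 4) | (2 << 1) | 0  ==  0b0101'0100  ==  0x54.
// ReadDelta below reverses this by inspecting the tag bits at the least
// significant end of the first byte.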

/* static */
void JitcodeRegionEntry::ReadDelta(CompactBufferReader& reader,
                                   uint32_t* nativeDelta, int32_t* pcDelta) {
  // NB:
  // It's possible to get nativeDeltas with value 0 in two cases:
  //
  // 1. The last region's run.  This is because the region table's start
  // must be 4-byte aligned, and we must insert padding bytes to align the
  // payload section before emitting the table.
  //
  // 2. A zero-offset nativeDelta with a negative pcDelta.
  //
  // So if nativeDelta is zero, then pcDelta must be <= 0.

  //  NNNN-BBB0
  const uint32_t firstByte = reader.readByte();
  if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
    uint32_t encVal = firstByte;
    *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
    *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN BBBB-BB01
  const uint32_t secondByte = reader.readByte();
  if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
    uint32_t encVal = firstByte | secondByte << 8;
    *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
    *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN NNNB-BBBB BBBB-B011
  const uint32_t thirdByte = reader.readByte();
  if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
    uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
    *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;

    uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
    // Fix sign if necessary.
    if (pcDeltaU > static_cast<uint32_t>(ENC3_PC_DELTA_MAX)) {
      pcDeltaU |= ~ENC3_PC_DELTA_MAX;
    }
    *pcDelta = pcDeltaU;
    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
  MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
  const uint32_t fourthByte = reader.readByte();
  uint32_t encVal =
      firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
  *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;

  uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
  // Fix sign if necessary.
  if (pcDeltaU > static_cast<uint32_t>(ENC4_PC_DELTA_MAX)) {
    pcDeltaU |= ~ENC4_PC_DELTA_MAX;
  }
  *pcDelta = pcDeltaU;

  MOZ_ASSERT(*pcDelta != 0);
  MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
}
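
// Sign-extension example (illustrative): the signed pc-delta fields above are
// stored in a fixed number of bits, so a raw field value above the format's
// PC_DELTA_MAX has its (implicit) sign bit set. OR-ing with ~PC_DELTA_MAX
// sets every higher bit of the 32-bit value; e.g. an all-ones raw field
// becomes the int32_t value -1.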

/* static */
uint32_t JitcodeRegionEntry::ExpectedRunLength(const NativeToBytecode* entry,
                                               const NativeToBytecode* end) {
  MOZ_ASSERT(entry < end);

  // We always use the first entry, so runLength starts at 1.
  uint32_t runLength = 1;

  uint32_t curNativeOffset = entry->nativeOffset.offset();
  uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

  for (auto nextEntry = entry + 1; nextEntry != end; nextEntry += 1) {
    // If the next entry moves to a different inline site, stop the run.
    if (nextEntry->tree != entry->tree) {
      break;
    }

    uint32_t nextNativeOffset = nextEntry->nativeOffset.offset();
    uint32_t nextBytecodeOffset =
        nextEntry->tree->script()->pcToOffset(nextEntry->pc);
    MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

    uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
    int32_t bytecodeDelta =
        int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);

    // If deltas are too large (very unlikely), stop the run.
    if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta)) {
      break;
    }

    runLength++;

    // If the run has grown to its maximum length, stop the run.
    if (runLength == MAX_RUN_LENGTH) {
      break;
    }

    curNativeOffset = nextNativeOffset;
    curBytecodeOffset = nextBytecodeOffset;
  }

  return runLength;
}

struct JitcodeMapBufferWriteSpewer {
#ifdef JS_JITSPEW
  CompactBufferWriter* writer;
  uint32_t startPos;

  static const uint32_t DumpMaxBytes = 50;

  explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w)
      : writer(&w), startPos(writer->length()) {}

  void spewAndAdvance(const char* name) {
    if (writer->oom()) {
      return;
    }

    uint32_t curPos = writer->length();
    const uint8_t* start = writer->buffer() + startPos;
    const uint8_t* end = writer->buffer() + curPos;
    const char* MAP = "0123456789ABCDEF";
    uint32_t bytes = end - start;

    // Clamp the dump length so the hex dump below cannot overrun |buffer|.
    uint32_t dumpBytes = bytes < DumpMaxBytes ? bytes : DumpMaxBytes;

    char buffer[DumpMaxBytes * 3];
    for (uint32_t i = 0; i < dumpBytes; i++) {
      buffer[i * 3] = MAP[(start[i] >> 4) & 0xf];
      buffer[i * 3 + 1] = MAP[(start[i] >> 0) & 0xf];
      buffer[i * 3 + 2] = ' ';
    }
    buffer[dumpBytes > 0 ? (dumpBytes * 3 - 1) : 0] = '\0';

    JitSpew(JitSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos),
            int(bytes), buffer);

    // Move to the end of the current buffer.
    startPos = writer->length();
  }
#else   // !JS_JITSPEW
  explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w) {}
  void spewAndAdvance(const char* name) {}
#endif  // JS_JITSPEW
};

// Write a run, starting at the given NativeToBytecode entry, into the given
// buffer writer.
/* static */
bool JitcodeRegionEntry::WriteRun(CompactBufferWriter& writer,
                                  JSScript** scriptList,
                                  uint32_t scriptListSize, uint32_t runLength,
                                  const NativeToBytecode* entry) {
  MOZ_ASSERT(runLength > 0);
  MOZ_ASSERT(runLength <= MAX_RUN_LENGTH);

  // Calculate script depth.
  MOZ_ASSERT(entry->tree->depth() <= 0xff);
  uint8_t scriptDepth = entry->tree->depth();
  uint32_t regionNativeOffset = entry->nativeOffset.offset();

  JitcodeMapBufferWriteSpewer spewer(writer);

  // Write the head info.
  JitSpew(JitSpew_Profiling, "    Head Info: nativeOffset=%d scriptDepth=%d",
          int(regionNativeOffset), int(scriptDepth));
  WriteHead(writer, regionNativeOffset, scriptDepth);
  spewer.spewAndAdvance("      ");

  // Write each script/pc pair.
  {
    InlineScriptTree* curTree = entry->tree;
    jsbytecode* curPc = entry->pc;
    for (uint8_t i = 0; i < scriptDepth; i++) {
      // Find the index of the script within the list.
      // NB: scriptList is guaranteed to contain curTree->script()
      uint32_t scriptIdx = 0;
      for (; scriptIdx < scriptListSize; scriptIdx++) {
        if (scriptList[scriptIdx] == curTree->script()) {
          break;
        }
      }
      MOZ_ASSERT(scriptIdx < scriptListSize);

      uint32_t pcOffset = curTree->script()->pcToOffset(curPc);

      JitSpew(JitSpew_Profiling, "    Script/PC %d: scriptIdx=%d pcOffset=%d",
              int(i), int(scriptIdx), int(pcOffset));
      WriteScriptPc(writer, scriptIdx, pcOffset);
      spewer.spewAndAdvance("      ");

      MOZ_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
      curPc = curTree->callerPc();
      curTree = curTree->caller();
    }
  }

  // Start writing runs.
  uint32_t curNativeOffset = entry->nativeOffset.offset();
  uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

  JitSpew(JitSpew_Profiling,
          "  Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
          int(curNativeOffset), int(curBytecodeOffset));

  // Skip first entry because it is implicit in the header.  Start at subsequent
  // entry.
  for (uint32_t i = 1; i < runLength; i++) {
    MOZ_ASSERT(entry[i].tree == entry->tree);

    uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
    uint32_t nextBytecodeOffset =
        entry[i].tree->script()->pcToOffset(entry[i].pc);
    MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

    uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
    int32_t bytecodeDelta =
        int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
    MOZ_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));

    JitSpew(JitSpew_Profiling,
            "    RunEntry native: %d-%d [%d]  bytecode: %d-%d [%d]",
            int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
            int(curBytecodeOffset), int(nextBytecodeOffset),
            int(bytecodeDelta));
    WriteDelta(writer, nativeDelta, bytecodeDelta);

    // Spew the bytecode in these ranges.
    if (curBytecodeOffset < nextBytecodeOffset) {
      JitSpewStart(JitSpew_Profiling, "      OPS: ");
      uint32_t curBc = curBytecodeOffset;
      while (curBc < nextBytecodeOffset) {
        jsbytecode* pc = entry[i].tree->script()->offsetToPC(curBc);
#ifdef JS_JITSPEW
        JSOp op = JSOp(*pc);
        JitSpewCont(JitSpew_Profiling, "%s ", CodeName(op));
#endif
        curBc += GetBytecodeLength(pc);
      }
      JitSpewFin(JitSpew_Profiling);
    }
    spewer.spewAndAdvance("      ");

    curNativeOffset = nextNativeOffset;
    curBytecodeOffset = nextBytecodeOffset;
  }

  if (writer.oom()) {
    return false;
  }

  return true;
}

void JitcodeRegionEntry::unpack() {
  CompactBufferReader reader(data_, end_);
  ReadHead(reader, &nativeOffset_, &scriptDepth_);
  MOZ_ASSERT(scriptDepth_ > 0);

  scriptPcStack_ = reader.currentPosition();
  // Skip past script/pc stack
  for (unsigned i = 0; i < scriptDepth_; i++) {
    uint32_t scriptIdx, pcOffset;
    ReadScriptPc(reader, &scriptIdx, &pcOffset);
  }

  deltaRun_ = reader.currentPosition();
}

uint32_t JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset,
                                          uint32_t startPcOffset) const {
  DeltaIterator iter = deltaIterator();
  uint32_t curNativeOffset = nativeOffset();
  uint32_t curPcOffset = startPcOffset;
  while (iter.hasMore()) {
    uint32_t nativeDelta;
    int32_t pcDelta;
    iter.readNext(&nativeDelta, &pcDelta);

    // The start address of the next delta-run entry is counted towards
    // the current delta-run entry, because return addresses should
    // associate with the bytecode op prior (the call) not the op after.
    if (queryNativeOffset <= curNativeOffset + nativeDelta) {
      break;
    }
    curNativeOffset += nativeDelta;
    curPcOffset += pcDelta;
  }
  return curPcOffset;
}
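
// Worked example (illustrative): suppose the region starts at nativeOffset 100
// with startPcOffset 20, and the delta run decodes to (nativeDelta=8,
// pcDelta=+3) then (nativeDelta=4, pcDelta=+2). A query at native offset 108
// stops on the first pair (108 <= 100 + 8) and reports pcOffset 20, while a
// query at 110 consumes the first pair (now at native 108, pc 23) and stops
// on the second (110 <= 108 + 4), reporting pcOffset 23.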

bool JitcodeIonTable::makeIonEntry(JSContext* cx, JitCode* code,
                                   uint32_t numScripts, JSScript** scripts,
                                   JitcodeGlobalEntry::IonEntry& out) {
  using SizedScriptList = JitcodeGlobalEntry::IonEntry::SizedScriptList;

  MOZ_ASSERT(numScripts > 0);

  // Create profiling strings for script, within vector.
  typedef js::Vector<char*, 32, SystemAllocPolicy> ProfilingStringVector;

  ProfilingStringVector profilingStrings;
  if (!profilingStrings.reserve(numScripts)) {
    return false;
  }

  // Cleanup allocations on failure.
  auto autoFreeProfilingStrings = mozilla::MakeScopeExit([&] {
    for (auto elem : profilingStrings) {
      js_free(elem);
    }
  });

  for (uint32_t i = 0; i < numScripts; i++) {
    UniqueChars str = GeckoProfilerRuntime::allocProfileString(cx, scripts[i]);
    if (!str) {
      return false;
    }
    if (!profilingStrings.append(str.release())) {
      return false;
    }
  }

  // Create SizedScriptList
  void* mem =
      (void*)cx->pod_malloc<uint8_t>(SizedScriptList::AllocSizeFor(numScripts));
  if (!mem) {
    return false;
  }

  // Keep allocated profiling strings.
  autoFreeProfilingStrings.release();

  SizedScriptList* scriptList =
      new (mem) SizedScriptList(numScripts, scripts, &profilingStrings[0]);
  out.init(code, code->raw(), code->rawEnd(), scriptList, this);
  return true;
}

uint32_t JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const {
  static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
  uint32_t regions = numRegions();
  MOZ_ASSERT(regions > 0);

  // For small region lists, just search linearly.
  if (regions <= LINEAR_SEARCH_THRESHOLD) {
    JitcodeRegionEntry previousEntry = regionEntry(0);
    for (uint32_t i = 1; i < regions; i++) {
      JitcodeRegionEntry nextEntry = regionEntry(i);
      MOZ_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());

      // See note in binary-search code below about why we use '<=' here
      // instead of '<'.  Short explanation: regions are closed at their
      // ending addresses, and open at their starting addresses.
      if (nativeOffset <= nextEntry.nativeOffset()) {
        return i - 1;
      }

      previousEntry = nextEntry;
    }
    // If nothing found, assume it falls within last region.
    return regions - 1;
  }

  // For larger ones, binary search the region table.
  uint32_t idx = 0;
  uint32_t count = regions;
  while (count > 1) {
    uint32_t step = count / 2;
    uint32_t mid = idx + step;
    JitcodeRegionEntry midEntry = regionEntry(mid);

    // A region memory range is closed at its ending address, not starting
    // address.  This is because the return address for calls must associate
    // with the call's bytecode PC, not the PC of the bytecode operator after
    // the call.
    //
    // So a query is < an entry if the query nativeOffset is <= the start
    // address of the entry, and a query is >= an entry if the query
    // nativeOffset is > the start address of an entry.
    if (nativeOffset <= midEntry.nativeOffset()) {
      // Target entry is below midEntry.
      count = step;
    } else {  // if (nativeOffset > midEntry.nativeOffset())
      // Target entry is at midEntry or above.
      idx = mid;
      count -= step;
    }
  }
  return idx;
}
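
// Boundary example (illustrative): with regions starting at native offsets
// {0, 16, 32}, a query for nativeOffset 16 maps to region 0, not region 1,
// because regions are closed at their ending address: a return address equal
// to a region's start belongs to the call at the end of the previous region.
// A query for 17 maps to region 1.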

/* static */
bool JitcodeIonTable::WriteIonTable(
    CompactBufferWriter& writer, JSScript** scriptList, uint32_t scriptListSize,
    const NativeToBytecode* start, const NativeToBytecode* end,
    uint32_t* tableOffsetOut, uint32_t* numRegionsOut) {
  MOZ_ASSERT(tableOffsetOut != nullptr);
  MOZ_ASSERT(numRegionsOut != nullptr);
  MOZ_ASSERT(writer.length() == 0);
  MOZ_ASSERT(scriptListSize > 0);

  JitSpew(JitSpew_Profiling,
          "Writing native to bytecode map for %s:%u:%u (%zu entries)",
          scriptList[0]->filename(), scriptList[0]->lineno(),
          scriptList[0]->column(), mozilla::PointerRangeSize(start, end));

  JitSpew(JitSpew_Profiling, "  ScriptList of size %d", int(scriptListSize));
  for (uint32_t i = 0; i < scriptListSize; i++) {
    JitSpew(JitSpew_Profiling, "  Script %d - %s:%u:%u", int(i),
            scriptList[i]->filename(), scriptList[i]->lineno(),
            scriptList[i]->column());
  }

  // Write out runs first.  Keep a vector tracking the positive offsets from
  // payload start to the run.
  const NativeToBytecode* curEntry = start;
  js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;

  while (curEntry != end) {
    // Calculate the length of the next run.
    uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
    MOZ_ASSERT(runLength > 0);
    MOZ_ASSERT(runLength <= uintptr_t(end - curEntry));
    JitSpew(JitSpew_Profiling, "  Run at entry %d, length %d, buffer offset %d",
            int(curEntry - start), int(runLength), int(writer.length()));

    // Store the offset of the run.
    if (!runOffsets.append(writer.length())) {
      return false;
    }

    // Encode the run.
    if (!JitcodeRegionEntry::WriteRun(writer, scriptList, scriptListSize,
                                      runLength, curEntry)) {
      return false;
    }

    curEntry += runLength;
  }

  // Done encoding regions.  About to start table.  Ensure we are aligned to 4
  // bytes since table is composed of uint32_t values.
  uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
  if (padding == sizeof(uint32_t)) {
    padding = 0;
  }
  JitSpew(JitSpew_Profiling, "  Padding %d bytes after run @%d", int(padding),
          int(writer.length()));
  for (uint32_t i = 0; i < padding; i++) {
    writer.writeByte(0);
  }

  // Now at start of table.
  uint32_t tableOffset = writer.length();

  // The table being written at this point will be accessed directly via
  // uint32_t pointers, so all writes below use native endianness.

  // Write out numRegions
  JitSpew(JitSpew_Profiling, "  Writing numRuns=%d", int(runOffsets.length()));
  writer.writeNativeEndianUint32_t(runOffsets.length());

  // Write out region offset table.  The offsets in |runOffsets| are currently
  // forward offsets from the beginning of the buffer.  We convert them to
  // backwards offsets from the start of the table before writing them into
  // their table entries.
  for (uint32_t i = 0; i < runOffsets.length(); i++) {
    JitSpew(JitSpew_Profiling, "  Run %d offset=%d backOffset=%d @%d", int(i),
            int(runOffsets[i]), int(tableOffset - runOffsets[i]),
            int(writer.length()));
    writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
  }

  if (writer.oom()) {
    return false;
  }

  *tableOffsetOut = tableOffset;
  *numRegionsOut = runOffsets.length();
  return true;
}
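
// Reading side (illustrative, assuming the table layout produced above): each
// stored value is the backwards distance from the table to its run, so a
// reader that only knows the table's address can recover a run's payload as
// roughly |tableStart - backOffset[i]|. This is why the forward offsets are
// converted before being written: the region table pointer sits in the middle
// of the allocation (see IonEntry::destroy above), not at its start.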

}  // namespace jit
}  // namespace js

JS::ProfiledFrameHandle::ProfiledFrameHandle(JSRuntime* rt,
                                             js::jit::JitcodeGlobalEntry& entry,
                                             void* addr, const char* label,
                                             uint32_t depth)
    : rt_(rt),
      entry_(entry),
      addr_(addr),
      canonicalAddr_(nullptr),
      label_(label),
      depth_(depth) {
  if (!canonicalAddr_) {
    canonicalAddr_ = entry_.canonicalNativeAddrFor(rt_, addr_);
  }
}

JS_PUBLIC_API JS::ProfilingFrameIterator::FrameKind
JS::ProfiledFrameHandle::frameKind() const {
  if (entry_.isBaselineInterpreter()) {
    return JS::ProfilingFrameIterator::Frame_BaselineInterpreter;
  }
  if (entry_.isBaseline()) {
    return JS::ProfilingFrameIterator::Frame_Baseline;
  }
  return JS::ProfilingFrameIterator::Frame_Ion;
}

JS_PUBLIC_API uint64_t JS::ProfiledFrameHandle::realmID() const {
  return entry_.lookupRealmID(rt_, addr_);
}

JS_PUBLIC_API JS::ProfiledFrameRange JS::GetProfiledFrames(JSContext* cx,
                                                           void* addr) {
  JSRuntime* rt = cx->runtime();
  js::jit::JitcodeGlobalTable* table =
      rt->jitRuntime()->getJitcodeGlobalTable();
  js::jit::JitcodeGlobalEntry* entry = table->lookup(addr);

  ProfiledFrameRange result(rt, addr, entry);

  if (entry) {
    result.depth_ = entry->callStackAtAddr(rt, addr, result.labels_,
                                           MOZ_ARRAY_LENGTH(result.labels_));
  }
  return result;
}

JS::ProfiledFrameHandle JS::ProfiledFrameRange::Iter::operator*() const {
  // The iterator iterates in high depth to low depth order. index_ goes up,
  // and the depth we need to pass to ProfiledFrameHandle goes down.
  uint32_t depth = range_.depth_ - 1 - index_;
  return ProfiledFrameHandle(range_.rt_, *range_.entry_, range_.addr_,
                             range_.labels_[depth], depth);
}