1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "ProfileBufferEntry.h"
8
9 #include <ostream>
10 #include <type_traits>
11
12 #include "mozilla/Logging.h"
13 #include "mozilla/ScopeExit.h"
14 #include "mozilla/Sprintf.h"
15 #include "mozilla/StackWalk.h"
16
17 #include "BaseProfiler.h"
18 #include "mozilla/BaseProfilerMarkers.h"
19 #include "platform.h"
20 #include "ProfileBuffer.h"
21 #include "ProfilerBacktrace.h"
22
23 namespace mozilla {
24 namespace baseprofiler {
25
26 ////////////////////////////////////////////////////////////////////////
27 // BEGIN ProfileBufferEntry
28
ProfileBufferEntry()29 ProfileBufferEntry::ProfileBufferEntry()
30 : mKind(Kind::INVALID), mStorage{0, 0, 0, 0, 0, 0, 0, 0} {}
31
32 // aString must be a static string.
ProfileBufferEntry(Kind aKind,const char * aString)33 ProfileBufferEntry::ProfileBufferEntry(Kind aKind, const char* aString)
34 : mKind(aKind) {
35 memcpy(mStorage, &aString, sizeof(aString));
36 }
37
ProfileBufferEntry(Kind aKind,char aChars[kNumChars])38 ProfileBufferEntry::ProfileBufferEntry(Kind aKind, char aChars[kNumChars])
39 : mKind(aKind) {
40 memcpy(mStorage, aChars, kNumChars);
41 }
42
ProfileBufferEntry(Kind aKind,void * aPtr)43 ProfileBufferEntry::ProfileBufferEntry(Kind aKind, void* aPtr) : mKind(aKind) {
44 memcpy(mStorage, &aPtr, sizeof(aPtr));
45 }
46
ProfileBufferEntry(Kind aKind,double aDouble)47 ProfileBufferEntry::ProfileBufferEntry(Kind aKind, double aDouble)
48 : mKind(aKind) {
49 memcpy(mStorage, &aDouble, sizeof(aDouble));
50 }
51
ProfileBufferEntry(Kind aKind,int aInt)52 ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int aInt) : mKind(aKind) {
53 memcpy(mStorage, &aInt, sizeof(aInt));
54 }
55
ProfileBufferEntry(Kind aKind,int64_t aInt64)56 ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int64_t aInt64)
57 : mKind(aKind) {
58 memcpy(mStorage, &aInt64, sizeof(aInt64));
59 }
60
ProfileBufferEntry(Kind aKind,uint64_t aUint64)61 ProfileBufferEntry::ProfileBufferEntry(Kind aKind, uint64_t aUint64)
62 : mKind(aKind) {
63 memcpy(mStorage, &aUint64, sizeof(aUint64));
64 }
65
// Stores a thread id payload. The static_asserts guarantee that the id can be
// safely round-tripped through raw-byte storage via memcpy.
ProfileBufferEntry::ProfileBufferEntry(Kind aKind,
                                       BaseProfilerThreadId aThreadId)
    : mKind(aKind) {
  static_assert(std::is_trivially_copyable_v<BaseProfilerThreadId>);
  static_assert(sizeof(aThreadId) <= sizeof(mStorage));
  memcpy(mStorage, &aThreadId, sizeof(aThreadId));
}
73
GetString() const74 const char* ProfileBufferEntry::GetString() const {
75 const char* result;
76 memcpy(&result, mStorage, sizeof(result));
77 return result;
78 }
79
GetPtr() const80 void* ProfileBufferEntry::GetPtr() const {
81 void* result;
82 memcpy(&result, mStorage, sizeof(result));
83 return result;
84 }
85
GetDouble() const86 double ProfileBufferEntry::GetDouble() const {
87 double result;
88 memcpy(&result, mStorage, sizeof(result));
89 return result;
90 }
91
GetInt() const92 int ProfileBufferEntry::GetInt() const {
93 int result;
94 memcpy(&result, mStorage, sizeof(result));
95 return result;
96 }
97
GetInt64() const98 int64_t ProfileBufferEntry::GetInt64() const {
99 int64_t result;
100 memcpy(&result, mStorage, sizeof(result));
101 return result;
102 }
103
GetUint64() const104 uint64_t ProfileBufferEntry::GetUint64() const {
105 uint64_t result;
106 memcpy(&result, mStorage, sizeof(result));
107 return result;
108 }
109
// Returns the stored thread id. The static_assert mirrors the one in the
// corresponding constructor: memcpy round-tripping is only valid for
// trivially copyable types.
BaseProfilerThreadId ProfileBufferEntry::GetThreadId() const {
  BaseProfilerThreadId result;
  static_assert(std::is_trivially_copyable_v<BaseProfilerThreadId>);
  memcpy(&result, mStorage, sizeof(result));
  return result;
}
116
CopyCharsInto(char (& aOutArray)[kNumChars]) const117 void ProfileBufferEntry::CopyCharsInto(char (&aOutArray)[kNumChars]) const {
118 memcpy(aOutArray, mStorage, kNumChars);
119 }
120
121 // END ProfileBufferEntry
122 ////////////////////////////////////////////////////////////////////////
123
124 // As mentioned in ProfileBufferEntry.h, the JSON format contains many
125 // arrays whose elements are laid out according to various schemas to help
126 // de-duplication. This RAII class helps write these arrays by keeping track of
127 // the last non-null element written and adding the appropriate number of null
128 // elements when writing new non-null elements. It also automatically opens and
129 // closes an array element on the given JSON writer.
130 //
131 // You grant the AutoArraySchemaWriter exclusive access to the JSONWriter and
132 // the UniqueJSONStrings objects for the lifetime of AutoArraySchemaWriter. Do
133 // not access them independently while the AutoArraySchemaWriter is alive.
134 // If you need to add complex objects, call FreeFormElement(), which will give
135 // you temporary access to the writer.
136 //
137 // Example usage:
138 //
139 // // Define the schema of elements in this type of array: [FOO, BAR, BAZ]
140 // enum Schema : uint32_t {
141 // FOO = 0,
142 // BAR = 1,
143 // BAZ = 2
144 // };
145 //
146 // AutoArraySchemaWriter writer(someJsonWriter, someUniqueStrings);
147 // if (shouldWriteFoo) {
148 // writer.IntElement(FOO, getFoo());
149 // }
150 // ... etc ...
151 //
152 // The elements need to be added in-order.
153 class MOZ_RAII AutoArraySchemaWriter {
154 public:
AutoArraySchemaWriter(SpliceableJSONWriter & aWriter)155 explicit AutoArraySchemaWriter(SpliceableJSONWriter& aWriter)
156 : mJSONWriter(aWriter), mNextFreeIndex(0) {
157 mJSONWriter.StartArrayElement(SpliceableJSONWriter::SingleLineStyle);
158 }
159
~AutoArraySchemaWriter()160 ~AutoArraySchemaWriter() { mJSONWriter.EndArray(); }
161
162 template <typename T>
IntElement(uint32_t aIndex,T aValue)163 void IntElement(uint32_t aIndex, T aValue) {
164 static_assert(!std::is_same_v<T, uint64_t>,
165 "Narrowing uint64 -> int64 conversion not allowed");
166 FillUpTo(aIndex);
167 mJSONWriter.IntElement(static_cast<int64_t>(aValue));
168 }
169
DoubleElement(uint32_t aIndex,double aValue)170 void DoubleElement(uint32_t aIndex, double aValue) {
171 FillUpTo(aIndex);
172 mJSONWriter.DoubleElement(aValue);
173 }
174
TimeMsElement(uint32_t aIndex,double aTime_ms)175 void TimeMsElement(uint32_t aIndex, double aTime_ms) {
176 FillUpTo(aIndex);
177 mJSONWriter.TimeDoubleMsElement(aTime_ms);
178 }
179
BoolElement(uint32_t aIndex,bool aValue)180 void BoolElement(uint32_t aIndex, bool aValue) {
181 FillUpTo(aIndex);
182 mJSONWriter.BoolElement(aValue);
183 }
184
185 protected:
Writer()186 SpliceableJSONWriter& Writer() { return mJSONWriter; }
187
FillUpTo(uint32_t aIndex)188 void FillUpTo(uint32_t aIndex) {
189 MOZ_ASSERT(aIndex >= mNextFreeIndex);
190 mJSONWriter.NullElements(aIndex - mNextFreeIndex);
191 mNextFreeIndex = aIndex + 1;
192 }
193
194 private:
195 SpliceableJSONWriter& mJSONWriter;
196 uint32_t mNextFreeIndex;
197 };
198
199 // Same as AutoArraySchemaWriter, but this can also write strings (output as
200 // indexes into the table of unique strings).
// Same as AutoArraySchemaWriter, but this can also write strings (output as
// indexes into the table of unique strings).
class MOZ_RAII AutoArraySchemaWithStringsWriter : public AutoArraySchemaWriter {
 public:
  AutoArraySchemaWithStringsWriter(SpliceableJSONWriter& aWriter,
                                   UniqueJSONStrings& aStrings)
      : AutoArraySchemaWriter(aWriter), mStrings(aStrings) {}

  // Write `aValue` at schema position `aIndex`, as an index into `mStrings`.
  void StringElement(uint32_t aIndex, const Span<const char>& aValue) {
    FillUpTo(aIndex);
    mStrings.WriteElement(Writer(), aValue);
  }

 private:
  UniqueJSONStrings& mStrings;
};
215
BeginStack(const FrameKey & aFrame)216 UniqueStacks::StackKey UniqueStacks::BeginStack(const FrameKey& aFrame) {
217 return StackKey(GetOrAddFrameIndex(aFrame));
218 }
219
AppendFrame(const StackKey & aStack,const FrameKey & aFrame)220 UniqueStacks::StackKey UniqueStacks::AppendFrame(const StackKey& aStack,
221 const FrameKey& aFrame) {
222 return StackKey(aStack, GetOrAddStackIndex(aStack),
223 GetOrAddFrameIndex(aFrame));
224 }
225
operator ==(const NormalFrameData & aOther) const226 bool UniqueStacks::FrameKey::NormalFrameData::operator==(
227 const NormalFrameData& aOther) const {
228 return mLocation == aOther.mLocation &&
229 mRelevantForJS == aOther.mRelevantForJS &&
230 mInnerWindowID == aOther.mInnerWindowID && mLine == aOther.mLine &&
231 mColumn == aOther.mColumn && mCategoryPair == aOther.mCategoryPair;
232 }
233
// Both tables are written as "bare lists" (sequences of array elements
// without enclosing brackets) so they can later be spliced into proper JSON
// arrays by SpliceFrameTableElements()/SpliceStackTableElements().
UniqueStacks::UniqueStacks() : mUniqueStrings(MakeUnique<UniqueJSONStrings>()) {
  mFrameTableWriter.StartBareList();
  mStackTableWriter.StartBareList();
}
238
GetOrAddStackIndex(const StackKey & aStack)239 uint32_t UniqueStacks::GetOrAddStackIndex(const StackKey& aStack) {
240 uint32_t count = mStackToIndexMap.count();
241 auto entry = mStackToIndexMap.lookupForAdd(aStack);
242 if (entry) {
243 MOZ_ASSERT(entry->value() < count);
244 return entry->value();
245 }
246
247 MOZ_RELEASE_ASSERT(mStackToIndexMap.add(entry, aStack, count));
248 StreamStack(aStack);
249 return count;
250 }
251
GetOrAddFrameIndex(const FrameKey & aFrame)252 uint32_t UniqueStacks::GetOrAddFrameIndex(const FrameKey& aFrame) {
253 uint32_t count = mFrameToIndexMap.count();
254 auto entry = mFrameToIndexMap.lookupForAdd(aFrame);
255 if (entry) {
256 MOZ_ASSERT(entry->value() < count);
257 return entry->value();
258 }
259
260 MOZ_RELEASE_ASSERT(mFrameToIndexMap.add(entry, aFrame, count));
261 StreamNonJITFrame(aFrame);
262 return count;
263 }
264
// Closes the frame-table bare list and splices its accumulated contents into
// `aWriter`. `TakeChunkedWriteFunc()` consumes the buffered output, so this
// is a one-shot operation.
void UniqueStacks::SpliceFrameTableElements(SpliceableJSONWriter& aWriter) {
  mFrameTableWriter.EndBareList();
  aWriter.TakeAndSplice(mFrameTableWriter.TakeChunkedWriteFunc());
}
269
// Closes the stack-table bare list and splices its accumulated contents into
// `aWriter`. `TakeChunkedWriteFunc()` consumes the buffered output, so this
// is a one-shot operation.
void UniqueStacks::SpliceStackTableElements(SpliceableJSONWriter& aWriter) {
  mStackTableWriter.EndBareList();
  aWriter.TakeAndSplice(mStackTableWriter.TakeChunkedWriteFunc());
}
274
StreamStack(const StackKey & aStack)275 void UniqueStacks::StreamStack(const StackKey& aStack) {
276 enum Schema : uint32_t { PREFIX = 0, FRAME = 1 };
277
278 AutoArraySchemaWriter writer(mStackTableWriter);
279 if (aStack.mPrefixStackIndex.isSome()) {
280 writer.IntElement(PREFIX, *aStack.mPrefixStackIndex);
281 }
282 writer.IntElement(FRAME, aStack.mFrameIndex);
283 }
284
// Streams one frame-table entry for a non-JIT frame, following the schema
// below. Only present fields are written; AutoArraySchemaWithStringsWriter
// null-pads any skipped positions (IMPLEMENTATION and OPTIMIZATIONS are never
// written here).
void UniqueStacks::StreamNonJITFrame(const FrameKey& aFrame) {
  using NormalFrameData = FrameKey::NormalFrameData;

  enum Schema : uint32_t {
    LOCATION = 0,
    RELEVANT_FOR_JS = 1,
    INNER_WINDOW_ID = 2,
    IMPLEMENTATION = 3,
    OPTIMIZATIONS = 4,
    LINE = 5,
    COLUMN = 6,
    CATEGORY = 7,
    SUBCATEGORY = 8
  };

  AutoArraySchemaWithStringsWriter writer(mFrameTableWriter, *mUniqueStrings);

  const NormalFrameData& data = aFrame.mData.as<NormalFrameData>();
  writer.StringElement(LOCATION, data.mLocation);
  writer.BoolElement(RELEVANT_FOR_JS, data.mRelevantForJS);

  // It's okay to convert uint64_t to double here because DOM always creates IDs
  // that are convertible to double.
  writer.DoubleElement(INNER_WINDOW_ID, data.mInnerWindowID);

  if (data.mLine.isSome()) {
    writer.IntElement(LINE, *data.mLine);
  }
  if (data.mColumn.isSome()) {
    writer.IntElement(COLUMN, *data.mColumn);
  }
  if (data.mCategoryPair.isSome()) {
    const ProfilingCategoryPairInfo& info =
        GetProfilingCategoryPairInfo(*data.mCategoryPair);
    writer.IntElement(CATEGORY, uint32_t(info.mCategory));
    writer.IntElement(SUBCATEGORY, info.mSubcategoryIndex);
  }
}
323
// JSONWriteFunc that appends everything written through it to a caller-owned
// std::string.
struct CStringWriteFunc : public JSONWriteFunc {
  std::string& mBuffer;  // The struct must not outlive this buffer
  explicit CStringWriteFunc(std::string& aBuffer) : mBuffer(aBuffer) {}

  void Write(const Span<const char>& aStr) override {
    mBuffer.append(aStr.data(), aStr.size());
  }
};
332
333 struct ProfileSample {
334 uint32_t mStack;
335 double mTime;
336 Maybe<double> mResponsiveness;
337 };
338
WriteSample(SpliceableJSONWriter & aWriter,const ProfileSample & aSample)339 static void WriteSample(SpliceableJSONWriter& aWriter,
340 const ProfileSample& aSample) {
341 enum Schema : uint32_t {
342 STACK = 0,
343 TIME = 1,
344 EVENT_DELAY = 2,
345 };
346
347 AutoArraySchemaWriter writer(aWriter);
348
349 writer.IntElement(STACK, aSample.mStack);
350
351 writer.TimeMsElement(TIME, aSample.mTime);
352
353 if (aSample.mResponsiveness.isSome()) {
354 writer.DoubleElement(EVENT_DELAY, *aSample.mResponsiveness);
355 }
356 }
357
// Iterator over the "legacy" entries (those that fit in a ProfileBufferEntry)
// stored in a ProfileChunkedBuffer. "Modern" entries (e.g., markers) are
// transparently skipped.
class EntryGetter {
 public:
  explicit EntryGetter(ProfileChunkedBuffer::Reader& aReader,
                       uint64_t aInitialReadPos = 0)
      : mBlockIt(
            aReader.At(ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                aInitialReadPos))),
        mBlockItEnd(aReader.end()) {
    if (!ReadLegacyOrEnd()) {
      // Find and read the next non-legacy entry.
      Next();
    }
  }

  // True while the iterator has not reached the end of the buffer.
  bool Has() const { return mBlockIt != mBlockItEnd; }

  // The current legacy entry. Only valid when Has() is true.
  const ProfileBufferEntry& Get() const {
    MOZ_ASSERT(Has(), "Caller should have checked `Has()` before `Get()`");
    return mEntry;
  }

  // Advance to the next legacy entry (or the end), skipping modern entries.
  void Next() {
    MOZ_ASSERT(Has(), "Caller should have checked `Has()` before `Next()`");
    for (;;) {
      ++mBlockIt;
      if (ReadLegacyOrEnd()) {
        // Either we're at the end, or we could read a legacy entry -> Done.
        break;
      }
      // Otherwise loop around until we hit the end or a legacy entry.
    }
  }

  ProfileBufferBlockIndex CurBlockIndex() const {
    return mBlockIt.CurrentBlockIndex();
  }

  uint64_t CurPos() const {
    return CurBlockIndex().ConvertToProfileBufferIndex();
  }

 private:
  // Try to read the entry at the current `mBlockIt` position.
  // * If we're at the end of the buffer, just return `true`.
  // * If there is a "legacy" entry (containing a real `ProfileBufferEntry`),
  //   read it into `mEntry`, and return `true` as well.
  // * Otherwise the entry contains a "modern" type that cannot be read into
  //   `mEntry`, return `false` (so `EntryGetter` can skip to another entry).
  bool ReadLegacyOrEnd() {
    if (!Has()) {
      return true;
    }
    // Read the entry "kind", which is always at the start of all entries.
    ProfileBufferEntryReader aER = *mBlockIt;
    auto type = static_cast<ProfileBufferEntry::Kind>(
        aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
               static_cast<ProfileBufferEntry::KindUnderlyingType>(
                   ProfileBufferEntry::Kind::MODERN_LIMIT));
    if (type >= ProfileBufferEntry::Kind::LEGACY_LIMIT) {
      // Modern entry: discard the remaining bytes and report "not legacy".
      aER.SetRemainingBytes(0);
      return false;
    }
    // Here, we have a legacy item, we need to read it from the start.
    // Because the above `ReadObject` moved the reader, we need to reset it to
    // the start of the entry before reading the whole entry.
    aER = *mBlockIt;
    aER.ReadBytes(&mEntry, aER.RemainingBytes());
    return true;
  }

  ProfileBufferEntry mEntry;
  ProfileChunkedBuffer::BlockIterator mBlockIt;
  const ProfileChunkedBuffer::BlockIterator mBlockItEnd;
};
433
434 // The following grammar shows legal sequences of profile buffer entries.
435 // The sequences beginning with a ThreadId entry are known as "samples".
436 //
437 // (
438 // ( /* Samples */
439 // ThreadId
440 // Time
441 // ( NativeLeafAddr
442 // | Label FrameFlags? DynamicStringFragment* LineNumber? CategoryPair?
443 // | JitReturnAddr
444 // )+
445 // Responsiveness?
446 // )
447 // | MarkerData
448 // | ( /* Counters */
449 // CounterId
450 // Time
451 // (
452 // CounterKey
453 // Count
454 // Number?
455 // )*
456 // )
457 // | CollectionStart
458 // | CollectionEnd
459 // | Pause
460 // | Resume
461 // | ( ProfilerOverheadTime /* Sampling start timestamp */
462 // ProfilerOverheadDuration /* Lock acquisition */
463 // ProfilerOverheadDuration /* Expired data cleaning */
464 // ProfilerOverheadDuration /* Counters */
465 // ProfilerOverheadDuration /* Threads */
466 // )
467 // )*
468 //
469 // The most complicated part is the stack entry sequence that begins with
470 // Label. Here are some examples.
471 //
472 // - ProfilingStack frames without a dynamic string:
473 //
474 // Label("js::RunScript")
475 // CategoryPair(ProfilingCategoryPair::JS)
476 //
477 // Label("XREMain::XRE_main")
478 // LineNumber(4660)
479 // CategoryPair(ProfilingCategoryPair::OTHER)
480 //
481 // Label("ElementRestyler::ComputeStyleChangeFor")
482 // LineNumber(3003)
483 // CategoryPair(ProfilingCategoryPair::CSS)
484 //
485 // - ProfilingStack frames with a dynamic string:
486 //
487 // Label("nsObserverService::NotifyObservers")
488 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
489 // DynamicStringFragment("domwindo")
490 // DynamicStringFragment("wopened")
491 // LineNumber(291)
492 // CategoryPair(ProfilingCategoryPair::OTHER)
493 //
494 // Label("")
495 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME))
496 // DynamicStringFragment("closeWin")
497 // DynamicStringFragment("dow (chr")
498 // DynamicStringFragment("ome://gl")
499 // DynamicStringFragment("obal/con")
500 // DynamicStringFragment("tent/glo")
501 // DynamicStringFragment("balOverl")
502 // DynamicStringFragment("ay.js:5)")
503 // DynamicStringFragment("") # this string holds the closing '\0'
504 // LineNumber(25)
505 // CategoryPair(ProfilingCategoryPair::JS)
506 //
507 // Label("")
508 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME))
509 // DynamicStringFragment("bound (s")
510 // DynamicStringFragment("elf-host")
511 // DynamicStringFragment("ed:914)")
512 // LineNumber(945)
513 // CategoryPair(ProfilingCategoryPair::JS)
514 //
515 // - A profiling stack frame with an overly long dynamic string:
516 //
517 // Label("")
518 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
519 // DynamicStringFragment("(too lon")
520 // DynamicStringFragment("g)")
521 // LineNumber(100)
522 // CategoryPair(ProfilingCategoryPair::NETWORK)
523 //
524 // - A wasm JIT frame:
525 //
526 // Label("")
527 // FrameFlags(uint64_t(0))
528 // DynamicStringFragment("wasm-fun")
529 // DynamicStringFragment("ction[87")
530 // DynamicStringFragment("36] (blo")
531 // DynamicStringFragment("b:http:/")
532 // DynamicStringFragment("/webasse")
533 // DynamicStringFragment("mbly.org")
534 // DynamicStringFragment("/3dc5759")
535 // DynamicStringFragment("4-ce58-4")
536 // DynamicStringFragment("626-975b")
537 // DynamicStringFragment("-08ad116")
538 // DynamicStringFragment("30bc1:38")
539 // DynamicStringFragment("29856)")
540 //
541 // - A JS frame in a synchronous sample:
542 //
543 // Label("")
544 // FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
545 // DynamicStringFragment("u (https")
546 // DynamicStringFragment("://perf-")
547 // DynamicStringFragment("html.io/")
548 // DynamicStringFragment("ac0da204")
549 // DynamicStringFragment("aaa44d75")
550 // DynamicStringFragment("a800.bun")
551 // DynamicStringFragment("dle.js:2")
552 // DynamicStringFragment("5)")
553
554 // Because this is a format entirely internal to the Profiler, any parsing
555 // error indicates a bug in the ProfileBuffer writing or the parser itself,
556 // or possibly flaky hardware.
// Logs the parse error (with a terminating newline so subsequent stderr
// output starts on its own line), asserts in debug builds, and skips to the
// next loop iteration. Must only be used inside a loop.
#define ERROR_AND_CONTINUE(msg)                              \
  {                                                          \
    fprintf(stderr, "ProfileBuffer parse error: %s\n", msg); \
    MOZ_ASSERT(false, msg);                                  \
    continue;                                                \
  }
563
// Parses all samples in the buffer that belong to `aThreadId` (or, when
// `aThreadId` is unspecified, whatever single thread the buffer contains) and
// are not older than `aSinceTime`, streaming each as a JSON sample array.
// Returns the thread id that was actually processed. See the grammar comment
// above for the entry sequences recognized here.
BaseProfilerThreadId ProfileBuffer::StreamSamplesToJSON(
    SpliceableJSONWriter& aWriter, BaseProfilerThreadId aThreadId,
    double aSinceTime, UniqueStacks& aUniqueStacks) const {
  // Scratch buffer used to reassemble dynamic strings from their fragments.
  UniquePtr<char[]> dynStrBuf = MakeUnique<char[]>(kMaxFrameKeyLength);

  return mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    BaseProfilerThreadId processedThreadId;

    EntryGetter e(*aReader);

    for (;;) {
      // This block skips entries until we find the start of the next sample.
      // This is useful in three situations.
      //
      // - The circular buffer overwrites old entries, so when we start parsing
      //   we might be in the middle of a sample, and we must skip forward to
      //   the start of the next sample.
      //
      // - We skip samples that don't have an appropriate ThreadId or Time.
      //
      // - We skip range Pause, Resume, CollectionStart, Counter and
      //   CollectionEnd entries between samples.
      while (e.Has()) {
        if (e.Get().IsThreadId()) {
          break;
        }
        e.Next();
      }

      if (!e.Has()) {
        break;
      }

      // Due to the skip_to_next_sample block above, if we have an entry here it
      // must be a ThreadId entry.
      MOZ_ASSERT(e.Get().IsThreadId());

      BaseProfilerThreadId threadId = e.Get().GetThreadId();
      e.Next();

      // Ignore samples that are for the wrong thread.
      if (threadId != aThreadId && aThreadId.IsSpecified()) {
        continue;
      }

      MOZ_ASSERT(
          aThreadId.IsSpecified() || !processedThreadId.IsSpecified(),
          "Unspecified aThreadId should only be used with 1-sample buffer");

      ProfileSample sample;

      if (e.Has() && e.Get().IsTime()) {
        sample.mTime = e.Get().GetDouble();
        e.Next();

        // Ignore samples that are too old.
        if (sample.mTime < aSinceTime) {
          continue;
        }
      } else {
        ERROR_AND_CONTINUE("expected a Time entry");
      }

      // Every sample's stack starts from the synthetic "(root)" frame.
      UniqueStacks::StackKey stack =
          aUniqueStacks.BeginStack(UniqueStacks::FrameKey("(root)"));

      int numFrames = 0;
      while (e.Has()) {
        if (e.Get().IsNativeLeafAddr()) {
          numFrames++;

          void* pc = e.Get().GetPtr();
          e.Next();

          static const uint32_t BUF_SIZE = 256;
          char buf[BUF_SIZE];

          // Bug 753041: We need a double cast here to tell GCC that we don't
          // want to sign extend 32-bit addresses starting with 0xFXXXXXX.
          unsigned long long pcULL = (unsigned long long)(uintptr_t)pc;
          SprintfLiteral(buf, "0x%llx", pcULL);

          // If the "MOZ_PROFILER_SYMBOLICATE" env-var is set, we add a local
          // symbolication description to the PC address. This is off by
          // default, and mainly intended for local development.
          static const bool preSymbolicate = []() {
            const char* symbolicate = getenv("MOZ_PROFILER_SYMBOLICATE");
            return symbolicate && symbolicate[0] != '\0';
          }();
          if (preSymbolicate) {
            MozCodeAddressDetails details;
            if (MozDescribeCodeAddress(pc, &details)) {
              // Replace \0 terminator with space.
              const uint32_t pcLen = strlen(buf);
              buf[pcLen] = ' ';
              // Add description after space. Note: Using a frame number of 0,
              // as using `numFrames` wouldn't help here, and would prevent
              // combining same function calls that happen at different depths.
              // TODO: Remove unsightly "#00: " if too annoying. :-)
              MozFormatCodeAddressDetails(
                  buf + pcLen + 1, BUF_SIZE - (pcLen + 1), 0, pc, &details);
            }
          }

          stack = aUniqueStacks.AppendFrame(stack, UniqueStacks::FrameKey(buf));

        } else if (e.Get().IsLabel()) {
          numFrames++;

          const char* label = e.Get().GetString();
          e.Next();

          using FrameFlags = ProfilingStackFrame::Flags;
          uint32_t frameFlags = 0;
          if (e.Has() && e.Get().IsFrameFlags()) {
            frameFlags = uint32_t(e.Get().GetUint64());
            e.Next();
          }

          bool relevantForJS =
              frameFlags & uint32_t(FrameFlags::RELEVANT_FOR_JS);

          // Copy potential dynamic string fragments into dynStrBuf, so that
          // dynStrBuf will then contain the entire dynamic string.
          size_t i = 0;
          dynStrBuf[0] = '\0';
          while (e.Has()) {
            if (e.Get().IsDynamicStringFragment()) {
              char chars[ProfileBufferEntry::kNumChars];
              e.Get().CopyCharsInto(chars);
              for (char c : chars) {
                if (i < kMaxFrameKeyLength) {
                  dynStrBuf[i] = c;
                  i++;
                }
              }
              e.Next();
            } else {
              break;
            }
          }
          // Overly long strings are truncated at kMaxFrameKeyLength - 1.
          dynStrBuf[kMaxFrameKeyLength - 1] = '\0';
          bool hasDynamicString = (i != 0);

          // Combine the static label and the dynamic string according to the
          // frame's string-template flags (method/getter/setter/plain).
          std::string frameLabel;
          if (label[0] != '\0' && hasDynamicString) {
            if (frameFlags & uint32_t(FrameFlags::STRING_TEMPLATE_METHOD)) {
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else if (frameFlags &
                       uint32_t(FrameFlags::STRING_TEMPLATE_GETTER)) {
              frameLabel += "get ";
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else if (frameFlags &
                       uint32_t(FrameFlags::STRING_TEMPLATE_SETTER)) {
              frameLabel += "set ";
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else {
              frameLabel += label;
              frameLabel += ' ';
              frameLabel += dynStrBuf.get();
            }
          } else if (hasDynamicString) {
            frameLabel += dynStrBuf.get();
          } else {
            frameLabel += label;
          }

          // Optional trailing entries, in fixed order (see grammar above).
          uint64_t innerWindowID = 0;
          if (e.Has() && e.Get().IsInnerWindowID()) {
            innerWindowID = uint64_t(e.Get().GetUint64());
            e.Next();
          }

          Maybe<unsigned> line;
          if (e.Has() && e.Get().IsLineNumber()) {
            line = Some(unsigned(e.Get().GetInt()));
            e.Next();
          }

          Maybe<unsigned> column;
          if (e.Has() && e.Get().IsColumnNumber()) {
            column = Some(unsigned(e.Get().GetInt()));
            e.Next();
          }

          Maybe<ProfilingCategoryPair> categoryPair;
          if (e.Has() && e.Get().IsCategoryPair()) {
            categoryPair =
                Some(ProfilingCategoryPair(uint32_t(e.Get().GetInt())));
            e.Next();
          }

          stack = aUniqueStacks.AppendFrame(
              stack, UniqueStacks::FrameKey(std::move(frameLabel),
                                            relevantForJS, innerWindowID, line,
                                            column, categoryPair));

        } else {
          break;
        }
      }

      if (numFrames == 0) {
        // It is possible to have empty stacks if native stackwalking is
        // disabled. Skip samples with empty stacks. (See Bug 1497985).
        // Thus, don't use ERROR_AND_CONTINUE, but just continue.
        continue;
      }

      sample.mStack = aUniqueStacks.GetOrAddStackIndex(stack);

      if (e.Has() && e.Get().IsResponsiveness()) {
        sample.mResponsiveness = Some(e.Get().GetDouble());
        e.Next();
      }

      WriteSample(aWriter, sample);

      processedThreadId = threadId;
    }

    return processedThreadId;
  });
}
798
// Streams all marker entries belonging to `aThreadId`. Markers are "modern"
// entries that are deserialized and streamed directly; every other entry kind
// is skipped.
// NOTE(review): `aProcessStartTime` and `aSinceTime` are not referenced in
// this body, and backtrace streaming uses TimeStamp::ProcessCreation() —
// confirm this is intentional.
void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter,
                                        BaseProfilerThreadId aThreadId,
                                        const TimeStamp& aProcessStartTime,
                                        double aSinceTime,
                                        UniqueStacks& aUniqueStacks) const {
  mEntries.ReadEach([&](ProfileBufferEntryReader& aER) {
    auto type = static_cast<ProfileBufferEntry::Kind>(
        aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
               static_cast<ProfileBufferEntry::KindUnderlyingType>(
                   ProfileBufferEntry::Kind::MODERN_LIMIT));
    if (type == ProfileBufferEntry::Kind::Marker) {
      ::mozilla::base_profiler_markers_detail::DeserializeAfterKindAndStream(
          aER,
          // Only provide a writer for markers on the requested thread.
          [&](const BaseProfilerThreadId& aMarkerThreadId) {
            return (aMarkerThreadId == aThreadId) ? &aWriter : nullptr;
          },
          // Stream any backtrace embedded in the marker payload.
          [&](ProfileChunkedBuffer& aChunkedBuffer) {
            ProfilerBacktrace backtrace("", &aChunkedBuffer);
            backtrace.StreamJSON(aWriter, TimeStamp::ProcessCreation(),
                                 aUniqueStacks);
          },
          // We don't have Rust markers in the mozglue.
          [&](mozilla::base_profiler_markers_detail::Streaming::
                  DeserializerTag) {
            MOZ_ASSERT_UNREACHABLE("No Rust markers in mozglue.");
          });
    } else {
      // The entry was not a marker, we need to skip to the end.
      aER.SetRemainingBytes(0);
    }
  });
}
832
// Streams the profiler's own sampling-overhead measurements as a
// "profilerOverhead" JSON object: per-sampling rows (time + four duration
// components) followed by aggregate statistics, for samplings at or after
// `aSinceTime`.
// NOTE(review): `aProcessStartTime` is not referenced in this body — confirm
// this is intentional.
void ProfileBuffer::StreamProfilerOverheadToJSON(
    SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
    double aSinceTime) const {
  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader);

    enum Schema : uint32_t {
      TIME = 0,
      LOCKING = 1,
      MARKER_CLEANING = 2,
      COUNTERS = 3,
      THREADS = 4
    };

    aWriter.StartObjectProperty("profilerOverhead");
    aWriter.StartObjectProperty("samples");
    // Stream all sampling overhead data. We skip other entries, because we
    // process them in StreamSamplesToJSON()/etc.
    {
      JSONSchemaWriter schema(aWriter);
      schema.WriteField("time");
      schema.WriteField("locking");
      schema.WriteField("expiredMarkerCleaning");
      schema.WriteField("counters");
      schema.WriteField("threads");
    }

    aWriter.StartArrayProperty("data");
    double firstTime = 0.0;
    double lastTime = 0.0;
    ProfilerStats intervals, overheads, lockings, cleanings, counters, threads;
    while (e.Has()) {
      // valid sequence: ProfilerOverheadTime, ProfilerOverheadDuration * 4
      if (e.Get().IsProfilerOverheadTime()) {
        double time = e.Get().GetDouble();
        if (time >= aSinceTime) {
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime");
          }
          double locking = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration");
          }
          double cleaning = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration*2");
          }
          double counter = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration*3");
          }
          double thread = e.Get().GetDouble();

          if (firstTime == 0.0) {
            firstTime = time;
          } else {
            // Note that we'll have 1 fewer interval than other numbers (because
            // we need both ends of an interval to know its duration). The final
            // difference should be insignificant over the expected many
            // thousands of iterations.
            intervals.Count(time - lastTime);
          }
          lastTime = time;
          overheads.Count(locking + cleaning + counter + thread);
          lockings.Count(locking);
          cleanings.Count(cleaning);
          counters.Count(counter);
          threads.Count(thread);

          AutoArraySchemaWriter writer(aWriter);
          writer.TimeMsElement(TIME, time);
          writer.DoubleElement(LOCKING, locking);
          writer.DoubleElement(MARKER_CLEANING, cleaning);
          writer.DoubleElement(COUNTERS, counter);
          writer.DoubleElement(THREADS, thread);
        }
      }
      e.Next();
    }
    aWriter.EndArray();   // data
    aWriter.EndObject();  // samples

    // Only output statistics if there is at least one full interval (and
    // therefore at least two samplings.)
    if (intervals.n > 0) {
      aWriter.StartObjectProperty("statistics");
      aWriter.DoubleProperty("profiledDuration", lastTime - firstTime);
      aWriter.IntProperty("samplingCount", overheads.n);
      aWriter.DoubleProperty("overheadDurations", overheads.sum);
      aWriter.DoubleProperty("overheadPercentage",
                             overheads.sum / (lastTime - firstTime));
#define PROFILER_STATS(name, var)                           \
  aWriter.DoubleProperty("mean" name, (var).sum / (var).n); \
  aWriter.DoubleProperty("min" name, (var).min);            \
  aWriter.DoubleProperty("max" name, (var).max);
      PROFILER_STATS("Interval", intervals);
      PROFILER_STATS("Overhead", overheads);
      PROFILER_STATS("Lockings", lockings);
      PROFILER_STATS("Cleaning", cleanings);
      PROFILER_STATS("Counter", counters);
      PROFILER_STATS("Thread", threads);
#undef PROFILER_STATS
      aWriter.EndObject();  // statistics
    }
    aWriter.EndObject();  // profilerOverhead
  });
}
956
// One recorded data point for a single key of a profiler counter:
// the sampling time plus the counter's "number" and "count" values read from
// the buffer at that time. Streamed out later as delta-encoded
// [time, number, count] rows by StreamCountersToJSON().
struct CounterKeyedSample {
  double mTime;      // Sample time in ms (from the buffer's Time entry).
  uint64_t mNumber;  // Value from the Number entry (0 if the entry is absent).
  int64_t mCount;    // Value from the Count entry.
};

// All samples recorded for one counter key, in buffer (chronological) order.
using CounterKeyedSamples = Vector<CounterKeyedSample>;

// Maps a counter key (the CounterKey entry's payload) to its samples.
using CounterMap = HashMap<uint64_t, CounterKeyedSamples>;
966
967 // HashMap lookup, if not found, a default value is inserted.
968 // Returns reference to (existing or new) value inside the HashMap.
969 template <typename HashM, typename Key>
LookupOrAdd(HashM & aMap,Key && aKey)970 static auto& LookupOrAdd(HashM& aMap, Key&& aKey) {
971 auto addPtr = aMap.lookupForAdd(aKey);
972 if (!addPtr) {
973 MOZ_RELEASE_ASSERT(aMap.add(addPtr, std::forward<Key>(aKey),
974 typename HashM::Entry::ValueType{}));
975 MOZ_ASSERT(!!addPtr);
976 }
977 return addPtr->value();
978 }
979
StreamCountersToJSON(SpliceableJSONWriter & aWriter,const TimeStamp & aProcessStartTime,double aSinceTime) const980 void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
981 const TimeStamp& aProcessStartTime,
982 double aSinceTime) const {
983 // Because this is a format entirely internal to the Profiler, any parsing
984 // error indicates a bug in the ProfileBuffer writing or the parser itself,
985 // or possibly flaky hardware.
986
987 mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
988 MOZ_ASSERT(aReader,
989 "ProfileChunkedBuffer cannot be out-of-session when sampler is "
990 "running");
991
992 EntryGetter e(*aReader);
993
994 enum Schema : uint32_t { TIME = 0, NUMBER = 1, COUNT = 2 };
995
996 // Stream all counters. We skip other entries, because we process them in
997 // StreamSamplesToJSON()/etc.
998 //
999 // Valid sequence in the buffer:
1000 // CounterID
1001 // Time
1002 // ( CounterKey Count Number? )*
1003 //
1004 // And the JSON (example):
1005 // "counters": {
1006 // "name": "malloc",
1007 // "category": "Memory",
1008 // "description": "Amount of allocated memory",
1009 // "sample_groups": {
1010 // "id": 0,
1011 // "samples": {
1012 // "schema": {"time": 0, "number": 1, "count": 2},
1013 // "data": [
1014 // [
1015 // 16117.033968000002,
1016 // 2446216,
1017 // 6801320
1018 // ],
1019 // [
1020 // 16118.037638,
1021 // 2446216,
1022 // 6801320
1023 // ],
1024 // ],
1025 // }
1026 // }
1027 // },
1028
1029 // Build the map of counters and populate it
1030 HashMap<void*, CounterMap> counters;
1031
1032 while (e.Has()) {
1033 // skip all non-Counters, including if we start in the middle of a counter
1034 if (e.Get().IsCounterId()) {
1035 void* id = e.Get().GetPtr();
1036 CounterMap& counter = LookupOrAdd(counters, id);
1037 e.Next();
1038 if (!e.Has() || !e.Get().IsTime()) {
1039 ERROR_AND_CONTINUE("expected a Time entry");
1040 }
1041 double time = e.Get().GetDouble();
1042 if (time >= aSinceTime) {
1043 e.Next();
1044 while (e.Has() && e.Get().IsCounterKey()) {
1045 uint64_t key = e.Get().GetUint64();
1046 CounterKeyedSamples& data = LookupOrAdd(counter, key);
1047 e.Next();
1048 if (!e.Has() || !e.Get().IsCount()) {
1049 ERROR_AND_CONTINUE("expected a Count entry");
1050 }
1051 int64_t count = e.Get().GetUint64();
1052 e.Next();
1053 uint64_t number;
1054 if (!e.Has() || !e.Get().IsNumber()) {
1055 number = 0;
1056 } else {
1057 number = e.Get().GetInt64();
1058 }
1059 CounterKeyedSample sample = {time, number, count};
1060 MOZ_RELEASE_ASSERT(data.append(sample));
1061 }
1062 } else {
1063 // skip counter sample - only need to skip the initial counter
1064 // id, then let the loop at the top skip the rest
1065 }
1066 }
1067 e.Next();
1068 }
1069 // we have a map of a map of counter entries; dump them to JSON
1070 if (counters.count() == 0) {
1071 return;
1072 }
1073
1074 aWriter.StartArrayProperty("counters");
1075 for (auto iter = counters.iter(); !iter.done(); iter.next()) {
1076 CounterMap& counter = iter.get().value();
1077 const BaseProfilerCount* base_counter =
1078 static_cast<const BaseProfilerCount*>(iter.get().key());
1079
1080 aWriter.Start();
1081 aWriter.StringProperty("name", MakeStringSpan(base_counter->mLabel));
1082 aWriter.StringProperty("category",
1083 MakeStringSpan(base_counter->mCategory));
1084 aWriter.StringProperty("description",
1085 MakeStringSpan(base_counter->mDescription));
1086
1087 aWriter.StartArrayProperty("sample_groups");
1088 for (auto counter_iter = counter.iter(); !counter_iter.done();
1089 counter_iter.next()) {
1090 CounterKeyedSamples& samples = counter_iter.get().value();
1091 uint64_t key = counter_iter.get().key();
1092
1093 size_t size = samples.length();
1094 if (size == 0) {
1095 continue;
1096 }
1097
1098 aWriter.StartObjectElement();
1099 {
1100 aWriter.IntProperty("id", static_cast<int64_t>(key));
1101 aWriter.StartObjectProperty("samples");
1102 {
1103 // XXX Can we assume a missing count means 0?
1104 JSONSchemaWriter schema(aWriter);
1105 schema.WriteField("time");
1106 schema.WriteField("number");
1107 schema.WriteField("count");
1108 }
1109
1110 aWriter.StartArrayProperty("data");
1111 uint64_t previousNumber = 0;
1112 int64_t previousCount = 0;
1113 for (size_t i = 0; i < size; i++) {
1114 // Encode as deltas, and only encode if different than the last
1115 // sample
1116 if (i == 0 || samples[i].mNumber != previousNumber ||
1117 samples[i].mCount != previousCount) {
1118 MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
1119 MOZ_ASSERT(samples[i].mNumber >= previousNumber);
1120 MOZ_ASSERT(samples[i].mNumber - previousNumber <=
1121 uint64_t(std::numeric_limits<int64_t>::max()));
1122
1123 AutoArraySchemaWriter writer(aWriter);
1124 writer.TimeMsElement(TIME, samples[i].mTime);
1125 writer.IntElement(
1126 NUMBER,
1127 static_cast<int64_t>(samples[i].mNumber - previousNumber));
1128 writer.IntElement(COUNT, samples[i].mCount - previousCount);
1129 previousNumber = samples[i].mNumber;
1130 previousCount = samples[i].mCount;
1131 }
1132 }
1133 aWriter.EndArray(); // data
1134 aWriter.EndObject(); // samples
1135 }
1136 aWriter.EndObject(); // sample_groups item
1137 }
1138 aWriter.EndArray(); // sample groups
1139 aWriter.End(); // for each counter
1140 }
1141 aWriter.EndArray(); // counters
1142 });
1143 }
1144
1145 #undef ERROR_AND_CONTINUE
1146
AddPausedRange(SpliceableJSONWriter & aWriter,const char * aReason,const Maybe<double> & aStartTime,const Maybe<double> & aEndTime)1147 static void AddPausedRange(SpliceableJSONWriter& aWriter, const char* aReason,
1148 const Maybe<double>& aStartTime,
1149 const Maybe<double>& aEndTime) {
1150 aWriter.Start();
1151 if (aStartTime) {
1152 aWriter.TimeDoubleMsProperty("startTime", *aStartTime);
1153 } else {
1154 aWriter.NullProperty("startTime");
1155 }
1156 if (aEndTime) {
1157 aWriter.TimeDoubleMsProperty("endTime", *aEndTime);
1158 } else {
1159 aWriter.NullProperty("endTime");
1160 }
1161 aWriter.StringProperty("reason", MakeStringSpan(aReason));
1162 aWriter.End();
1163 }
1164
StreamPausedRangesToJSON(SpliceableJSONWriter & aWriter,double aSinceTime) const1165 void ProfileBuffer::StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter,
1166 double aSinceTime) const {
1167 mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
1168 MOZ_ASSERT(aReader,
1169 "ProfileChunkedBuffer cannot be out-of-session when sampler is "
1170 "running");
1171
1172 EntryGetter e(*aReader);
1173
1174 Maybe<double> currentPauseStartTime;
1175 Maybe<double> currentCollectionStartTime;
1176
1177 while (e.Has()) {
1178 if (e.Get().IsPause()) {
1179 currentPauseStartTime = Some(e.Get().GetDouble());
1180 } else if (e.Get().IsResume()) {
1181 AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime,
1182 Some(e.Get().GetDouble()));
1183 currentPauseStartTime = Nothing();
1184 } else if (e.Get().IsCollectionStart()) {
1185 currentCollectionStartTime = Some(e.Get().GetDouble());
1186 } else if (e.Get().IsCollectionEnd()) {
1187 AddPausedRange(aWriter, "collecting", currentCollectionStartTime,
1188 Some(e.Get().GetDouble()));
1189 currentCollectionStartTime = Nothing();
1190 }
1191 e.Next();
1192 }
1193
1194 if (currentPauseStartTime) {
1195 AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime,
1196 Nothing());
1197 }
1198 if (currentCollectionStartTime) {
1199 AddPausedRange(aWriter, "collecting", currentCollectionStartTime,
1200 Nothing());
1201 }
1202 });
1203 }
1204
DuplicateLastSample(BaseProfilerThreadId aThreadId,const TimeStamp & aProcessStartTime,Maybe<uint64_t> & aLastSample)1205 bool ProfileBuffer::DuplicateLastSample(BaseProfilerThreadId aThreadId,
1206 const TimeStamp& aProcessStartTime,
1207 Maybe<uint64_t>& aLastSample) {
1208 if (!aLastSample) {
1209 return false;
1210 }
1211
1212 ProfileChunkedBuffer tempBuffer(
1213 ProfileChunkedBuffer::ThreadSafety::WithoutMutex, WorkerChunkManager());
1214
1215 auto retrieveWorkerChunk = MakeScopeExit(
1216 [&]() { WorkerChunkManager().Reset(tempBuffer.GetAllChunks()); });
1217
1218 const bool ok = mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
1219 MOZ_ASSERT(aReader,
1220 "ProfileChunkedBuffer cannot be out-of-session when sampler is "
1221 "running");
1222
1223 EntryGetter e(*aReader, *aLastSample);
1224
1225 if (e.CurPos() != *aLastSample) {
1226 // The last sample is no longer within the buffer range, so we cannot
1227 // use it. Reset the stored buffer position to Nothing().
1228 aLastSample.reset();
1229 return false;
1230 }
1231
1232 MOZ_RELEASE_ASSERT(e.Has() && e.Get().IsThreadId() &&
1233 e.Get().GetThreadId() == aThreadId);
1234
1235 e.Next();
1236
1237 // Go through the whole entry and duplicate it, until we find the next
1238 // one.
1239 while (e.Has()) {
1240 switch (e.Get().GetKind()) {
1241 case ProfileBufferEntry::Kind::Pause:
1242 case ProfileBufferEntry::Kind::Resume:
1243 case ProfileBufferEntry::Kind::PauseSampling:
1244 case ProfileBufferEntry::Kind::ResumeSampling:
1245 case ProfileBufferEntry::Kind::CollectionStart:
1246 case ProfileBufferEntry::Kind::CollectionEnd:
1247 case ProfileBufferEntry::Kind::ThreadId:
1248 // We're done.
1249 return true;
1250 case ProfileBufferEntry::Kind::Time:
1251 // Copy with new time
1252 AddEntry(
1253 tempBuffer,
1254 ProfileBufferEntry::Time(
1255 (TimeStamp::Now() - aProcessStartTime).ToMilliseconds()));
1256 break;
1257 case ProfileBufferEntry::Kind::CounterKey:
1258 case ProfileBufferEntry::Kind::Number:
1259 case ProfileBufferEntry::Kind::Count:
1260 case ProfileBufferEntry::Kind::Responsiveness:
1261 // Don't copy anything not part of a thread's stack sample
1262 break;
1263 case ProfileBufferEntry::Kind::CounterId:
1264 // CounterId is normally followed by Time - if so, we'd like
1265 // to skip it. If we duplicate Time, it won't hurt anything, just
1266 // waste buffer space (and this can happen if the CounterId has
1267 // fallen off the end of the buffer, but Time (and Number/Count)
1268 // are still in the buffer).
1269 e.Next();
1270 if (e.Has() && e.Get().GetKind() != ProfileBufferEntry::Kind::Time) {
1271 // this would only happen if there was an invalid sequence
1272 // in the buffer. Don't skip it.
1273 continue;
1274 }
1275 // we've skipped Time
1276 break;
1277 case ProfileBufferEntry::Kind::ProfilerOverheadTime:
1278 // ProfilerOverheadTime is normally followed by
1279 // ProfilerOverheadDuration*4 - if so, we'd like to skip it. Don't
1280 // duplicate, as we are in the middle of a sampling and will soon
1281 // capture its own overhead.
1282 e.Next();
1283 // A missing Time would only happen if there was an invalid
1284 // sequence in the buffer. Don't skip unexpected entry.
1285 if (e.Has() &&
1286 e.Get().GetKind() !=
1287 ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
1288 continue;
1289 }
1290 e.Next();
1291 if (e.Has() &&
1292 e.Get().GetKind() !=
1293 ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
1294 continue;
1295 }
1296 e.Next();
1297 if (e.Has() &&
1298 e.Get().GetKind() !=
1299 ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
1300 continue;
1301 }
1302 e.Next();
1303 if (e.Has() &&
1304 e.Get().GetKind() !=
1305 ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
1306 continue;
1307 }
1308 // we've skipped ProfilerOverheadTime and
1309 // ProfilerOverheadDuration*4.
1310 break;
1311 default: {
1312 // Copy anything else we don't know about.
1313 AddEntry(tempBuffer, e.Get());
1314 break;
1315 }
1316 }
1317 e.Next();
1318 }
1319 return true;
1320 });
1321
1322 if (!ok) {
1323 return false;
1324 }
1325
1326 // If the buffer was big enough, there won't be any cleared blocks.
1327 if (tempBuffer.GetState().mClearedBlockCount != 0) {
1328 // No need to try to read stack again as it won't fit. Reset the stored
1329 // buffer position to Nothing().
1330 aLastSample.reset();
1331 return false;
1332 }
1333
1334 aLastSample = Some(AddThreadIdEntry(aThreadId));
1335
1336 tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
1337 MOZ_ASSERT(aReader, "tempBuffer cannot be out-of-session");
1338
1339 EntryGetter e(*aReader);
1340
1341 while (e.Has()) {
1342 AddEntry(e.Get());
1343 e.Next();
1344 }
1345 });
1346
1347 return true;
1348 }
1349
DiscardSamplesBeforeTime(double aTime)1350 void ProfileBuffer::DiscardSamplesBeforeTime(double aTime) {
1351 // This function does nothing!
1352 // The duration limit will be removed from Firefox, see bug 1632365.
1353 Unused << aTime;
1354 }
1355
1356 // END ProfileBuffer
1357 ////////////////////////////////////////////////////////////////////////
1358
1359 } // namespace baseprofiler
1360 } // namespace mozilla
1361