/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ProfileBufferEntry.h"

#include <ostream>
#include <type_traits>

#include "mozilla/Logging.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Sprintf.h"
#include "mozilla/StackWalk.h"

#include "BaseProfiler.h"
#include "mozilla/BaseProfilerMarkers.h"
#include "platform.h"
#include "ProfileBuffer.h"
#include "ProfilerBacktrace.h"

namespace mozilla {
namespace baseprofiler {

////////////////////////////////////////////////////////////////////////
// BEGIN ProfileBufferEntry

ProfileBufferEntry::ProfileBufferEntry()
    : mKind(Kind::INVALID), mStorage{0, 0, 0, 0, 0, 0, 0, 0} {}

// aString must be a static string.
ProfileBufferEntry::ProfileBufferEntry(Kind aKind, const char* aString)
    : mKind(aKind) {
  memcpy(mStorage, &aString, sizeof(aString));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, char aChars[kNumChars])
    : mKind(aKind) {
  memcpy(mStorage, aChars, kNumChars);
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, void* aPtr) : mKind(aKind) {
  memcpy(mStorage, &aPtr, sizeof(aPtr));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, double aDouble)
    : mKind(aKind) {
  memcpy(mStorage, &aDouble, sizeof(aDouble));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int aInt) : mKind(aKind) {
  memcpy(mStorage, &aInt, sizeof(aInt));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, int64_t aInt64)
    : mKind(aKind) {
  memcpy(mStorage, &aInt64, sizeof(aInt64));
}

ProfileBufferEntry::ProfileBufferEntry(Kind aKind, uint64_t aUint64)
    : mKind(aKind) {
  memcpy(mStorage, &aUint64, sizeof(aUint64));
}

const char* ProfileBufferEntry::GetString() const {
  const char* result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

void* ProfileBufferEntry::GetPtr() const {
  void* result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

double ProfileBufferEntry::GetDouble() const {
  double result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

int ProfileBufferEntry::GetInt() const {
  int result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

int64_t ProfileBufferEntry::GetInt64() const {
  int64_t result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

uint64_t ProfileBufferEntry::GetUint64() const {
  uint64_t result;
  memcpy(&result, mStorage, sizeof(result));
  return result;
}

void ProfileBufferEntry::CopyCharsInto(char (&aOutArray)[kNumChars]) const {
  memcpy(aOutArray, mStorage, kNumChars);
}

// END ProfileBufferEntry
////////////////////////////////////////////////////////////////////////

// As mentioned in ProfileBufferEntry.h, the JSON format contains many
// arrays whose elements are laid out according to various schemas to help
// de-duplication. This RAII class helps write these arrays by keeping track of
// the last non-null element written and adding the appropriate number of null
// elements when writing new non-null elements. It also automatically opens and
// closes an array element on the given JSON writer.
//
// You grant the AutoArraySchemaWriter exclusive access to the JSONWriter and
// the UniqueJSONStrings objects for the lifetime of AutoArraySchemaWriter. Do
// not access them independently while the AutoArraySchemaWriter is alive.
// If you need to add complex objects, call FreeFormElement(), which will give
// you temporary access to the writer.
//
// Example usage:
//
// // Define the schema of elements in this type of array: [FOO, BAR, BAZ]
// enum Schema : uint32_t {
//   FOO = 0,
//   BAR = 1,
//   BAZ = 2
// };
//
// AutoArraySchemaWithStringsWriter writer(someJsonWriter, someUniqueStrings);
// if (shouldWriteFoo) {
//   writer.IntElement(FOO, getFoo());
// }
// ... etc ...
//
// The elements need to be added in-order.
class MOZ_RAII AutoArraySchemaWriter {
 public:
  explicit AutoArraySchemaWriter(SpliceableJSONWriter& aWriter)
      : mJSONWriter(aWriter), mNextFreeIndex(0) {
    mJSONWriter.StartArrayElement(SpliceableJSONWriter::SingleLineStyle);
  }

  ~AutoArraySchemaWriter() { mJSONWriter.EndArray(); }

  template <typename T>
  void IntElement(uint32_t aIndex, T aValue) {
    static_assert(!std::is_same_v<T, uint64_t>,
                  "Narrowing uint64 -> int64 conversion not allowed");
    FillUpTo(aIndex);
    mJSONWriter.IntElement(static_cast<int64_t>(aValue));
  }

  void DoubleElement(uint32_t aIndex, double aValue) {
    FillUpTo(aIndex);
    mJSONWriter.DoubleElement(aValue);
  }

  void BoolElement(uint32_t aIndex, bool aValue) {
    FillUpTo(aIndex);
    mJSONWriter.BoolElement(aValue);
  }

 protected:
  SpliceableJSONWriter& Writer() { return mJSONWriter; }

  void FillUpTo(uint32_t aIndex) {
    MOZ_ASSERT(aIndex >= mNextFreeIndex);
    mJSONWriter.NullElements(aIndex - mNextFreeIndex);
    mNextFreeIndex = aIndex + 1;
  }

 private:
  SpliceableJSONWriter& mJSONWriter;
  uint32_t mNextFreeIndex;
};

// Same as AutoArraySchemaWriter, but this can also write strings (output as
// indexes into the table of unique strings).
class MOZ_RAII AutoArraySchemaWithStringsWriter : public AutoArraySchemaWriter {
 public:
  AutoArraySchemaWithStringsWriter(SpliceableJSONWriter& aWriter,
                                   UniqueJSONStrings& aStrings)
      : AutoArraySchemaWriter(aWriter), mStrings(aStrings) {}

  void StringElement(uint32_t aIndex, const Span<const char>& aValue) {
    FillUpTo(aIndex);
    mStrings.WriteElement(Writer(), aValue);
  }

 private:
  UniqueJSONStrings& mStrings;
};
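
// For illustration only (hypothetical values, reusing the FOO/BAR/BAZ schema
// from the example above): writing FOO and BAZ while skipping BAR, e.g.
//
//   AutoArraySchemaWriter writer(someJsonWriter);
//   writer.IntElement(FOO, 1);
//   writer.IntElement(BAZ, 3);
//
// produces the single-line array element `[1, null, 3]`: FillUpTo() emits one
// null for the skipped BAR slot, and the destructor closes the array. Slots
// that are skipped at the end are simply omitted rather than padded.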

UniqueStacks::StackKey UniqueStacks::BeginStack(const FrameKey& aFrame) {
  return StackKey(GetOrAddFrameIndex(aFrame));
}

UniqueStacks::StackKey UniqueStacks::AppendFrame(const StackKey& aStack,
                                                 const FrameKey& aFrame) {
  return StackKey(aStack, GetOrAddStackIndex(aStack),
                  GetOrAddFrameIndex(aFrame));
}

bool UniqueStacks::FrameKey::NormalFrameData::operator==(
    const NormalFrameData& aOther) const {
  return mLocation == aOther.mLocation &&
         mRelevantForJS == aOther.mRelevantForJS &&
         mInnerWindowID == aOther.mInnerWindowID && mLine == aOther.mLine &&
         mColumn == aOther.mColumn && mCategoryPair == aOther.mCategoryPair;
}

UniqueStacks::UniqueStacks() : mUniqueStrings(MakeUnique<UniqueJSONStrings>()) {
  mFrameTableWriter.StartBareList();
  mStackTableWriter.StartBareList();
}

uint32_t UniqueStacks::GetOrAddStackIndex(const StackKey& aStack) {
  uint32_t count = mStackToIndexMap.count();
  auto entry = mStackToIndexMap.lookupForAdd(aStack);
  if (entry) {
    MOZ_ASSERT(entry->value() < count);
    return entry->value();
  }

  MOZ_RELEASE_ASSERT(mStackToIndexMap.add(entry, aStack, count));
  StreamStack(aStack);
  return count;
}

uint32_t UniqueStacks::GetOrAddFrameIndex(const FrameKey& aFrame) {
  uint32_t count = mFrameToIndexMap.count();
  auto entry = mFrameToIndexMap.lookupForAdd(aFrame);
  if (entry) {
    MOZ_ASSERT(entry->value() < count);
    return entry->value();
  }

  MOZ_RELEASE_ASSERT(mFrameToIndexMap.add(entry, aFrame, count));
  StreamNonJITFrame(aFrame);
  return count;
}

void UniqueStacks::SpliceFrameTableElements(SpliceableJSONWriter& aWriter) {
  mFrameTableWriter.EndBareList();
  aWriter.TakeAndSplice(mFrameTableWriter.TakeChunkedWriteFunc());
}

void UniqueStacks::SpliceStackTableElements(SpliceableJSONWriter& aWriter) {
  mStackTableWriter.EndBareList();
  aWriter.TakeAndSplice(mStackTableWriter.TakeChunkedWriteFunc());
}

void UniqueStacks::StreamStack(const StackKey& aStack) {
  enum Schema : uint32_t { PREFIX = 0, FRAME = 1 };

  AutoArraySchemaWriter writer(mStackTableWriter);
  if (aStack.mPrefixStackIndex.isSome()) {
    writer.IntElement(PREFIX, *aStack.mPrefixStackIndex);
  }
  writer.IntElement(FRAME, aStack.mFrameIndex);
}

void UniqueStacks::StreamNonJITFrame(const FrameKey& aFrame) {
  using NormalFrameData = FrameKey::NormalFrameData;

  enum Schema : uint32_t {
    LOCATION = 0,
    RELEVANT_FOR_JS = 1,
    INNER_WINDOW_ID = 2,
    IMPLEMENTATION = 3,
    OPTIMIZATIONS = 4,
    LINE = 5,
    COLUMN = 6,
    CATEGORY = 7,
    SUBCATEGORY = 8
  };

  AutoArraySchemaWithStringsWriter writer(mFrameTableWriter, *mUniqueStrings);

  const NormalFrameData& data = aFrame.mData.as<NormalFrameData>();
  writer.StringElement(LOCATION, data.mLocation);
  writer.BoolElement(RELEVANT_FOR_JS, data.mRelevantForJS);

  // It's okay to convert uint64_t to double here because DOM always creates
  // IDs that are convertible to double.
  writer.DoubleElement(INNER_WINDOW_ID, data.mInnerWindowID);

  if (data.mLine.isSome()) {
    writer.IntElement(LINE, *data.mLine);
  }
  if (data.mColumn.isSome()) {
    writer.IntElement(COLUMN, *data.mColumn);
  }
  if (data.mCategoryPair.isSome()) {
    const ProfilingCategoryPairInfo& info =
        GetProfilingCategoryPairInfo(*data.mCategoryPair);
    writer.IntElement(CATEGORY, uint32_t(info.mCategory));
    writer.IntElement(SUBCATEGORY, info.mSubcategoryIndex);
  }
}

struct CStringWriteFunc : public JSONWriteFunc {
  std::string& mBuffer;  // The struct must not outlive this buffer
  explicit CStringWriteFunc(std::string& aBuffer) : mBuffer(aBuffer) {}

  void Write(const Span<const char>& aStr) override {
    mBuffer.append(aStr.data(), aStr.size());
  }
};

struct ProfileSample {
  uint32_t mStack;
  double mTime;
  Maybe<double> mResponsiveness;
};

static void WriteSample(SpliceableJSONWriter& aWriter,
                        const ProfileSample& aSample) {
  enum Schema : uint32_t {
    STACK = 0,
    TIME = 1,
    EVENT_DELAY = 2,
  };

  AutoArraySchemaWriter writer(aWriter);

  writer.IntElement(STACK, aSample.mStack);

  writer.DoubleElement(TIME, aSample.mTime);

  if (aSample.mResponsiveness.isSome()) {
    writer.DoubleElement(EVENT_DELAY, *aSample.mResponsiveness);
  }
}
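
// Illustration (not normative): with the Schema above, each WriteSample()
// call emits one array element of the form `[stack, time]` when there is no
// responsiveness value, or `[stack, time, eventDelay]` when there is one; the
// positions correspond to the STACK/TIME/EVENT_DELAY indices.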

class EntryGetter {
 public:
  explicit EntryGetter(ProfileChunkedBuffer::Reader& aReader,
                       uint64_t aInitialReadPos = 0)
      : mBlockIt(
            aReader.At(ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                aInitialReadPos))),
        mBlockItEnd(aReader.end()) {
    if (!ReadLegacyOrEnd()) {
      // Find and read the next legacy entry.
      Next();
    }
  }

  bool Has() const { return mBlockIt != mBlockItEnd; }

  const ProfileBufferEntry& Get() const {
    MOZ_ASSERT(Has(), "Caller should have checked `Has()` before `Get()`");
    return mEntry;
  }

  void Next() {
    MOZ_ASSERT(Has(), "Caller should have checked `Has()` before `Next()`");
    for (;;) {
      ++mBlockIt;
      if (ReadLegacyOrEnd()) {
        // Either we're at the end, or we could read a legacy entry -> Done.
        break;
      }
      // Otherwise loop around until we hit the end or a legacy entry.
    }
  }

  ProfileBufferBlockIndex CurBlockIndex() const {
    return mBlockIt.CurrentBlockIndex();
  }

  uint64_t CurPos() const {
    return CurBlockIndex().ConvertToProfileBufferIndex();
  }

 private:
  // Try to read the entry at the current `mBlockIt` position.
  // * If we're at the end of the buffer, just return `true`.
  // * If there is a "legacy" entry (containing a real `ProfileBufferEntry`),
  //   read it into `mEntry`, and return `true` as well.
  // * Otherwise the entry contains a "modern" type that cannot be read into
  //   `mEntry`, return `false` (so `EntryGetter` can skip to another entry).
  bool ReadLegacyOrEnd() {
    if (!Has()) {
      return true;
    }
    // Read the entry "kind", which is always at the start of all entries.
    ProfileBufferEntryReader aER = *mBlockIt;
    auto type = static_cast<ProfileBufferEntry::Kind>(
        aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
               static_cast<ProfileBufferEntry::KindUnderlyingType>(
                   ProfileBufferEntry::Kind::MODERN_LIMIT));
    if (type >= ProfileBufferEntry::Kind::LEGACY_LIMIT) {
      aER.SetRemainingBytes(0);
      return false;
    }
    // Here we have a legacy item; we need to read it from the start.
    // Because the above `ReadObject` moved the reader, we need to reset it to
    // the start of the entry before reading the whole entry.
    aER = *mBlockIt;
    aER.ReadBytes(&mEntry, aER.RemainingBytes());
    return true;
  }

  ProfileBufferEntry mEntry;
  ProfileChunkedBuffer::BlockIterator mBlockIt;
  const ProfileChunkedBuffer::BlockIterator mBlockItEnd;
};
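
// Typical use of EntryGetter, as a sketch (this mirrors the Stream*ToJSON
// methods below, where `mEntries` is the ProfileChunkedBuffer being read):
//
//   mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
//     EntryGetter e(*aReader);
//     while (e.Has()) {
//       const ProfileBufferEntry& entry = e.Get();
//       // ... inspect entry.GetKind(), entry.GetInt(), etc. ...
//       e.Next();
//     }
//   });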

// The following grammar shows legal sequences of profile buffer entries.
// The sequences beginning with a ThreadId entry are known as "samples".
//
// (
//   ( /* Samples */
//     ThreadId
//     Time
//     ( NativeLeafAddr
//     | Label FrameFlags? DynamicStringFragment* LineNumber? CategoryPair?
//     | JitReturnAddr
//     )+
//     Responsiveness?
//   )
//   | MarkerData
//   | ( /* Counters */
//       CounterId
//       Time
//       (
//         CounterKey
//         Count
//         Number?
//       )*
//     )
//   | CollectionStart
//   | CollectionEnd
//   | Pause
//   | Resume
//   | ( ProfilerOverheadTime /* Sampling start timestamp */
//       ProfilerOverheadDuration /* Lock acquisition */
//       ProfilerOverheadDuration /* Expired data cleaning */
//       ProfilerOverheadDuration /* Counters */
//       ProfilerOverheadDuration /* Threads */
//     )
// )*
//
// The most complicated part is the stack entry sequence that begins with
// Label. Here are some examples.
//
// - ProfilingStack frames without a dynamic string:
//
//     Label("js::RunScript")
//     CategoryPair(ProfilingCategoryPair::JS)
//
//     Label("XREMain::XRE_main")
//     LineNumber(4660)
//     CategoryPair(ProfilingCategoryPair::OTHER)
//
//     Label("ElementRestyler::ComputeStyleChangeFor")
//     LineNumber(3003)
//     CategoryPair(ProfilingCategoryPair::CSS)
//
// - ProfilingStack frames with a dynamic string:
//
//     Label("nsObserverService::NotifyObservers")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
//     DynamicStringFragment("domwindo")
//     DynamicStringFragment("wopened")
//     LineNumber(291)
//     CategoryPair(ProfilingCategoryPair::OTHER)
//
//     Label("")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME))
//     DynamicStringFragment("closeWin")
//     DynamicStringFragment("dow (chr")
//     DynamicStringFragment("ome://gl")
//     DynamicStringFragment("obal/con")
//     DynamicStringFragment("tent/glo")
//     DynamicStringFragment("balOverl")
//     DynamicStringFragment("ay.js:5)")
//     DynamicStringFragment("")   # this string holds the closing '\0'
//     LineNumber(25)
//     CategoryPair(ProfilingCategoryPair::JS)
//
//     Label("")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_JS_FRAME))
//     DynamicStringFragment("bound (s")
//     DynamicStringFragment("elf-host")
//     DynamicStringFragment("ed:914)")
//     LineNumber(945)
//     CategoryPair(ProfilingCategoryPair::JS)
//
// - A profiling stack frame with an overly long dynamic string:
//
//     Label("")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
//     DynamicStringFragment("(too lon")
//     DynamicStringFragment("g)")
//     LineNumber(100)
//     CategoryPair(ProfilingCategoryPair::NETWORK)
//
// - A wasm JIT frame:
//
//     Label("")
//     FrameFlags(uint64_t(0))
//     DynamicStringFragment("wasm-fun")
//     DynamicStringFragment("ction[87")
//     DynamicStringFragment("36] (blo")
//     DynamicStringFragment("b:http:/")
//     DynamicStringFragment("/webasse")
//     DynamicStringFragment("mbly.org")
//     DynamicStringFragment("/3dc5759")
//     DynamicStringFragment("4-ce58-4")
//     DynamicStringFragment("626-975b")
//     DynamicStringFragment("-08ad116")
//     DynamicStringFragment("30bc1:38")
//     DynamicStringFragment("29856)")
//
// - A JS frame in a synchronous sample:
//
//     Label("")
//     FrameFlags(uint64_t(ProfilingStackFrame::Flags::IS_LABEL_FRAME))
//     DynamicStringFragment("u (https")
//     DynamicStringFragment("://perf-")
//     DynamicStringFragment("html.io/")
//     DynamicStringFragment("ac0da204")
//     DynamicStringFragment("aaa44d75")
//     DynamicStringFragment("a800.bun")
//     DynamicStringFragment("dle.js:2")
//     DynamicStringFragment("5)")

// Because this is a format entirely internal to the Profiler, any parsing
// error indicates a bug in the ProfileBuffer writing or the parser itself,
// or possibly flaky hardware.
#define ERROR_AND_CONTINUE(msg)                            \
  {                                                        \
    fprintf(stderr, "ProfileBuffer parse error: %s", msg); \
    MOZ_ASSERT(false, msg);                                \
    continue;                                              \
  }

int ProfileBuffer::StreamSamplesToJSON(SpliceableJSONWriter& aWriter,
                                       int aThreadId, double aSinceTime,
                                       UniqueStacks& aUniqueStacks) const {
  UniquePtr<char[]> dynStrBuf = MakeUnique<char[]>(kMaxFrameKeyLength);

  return mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    int processedThreadId = 0;

    EntryGetter e(*aReader);

    for (;;) {
      // This block skips entries until we find the start of the next sample.
      // This is useful in three situations.
      //
      // - The circular buffer overwrites old entries, so when we start parsing
      //   we might be in the middle of a sample, and we must skip forward to
      //   the start of the next sample.
      //
      // - We skip samples that don't have an appropriate ThreadId or Time.
      //
      // - We skip range Pause, Resume, CollectionStart, Counter and
      //   CollectionEnd entries between samples.
      while (e.Has()) {
        if (e.Get().IsThreadId()) {
          break;
        }
        e.Next();
      }

      if (!e.Has()) {
        break;
      }

      // Due to the skip_to_next_sample block above, if we have an entry here
      // it must be a ThreadId entry.
      MOZ_ASSERT(e.Get().IsThreadId());

      int threadId = e.Get().GetInt();
      e.Next();

      // Ignore samples that are for the wrong thread.
      if (threadId != aThreadId && aThreadId != 0) {
        continue;
      }

      MOZ_ASSERT(aThreadId != 0 || processedThreadId == 0,
                 "aThreadId==0 should only be used with 1-sample buffer");

      ProfileSample sample;

      if (e.Has() && e.Get().IsTime()) {
        sample.mTime = e.Get().GetDouble();
        e.Next();

        // Ignore samples that are too old.
        if (sample.mTime < aSinceTime) {
          continue;
        }
      } else {
        ERROR_AND_CONTINUE("expected a Time entry");
      }

      UniqueStacks::StackKey stack =
          aUniqueStacks.BeginStack(UniqueStacks::FrameKey("(root)"));

      int numFrames = 0;
      while (e.Has()) {
        if (e.Get().IsNativeLeafAddr()) {
          numFrames++;

          void* pc = e.Get().GetPtr();
          e.Next();

          static const uint32_t BUF_SIZE = 256;
          char buf[BUF_SIZE];

          // Bug 753041: We need a double cast here to tell GCC that we don't
          // want to sign extend 32-bit addresses starting with 0xFXXXXXX.
          unsigned long long pcULL = (unsigned long long)(uintptr_t)pc;
          SprintfLiteral(buf, "0x%llx", pcULL);

          // If the "MOZ_PROFILER_SYMBOLICATE" env-var is set, we add a local
          // symbolication description to the PC address. This is off by
          // default, and mainly intended for local development.
          static const bool preSymbolicate = []() {
            const char* symbolicate = getenv("MOZ_PROFILER_SYMBOLICATE");
            return symbolicate && symbolicate[0] != '\0';
          }();
          if (preSymbolicate) {
            MozCodeAddressDetails details;
            if (MozDescribeCodeAddress(pc, &details)) {
              // Replace \0 terminator with space.
              const uint32_t pcLen = strlen(buf);
              buf[pcLen] = ' ';
              // Add description after space. Note: Using a frame number of 0,
              // as using `numFrames` wouldn't help here, and would prevent
              // combining same function calls that happen at different depths.
              // TODO: Remove unsightly "#00: " if too annoying. :-)
              MozFormatCodeAddressDetails(
                  buf + pcLen + 1, BUF_SIZE - (pcLen + 1), 0, pc, &details);
            }
          }

          stack = aUniqueStacks.AppendFrame(stack, UniqueStacks::FrameKey(buf));

        } else if (e.Get().IsLabel()) {
          numFrames++;

          const char* label = e.Get().GetString();
          e.Next();

          using FrameFlags = ProfilingStackFrame::Flags;
          uint32_t frameFlags = 0;
          if (e.Has() && e.Get().IsFrameFlags()) {
            frameFlags = uint32_t(e.Get().GetUint64());
            e.Next();
          }

          bool relevantForJS =
              frameFlags & uint32_t(FrameFlags::RELEVANT_FOR_JS);

          // Copy potential dynamic string fragments into dynStrBuf, so that
          // dynStrBuf will then contain the entire dynamic string.
          size_t i = 0;
          dynStrBuf[0] = '\0';
          while (e.Has()) {
            if (e.Get().IsDynamicStringFragment()) {
              char chars[ProfileBufferEntry::kNumChars];
              e.Get().CopyCharsInto(chars);
              for (char c : chars) {
                if (i < kMaxFrameKeyLength) {
                  dynStrBuf[i] = c;
                  i++;
                }
              }
              e.Next();
            } else {
              break;
            }
          }
          dynStrBuf[kMaxFrameKeyLength - 1] = '\0';
          bool hasDynamicString = (i != 0);

          std::string frameLabel;
          if (label[0] != '\0' && hasDynamicString) {
            if (frameFlags & uint32_t(FrameFlags::STRING_TEMPLATE_METHOD)) {
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else if (frameFlags &
                       uint32_t(FrameFlags::STRING_TEMPLATE_GETTER)) {
              frameLabel += "get ";
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else if (frameFlags &
                       uint32_t(FrameFlags::STRING_TEMPLATE_SETTER)) {
              frameLabel += "set ";
              frameLabel += label;
              frameLabel += '.';
              frameLabel += dynStrBuf.get();
            } else {
              frameLabel += label;
              frameLabel += ' ';
              frameLabel += dynStrBuf.get();
            }
          } else if (hasDynamicString) {
            frameLabel += dynStrBuf.get();
          } else {
            frameLabel += label;
          }

          uint64_t innerWindowID = 0;
          if (e.Has() && e.Get().IsInnerWindowID()) {
            innerWindowID = uint64_t(e.Get().GetUint64());
            e.Next();
          }

          Maybe<unsigned> line;
          if (e.Has() && e.Get().IsLineNumber()) {
            line = Some(unsigned(e.Get().GetInt()));
            e.Next();
          }

          Maybe<unsigned> column;
          if (e.Has() && e.Get().IsColumnNumber()) {
            column = Some(unsigned(e.Get().GetInt()));
            e.Next();
          }

          Maybe<ProfilingCategoryPair> categoryPair;
          if (e.Has() && e.Get().IsCategoryPair()) {
            categoryPair =
                Some(ProfilingCategoryPair(uint32_t(e.Get().GetInt())));
            e.Next();
          }

          stack = aUniqueStacks.AppendFrame(
              stack, UniqueStacks::FrameKey(std::move(frameLabel),
                                            relevantForJS, innerWindowID, line,
                                            column, categoryPair));

        } else {
          break;
        }
      }

      if (numFrames == 0) {
        // It is possible to have empty stacks if native stackwalking is
        // disabled. Skip samples with empty stacks. (See Bug 1497985).
        // Thus, don't use ERROR_AND_CONTINUE, but just continue.
        continue;
      }

      sample.mStack = aUniqueStacks.GetOrAddStackIndex(stack);

      if (e.Has() && e.Get().IsResponsiveness()) {
        sample.mResponsiveness = Some(e.Get().GetDouble());
        e.Next();
      }

      WriteSample(aWriter, sample);

      processedThreadId = threadId;
    }

    return processedThreadId;
  });
}

void ProfileBuffer::StreamMarkersToJSON(SpliceableJSONWriter& aWriter,
                                        int aThreadId,
                                        const TimeStamp& aProcessStartTime,
                                        double aSinceTime,
                                        UniqueStacks& aUniqueStacks) const {
  mEntries.ReadEach([&](ProfileBufferEntryReader& aER) {
    auto type = static_cast<ProfileBufferEntry::Kind>(
        aER.ReadObject<ProfileBufferEntry::KindUnderlyingType>());
    MOZ_ASSERT(static_cast<ProfileBufferEntry::KindUnderlyingType>(type) <
               static_cast<ProfileBufferEntry::KindUnderlyingType>(
                   ProfileBufferEntry::Kind::MODERN_LIMIT));
    bool entryWasFullyRead = false;

    if (type == ProfileBufferEntry::Kind::Marker) {
      entryWasFullyRead = ::mozilla::base_profiler_markers_detail::
          DeserializeAfterKindAndStream(
              aER, aWriter, aThreadId,
              [&](ProfileChunkedBuffer& aChunkedBuffer) {
                ProfilerBacktrace backtrace("", &aChunkedBuffer);
                backtrace.StreamJSON(aWriter, TimeStamp::ProcessCreation(),
                                     aUniqueStacks);
              });
    }

    if (!entryWasFullyRead) {
      // Not a marker, or marker for another thread.
      // We probably didn't read the whole entry, so we need to skip to the
      // end.
      aER.SetRemainingBytes(0);
    }
  });
}

void ProfileBuffer::StreamProfilerOverheadToJSON(
    SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
    double aSinceTime) const {
  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader);

    enum Schema : uint32_t {
      TIME = 0,
      LOCKING = 1,
      MARKER_CLEANING = 2,
      COUNTERS = 3,
      THREADS = 4
    };

    aWriter.StartObjectProperty("profilerOverhead");
    aWriter.StartObjectProperty("samples");
    // Stream all sampling overhead data. We skip other entries, because we
    // process them in StreamSamplesToJSON()/etc.
    {
      JSONSchemaWriter schema(aWriter);
      schema.WriteField("time");
      schema.WriteField("locking");
      schema.WriteField("expiredMarkerCleaning");
      schema.WriteField("counters");
      schema.WriteField("threads");
    }

    aWriter.StartArrayProperty("data");
    double firstTime = 0.0;
    double lastTime = 0.0;
    ProfilerStats intervals, overheads, lockings, cleanings, counters, threads;
    while (e.Has()) {
      // valid sequence: ProfilerOverheadTime, ProfilerOverheadDuration * 4
      if (e.Get().IsProfilerOverheadTime()) {
        double time = e.Get().GetDouble();
        if (time >= aSinceTime) {
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime");
          }
          double locking = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration");
          }
          double cleaning = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration*2");
          }
          double counter = e.Get().GetDouble();
          e.Next();
          if (!e.Has() || !e.Get().IsProfilerOverheadDuration()) {
            ERROR_AND_CONTINUE(
                "expected a ProfilerOverheadDuration entry after "
                "ProfilerOverheadTime,ProfilerOverheadDuration*3");
          }
          double thread = e.Get().GetDouble();

          if (firstTime == 0.0) {
            firstTime = time;
          } else {
            // Note that we'll have 1 fewer interval than other numbers (because
            // we need both ends of an interval to know its duration). The final
            // difference should be insignificant over the expected many
            // thousands of iterations.
            intervals.Count(time - lastTime);
          }
          lastTime = time;
          overheads.Count(locking + cleaning + counter + thread);
          lockings.Count(locking);
          cleanings.Count(cleaning);
          counters.Count(counter);
          threads.Count(thread);

          AutoArraySchemaWriter writer(aWriter);
          writer.DoubleElement(TIME, time);
          writer.DoubleElement(LOCKING, locking);
          writer.DoubleElement(MARKER_CLEANING, cleaning);
          writer.DoubleElement(COUNTERS, counter);
          writer.DoubleElement(THREADS, thread);
        }
      }
      e.Next();
    }
    aWriter.EndArray();   // data
    aWriter.EndObject();  // samples

    // Only output statistics if there is at least one full interval (and
    // therefore at least two samplings.)
    if (intervals.n > 0) {
      aWriter.StartObjectProperty("statistics");
      aWriter.DoubleProperty("profiledDuration", lastTime - firstTime);
      aWriter.IntProperty("samplingCount", overheads.n);
      aWriter.DoubleProperty("overheadDurations", overheads.sum);
      aWriter.DoubleProperty("overheadPercentage",
                             overheads.sum / (lastTime - firstTime));
#define PROFILER_STATS(name, var)                           \
  aWriter.DoubleProperty("mean" name, (var).sum / (var).n); \
  aWriter.DoubleProperty("min" name, (var).min);            \
  aWriter.DoubleProperty("max" name, (var).max);
      PROFILER_STATS("Interval", intervals);
      PROFILER_STATS("Overhead", overheads);
      PROFILER_STATS("Lockings", lockings);
      PROFILER_STATS("Cleaning", cleanings);
      PROFILER_STATS("Counter", counters);
      PROFILER_STATS("Thread", threads);
#undef PROFILER_STATS
      aWriter.EndObject();  // statistics
    }
    aWriter.EndObject();  // profilerOverhead
  });
}

struct CounterKeyedSample {
  double mTime;
  uint64_t mNumber;
  int64_t mCount;
};

using CounterKeyedSamples = Vector<CounterKeyedSample>;

using CounterMap = HashMap<uint64_t, CounterKeyedSamples>;

// HashMap lookup; if the key is not found, a default value is inserted.
// Returns a reference to the (existing or new) value inside the HashMap.
template <typename HashM, typename Key>
static auto& LookupOrAdd(HashM& aMap, Key&& aKey) {
  auto addPtr = aMap.lookupForAdd(aKey);
  if (!addPtr) {
    MOZ_RELEASE_ASSERT(aMap.add(addPtr, std::forward<Key>(aKey),
                                typename HashM::Entry::ValueType{}));
    MOZ_ASSERT(!!addPtr);
  }
  return addPtr->value();
}
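
// For example, as used below in StreamCountersToJSON():
//
//   CounterMap& counter = LookupOrAdd(counters, id);
//
// looks up `id` and default-constructs an empty CounterMap for it on first
// sight, so callers never have to handle a missing entry separately.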

void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
                                         const TimeStamp& aProcessStartTime,
                                         double aSinceTime) const {
  // Because this is a format entirely internal to the Profiler, any parsing
  // error indicates a bug in the ProfileBuffer writing or the parser itself,
  // or possibly flaky hardware.

  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader);

    enum Schema : uint32_t { TIME = 0, NUMBER = 1, COUNT = 2 };

    // Stream all counters. We skip other entries, because we process them in
    // StreamSamplesToJSON()/etc.
    //
    // Valid sequence in the buffer:
    // CounterID
    // Time
    // ( CounterKey Count Number? )*
    //
    // And the JSON (example):
    // "counters": {
    //   "name": "malloc",
    //   "category": "Memory",
    //   "description": "Amount of allocated memory",
    //   "sample_groups": {
    //     "id": 0,
    //     "samples": {
    //       "schema": {"time": 0, "number": 1, "count": 2},
    //       "data": [
    //         [
    //           16117.033968000002,
    //           2446216,
    //           6801320
    //         ],
    //         [
    //           16118.037638,
    //           2446216,
    //           6801320
    //         ],
    //       ],
    //     }
    //   }
    // },

    // Build the map of counters and populate it
    HashMap<void*, CounterMap> counters;

    while (e.Has()) {
      // skip all non-Counters, including if we start in the middle of a
      // counter
      if (e.Get().IsCounterId()) {
        void* id = e.Get().GetPtr();
        CounterMap& counter = LookupOrAdd(counters, id);
        e.Next();
        if (!e.Has() || !e.Get().IsTime()) {
          ERROR_AND_CONTINUE("expected a Time entry");
        }
        double time = e.Get().GetDouble();
        if (time >= aSinceTime) {
          e.Next();
          while (e.Has() && e.Get().IsCounterKey()) {
            uint64_t key = e.Get().GetUint64();
            CounterKeyedSamples& data = LookupOrAdd(counter, key);
            e.Next();
            if (!e.Has() || !e.Get().IsCount()) {
              ERROR_AND_CONTINUE("expected a Count entry");
            }
            int64_t count = e.Get().GetUint64();
            e.Next();
            uint64_t number;
            if (!e.Has() || !e.Get().IsNumber()) {
              number = 0;
            } else {
              number = e.Get().GetInt64();
            }
            CounterKeyedSample sample = {time, number, count};
            MOZ_RELEASE_ASSERT(data.append(sample));
          }
        } else {
          // skip counter sample - only need to skip the initial counter
          // id, then let the loop at the top skip the rest
        }
      }
      e.Next();
    }
    // we have a map of a map of counter entries; dump them to JSON
    if (counters.count() == 0) {
      return;
    }

    aWriter.StartArrayProperty("counters");
    for (auto iter = counters.iter(); !iter.done(); iter.next()) {
      CounterMap& counter = iter.get().value();
      const BaseProfilerCount* base_counter =
          static_cast<const BaseProfilerCount*>(iter.get().key());

      aWriter.Start();
      aWriter.StringProperty("name", MakeStringSpan(base_counter->mLabel));
      aWriter.StringProperty("category",
                             MakeStringSpan(base_counter->mCategory));
      aWriter.StringProperty("description",
                             MakeStringSpan(base_counter->mDescription));

      aWriter.StartArrayProperty("sample_groups");
      for (auto counter_iter = counter.iter(); !counter_iter.done();
           counter_iter.next()) {
        CounterKeyedSamples& samples = counter_iter.get().value();
        uint64_t key = counter_iter.get().key();

        size_t size = samples.length();
        if (size == 0) {
          continue;
        }

        aWriter.StartObjectElement();
        {
          aWriter.IntProperty("id", static_cast<int64_t>(key));
          aWriter.StartObjectProperty("samples");
          {
            // XXX Can we assume a missing count means 0?
            JSONSchemaWriter schema(aWriter);
            schema.WriteField("time");
            schema.WriteField("number");
            schema.WriteField("count");
          }

          aWriter.StartArrayProperty("data");
          uint64_t previousNumber = 0;
          int64_t previousCount = 0;
          for (size_t i = 0; i < size; i++) {
            // Encode as deltas, and only encode if different than the last
            // sample.
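            //
            // Worked example with made-up values: three samples with
            // (number, count) = (100, 50), (100, 50), (103, 50) are emitted
            // as [time0, 100, 50] (the first sample's deltas from 0/0),
            // nothing for the unchanged second sample, and [time2, 3, 0] for
            // the third.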
            if (i == 0 || samples[i].mNumber != previousNumber ||
                samples[i].mCount != previousCount) {
              MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
              MOZ_ASSERT(samples[i].mNumber >= previousNumber);
              MOZ_ASSERT(samples[i].mNumber - previousNumber <=
                         uint64_t(std::numeric_limits<int64_t>::max()));

              AutoArraySchemaWriter writer(aWriter);
              writer.DoubleElement(TIME, samples[i].mTime);
              writer.IntElement(
                  NUMBER,
                  static_cast<int64_t>(samples[i].mNumber - previousNumber));
              writer.IntElement(COUNT, samples[i].mCount - previousCount);
              previousNumber = samples[i].mNumber;
              previousCount = samples[i].mCount;
            }
          }
          aWriter.EndArray();   // data
          aWriter.EndObject();  // samples
        }
        aWriter.EndObject();  // sample_groups item
      }
      aWriter.EndArray();  // sample groups
      aWriter.End();       // for each counter
    }
    aWriter.EndArray();  // counters
  });
}

#undef ERROR_AND_CONTINUE

static void AddPausedRange(SpliceableJSONWriter& aWriter, const char* aReason,
                           const Maybe<double>& aStartTime,
                           const Maybe<double>& aEndTime) {
  aWriter.Start();
  if (aStartTime) {
    aWriter.DoubleProperty("startTime", *aStartTime);
  } else {
    aWriter.NullProperty("startTime");
  }
  if (aEndTime) {
    aWriter.DoubleProperty("endTime", *aEndTime);
  } else {
    aWriter.NullProperty("endTime");
  }
  aWriter.StringProperty("reason", MakeStringSpan(aReason));
  aWriter.End();
}
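
// For reference, AddPausedRange() above emits one object per range, shaped
// like (values hypothetical):
//
//   { "startTime": 123.4, "endTime": 567.8, "reason": "profiler-paused" }
//
// with `null` standing in for a missing start or end time.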

void ProfileBuffer::StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter,
                                             double aSinceTime) const {
  mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader);

    Maybe<double> currentPauseStartTime;
    Maybe<double> currentCollectionStartTime;

    while (e.Has()) {
      if (e.Get().IsPause()) {
        currentPauseStartTime = Some(e.Get().GetDouble());
      } else if (e.Get().IsResume()) {
        AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime,
                       Some(e.Get().GetDouble()));
        currentPauseStartTime = Nothing();
      } else if (e.Get().IsCollectionStart()) {
        currentCollectionStartTime = Some(e.Get().GetDouble());
      } else if (e.Get().IsCollectionEnd()) {
        AddPausedRange(aWriter, "collecting", currentCollectionStartTime,
                       Some(e.Get().GetDouble()));
        currentCollectionStartTime = Nothing();
      }
      e.Next();
    }

    if (currentPauseStartTime) {
      AddPausedRange(aWriter, "profiler-paused", currentPauseStartTime,
                     Nothing());
    }
    if (currentCollectionStartTime) {
      AddPausedRange(aWriter, "collecting", currentCollectionStartTime,
                     Nothing());
    }
  });
}

bool ProfileBuffer::DuplicateLastSample(int aThreadId,
                                        const TimeStamp& aProcessStartTime,
                                        Maybe<uint64_t>& aLastSample) {
  if (!aLastSample) {
    return false;
  }

  ProfileChunkedBuffer tempBuffer(
      ProfileChunkedBuffer::ThreadSafety::WithoutMutex, mWorkerChunkManager);

  auto retrieveWorkerChunk = MakeScopeExit(
      [&]() { mWorkerChunkManager.Reset(tempBuffer.GetAllChunks()); });

  const bool ok = mEntries.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader,
               "ProfileChunkedBuffer cannot be out-of-session when sampler is "
               "running");

    EntryGetter e(*aReader, *aLastSample);

    if (e.CurPos() != *aLastSample) {
      // The last sample is no longer within the buffer range, so we cannot
      // use it. Reset the stored buffer position to Nothing().
      aLastSample.reset();
      return false;
    }

    MOZ_RELEASE_ASSERT(e.Has() && e.Get().IsThreadId() &&
                       e.Get().GetInt() == aThreadId);

    e.Next();

    // Go through the whole entry and duplicate it, until we find the next
    // one.
    while (e.Has()) {
      switch (e.Get().GetKind()) {
        case ProfileBufferEntry::Kind::Pause:
        case ProfileBufferEntry::Kind::Resume:
        case ProfileBufferEntry::Kind::PauseSampling:
        case ProfileBufferEntry::Kind::ResumeSampling:
        case ProfileBufferEntry::Kind::CollectionStart:
        case ProfileBufferEntry::Kind::CollectionEnd:
        case ProfileBufferEntry::Kind::ThreadId:
          // We're done.
          return true;
        case ProfileBufferEntry::Kind::Time:
          // Copy with new time
          AddEntry(tempBuffer,
                   ProfileBufferEntry::Time(
                       (TimeStamp::NowUnfuzzed() - aProcessStartTime)
                           .ToMilliseconds()));
          break;
        case ProfileBufferEntry::Kind::CounterKey:
        case ProfileBufferEntry::Kind::Number:
        case ProfileBufferEntry::Kind::Count:
        case ProfileBufferEntry::Kind::Responsiveness:
          // Don't copy anything not part of a thread's stack sample
          break;
        case ProfileBufferEntry::Kind::CounterId:
          // CounterId is normally followed by Time - if so, we'd like
          // to skip it. If we duplicate Time, it won't hurt anything, just
          // waste buffer space (and this can happen if the CounterId has
          // fallen off the end of the buffer, but Time (and Number/Count)
          // are still in the buffer).
          e.Next();
          if (e.Has() && e.Get().GetKind() != ProfileBufferEntry::Kind::Time) {
            // this would only happen if there was an invalid sequence
            // in the buffer. Don't skip it.
            continue;
          }
          // we've skipped Time
          break;
        case ProfileBufferEntry::Kind::ProfilerOverheadTime:
          // ProfilerOverheadTime is normally followed by
          // ProfilerOverheadDuration*4 - if so, we'd like to skip it. Don't
          // duplicate, as we are in the middle of a sampling and will soon
          // capture its own overhead.
          e.Next();
          // A missing Time would only happen if there was an invalid
          // sequence in the buffer. Don't skip unexpected entry.
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          e.Next();
          if (e.Has() &&
              e.Get().GetKind() !=
                  ProfileBufferEntry::Kind::ProfilerOverheadDuration) {
            continue;
          }
          // we've skipped ProfilerOverheadTime and
          // ProfilerOverheadDuration*4.
          break;
        default: {
          // Copy anything else we don't know about.
          AddEntry(tempBuffer, e.Get());
          break;
        }
      }
      e.Next();
    }
    return true;
  });

  if (!ok) {
    return false;
  }

  // If the buffer was big enough, there won't be any cleared blocks.
  if (tempBuffer.GetState().mClearedBlockCount != 0) {
    // No need to try to read stack again as it won't fit. Reset the stored
    // buffer position to Nothing().
    aLastSample.reset();
    return false;
  }

  aLastSample = Some(AddThreadIdEntry(aThreadId));

  tempBuffer.Read([&](ProfileChunkedBuffer::Reader* aReader) {
    MOZ_ASSERT(aReader, "tempBuffer cannot be out-of-session");

    EntryGetter e(*aReader);

    while (e.Has()) {
      AddEntry(e.Get());
      e.Next();
    }
  });

  return true;
}

void ProfileBuffer::DiscardSamplesBeforeTime(double aTime) {
  // This function does nothing!
  // The duration limit will be removed from Firefox, see bug 1632365.
  Unused << aTime;
}

// END ProfileBuffer
////////////////////////////////////////////////////////////////////////

}  // namespace baseprofiler
}  // namespace mozilla