/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZ_PROFILE_BUFFER_H
#define MOZ_PROFILE_BUFFER_H

#include "ProfileBufferEntry.h"

#include "mozilla/Maybe.h"
#include "mozilla/PowerOfTwo.h"
#include "mozilla/ProfileBufferChunkManagerSingle.h"
#include "mozilla/ProfileChunkedBuffer.h"

namespace mozilla {
namespace baseprofiler {

// Class storing most profiling data in a ProfileChunkedBuffer.
//
// This class is used as a queue of entries which, after construction, never
// allocates. This makes it safe to use in the profiler's "critical section".
class ProfileBuffer final {
 public:
  // ProfileBuffer constructor
  // @param aBuffer The in-session ProfileChunkedBuffer to use as buffer
  // manager.
  explicit ProfileBuffer(ProfileChunkedBuffer& aBuffer);
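  //
  // A minimal construction sketch (hedged; the real chunk-manager setup and
  // size limits live in the profiler's platform code, and
  // ProfileBufferChunkManagerWithLocalLimit needs its own #include):
  //
  //   ProfileBufferChunkManagerWithLocalLimit chunkManager(
  //       /* aMaxTotalBytes */ 1024 * 1024,
  //       /* aChunkMinBufferBytes */ 64 * 1024);
  //   ProfileChunkedBuffer chunkedBuffer(
  //       ProfileChunkedBuffer::ThreadSafety::WithMutex, chunkManager);
  //   ProfileBuffer buffer(chunkedBuffer);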

  ProfileChunkedBuffer& UnderlyingChunkedBuffer() const { return mEntries; }

  bool IsThreadSafe() const { return mEntries.IsThreadSafe(); }

  // Add |aEntry| to the buffer, ignoring what kind of entry it is.
  // Returns the position of the entry.
  uint64_t AddEntry(const ProfileBufferEntry& aEntry);

  // Add to the buffer a sample start (ThreadId) entry for aThreadId.
  // Returns the position of the entry.
  uint64_t AddThreadIdEntry(int aThreadId);
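  //
  // Hedged usage sketch: a sample conventionally starts with a ThreadId entry,
  // whose position identifies the sample. `buffer`, `aThreadId` and
  // `aProcessStartTime` are assumed from the surrounding sampler code, and the
  // entry factory is assumed from the entry-kind list in ProfileBufferEntry.h:
  //
  //   uint64_t samplePos = buffer.AddThreadIdEntry(aThreadId);
  //   buffer.AddEntry(ProfileBufferEntry::Time(
  //       (TimeStamp::Now() - aProcessStartTime).ToMilliseconds()));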

  // Add a code-location (label) frame to the buffer, with its optional
  // dynamic string, line/column numbers, inner window ID, and category.
  void CollectCodeLocation(const char* aLabel, const char* aStr,
                           uint32_t aFrameFlags, uint64_t aInnerWindowID,
                           const Maybe<uint32_t>& aLineNumber,
                           const Maybe<uint32_t>& aColumnNumber,
                           const Maybe<ProfilingCategoryPair>& aCategoryPair);

  // Maximum size of a frameKey string that we'll handle.
  static const size_t kMaxFrameKeyLength = 512;

  // Stream JSON for samples in the buffer to aWriter, using the supplied
  // UniqueStacks object.
  // Only streams samples for the given thread ID that were taken at or after
  // aSinceTime. If the ID is 0, the stored thread ID is ignored; this should
  // only be used when the buffer contains only one sample.
  // Returns the thread ID of the streamed sample(s), or 0.
  int StreamSamplesToJSON(SpliceableJSONWriter& aWriter, int aThreadId,
                          double aSinceTime, UniqueStacks& aUniqueStacks) const;

  void StreamMarkersToJSON(SpliceableJSONWriter& aWriter, int aThreadId,
                           const TimeStamp& aProcessStartTime,
                           double aSinceTime,
                           UniqueStacks& aUniqueStacks) const;
  void StreamPausedRangesToJSON(SpliceableJSONWriter& aWriter,
                                double aSinceTime) const;
  void StreamProfilerOverheadToJSON(SpliceableJSONWriter& aWriter,
                                    const TimeStamp& aProcessStartTime,
                                    double aSinceTime) const;
  void StreamCountersToJSON(SpliceableJSONWriter& aWriter,
                            const TimeStamp& aProcessStartTime,
                            double aSinceTime) const;
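  //
  // Hedged streaming sketch, assuming an already-constructed aWriter and
  // aUniqueStacks (their setup belongs to the profiler's JSON-streaming code):
  //
  //   int streamedThreadId = buffer.StreamSamplesToJSON(
  //       aWriter, aThreadId, aSinceTime, aUniqueStacks);
  //   buffer.StreamMarkersToJSON(aWriter, aThreadId, aProcessStartTime,
  //                              aSinceTime, aUniqueStacks);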

  // Find (via |aLastSample|) the most recent sample for the thread denoted by
  // |aThreadId| and clone it, patching in the current time as appropriate.
  // Mutate |aLastSample| to point to the newly inserted sample.
  // Returns whether duplication was successful.
  bool DuplicateLastSample(int aThreadId, const TimeStamp& aProcessStartTime,
                           Maybe<uint64_t>& aLastSample);
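  //
  // Hedged sketch: the sampler can re-use a sleeping thread's previous sample
  // instead of walking its stack again (previousSamplePos is assumed to hold
  // the position returned by an earlier AddThreadIdEntry call):
  //
  //   Maybe<uint64_t> lastSample = Some(previousSamplePos);
  //   if (buffer.DuplicateLastSample(aThreadId, aProcessStartTime,
  //                                  lastSample)) {
  //     // lastSample now points at the newly inserted copy.
  //   }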

  void DiscardSamplesBeforeTime(double aTime);

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;

  void CollectOverheadStats(TimeDuration aSamplingTime, TimeDuration aLocking,
                            TimeDuration aCleaning, TimeDuration aCounters,
                            TimeDuration aThreads);

  ProfilerBufferInfo GetProfilerBufferInfo() const;

 private:
  // Add |aEntry| to the provided ProfileChunkedBuffer.
  // `static` because it may be used to add an entry to a `ProfileChunkedBuffer`
  // that is not attached to a `ProfileBuffer`.
  static ProfileBufferBlockIndex AddEntry(
      ProfileChunkedBuffer& aProfileChunkedBuffer,
      const ProfileBufferEntry& aEntry);

  // Add a sample start (ThreadId) entry for aThreadId to the provided
  // ProfileChunkedBuffer. Returns the position of the entry.
  // `static` because it may be used to add an entry to a `ProfileChunkedBuffer`
  // that is not attached to a `ProfileBuffer`.
  static ProfileBufferBlockIndex AddThreadIdEntry(
      ProfileChunkedBuffer& aProfileChunkedBuffer, int aThreadId);

  // The storage in which this ProfileBuffer stores its entries.
  ProfileChunkedBuffer& mEntries;

 public:
  // `BufferRangeStart()` and `BufferRangeEnd()` return `uint64_t` values
  // corresponding to the first entry and past the last entry stored in
  // `mEntries`.
  //
  // The returned values are not guaranteed to be stable, because other threads
  // may also be accessing the buffer concurrently. But they will always
  // increase, and can therefore give an indication of how far these values
  // have *at least* reached. In particular:
  // - Entries whose index is strictly less than `BufferRangeStart()` have been
  //   discarded by now, so any related data may also be safely discarded.
  // - It is safe to try to read entries at any index strictly less than
  //   `BufferRangeEnd()` -- but note that these reads may fail by the time you
  //   request them, as old entries get overwritten by new ones.
  uint64_t BufferRangeStart() const { return mEntries.GetState().mRangeStart; }
  uint64_t BufferRangeEnd() const { return mEntries.GetState().mRangeEnd; }
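  //
  // Hedged sketch: checking whether a previously recorded entry position may
  // still be readable (it may nonetheless be overwritten before it is read):
  //
  //   uint64_t position = samplePos;  // recorded earlier, e.g. from
  //                                   // AddThreadIdEntry()
  //   if (position >= buffer.BufferRangeStart()) {
  //     // The entry had not been discarded when BufferRangeStart() was read.
  //   }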

 private:
  // Single pre-allocated chunk (to avoid spurious mallocs), used when:
  // - Duplicating sleeping stacks (hence scExpectedMaximumStackSize).
  // - Adding JIT info.
  // - Streaming stacks to JSON.
  // Mutable because it's accessed from non-multithreaded const methods.
  mutable ProfileBufferChunkManagerSingle mWorkerChunkManager{
      ProfileBufferChunk::Create(
          ProfileBufferChunk::SizeofChunkMetadata() +
          ProfileBufferChunkManager::scExpectedMaximumStackSize)};

  // Time from launch (us) when first sampling was recorded.
  double mFirstSamplingTimeUs = 0.0;
  // Time from launch (us) when last sampling was recorded.
  double mLastSamplingTimeUs = 0.0;
  // Sampling stats: Interval (us) between successive samplings.
  ProfilerStats mIntervalsUs;
  // Sampling stats: Total duration (us) of each sampling. (Split detail below.)
  ProfilerStats mOverheadsUs;
  // Sampling stats: Time (us) to acquire the lock before sampling.
  ProfilerStats mLockingsUs;
  // Sampling stats: Time (us) to discard expired data.
  ProfilerStats mCleaningsUs;
  // Sampling stats: Time (us) to collect counter data.
  ProfilerStats mCountersUs;
  // Sampling stats: Time (us) to sample thread stacks.
  ProfilerStats mThreadsUs;
};

/**
 * Helper type used to implement ProfilerStackCollector. This type is used as
 * the collector for MergeStacks by ProfileBuffer. It holds a reference to the
 * buffer, as well as additional feature flags which are needed to control the
 * data collection strategy.
 */
class ProfileBufferCollector final : public ProfilerStackCollector {
 public:
  ProfileBufferCollector(ProfileBuffer& aBuf, uint64_t aSamplePos,
                         uint64_t aBufferRangeStart)
      : mBuf(aBuf),
        mSamplePositionInBuffer(aSamplePos),
        mBufferRangeStart(aBufferRangeStart) {
    MOZ_ASSERT(
        mSamplePositionInBuffer >= mBufferRangeStart,
        "The sample position should always be after the buffer range start");
  }

  // Position at which the sample starts in the profiler buffer (which may be
  // different from the buffer in which the sample data is collected here).
  Maybe<uint64_t> SamplePositionInBuffer() override {
    return Some(mSamplePositionInBuffer);
  }

  // Profiler buffer's range start (which may be different from the buffer in
  // which the sample data is collected here).
  Maybe<uint64_t> BufferRangeStart() override {
    return Some(mBufferRangeStart);
  }

  virtual void CollectNativeLeafAddr(void* aAddr) override;
  virtual void CollectProfilingStackFrame(
      const ProfilingStackFrame& aFrame) override;

 private:
  ProfileBuffer& mBuf;
  uint64_t mSamplePositionInBuffer;
  uint64_t mBufferRangeStart;
};
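
// Hedged usage sketch: the sampler records the buffer range start, adds the
// sample's ThreadId entry, then hands a ProfileBufferCollector to the
// stack-merging code, which invokes the Collect*() methods for each frame
// (the exact call sequence below is an assumption, not the profiler's actual
// sampling loop):
//
//   uint64_t rangeStart = buffer.BufferRangeStart();
//   uint64_t samplePos = buffer.AddThreadIdEntry(aThreadId);
//   ProfileBufferCollector collector(buffer, samplePos, rangeStart);
//   // e.g. collector.CollectNativeLeafAddr(pc);
//   //      collector.CollectProfilingStackFrame(frame);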

}  // namespace baseprofiler
}  // namespace mozilla

#endif  // MOZ_PROFILE_BUFFER_H