// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include <memory>
#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/macros.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {

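// A fixed-capacity TraceBuffer that recycles chunks through a circular queue
// of chunk indices. Once all |max_chunks_| chunks have been handed out, the
// oldest returned chunk is Reset() and reused, so the oldest trace data is
// overwritten and IsFull() always reports false.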
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index].release();
    chunks_[*index] = nullptr;  // Put nullptr in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return std::unique_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = std::move(chunk);
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

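  // Iterates over the returned chunks in recycle-queue order (oldest first),
  // starting from |current_iteration_index_|, which GetChunk() resets to the
  // current queue head. Never-allocated slots are skipped.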
  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return nullptr;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index].get();
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

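  // Circular queue of chunk indices available for reuse. |queue_head_| is the
  // next index handed out by GetChunk(); |queue_tail_| is the slot where
  // ReturnChunk() stores the next returned index. One slot is kept unused so
  // that head == tail unambiguously means the queue is empty.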
  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32_t current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};

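// A grow-only TraceBuffer: chunks are appended to |chunks_| and never
// recycled. IsFull() reports true once |max_chunks_| chunks exist, but
// GetChunk() still appends beyond that limit so that metadata events and
// thread-local buffers can be flushed (see the comment in GetChunk()).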
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    // Put nullptr in the slot of an in-flight chunk.
    chunks_.push_back(nullptr);
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return std::unique_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = std::move(chunk);
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
      if (chunk)
        return chunk;
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
                  chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i].get();
      // Skip the in-flight (nullptr) chunks. They will be accounted for by the
      // per-thread-local dumpers; see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

}  // namespace

TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() = default;

void TraceBufferChunk::Reset(uint32_t new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

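// Uses |cached_overhead_estimate_| so that events measured by a previous call
// are not re-estimated: the non-event part of the chunk is counted once when
// the cache is created, and each call only adds the events appended since the
// previous call before reporting the accumulated estimate.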
void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount(
          TraceEventMemoryOverhead::kTraceEvent);
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}

TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return BindRepeating(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() = default;

void TraceResultBuffer::SetOutputCallback(OutputCallback json_chunk_callback) {
  output_callback_ = std::move(json_chunk_callback);
}

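// Start(), AddFragment() and Finish() together emit a JSON array through
// |output_callback_|: "[" first, then fragments separated by commas, and a
// closing "]" at the end.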
void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

}  // namespace trace_event
}  // namespace base