/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tracing/core/trace_buffer.h"

#include <limits>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/shared_memory_abi.h"
#include "perfetto/ext/tracing/core/trace_packet.h"
#include "perfetto/protozero/proto_utils.h"

#define TRACE_BUFFER_VERBOSE_LOGGING() 0  // Set to 1 when debugging unittests.
#if TRACE_BUFFER_VERBOSE_LOGGING()
#define TRACE_BUFFER_DLOG PERFETTO_DLOG
namespace {
constexpr char kHexDigits[] = "0123456789abcdef";
std::string HexDump(const uint8_t* src, size_t size) {
  std::string buf;
  buf.reserve(4096 * 4);
  char line[64];
  char* c = line;
  for (size_t i = 0; i < size; i++) {
    *c++ = kHexDigits[(src[i] >> 4) & 0x0f];
    *c++ = kHexDigits[(src[i] >> 0) & 0x0f];
    if (i % 16 == 15) {
      buf.append("\n");
      buf.append(line, static_cast<size_t>(c - line));
      c = line;
    }
  }
  // Flush the last line if it was only partially filled (size % 16 != 0).
  if (c != line) {
    buf.append("\n");
    buf.append(line, static_cast<size_t>(c - line));
  }
  return buf;
}
}  // namespace
#else
#define TRACE_BUFFER_DLOG(...) void()
#endif

namespace perfetto {

namespace {
constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
    SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
constexpr uint8_t kLastPacketContinuesOnNextChunk =
    SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
constexpr uint8_t kChunkNeedsPatching =
    SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
}  // namespace

constexpr size_t TraceBuffer::ChunkRecord::kMaxSize;
constexpr size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);

// static
std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes,
                                                 OverwritePolicy pol) {
  std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer(pol));
  if (!trace_buffer->Initialize(size_in_bytes))
    return nullptr;
  return trace_buffer;
}
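// Illustrative usage sketch (not part of upstream; the buffer size below is
// a made-up example value). kDiscard is the same OverwritePolicy value
// checked by DiscardWrite() at the bottom of this file:
//
//   std::unique_ptr<TraceBuffer> buf =
//       TraceBuffer::Create(4 * 1024 * 1024, TraceBuffer::kDiscard);
//   if (!buf) {
//     // Allocation failed, e.g. the system is out of memory.
//   }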

TraceBuffer::TraceBuffer(OverwritePolicy pol) : overwrite_policy_(pol) {
  // See comments in ChunkRecord for the rationale of this.
  static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
                                           sizeof(SharedMemoryABI::ChunkHeader),
                "ChunkRecord out of sync with the layout of SharedMemoryABI");
}

TraceBuffer::~TraceBuffer() = default;

bool TraceBuffer::Initialize(size_t size) {
  static_assert(
      SharedMemoryABI::kMinPageSize % sizeof(ChunkRecord) == 0,
      "sizeof(ChunkRecord) must be an integer divisor of the page size");
  data_ = base::PagedMemory::Allocate(
      size, base::PagedMemory::kMayFail | base::PagedMemory::kDontCommit);
  if (!data_.IsValid()) {
    PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
    return false;
  }
  size_ = size;
  stats_.set_buffer_size(size);
  max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
  wptr_ = begin();
  index_.clear();
  last_chunk_id_written_.clear();
  read_iter_ = GetReadIterForSequence(index_.end());
  return true;
}

// Note: |src| points to a shmem region that is shared with the producer.
// Assume that the producer is malicious and will change the content of |src|
// while we execute here. Don't do any processing on it other than memcpy().
void TraceBuffer::CopyChunkUntrusted(ProducerID producer_id_trusted,
                                     uid_t producer_uid_trusted,
                                     WriterID writer_id,
                                     ChunkID chunk_id,
                                     uint16_t num_fragments,
                                     uint8_t chunk_flags,
                                     bool chunk_complete,
                                     const uint8_t* src,
                                     size_t size) {
  // |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid ending
  // up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
  const size_t record_size =
      base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
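  // Worked example (illustrative numbers, assuming sizeof(ChunkRecord) ==
  // 16): a 100-byte |size| yields record_size == AlignUp<16>(116) == 128,
  // i.e. 12 bytes of in-record padding that keep every record boundary
  // aligned to sizeof(ChunkRecord), as the comment above requires.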
  if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    return;
  }

  TRACE_BUFFER_DLOG("CopyChunk @ %lu, size=%zu", wptr_ - begin(), record_size);

#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = true;
#endif

  // If the chunk hasn't been completed, we should only consider the first
  // |num_fragments - 1| packets complete. For simplicity, we simply disregard
  // the last one when we copy the chunk.
  if (PERFETTO_UNLIKELY(!chunk_complete)) {
    if (num_fragments > 0) {
      num_fragments--;
      // These flags should only affect the last packet in the chunk. We clear
      // them, so that TraceBuffer is able to look at the remaining packets in
      // this chunk.
      chunk_flags &= ~kLastPacketContinuesOnNextChunk;
      chunk_flags &= ~kChunkNeedsPatching;
    }
  }
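  // For instance (illustrative): a chunk scraped mid-write with
  // |num_fragments| == 3 and |chunk_complete| == false is recorded as having
  // two fragments; the third is picked up later, when the producer commits
  // the completed chunk and this function runs again for the same key.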

  ChunkRecord record(record_size);
  record.producer_id = producer_id_trusted;
  record.chunk_id = chunk_id;
  record.writer_id = writer_id;
  record.num_fragments = num_fragments;
  record.flags = chunk_flags;
  ChunkMeta::Key key(record);

  // Check whether we have already copied the same chunk previously. This may
  // happen if the service scrapes chunks in a potentially incomplete state
  // before receiving commit requests for them from the producer. Note that the
  // service may scrape and thus override chunks in arbitrary order since the
  // chunks aren't ordered in the SMB.
  const auto it = index_.find(key);
  if (PERFETTO_UNLIKELY(it != index_.end())) {
    ChunkMeta* record_meta = &it->second;
    ChunkRecord* prev = record_meta->chunk_record;

    // Verify that the old chunk's metadata corresponds to the new one.
    // Overridden chunks should never change size, since the page layout is
    // fixed per writer. The number of fragments should also never decrease and
    // flags should not be removed.
    if (PERFETTO_UNLIKELY(ChunkMeta::Key(*prev) != key ||
                          prev->size != record_size ||
                          prev->num_fragments > num_fragments ||
                          (prev->flags & chunk_flags) != prev->flags)) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // If the number of fragments didn't change since the previous copy of
    // this chunk, there's no need to copy it again. If the previous copy was
    // already complete, this should always be the case.
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_ ||
                    !record_meta->is_complete() ||
                    (chunk_complete && prev->num_fragments == num_fragments));
    if (prev->num_fragments == num_fragments) {
      TRACE_BUFFER_DLOG("  skipping recommit of identical chunk");
      return;
    }

    // If we've already started reading from chunk N+1 following this chunk N,
    // don't override chunk N. Otherwise we may end up reading a packet from
    // chunk N after having read from chunk N+1, thereby violating sequential
    // read of packets. This shouldn't happen if the producer is well-behaved,
    // because it shouldn't start chunk N+1 before completing chunk N.
    ChunkMeta::Key subsequent_key = key;
    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                  "ChunkID wraps");
    subsequent_key.chunk_id++;
    const auto subsequent_it = index_.find(subsequent_key);
    if (subsequent_it != index_.end() &&
        subsequent_it->second.num_fragments_read > 0) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // We should not have read past the last packet.
    if (record_meta->num_fragments_read > prev->num_fragments) {
      PERFETTO_ELOG(
          "TraceBuffer read too many fragments from an incomplete chunk");
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    uint8_t* wptr = reinterpret_cast<uint8_t*>(prev);
    TRACE_BUFFER_DLOG("  overriding chunk @ %lu, size=%zu", wptr - begin(),
                      record_size);

    // Update chunk meta data stored in the index, as it may have changed.
    record_meta->num_fragments = num_fragments;
    record_meta->flags = chunk_flags;
    record_meta->set_complete(chunk_complete);

    // Override the ChunkRecord contents at the original |wptr|.
    TRACE_BUFFER_DLOG("  copying @ [%lu - %lu] %zu", wptr - begin(),
                      uintptr_t(wptr - begin()) + record_size, record_size);
    WriteChunkRecord(wptr, record, src, size);
    TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr, record_size).c_str());
    stats_.set_chunks_rewritten(stats_.chunks_rewritten() + 1);
    return;
  }

  if (PERFETTO_UNLIKELY(discard_writes_))
    return DiscardWrite();

  // If there isn't enough room from the given write position, write a padding
  // record to clear the end of the buffer and wrap back.
  const size_t cached_size_to_end = size_to_end();
  if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
    ssize_t res = DeleteNextChunksFor(cached_size_to_end);
    if (res == -1)
      return DiscardWrite();
    PERFETTO_DCHECK(static_cast<size_t>(res) <= cached_size_to_end);
    AddPaddingRecord(cached_size_to_end);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
    PERFETTO_DCHECK(size_to_end() >= record_size);
  }

  // At this point either |wptr_| points to an untouched part of the buffer
  // (i.e. *wptr_ == 0) or we are about to overwrite one or more ChunkRecord(s).
  // In the latter case we need to first figure out where the next valid
  // ChunkRecord is (if it exists) and add padding between the new record and
  // the next valid one.
  // Example ((w) == write cursor):
  //
  // Initial state (wptr_ == 0):
  // |0 (w)    |10               |30                  |50
  // +---------+-----------------+--------------------+--------------------+
  // | Chunk 1 | Chunk 2         | Chunk 3            | Chunk 4            |
  // +---------+-----------------+--------------------+--------------------+
  //
  // Let's assume we now want to write a 5th Chunk of size == 35. The final
  // state should look like this:
  // |0                                |35 (w)         |50
  // +---------------------------------+---------------+--------------------+
  // | Chunk 5                         | Padding Chunk | Chunk 4            |
  // +---------------------------------+---------------+--------------------+

  // Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
  ssize_t del_res = DeleteNextChunksFor(record_size);
  if (del_res == -1)
    return DiscardWrite();
  size_t padding_size = static_cast<size_t>(del_res);

  // Now first insert the new chunk. At the end, if necessary, add the padding.
  stats_.set_chunks_written(stats_.chunks_written() + 1);
  stats_.set_bytes_written(stats_.bytes_written() + record_size);
  auto it_and_inserted = index_.emplace(
      key, ChunkMeta(GetChunkRecordAt(wptr_), num_fragments, chunk_complete,
                     chunk_flags, producer_uid_trusted));
  PERFETTO_DCHECK(it_and_inserted.second);
  TRACE_BUFFER_DLOG("  copying @ [%lu - %lu] %zu", wptr_ - begin(),
                    uintptr_t(wptr_ - begin()) + record_size, record_size);
  WriteChunkRecord(wptr_, record, src, size);
  TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr_, record_size).c_str());
  wptr_ += record_size;
  if (wptr_ >= end()) {
    PERFETTO_DCHECK(padding_size == 0);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
  }
  DcheckIsAlignedAndWithinBounds(wptr_);

  // Chunks may be received out of order, so only update last_chunk_id if the
  // new chunk_id is larger. But take into account overflows by only selecting
  // the new ID if its distance to the latest ID is smaller than half the
  // number space.
  //
  // This accounts for both the case where the new ID has just overflown and
  // last_chunk_id should be updated even though it's smaller (e.g. |chunk_id|
  // = 1 and |last_chunk_id| = kMaxChunkID; chunk_id - last_chunk_id = 2) and
  // the case where the new ID is an out-of-order ID right after an overflow
  // and last_chunk_id shouldn't be updated even though it's larger
  // (e.g. |chunk_id| = kMaxChunkID and |last_chunk_id| = 1;
  // chunk_id - last_chunk_id = kMaxChunkID - 1).
  auto producer_and_writer_id = std::make_pair(producer_id_trusted, writer_id);
  ChunkID& last_chunk_id = last_chunk_id_written_[producer_and_writer_id];
  static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                "This code assumes that ChunkID wraps at kMaxChunkID");
  if (chunk_id - last_chunk_id < kMaxChunkID / 2) {
    last_chunk_id = chunk_id;
  } else {
    stats_.set_chunks_committed_out_of_order(
        stats_.chunks_committed_out_of_order() + 1);
  }

  if (padding_size)
    AddPaddingRecord(padding_size);
}

ssize_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
  PERFETTO_CHECK(!discard_writes_);

  // Find the position of the first chunk which begins at or after
  // (|wptr_| + |bytes_to_clear|). Note that such a chunk might not exist and
  // we might either reach the end of the buffer or a zeroed region of the
  // buffer.
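  // Worked example (illustrative sizes): if the records ahead of |wptr_| are
  // 16, 16 and 32 bytes long and |bytes_to_clear| == 48, the loop below
  // deletes all three of them (their end, at offset 64, is the first record
  // boundary >= 48) and returns 64 - 48 == 16: the padding the caller must
  // append after writing its 48-byte record.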
  uint8_t* next_chunk_ptr = wptr_;
  uint8_t* search_end = wptr_ + bytes_to_clear;
  TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
  DcheckIsAlignedAndWithinBounds(wptr_);
  PERFETTO_DCHECK(search_end <= end());
  std::vector<ChunkMap::iterator> index_delete;
  uint64_t chunks_overwritten = stats_.chunks_overwritten();
  uint64_t bytes_overwritten = stats_.bytes_overwritten();
  uint64_t padding_bytes_cleared = stats_.padding_bytes_cleared();
  while (next_chunk_ptr < search_end) {
    const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
    TRACE_BUFFER_DLOG(
        "  scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
        next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());

    // We just reached the untouched part of the buffer; it's going to be all
    // zeroes from here to end().
    // Optimization: if during Initialize() we fill the buffer with padding
    // records we could get rid of this branch.
    if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
      // This should happen only at the first iteration. The zeroed area can
      // only begin precisely at the |wptr_|, not after. Otherwise it means that
      // we wrapped but screwed up the ChunkRecord chain.
      PERFETTO_DCHECK(next_chunk_ptr == wptr_);
      return 0;
    }

    // Remove |next_chunk| from the index, unless it's a padding record (padding
    // records are not part of the index).
    if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
      ChunkMeta::Key key(next_chunk);
      auto it = index_.find(key);
      bool will_remove = false;
      if (PERFETTO_LIKELY(it != index_.end())) {
        const ChunkMeta& meta = it->second;
        if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments)) {
          if (overwrite_policy_ == kDiscard)
            return -1;
          chunks_overwritten++;
          bytes_overwritten += next_chunk.size;
        }
        index_delete.push_back(it);
        will_remove = true;
      }
      TRACE_BUFFER_DLOG(
          "  del index {%" PRIu32 ",%" PRIu32 ",%u} @ [%lu - %lu] %d",
          key.producer_id, key.writer_id, key.chunk_id,
          next_chunk_ptr - begin(), next_chunk_ptr - begin() + next_chunk.size,
          will_remove);
      PERFETTO_DCHECK(will_remove);
    } else {
      padding_bytes_cleared += next_chunk.size;
    }

    next_chunk_ptr += next_chunk.size;

    // We should never hit this, unless we managed to screw up while writing
    // to the buffer, breaking the ChunkRecord(s) chain.
    // TODO(primiano): Write more meaningful logging with the status of the
    // buffer, to get more actionable bugs in case we hit this.
    PERFETTO_CHECK(next_chunk_ptr <= end());
  }

  // Remove from the index.
  for (auto it : index_delete) {
    index_.erase(it);
  }
  stats_.set_chunks_overwritten(chunks_overwritten);
  stats_.set_bytes_overwritten(bytes_overwritten);
  stats_.set_padding_bytes_cleared(padding_bytes_cleared);

  PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
  return static_cast<ssize_t>(next_chunk_ptr - search_end);
}

void TraceBuffer::AddPaddingRecord(size_t size) {
  PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
  ChunkRecord record(size);
  record.is_padding = 1;
  TRACE_BUFFER_DLOG("AddPaddingRecord @ [%lu - %lu] %zu", wptr_ - begin(),
                    uintptr_t(wptr_ - begin()) + size, size);
  WriteChunkRecord(wptr_, record, nullptr, size - sizeof(ChunkRecord));
  stats_.set_padding_bytes_written(stats_.padding_bytes_written() + size);
  // |wptr_| is deliberately not advanced when writing a padding record.
}

bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
                                        WriterID writer_id,
                                        ChunkID chunk_id,
                                        const Patch* patches,
                                        size_t patches_size,
                                        bool other_patches_pending) {
  ChunkMeta::Key key(producer_id, writer_id, chunk_id);
  auto it = index_.find(key);
  if (it == index_.end()) {
    stats_.set_patches_failed(stats_.patches_failed() + 1);
    return false;
  }
  ChunkMeta& chunk_meta = it->second;

  // Check that the index is consistent with the actual ProducerID/WriterID
  // stored in the ChunkRecord.
  PERFETTO_DCHECK(ChunkMeta::Key(*chunk_meta.chunk_record) == key);
  uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_meta.chunk_record);
  PERFETTO_DCHECK(chunk_begin >= begin());
  uint8_t* chunk_end = chunk_begin + chunk_meta.chunk_record->size;
  PERFETTO_DCHECK(chunk_end <= end());

  static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
                "Patch::kSize out of sync with SharedMemoryABI");
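  // Illustrative example of what a patch carries (values made up): the size
  // field of a fragmented packet is reserved as 4 zero bytes; once the total
  // size is known, say 1000, the producer sends the redundant 4-byte varint
  // encoding {0xe8, 0x87, 0x80, 0x00}, which decodes back to 1000 while
  // always occupying exactly Patch::kSize bytes.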

  for (size_t i = 0; i < patches_size; i++) {
    uint8_t* ptr =
        chunk_begin + sizeof(ChunkRecord) + patches[i].offset_untrusted;
    TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
                      ",%u} size=%zu @ %zu with {%02x %02x %02x %02x} cur "
                      "{%02x %02x %02x %02x}",
                      producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
                      patches[i].offset_untrusted, patches[i].data[0],
                      patches[i].data[1], patches[i].data[2],
                      patches[i].data[3], ptr[0], ptr[1], ptr[2], ptr[3]);
    if (ptr < chunk_begin + sizeof(ChunkRecord) ||
        ptr > chunk_end - Patch::kSize) {
      // Either the IPC was so slow that in the meantime the writer managed to
      // wrap over |chunk_id|, or the producer sent a malicious IPC.
      stats_.set_patches_failed(stats_.patches_failed() + 1);
      return false;
    }

    // DCHECK that we are writing into a zero-filled size field and not into
    // valid data. This relies on ScatteredStreamWriter::ReserveBytes() to
    // zero-fill reservations in debug builds.
    char zero[Patch::kSize]{};
    PERFETTO_DCHECK(memcmp(ptr, &zero, Patch::kSize) == 0);

    memcpy(ptr, &patches[i].data[0], Patch::kSize);
  }
  TRACE_BUFFER_DLOG(
      "Chunk raw (after patch): %s",
      HexDump(chunk_begin, chunk_meta.chunk_record->size).c_str());

  stats_.set_patches_succeeded(stats_.patches_succeeded() + patches_size);
  if (!other_patches_pending) {
    chunk_meta.flags &= ~kChunkNeedsPatching;
    chunk_meta.chunk_record->flags = chunk_meta.flags;
  }
  return true;
}

void TraceBuffer::BeginRead() {
  read_iter_ = GetReadIterForSequence(index_.begin());
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = false;
#endif
}

TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
    ChunkMap::iterator seq_begin) {
  SequenceIterator iter;
  iter.seq_begin = seq_begin;
  if (seq_begin == index_.end()) {
    iter.cur = iter.seq_end = index_.end();
    return iter;
  }

#if PERFETTO_DCHECK_IS_ON()
  // Either |seq_begin| is == index_.begin() or the item immediately before must
  // belong to a different {ProducerID, WriterID} sequence.
  if (seq_begin != index_.begin() && seq_begin != index_.end()) {
    auto prev_it = seq_begin;
    prev_it--;
    PERFETTO_DCHECK(
        seq_begin == index_.begin() ||
        std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
            std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
  }
#endif

  // Find the first entry that has a greater {ProducerID, WriterID} (or just
  // index_.end() if we reached the end).
  ChunkMeta::Key key = seq_begin->first;  // Deliberate copy.
  key.chunk_id = kMaxChunkID;
  iter.seq_end = index_.upper_bound(key);
  PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);

  // Now find the first entry between [seq_begin, seq_end) that is
  // > last_chunk_id_written_. This is where the sequence will start (see
  // notes about wrapping of IDs in the header).
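  // Illustrative example: if this sequence's index holds chunk IDs
  // {kMaxChunkID, 0, 1} and last_chunk_id_written_ == 1, the upper_bound()
  // below lands on kMaxChunkID, which is indeed the oldest unread chunk once
  // ID wrapping is taken into account.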
  auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
  PERFETTO_DCHECK(last_chunk_id_written_.count(producer_and_writer_id));
  iter.wrapping_id = last_chunk_id_written_[producer_and_writer_id];
  key.chunk_id = iter.wrapping_id;
  iter.cur = index_.upper_bound(key);
  if (iter.cur == iter.seq_end)
    iter.cur = iter.seq_begin;
  return iter;
}

void TraceBuffer::SequenceIterator::MoveNext() {
  // Stop iterating when we reach the end of the sequence.
  // Note: |seq_begin| might be == |seq_end|.
  if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
    cur = seq_end;
    return;
  }

  // If the current chunk wasn't completed yet, we shouldn't advance past it as
  // it may be rewritten with additional packets.
  if (!cur->second.is_complete()) {
    cur = seq_end;
    return;
  }

  ChunkID last_chunk_id = cur->first.chunk_id;
  if (++cur == seq_end)
    cur = seq_begin;

  // There may be a missing chunk in the sequence of chunks, in which case the
  // next chunk's ID won't follow the last one's. If so, skip the rest of the
  // sequence. We'll return to it later once the hole is filled.
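  // For instance (illustrative IDs): with chunks {5, 6, 9} in the sequence,
  // after consuming chunk 6 the iterator lands on chunk 9; since 6 + 1 != 9,
  // we stop here and resume only once chunks 7 and 8 arrive.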
  if (last_chunk_id + 1 != cur->first.chunk_id)
    cur = seq_end;
}

bool TraceBuffer::ReadNextTracePacket(
    TracePacket* packet,
    PacketSequenceProperties* sequence_properties,
    bool* previous_packet_on_sequence_dropped) {
  // Note: MoveNext() moves only within the next chunk within the same
  // {ProducerID, WriterID} sequence. Here we want to:
  // - return the next patched+complete packet in the current sequence, if any.
  // - return the first patched+complete packet in the next sequence, if any.
  // - return false if none of the above is found.
  TRACE_BUFFER_DLOG("ReadNextTracePacket()");

  // Just in case we forget to initialize these below.
  *sequence_properties = {0, kInvalidUid, 0};
  *previous_packet_on_sequence_dropped = false;

  // At the start of each sequence iteration, we consider the last read packet
  // dropped. While iterating over the chunks in the sequence, we update this
  // flag based on our knowledge about the last packet that was read from each
  // chunk (|last_read_packet_skipped| in ChunkMeta).
  bool previous_packet_dropped = true;

#if PERFETTO_DCHECK_IS_ON()
  PERFETTO_DCHECK(!changed_since_last_read_);
#endif
  for (;; read_iter_.MoveNext()) {
    if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
      // We ran out of chunks in the current {ProducerID, WriterID} sequence or
      // we just reached the index_.end().

      if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
        return false;

      // We reached the end of sequence, move to the next one.
      // Note: ++read_iter_.seq_end might become index_.end(), but
      // GetReadIterForSequence() knows how to deal with that.
      read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
      PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
      previous_packet_dropped = true;
    }

    ChunkMeta* chunk_meta = &*read_iter_;

    // If the chunk has holes that are waiting to be patched out-of-band,
    // skip the current sequence and move to the next one.
    if (chunk_meta->flags & kChunkNeedsPatching) {
      read_iter_.MoveToEnd();
      continue;
    }

    const ProducerID trusted_producer_id = read_iter_.producer_id();
    const WriterID writer_id = read_iter_.writer_id();
    const uid_t trusted_uid = chunk_meta->trusted_uid;

    // At this point we have a chunk in |chunk_meta| that has not been fully
    // read. We don't know yet whether we have enough data to read the full
    // packet (in the case it's fragmented over several chunks) and we are about
    // to find that out. Specifically:
    // A) If the first fragment is unread and is a fragment continuing from a
    //    previous chunk, it means we have missed the previous ChunkID. In
    //    fact, if this wasn't the case, a previous call to ReadNext() shouldn't
    //    have moved the cursor to this chunk.
    // B) Any fragment > 0 && < last is always readable. By definition an inner
    //    packet is never fragmented and hence requires neither stitching nor
    //    any out-of-band patching. The same applies to the last packet
    //    iff it doesn't continue on the next chunk.
    // C) If the last packet (which might be also the only packet in the chunk)
    //    is a fragment and continues on the next chunk, we peek at the next
    //    chunks and, if we have all of them, mark as read and move the cursor.
    //
    // +---------------+   +-------------------+  +---------------+
    // | ChunkID: 1    |   | ChunkID: 2        |  | ChunkID: 3    |
    // |---------------+   +-------------------+  +---------------+
    // | Packet 1      |   |                   |  | ... Packet 3  |
    // | Packet 2      |   | ... Packet 3  ... |  | Packet 4      |
    // | Packet 3  ... |   |                   |  | Packet 5 ...  |
    // +---------------+   +-------------------+  +---------------+

    PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
                    chunk_meta->num_fragments);

    // If we didn't read any packets from this chunk, the last packet was from
    // the previous chunk we iterated over; so don't update
    // |previous_packet_dropped| in this case.
    if (chunk_meta->num_fragments_read > 0)
      previous_packet_dropped = chunk_meta->last_read_packet_skipped();

    while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
      enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
      if (chunk_meta->num_fragments_read == 0) {
        if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
          action = kSkip;  // Case A.
        } else if (chunk_meta->num_fragments == 1 &&
                   (chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
          action = kTryReadAhead;  // Case C.
        } else {
          action = kReadOnePacket;  // Case B.
        }
      } else if (chunk_meta->num_fragments_read <
                     chunk_meta->num_fragments - 1 ||
                 !(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
        action = kReadOnePacket;  // Case B.
      } else {
        action = kTryReadAhead;  // Case C.
      }

      TRACE_BUFFER_DLOG("  chunk %u, packet %hu of %hu, action=%d",
                        read_iter_.chunk_id(), chunk_meta->num_fragments_read,
                        chunk_meta->num_fragments, action);

      if (action == kSkip) {
        // This fragment will be skipped forever, not just in this ReadPacket()
        // iteration. This happens by virtue of ReadNextPacketInChunk()
        // incrementing the |num_fragments_read| and marking the fragment as
        // read even if we didn't really.
        ReadNextPacketInChunk(chunk_meta, nullptr);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        continue;
      }

      if (action == kReadOnePacket) {
        // The easy peasy case B.
        ReadPacketResult result = ReadNextPacketInChunk(chunk_meta, packet);

        if (PERFETTO_LIKELY(result == ReadPacketResult::kSucceeded)) {
          *sequence_properties = {trusted_producer_id, trusted_uid, writer_id};
          *previous_packet_on_sequence_dropped = previous_packet_dropped;
          return true;
        } else if (result == ReadPacketResult::kFailedEmptyPacket) {
          // We can ignore and skip empty packets.
          PERFETTO_DCHECK(packet->slices().empty());
          continue;
        }

        // In extremely rare cases (producer bugged / malicious) the chunk might
        // contain an invalid fragment. In such case we don't want to stall the
        // sequence but just skip the chunk and move on. ReadNextPacketInChunk()
        // marks the chunk as fully read, so we don't attempt to read from it
        // again in a future call to ReadBuffers(). It also already records an
        // abi violation for this.
        PERFETTO_DCHECK(result == ReadPacketResult::kFailedInvalidPacket);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        break;
      }

      PERFETTO_DCHECK(action == kTryReadAhead);
      ReadAheadResult ra_res = ReadAhead(packet);
      if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
        stats_.set_readaheads_succeeded(stats_.readaheads_succeeded() + 1);
        *sequence_properties = {trusted_producer_id, trusted_uid, writer_id};
        *previous_packet_on_sequence_dropped = previous_packet_dropped;
        return true;
      }

      if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
        // readahead didn't find a contiguous packet sequence. We'll try again
        // on the next ReadPacket() call.
        stats_.set_readaheads_failed(stats_.readaheads_failed() + 1);

        // TODO(primiano): optimization: this MoveToEnd() is the reason why
        // MoveNext() (that is called in the outer for(;;MoveNext)) needs to
        // deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
        // something to avoid that check by reshuffling the code here?
        read_iter_.MoveToEnd();

        // This break will go back to beginning of the for(;;MoveNext()). That
        // will move to the next sequence because we set the read iterator to
        // its end.
        break;
      }

      PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);

      // In this case ReadAhead() might advance |read_iter_|, so we need to
      // re-cache the |chunk_meta| pointer to point to the current chunk.
      chunk_meta = &*read_iter_;
      chunk_meta->set_last_read_packet_skipped(true);
      previous_packet_dropped = true;
    }  // while(...)  [iterate over packet fragments for the current chunk].
  }    // for(;;MoveNext()) [iterate over chunks].
}

TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
  static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
                "relying on kMaxChunkID to wrap naturally");
  TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
  ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
  SequenceIterator it = read_iter_;
  for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
    // We should stay within the same sequence while iterating here.
    PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
                    it.writer_id() == read_iter_.writer_id());

    TRACE_BUFFER_DLOG("   expected chunk ID: %u, actual ID: %u", next_chunk_id,
                      it.chunk_id());

    if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
      continue;

    // If we miss the next chunk, stop looking in the current sequence and
    // try another sequence. This chunk might come in the near future.
    // The second condition is the edge case of a buggy/malicious
    // producer. The ChunkID is contiguous but its flags don't make sense.
    if (it.chunk_id() != next_chunk_id ||
        PERFETTO_UNLIKELY(
            !((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
      return ReadAheadResult::kFailedMoveToNextSequence;
    }

    // If the chunk is contiguous but has not been patched yet move to the next
    // sequence and try coming back here on the next ReadNextTracePacket() call.
    // TODO(primiano): add a test to cover this, it's a subtle case.
    if ((*it).flags & kChunkNeedsPatching)
      return ReadAheadResult::kFailedMoveToNextSequence;

    // This is the case of an intermediate chunk which contains only one
    // fragment which continues on the next chunk. This is the case for large
    // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
    // (Packet1(X) := fragment X of Packet1).
    if ((*it).num_fragments == 1 &&
        ((*it).flags & kLastPacketContinuesOnNextChunk)) {
      continue;
    }

    // We made it! We got all fragments for the packet without holes.
    TRACE_BUFFER_DLOG("  readahead success @ chunk %u", it.chunk_id());
    PERFETTO_DCHECK(((*it).num_fragments == 1 &&
                     !((*it).flags & kLastPacketContinuesOnNextChunk)) ||
                    (*it).num_fragments > 1);

    // Now let's re-iterate over the [read_iter_, it] sequence and mark
    // all the fragments as read.
    bool packet_corruption = false;
    for (;;) {
      PERFETTO_DCHECK(read_iter_.is_valid());
      TRACE_BUFFER_DLOG("    commit chunk %u", read_iter_.chunk_id());
      if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
        // In the unlikely case of a corrupted packet (corrupted or empty
        // fragment), invalidate all the stitching and move on to the next
        // chunk in the same sequence, if any.
        packet_corruption |= ReadNextPacketInChunk(&*read_iter_, packet) ==
                             ReadPacketResult::kFailedInvalidPacket;
      }
      if (read_iter_.cur == it.cur)
        break;
      read_iter_.MoveNext();
    }  // for(;;)
    PERFETTO_DCHECK(read_iter_.cur == it.cur);

    if (PERFETTO_UNLIKELY(packet_corruption)) {
      // ReadNextPacketInChunk() already records an abi violation for this case.
      *packet = TracePacket();  // clear.
      return ReadAheadResult::kFailedStayOnSameSequence;
    }

    return ReadAheadResult::kSucceededReturnSlices;
  }  // for(it...)  [readahead loop]
  return ReadAheadResult::kFailedMoveToNextSequence;
}

TraceBuffer::ReadPacketResult TraceBuffer::ReadNextPacketInChunk(
    ChunkMeta* chunk_meta,
    TracePacket* packet) {
  PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
  PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));

  const uint8_t* record_begin =
      reinterpret_cast<const uint8_t*>(chunk_meta->chunk_record);
  const uint8_t* record_end = record_begin + chunk_meta->chunk_record->size;
  const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
  const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;

  if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
                        packet_begin >= record_end)) {
    // The producer has a bug or is malicious and declared that the chunk
    // contains more packets beyond its boundaries.
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() +
                            chunk_meta->chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  // A packet (or a fragment) starts with a varint stating its size, followed
  // by its content. The varint shouldn't be larger than 4 bytes (just in case
  // the producer is using a redundant encoding).
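  // For example (illustrative bytes): a 3-byte fragment {p0, p1, p2} is
  // framed as {0x03, p0, p1, p2}; the same length can also arrive as the
  // redundant encoding {0x83, 0x80, 0x80, 0x00} when the size field was
  // patched out-of-band (see TryPatchChunkContents() above).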
  uint64_t packet_size = 0;
  const uint8_t* header_end =
      std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
               record_end);
  const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
      packet_begin, header_end, &packet_size);

  const uint8_t* next_packet = packet_data + packet_size;
  if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
                        next_packet > record_end)) {
    // In BufferExhaustedPolicy::kDrop mode, TraceWriter may abort a fragmented
    // packet by writing an invalid size in the last fragment's header. We
    // should handle this case without recording an ABI violation (since
    // Android R).
    if (packet_size != SharedMemoryABI::kPacketSizeDropPacket) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    } else {
      stats_.set_trace_writer_packet_loss(stats_.trace_writer_packet_loss() +
                                          1);
    }
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() +
                            chunk_meta->chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  chunk_meta->cur_fragment_offset =
      static_cast<uint16_t>(next_packet - packets_begin);
  chunk_meta->num_fragments_read++;

  if (PERFETTO_UNLIKELY(chunk_meta->num_fragments_read ==
                            chunk_meta->num_fragments &&
                        chunk_meta->is_complete())) {
    stats_.set_chunks_read(stats_.chunks_read() + 1);
    stats_.set_bytes_read(stats_.bytes_read() + chunk_meta->chunk_record->size);
  } else {
    // We have at least one more packet to parse. It should be within the chunk.
    if (chunk_meta->cur_fragment_offset + sizeof(ChunkRecord) >=
        chunk_meta->chunk_record->size) {
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    }
  }

  chunk_meta->set_last_read_packet_skipped(false);

  if (PERFETTO_UNLIKELY(packet_size == 0))
    return ReadPacketResult::kFailedEmptyPacket;

  if (PERFETTO_LIKELY(packet))
    packet->AddSlice(packet_data, static_cast<size_t>(packet_size));

  return ReadPacketResult::kSucceeded;
}

void TraceBuffer::DiscardWrite() {
  PERFETTO_DCHECK(overwrite_policy_ == kDiscard);
  discard_writes_ = true;
  stats_.set_chunks_discarded(stats_.chunks_discarded() + 1);
  TRACE_BUFFER_DLOG("  discarding write");
}

}  // namespace perfetto