1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_
18 #define SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_
19 
#include <algorithm>
#include <array>
#include <cstdint>
#include <deque>
#include <limits>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
27 
28 #include "perfetto/base/logging.h"
29 #include "perfetto/base/time.h"
30 #include "perfetto/ext/base/hash.h"
31 #include "perfetto/ext/base/optional.h"
32 #include "perfetto/ext/base/string_view.h"
33 #include "perfetto/ext/base/utils.h"
34 #include "perfetto/trace_processor/basic_types.h"
35 #include "perfetto/trace_processor/status.h"
36 #include "src/trace_processor/containers/string_pool.h"
37 #include "src/trace_processor/storage/metadata.h"
38 #include "src/trace_processor/storage/stats.h"
39 #include "src/trace_processor/tables/android_tables.h"
40 #include "src/trace_processor/tables/counter_tables.h"
41 #include "src/trace_processor/tables/flow_tables.h"
42 #include "src/trace_processor/tables/memory_tables.h"
43 #include "src/trace_processor/tables/metadata_tables.h"
44 #include "src/trace_processor/tables/profiler_tables.h"
45 #include "src/trace_processor/tables/slice_tables.h"
46 #include "src/trace_processor/tables/track_tables.h"
47 #include "src/trace_processor/types/variadic.h"
48 
49 namespace perfetto {
50 namespace trace_processor {
51 
// UniquePid is an offset into |unique_processes_|. This is necessary because
// Unix pids are reused and thus not guaranteed to be unique over a long
// period of time.
using UniquePid = uint32_t;

// UniqueTid is an offset into |unique_threads_|. Necessary because tids can
// be reused.
using UniqueTid = uint32_t;

// StringId is an offset into |string_pool_|.
using StringId = StringPool::Id;
static const StringId kNullStringId = StringId::Null();

// Identifier for a set of args in the arg table; 0 is reserved as the
// "invalid / no args" sentinel.
using ArgSetId = uint32_t;
static const ArgSetId kInvalidArgSetId = 0;

// Strongly-typed row-id aliases for the tables defined under
// src/trace_processor/tables/.
using TrackId = tables::TrackTable::Id;

using CounterId = tables::CounterTable::Id;

using SliceId = tables::SliceTable::Id;

using InstantId = tables::InstantTable::Id;

using SchedId = tables::SchedSliceTable::Id;

using MappingId = tables::StackProfileMappingTable::Id;

using FrameId = tables::StackProfileFrameTable::Id;

using SymbolId = tables::SymbolTable::Id;

using CallsiteId = tables::StackProfileCallsiteTable::Id;

using MetadataId = tables::MetadataTable::Id;

using RawId = tables::RawTable::Id;

using FlamegraphId = tables::ExperimentalFlamegraphNodesTable::Id;

using VulkanAllocId = tables::VulkanMemoryAllocationsTable::Id;

using SnapshotNodeId = tables::MemorySnapshotNodeTable::Id;
95 
96 // TODO(lalitm): this is a temporary hack while migrating the counters table and
97 // will be removed when the migration is complete.
98 static const TrackId kInvalidTrackId =
99     TrackId(std::numeric_limits<TrackId>::max());
100 
// Kind of entity the |ref| field of a raw event refers to. The numeric values
// index into the string map returned by GetRefTypeStringMap(); |kRefMax| is a
// count of real values, not a reference kind itself.
enum class RefType {
  kRefNoRef = 0,
  kRefUtid = 1,
  kRefCpuId = 2,
  kRefIrq = 3,
  kRefSoftIrq = 4,
  kRefUpid = 5,
  kRefGpuId = 6,
  kRefTrack = 7,
  kRefMax
};

// Returns the string name for each |RefType| value, indexed by the enum's
// numeric value.
const std::vector<NullTermStringView>& GetRefTypeStringMap();
114 
// Stores data inside a trace file in a columnar form. This makes it efficient
// to read or search across a single field of the trace (e.g. all the thread
// names for a given CPU).
class TraceStorage {
 public:
  // |Config| carries tuning knobs; defaults to an empty config.
  TraceStorage(const Config& = Config());

  // Virtual destructor: tests subclass this to override the string
  // interning/lookup methods below.
  virtual ~TraceStorage();
123 
  // Columnar storage of per-thread timing data for slices on thread tracks.
  // Rows are added in increasing |slice_id| order (the id of the matching row
  // in the slice table); FindRowForSliceId() relies on that ordering for
  // binary search.
  class ThreadSlices {
   public:
    // Appends one row and returns its index.
    inline uint32_t AddThreadSlice(uint32_t slice_id,
                                   int64_t thread_timestamp_ns,
                                   int64_t thread_duration_ns,
                                   int64_t thread_instruction_count,
                                   int64_t thread_instruction_delta) {
      slice_ids_.emplace_back(slice_id);
      thread_timestamp_ns_.emplace_back(thread_timestamp_ns);
      thread_duration_ns_.emplace_back(thread_duration_ns);
      thread_instruction_counts_.emplace_back(thread_instruction_count);
      thread_instruction_deltas_.emplace_back(thread_instruction_delta);
      return slice_count() - 1;
    }

    uint32_t slice_count() const {
      return static_cast<uint32_t>(slice_ids_.size());
    }

    const std::deque<uint32_t>& slice_ids() const { return slice_ids_; }
    const std::deque<int64_t>& thread_timestamp_ns() const {
      return thread_timestamp_ns_;
    }
    const std::deque<int64_t>& thread_duration_ns() const {
      return thread_duration_ns_;
    }
    const std::deque<int64_t>& thread_instruction_counts() const {
      return thread_instruction_counts_;
    }
    const std::deque<int64_t>& thread_instruction_deltas() const {
      return thread_instruction_deltas_;
    }

    // Binary searches for |slice_id|; returns its row index, or base::nullopt
    // if the id was never added. Requires ids to have been added in sorted
    // order (see class comment).
    base::Optional<uint32_t> FindRowForSliceId(uint32_t slice_id) const {
      auto it =
          std::lower_bound(slice_ids().begin(), slice_ids().end(), slice_id);
      if (it != slice_ids().end() && *it == slice_id) {
        return static_cast<uint32_t>(std::distance(slice_ids().begin(), it));
      }
      return base::nullopt;
    }

    // Back-fills the duration and instruction delta for |slice_id| once its
    // end thread-timestamp/instruction-count are known. No-op for unknown
    // ids.
    void UpdateThreadDeltasForSliceId(uint32_t slice_id,
                                      int64_t end_thread_timestamp_ns,
                                      int64_t end_thread_instruction_count) {
      auto opt_row = FindRowForSliceId(slice_id);
      if (!opt_row)
        return;
      uint32_t row = *opt_row;
      int64_t begin_ns = thread_timestamp_ns_[row];
      thread_duration_ns_[row] = end_thread_timestamp_ns - begin_ns;
      int64_t begin_ticount = thread_instruction_counts_[row];
      thread_instruction_deltas_[row] =
          end_thread_instruction_count - begin_ticount;
    }

   private:
    // Parallel deques: index i across all of them forms one logical row.
    std::deque<uint32_t> slice_ids_;
    std::deque<int64_t> thread_timestamp_ns_;
    std::deque<int64_t> thread_duration_ns_;
    std::deque<int64_t> thread_instruction_counts_;
    std::deque<int64_t> thread_instruction_deltas_;
  };
187 
188   class VirtualTrackSlices {
189    public:
AddVirtualTrackSlice(uint32_t slice_id,int64_t thread_timestamp_ns,int64_t thread_duration_ns,int64_t thread_instruction_count,int64_t thread_instruction_delta)190     inline uint32_t AddVirtualTrackSlice(uint32_t slice_id,
191                                          int64_t thread_timestamp_ns,
192                                          int64_t thread_duration_ns,
193                                          int64_t thread_instruction_count,
194                                          int64_t thread_instruction_delta) {
195       slice_ids_.emplace_back(slice_id);
196       thread_timestamp_ns_.emplace_back(thread_timestamp_ns);
197       thread_duration_ns_.emplace_back(thread_duration_ns);
198       thread_instruction_counts_.emplace_back(thread_instruction_count);
199       thread_instruction_deltas_.emplace_back(thread_instruction_delta);
200       return slice_count() - 1;
201     }
202 
slice_count()203     uint32_t slice_count() const {
204       return static_cast<uint32_t>(slice_ids_.size());
205     }
206 
slice_ids()207     const std::deque<uint32_t>& slice_ids() const { return slice_ids_; }
thread_timestamp_ns()208     const std::deque<int64_t>& thread_timestamp_ns() const {
209       return thread_timestamp_ns_;
210     }
thread_duration_ns()211     const std::deque<int64_t>& thread_duration_ns() const {
212       return thread_duration_ns_;
213     }
thread_instruction_counts()214     const std::deque<int64_t>& thread_instruction_counts() const {
215       return thread_instruction_counts_;
216     }
thread_instruction_deltas()217     const std::deque<int64_t>& thread_instruction_deltas() const {
218       return thread_instruction_deltas_;
219     }
220 
FindRowForSliceId(uint32_t slice_id)221     base::Optional<uint32_t> FindRowForSliceId(uint32_t slice_id) const {
222       auto it =
223           std::lower_bound(slice_ids().begin(), slice_ids().end(), slice_id);
224       if (it != slice_ids().end() && *it == slice_id) {
225         return static_cast<uint32_t>(std::distance(slice_ids().begin(), it));
226       }
227       return base::nullopt;
228     }
229 
UpdateThreadDeltasForSliceId(uint32_t slice_id,int64_t end_thread_timestamp_ns,int64_t end_thread_instruction_count)230     void UpdateThreadDeltasForSliceId(uint32_t slice_id,
231                                       int64_t end_thread_timestamp_ns,
232                                       int64_t end_thread_instruction_count) {
233       auto opt_row = FindRowForSliceId(slice_id);
234       if (!opt_row)
235         return;
236       uint32_t row = *opt_row;
237       int64_t begin_ns = thread_timestamp_ns_[row];
238       thread_duration_ns_[row] = end_thread_timestamp_ns - begin_ns;
239       int64_t begin_ticount = thread_instruction_counts_[row];
240       thread_instruction_deltas_[row] =
241           end_thread_instruction_count - begin_ticount;
242     }
243 
244    private:
245     std::deque<uint32_t> slice_ids_;
246     std::deque<int64_t> thread_timestamp_ns_;
247     std::deque<int64_t> thread_duration_ns_;
248     std::deque<int64_t> thread_instruction_counts_;
249     std::deque<int64_t> thread_instruction_deltas_;
250   };
251 
  // Log of SQL queries executed against this trace, used to power the
  // sqlstats introspection table. At most |kMaxLogEntries| entries are kept.
  class SqlStats {
   public:
    static constexpr size_t kMaxLogEntries = 100;
    // Records a newly submitted query; returns the row index to pass to the
    // other Record*() methods below.
    uint32_t RecordQueryBegin(const std::string& query,
                              int64_t time_queued,
                              int64_t time_started);
    // Records the timestamp of the first step/Next() call for query |row|.
    void RecordQueryFirstNext(uint32_t row, int64_t time_first_next);
    // Records the completion timestamp for query |row|.
    void RecordQueryEnd(uint32_t row, int64_t time_end);
    size_t size() const { return queries_.size(); }
    const std::deque<std::string>& queries() const { return queries_; }
    const std::deque<int64_t>& times_queued() const { return times_queued_; }
    const std::deque<int64_t>& times_started() const { return times_started_; }
    const std::deque<int64_t>& times_first_next() const {
      return times_first_next_;
    }
    const std::deque<int64_t>& times_ended() const { return times_ended_; }

   private:
    // Count of entries evicted from the front of the deques. Presumably
    // maintained by RecordQueryBegin() (defined out of line) to translate row
    // indices after eviction — confirm against the .cc file.
    uint32_t popped_queries_ = 0;

    // Parallel deques: entry i of each belongs to the same logged query.
    std::deque<std::string> queries_;
    std::deque<int64_t> times_queued_;
    std::deque<int64_t> times_started_;
    std::deque<int64_t> times_first_next_;
    std::deque<int64_t> times_ended_;
  };
278 
  // One trace-ingestion stat: either a plain value (for stats::kSingle keys)
  // or a map of index -> value (for stats::kIndexed keys).
  struct Stats {
    using IndexMap = std::map<int, int64_t>;
    int64_t value = 0;
    IndexMap indexed_values;
  };
  // One slot per stat key declared in stats.h.
  using StatsMap = std::array<Stats, stats::kNumKeys>;
285 
  // Returns a unique identifier for the contents of each string.
  // The string is copied internally and can be destroyed after this call
  // returns. Virtual for testing.
  virtual StringId InternString(base::StringView str) {
    return string_pool_.InternString(str);
  }
292 
  // Sets the single-valued stat |key| to |value|.
  // Example usage: SetStats(stats::android_log_num_failed, 42);
  void SetStats(size_t key, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value = value;
  }

  // Adds |increment| (which may be negative) to the single-valued stat |key|.
  // Example usage: IncrementStats(stats::android_log_num_failed, -1);
  void IncrementStats(size_t key, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value += increment;
  }

  // Adds |increment| to bucket |index| of the indexed stat |key|.
  // Example usage: IncrementIndexedStats(stats::cpu_failure, 1);
  void IncrementIndexedStats(size_t key, int index, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] += increment;
  }

  // Sets bucket |index| of the indexed stat |key| to |value|.
  // Example usage: SetIndexedStats(stats::cpu_failure, 1, 42);
  void SetIndexedStats(size_t key, int index, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] = value;
  }
320 
321   class ScopedStatsTracer {
322    public:
ScopedStatsTracer(TraceStorage * storage,size_t key)323     ScopedStatsTracer(TraceStorage* storage, size_t key)
324         : storage_(storage), key_(key), start_ns_(base::GetWallTimeNs()) {}
325 
~ScopedStatsTracer()326     ~ScopedStatsTracer() {
327       if (!storage_)
328         return;
329       auto delta_ns = base::GetWallTimeNs() - start_ns_;
330       storage_->IncrementStats(key_, delta_ns.count());
331     }
332 
ScopedStatsTracer(ScopedStatsTracer && other)333     ScopedStatsTracer(ScopedStatsTracer&& other) noexcept { MoveImpl(&other); }
334 
335     ScopedStatsTracer& operator=(ScopedStatsTracer&& other) {
336       MoveImpl(&other);
337       return *this;
338     }
339 
340    private:
341     ScopedStatsTracer(const ScopedStatsTracer&) = delete;
342     ScopedStatsTracer& operator=(const ScopedStatsTracer&) = delete;
343 
MoveImpl(ScopedStatsTracer * other)344     void MoveImpl(ScopedStatsTracer* other) {
345       storage_ = other->storage_;
346       key_ = other->key_;
347       start_ns_ = other->start_ns_;
348       other->storage_ = nullptr;
349     }
350 
351     TraceStorage* storage_;
352     size_t key_;
353     base::TimeNanos start_ns_;
354   };
355 
  // Returns an RAII tracer which adds the wall time elapsed until it goes out
  // of scope to the single-valued stat |key|.
  ScopedStatsTracer TraceExecutionTimeIntoStats(size_t key) {
    return ScopedStatsTracer(this, key);
  }
359 
  // Reading methods.
  // Returns the interned string for |id| from the pool.
  // Virtual for testing.
  virtual NullTermStringView GetString(StringId id) const {
    return string_pool_.Get(id);
  }
365 
  // The accessor pairs below follow one pattern: a const getter for readers
  // and a |mutable_*| variant returning a raw (non-owning) pointer for
  // parser/writer code.

  const tables::ThreadTable& thread_table() const { return thread_table_; }
  tables::ThreadTable* mutable_thread_table() { return &thread_table_; }

  const tables::ProcessTable& process_table() const { return process_table_; }
  tables::ProcessTable* mutable_process_table() { return &process_table_; }

  const tables::TrackTable& track_table() const { return track_table_; }
  tables::TrackTable* mutable_track_table() { return &track_table_; }

  const tables::ProcessTrackTable& process_track_table() const {
    return process_track_table_;
  }
  tables::ProcessTrackTable* mutable_process_track_table() {
    return &process_track_table_;
  }

  const tables::ThreadTrackTable& thread_track_table() const {
    return thread_track_table_;
  }
  tables::ThreadTrackTable* mutable_thread_track_table() {
    return &thread_track_table_;
  }

  const tables::CounterTrackTable& counter_track_table() const {
    return counter_track_table_;
  }
  tables::CounterTrackTable* mutable_counter_track_table() {
    return &counter_track_table_;
  }

  const tables::ThreadCounterTrackTable& thread_counter_track_table() const {
    return thread_counter_track_table_;
  }
  tables::ThreadCounterTrackTable* mutable_thread_counter_track_table() {
    return &thread_counter_track_table_;
  }

  const tables::ProcessCounterTrackTable& process_counter_track_table() const {
    return process_counter_track_table_;
  }
  tables::ProcessCounterTrackTable* mutable_process_counter_track_table() {
    return &process_counter_track_table_;
  }

  const tables::CpuCounterTrackTable& cpu_counter_track_table() const {
    return cpu_counter_track_table_;
  }
  tables::CpuCounterTrackTable* mutable_cpu_counter_track_table() {
    return &cpu_counter_track_table_;
  }

  const tables::IrqCounterTrackTable& irq_counter_track_table() const {
    return irq_counter_track_table_;
  }
  tables::IrqCounterTrackTable* mutable_irq_counter_track_table() {
    return &irq_counter_track_table_;
  }

  const tables::SoftirqCounterTrackTable& softirq_counter_track_table() const {
    return softirq_counter_track_table_;
  }
  tables::SoftirqCounterTrackTable* mutable_softirq_counter_track_table() {
    return &softirq_counter_track_table_;
  }

  const tables::GpuCounterTrackTable& gpu_counter_track_table() const {
    return gpu_counter_track_table_;
  }
  tables::GpuCounterTrackTable* mutable_gpu_counter_track_table() {
    return &gpu_counter_track_table_;
  }

  const tables::GpuCounterGroupTable& gpu_counter_group_table() const {
    return gpu_counter_group_table_;
  }
  tables::GpuCounterGroupTable* mutable_gpu_counter_group_table() {
    return &gpu_counter_group_table_;
  }

  const tables::SchedSliceTable& sched_slice_table() const {
    return sched_slice_table_;
  }
  tables::SchedSliceTable* mutable_sched_slice_table() {
    return &sched_slice_table_;
  }

  const tables::SliceTable& slice_table() const { return slice_table_; }
  tables::SliceTable* mutable_slice_table() { return &slice_table_; }

  const tables::FlowTable& flow_table() const { return flow_table_; }
  tables::FlowTable* mutable_flow_table() { return &flow_table_; }

  const ThreadSlices& thread_slices() const { return thread_slices_; }
  ThreadSlices* mutable_thread_slices() { return &thread_slices_; }

  const VirtualTrackSlices& virtual_track_slices() const {
    return virtual_track_slices_;
  }
  VirtualTrackSlices* mutable_virtual_track_slices() {
    return &virtual_track_slices_;
  }

  const tables::GpuSliceTable& gpu_slice_table() const {
    return gpu_slice_table_;
  }
  tables::GpuSliceTable* mutable_gpu_slice_table() { return &gpu_slice_table_; }

  const tables::CounterTable& counter_table() const { return counter_table_; }
  tables::CounterTable* mutable_counter_table() { return &counter_table_; }
475 
  const SqlStats& sql_stats() const { return sql_stats_; }
  SqlStats* mutable_sql_stats() { return &sql_stats_; }

  const tables::InstantTable& instant_table() const { return instant_table_; }
  tables::InstantTable* mutable_instant_table() { return &instant_table_; }

  const tables::AndroidLogTable& android_log_table() const {
    return android_log_table_;
  }
  tables::AndroidLogTable* mutable_android_log_table() {
    return &android_log_table_;
  }

  // Read-only view of the ingestion stats; mutate via the Set*/Increment*
  // stats methods above.
  const StatsMap& stats() const { return stats_; }

  const tables::MetadataTable& metadata_table() const {
    return metadata_table_;
  }
  tables::MetadataTable* mutable_metadata_table() { return &metadata_table_; }

  const tables::ArgTable& arg_table() const { return arg_table_; }
  tables::ArgTable* mutable_arg_table() { return &arg_table_; }

  const tables::RawTable& raw_table() const { return raw_table_; }
  tables::RawTable* mutable_raw_table() { return &raw_table_; }

  const tables::CpuTable& cpu_table() const { return cpu_table_; }
  tables::CpuTable* mutable_cpu_table() { return &cpu_table_; }

  const tables::CpuFreqTable& cpu_freq_table() const { return cpu_freq_table_; }
  tables::CpuFreqTable* mutable_cpu_freq_table() { return &cpu_freq_table_; }

  // Accessors for the heap/CPU profiling tables.
  const tables::StackProfileMappingTable& stack_profile_mapping_table() const {
    return stack_profile_mapping_table_;
  }
  tables::StackProfileMappingTable* mutable_stack_profile_mapping_table() {
    return &stack_profile_mapping_table_;
  }

  const tables::StackProfileFrameTable& stack_profile_frame_table() const {
    return stack_profile_frame_table_;
  }
  tables::StackProfileFrameTable* mutable_stack_profile_frame_table() {
    return &stack_profile_frame_table_;
  }

  const tables::StackProfileCallsiteTable& stack_profile_callsite_table()
      const {
    return stack_profile_callsite_table_;
  }
  tables::StackProfileCallsiteTable* mutable_stack_profile_callsite_table() {
    return &stack_profile_callsite_table_;
  }

  const tables::HeapProfileAllocationTable& heap_profile_allocation_table()
      const {
    return heap_profile_allocation_table_;
  }
  tables::HeapProfileAllocationTable* mutable_heap_profile_allocation_table() {
    return &heap_profile_allocation_table_;
  }

  const tables::PackageListTable& package_list_table() const {
    return package_list_table_;
  }
  tables::PackageListTable* mutable_package_list_table() {
    return &package_list_table_;
  }

  const tables::ProfilerSmapsTable& profiler_smaps_table() const {
    return profiler_smaps_table_;
  }
  tables::ProfilerSmapsTable* mutable_profiler_smaps_table() {
    return &profiler_smaps_table_;
  }

  const tables::StackSampleTable& stack_sample_table() const {
    return stack_sample_table_;
  }
  tables::StackSampleTable* mutable_stack_sample_table() {
    return &stack_sample_table_;
  }

  const tables::CpuProfileStackSampleTable& cpu_profile_stack_sample_table()
      const {
    return cpu_profile_stack_sample_table_;
  }
  tables::CpuProfileStackSampleTable* mutable_cpu_profile_stack_sample_table() {
    return &cpu_profile_stack_sample_table_;
  }

  const tables::PerfSampleTable& perf_sample_table() const {
    return perf_sample_table_;
  }
  tables::PerfSampleTable* mutable_perf_sample_table() {
    return &perf_sample_table_;
  }
573 
  const tables::SymbolTable& symbol_table() const { return symbol_table_; }

  tables::SymbolTable* mutable_symbol_table() { return &symbol_table_; }

  // Accessors for the Java heap-graph tables.
  const tables::HeapGraphObjectTable& heap_graph_object_table() const {
    return heap_graph_object_table_;
  }

  tables::HeapGraphObjectTable* mutable_heap_graph_object_table() {
    return &heap_graph_object_table_;
  }
  const tables::HeapGraphClassTable& heap_graph_class_table() const {
    return heap_graph_class_table_;
  }

  tables::HeapGraphClassTable* mutable_heap_graph_class_table() {
    return &heap_graph_class_table_;
  }

  const tables::HeapGraphReferenceTable& heap_graph_reference_table() const {
    return heap_graph_reference_table_;
  }

  tables::HeapGraphReferenceTable* mutable_heap_graph_reference_table() {
    return &heap_graph_reference_table_;
  }

  // Accessors for the GPU / graphics tables.
  const tables::GpuTrackTable& gpu_track_table() const {
    return gpu_track_table_;
  }
  tables::GpuTrackTable* mutable_gpu_track_table() { return &gpu_track_table_; }

  const tables::VulkanMemoryAllocationsTable& vulkan_memory_allocations_table()
      const {
    return vulkan_memory_allocations_table_;
  }

  tables::VulkanMemoryAllocationsTable*
  mutable_vulkan_memory_allocations_table() {
    return &vulkan_memory_allocations_table_;
  }

  const tables::GraphicsFrameSliceTable& graphics_frame_slice_table() const {
    return graphics_frame_slice_table_;
  }

  tables::GraphicsFrameSliceTable* mutable_graphics_frame_slice_table() {
    return &graphics_frame_slice_table_;
  }

  // Accessors for the memory-snapshot tables.
  const tables::MemorySnapshotTable& memory_snapshot_table() const {
    return memory_snapshot_table_;
  }
  tables::MemorySnapshotTable* mutable_memory_snapshot_table() {
    return &memory_snapshot_table_;
  }

  const tables::ProcessMemorySnapshotTable& process_memory_snapshot_table()
      const {
    return process_memory_snapshot_table_;
  }
  tables::ProcessMemorySnapshotTable* mutable_process_memory_snapshot_table() {
    return &process_memory_snapshot_table_;
  }

  const tables::MemorySnapshotNodeTable& memory_snapshot_node_table() const {
    return memory_snapshot_node_table_;
  }
  tables::MemorySnapshotNodeTable* mutable_memory_snapshot_node_table() {
    return &memory_snapshot_node_table_;
  }

  const tables::MemorySnapshotEdgeTable& memory_snapshot_edge_table() const {
    return memory_snapshot_edge_table_;
  }
  tables::MemorySnapshotEdgeTable* mutable_memory_snapshot_edge_table() {
    return &memory_snapshot_edge_table_;
  }

  const StringPool& string_pool() const { return string_pool_; }
  StringPool* mutable_string_pool() { return &string_pool_; }

  // Number of interned strings in the pool. Includes the empty string w/ ID=0.
  size_t string_count() const { return string_pool_.size(); }

  // Start / end ts (in nanoseconds) across the parsed trace events.
  // Returns (0, 0) if the trace is empty.
  std::pair<int64_t, int64_t> GetTraceTimestampBoundsNs() const;
662 
  // Looks up the arg with key |key| inside the arg set |arg_set_id| and
  // writes its value to |result| (base::nullopt if no such arg exists).
  // Returns an error status if more than one arg in the set matches |key|.
  util::Status ExtractArg(uint32_t arg_set_id,
                          const char* key,
                          base::Optional<Variadic>* result) {
    const auto& args = arg_table();
    RowMap filtered = args.FilterToRowMap(
        {args.arg_set_id().eq(arg_set_id), args.key().eq(key)});
    if (filtered.empty()) {
      *result = base::nullopt;
      return util::OkStatus();
    }
    if (filtered.size() > 1) {
      return util::ErrStatus(
          "EXTRACT_ARG: received multiple args matching arg set id and key");
    }
    uint32_t idx = filtered.Get(0);
    *result = GetArgValue(idx);
    return util::OkStatus();
  }
681 
  // Materializes the arg stored at |row| of |arg_table_| into a |Variadic|.
  // The variant type is decoded from the row's interned |value_type| string;
  // bool/int/uint/pointer variants all share the int_value column.
  Variadic GetArgValue(uint32_t row) const {
    Variadic v;
    v.type = *GetVariadicTypeForId(arg_table_.value_type()[row]);

    // Force initialization of union to stop GCC complaining.
    v.int_value = 0;

    switch (v.type) {
      case Variadic::Type::kBool:
        v.bool_value = static_cast<bool>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kInt:
        v.int_value = *arg_table_.int_value()[row];
        break;
      case Variadic::Type::kUint:
        v.uint_value = static_cast<uint64_t>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kString: {
        // A null string column maps to the null string id rather than being
        // left uninitialized.
        auto opt_value = arg_table_.string_value()[row];
        v.string_value = opt_value ? *opt_value : kNullStringId;
        break;
      }
      case Variadic::Type::kPointer:
        v.pointer_value = static_cast<uint64_t>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kReal:
        v.real_value = *arg_table_.real_value()[row];
        break;
      case Variadic::Type::kJson: {
        auto opt_value = arg_table_.string_value()[row];
        v.json_value = opt_value ? *opt_value : kNullStringId;
        break;
      }
    }
    return v;
  }
718 
  // Returns the interned string id representing |type| in the args table's
  // |value_type| column; |variadic_type_ids_| is indexed by the numeric value
  // of |Variadic::Type|.
  StringId GetIdForVariadicType(Variadic::Type type) const {
    return variadic_type_ids_[type];
  }
722 
GetVariadicTypeForId(StringId id)723   base::Optional<Variadic::Type> GetVariadicTypeForId(StringId id) const {
724     auto it =
725         std::find(variadic_type_ids_.begin(), variadic_type_ids_.end(), id);
726     if (it == variadic_type_ids_.end())
727       return base::nullopt;
728 
729     int64_t idx = std::distance(variadic_type_ids_.begin(), it);
730     return static_cast<Variadic::Type>(idx);
731   }
732 
 private:
  using StringHash = uint64_t;

  // Non-copyable and non-movable: the table members below are constructed
  // with pointers to sibling members (|string_pool_| and parent tables),
  // which copying/moving would leave dangling.
  TraceStorage(const TraceStorage&) = delete;
  TraceStorage& operator=(const TraceStorage&) = delete;

  TraceStorage(TraceStorage&&) = delete;
  TraceStorage& operator=(TraceStorage&&) = delete;

  // One entry for each unique string in the trace.
  StringPool string_pool_;

  // Stats about parsing the trace.
  StatsMap stats_{};

  // Extra data extracted from the trace. Includes:
  // * metadata from chrome and benchmarking infrastructure
  // * descriptions of android packages
  tables::MetadataTable metadata_table_{&string_pool_, nullptr};

  // Metadata for tracks. The second constructor argument is the parent table
  // (nullptr for root tables).
  tables::TrackTable track_table_{&string_pool_, nullptr};
  tables::GpuTrackTable gpu_track_table_{&string_pool_, &track_table_};
  tables::ProcessTrackTable process_track_table_{&string_pool_, &track_table_};
  tables::ThreadTrackTable thread_track_table_{&string_pool_, &track_table_};

  // Track tables for counter events.
  tables::CounterTrackTable counter_track_table_{&string_pool_, &track_table_};
  tables::ThreadCounterTrackTable thread_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::ProcessCounterTrackTable process_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::CpuCounterTrackTable cpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::IrqCounterTrackTable irq_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::SoftirqCounterTrackTable softirq_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::GpuCounterTrackTable gpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::GpuCounterGroupTable gpu_counter_group_table_{&string_pool_, nullptr};

  // Args for all other tables.
  tables::ArgTable arg_table_{&string_pool_, nullptr};
777 
778   // Information about all the threads and processes in the trace.
779   tables::ThreadTable thread_table_{&string_pool_, nullptr};
780   tables::ProcessTable process_table_{&string_pool_, nullptr};
781 
782   // Slices coming from userspace events (e.g. Chromium TRACE_EVENT macros).
783   tables::SliceTable slice_table_{&string_pool_, nullptr};
784 
785   // Flow events from userspace events (e.g. Chromium TRACE_EVENT macros).
786   tables::FlowTable flow_table_{&string_pool_, nullptr};
787 
788   // Slices from CPU scheduling data.
789   tables::SchedSliceTable sched_slice_table_{&string_pool_, nullptr};
790 
791   // Additional attributes for threads slices (sub-type of NestableSlices).
792   ThreadSlices thread_slices_;
793 
794   // Additional attributes for virtual track slices (sub-type of
795   // NestableSlices).
796   VirtualTrackSlices virtual_track_slices_;
797 
798   // Additional attributes for gpu track slices (sub-type of
799   // NestableSlices).
800   tables::GpuSliceTable gpu_slice_table_{&string_pool_, &slice_table_};
801 
802   // The values from the Counter events from the trace. This includes CPU
803   // frequency events as well systrace trace_marker counter events.
804   tables::CounterTable counter_table_{&string_pool_, nullptr};
805 
806   SqlStats sql_stats_;
807 
808   // These are instantaneous events in the trace. They have no duration
809   // and do not have a value that make sense to track over time.
810   // e.g. signal events
811   tables::InstantTable instant_table_{&string_pool_, nullptr};
812 
813   // Raw events are every ftrace event in the trace. The raw event includes
814   // the timestamp and the pid. The args for the raw event will be in the
815   // args table. This table can be used to generate a text version of the
816   // trace.
817   tables::RawTable raw_table_{&string_pool_, nullptr};
818 
819   tables::CpuTable cpu_table_{&string_pool_, nullptr};
820 
821   tables::CpuFreqTable cpu_freq_table_{&string_pool_, nullptr};
822 
823   tables::AndroidLogTable android_log_table_{&string_pool_, nullptr};
824 
825   tables::StackProfileMappingTable stack_profile_mapping_table_{&string_pool_,
826                                                                 nullptr};
827   tables::StackProfileFrameTable stack_profile_frame_table_{&string_pool_,
828                                                             nullptr};
829   tables::StackProfileCallsiteTable stack_profile_callsite_table_{&string_pool_,
830                                                                   nullptr};
831   tables::StackSampleTable stack_sample_table_{&string_pool_, nullptr};
832   tables::HeapProfileAllocationTable heap_profile_allocation_table_{
833       &string_pool_, nullptr};
834   tables::CpuProfileStackSampleTable cpu_profile_stack_sample_table_{
835       &string_pool_, &stack_sample_table_};
836   tables::PerfSampleTable perf_sample_table_{&string_pool_,
837                                              &stack_sample_table_};
838   tables::PackageListTable package_list_table_{&string_pool_, nullptr};
839   tables::ProfilerSmapsTable profiler_smaps_table_{&string_pool_, nullptr};
840 
841   // Symbol tables (mappings from frames to symbol names)
842   tables::SymbolTable symbol_table_{&string_pool_, nullptr};
843   tables::HeapGraphObjectTable heap_graph_object_table_{&string_pool_, nullptr};
844   tables::HeapGraphClassTable heap_graph_class_table_{&string_pool_, nullptr};
845   tables::HeapGraphReferenceTable heap_graph_reference_table_{&string_pool_,
846                                                               nullptr};
847 
848   tables::VulkanMemoryAllocationsTable vulkan_memory_allocations_table_{
849       &string_pool_, nullptr};
850 
851   tables::GraphicsFrameSliceTable graphics_frame_slice_table_{&string_pool_,
852                                                               &slice_table_};
853 
854   // Metadata for memory snapshot.
855   tables::MemorySnapshotTable memory_snapshot_table_{&string_pool_, nullptr};
856   tables::ProcessMemorySnapshotTable process_memory_snapshot_table_{
857       &string_pool_, nullptr};
858   tables::MemorySnapshotNodeTable memory_snapshot_node_table_{&string_pool_,
859                                                               nullptr};
860   tables::MemorySnapshotEdgeTable memory_snapshot_edge_table_{&string_pool_,
861                                                               nullptr};
862 
863   // The below array allow us to map between enums and their string
864   // representations.
865   std::array<StringId, Variadic::kMaxType + 1> variadic_type_ids_;
866 };
867 
868 }  // namespace trace_processor
869 }  // namespace perfetto
870 
871 namespace std {
872 
873 template <>
874 struct hash<::perfetto::trace_processor::BaseId> {
875   using argument_type = ::perfetto::trace_processor::BaseId;
876   using result_type = size_t;
877 
878   result_type operator()(const argument_type& r) const {
879     return std::hash<uint32_t>{}(r.value);
880   }
881 };
882 
// The typed id wrappers below all hash via their underlying BaseId value
// (see the hash<BaseId> specialization above).
template <>
struct hash<::perfetto::trace_processor::TrackId>
    : hash<::perfetto::trace_processor::BaseId> {};
template <>
struct hash<::perfetto::trace_processor::MappingId>
    : hash<::perfetto::trace_processor::BaseId> {};
template <>
struct hash<::perfetto::trace_processor::CallsiteId>
    : hash<::perfetto::trace_processor::BaseId> {};
template <>
struct hash<::perfetto::trace_processor::FrameId>
    : hash<::perfetto::trace_processor::BaseId> {};
895 
896 template <>
897 struct hash<::perfetto::trace_processor::tables::StackProfileFrameTable::Row> {
898   using argument_type =
899       ::perfetto::trace_processor::tables::StackProfileFrameTable::Row;
900   using result_type = size_t;
901 
902   result_type operator()(const argument_type& r) const {
903     return std::hash<::perfetto::trace_processor::StringId>{}(r.name) ^
904            std::hash<::perfetto::base::Optional<
905                ::perfetto::trace_processor::MappingId>>{}(r.mapping) ^
906            std::hash<int64_t>{}(r.rel_pc);
907   }
908 };
909 
910 template <>
911 struct hash<
912     ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row> {
913   using argument_type =
914       ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row;
915   using result_type = size_t;
916 
917   result_type operator()(const argument_type& r) const {
918     return std::hash<int64_t>{}(r.depth) ^
919            std::hash<::perfetto::base::Optional<
920                ::perfetto::trace_processor::CallsiteId>>{}(r.parent_id) ^
921            std::hash<::perfetto::trace_processor::FrameId>{}(r.frame_id);
922   }
923 };
924 
925 template <>
926 struct hash<
927     ::perfetto::trace_processor::tables::StackProfileMappingTable::Row> {
928   using argument_type =
929       ::perfetto::trace_processor::tables::StackProfileMappingTable::Row;
930   using result_type = size_t;
931 
932   result_type operator()(const argument_type& r) const {
933     return std::hash<::perfetto::trace_processor::StringId>{}(r.build_id) ^
934            std::hash<int64_t>{}(r.exact_offset) ^
935            std::hash<int64_t>{}(r.start_offset) ^
936            std::hash<int64_t>{}(r.start) ^ std::hash<int64_t>{}(r.end) ^
937            std::hash<int64_t>{}(r.load_bias) ^
938            std::hash<::perfetto::trace_processor::StringId>{}(r.name);
939   }
940 };
941 
942 }  // namespace std
943 
944 #endif  // SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_
945