1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "perfetto/ext/trace_processor/export_json.h"
18 #include "src/trace_processor/export_json.h"
19 
20 #include <inttypes.h>
21 #include <stdio.h>
22 #include <sstream>
23 
24 #include <algorithm>
25 #include <cmath>
26 #include <cstring>
27 #include <deque>
28 #include <limits>
29 #include <memory>
30 
31 #include "perfetto/base/build_config.h"
32 #include "perfetto/ext/base/string_splitter.h"
33 #include "perfetto/ext/base/string_utils.h"
34 #include "src/trace_processor/importers/json/json_utils.h"
35 #include "src/trace_processor/storage/metadata.h"
36 #include "src/trace_processor/storage/trace_storage.h"
37 #include "src/trace_processor/trace_processor_storage_impl.h"
38 #include "src/trace_processor/types/trace_processor_context.h"
39 
40 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
41 #include <json/reader.h>
42 #include <json/writer.h>
43 #endif
44 
45 namespace perfetto {
46 namespace trace_processor {
47 namespace json {
48 
49 namespace {
50 
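// OutputWriter that appends each string to a FILE*, flushing the file when
// the writer is destroyed.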
51 class FileWriter : public OutputWriter {
52  public:
53   FileWriter(FILE* file) : file_(file) {}
54   ~FileWriter() override { fflush(file_); }
55 
56   util::Status AppendString(const std::string& s) override {
57     size_t written =
58         fwrite(s.data(), sizeof(std::string::value_type), s.size(), file_);
59     if (written != s.size())
60       return util::ErrStatus("Error writing to file: %d", ferror(file_));
61     return util::OkStatus();
62   }
63 
64  private:
65   FILE* file_;
66 };
67 
68 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
69 using IndexMap = perfetto::trace_processor::TraceStorage::Stats::IndexMap;
70 
71 const char kLegacyEventArgsKey[] = "legacy_event";
72 const char kLegacyEventPassthroughUtidKey[] = "passthrough_utid";
73 const char kLegacyEventCategoryKey[] = "category";
74 const char kLegacyEventNameKey[] = "name";
75 const char kLegacyEventPhaseKey[] = "phase";
76 const char kLegacyEventDurationNsKey[] = "duration_ns";
77 const char kLegacyEventThreadTimestampNsKey[] = "thread_timestamp_ns";
78 const char kLegacyEventThreadDurationNsKey[] = "thread_duration_ns";
79 const char kLegacyEventThreadInstructionCountKey[] = "thread_instruction_count";
80 const char kLegacyEventThreadInstructionDeltaKey[] = "thread_instruction_delta";
81 const char kLegacyEventUseAsyncTtsKey[] = "use_async_tts";
82 const char kLegacyEventUnscopedIdKey[] = "unscoped_id";
83 const char kLegacyEventGlobalIdKey[] = "global_id";
84 const char kLegacyEventLocalIdKey[] = "local_id";
85 const char kLegacyEventIdScopeKey[] = "id_scope";
86 const char kStrippedArgument[] = "__stripped__";
87 
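// Returns the string for |id|, or an empty string if |id| is the null string.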
88 const char* GetNonNullString(const TraceStorage* storage, StringId id) {
89   return id == kNullStringId ? "" : storage->GetString(id).c_str();
90 }
91 
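// Drives the export: converts storage contents (slices, flows, raw events,
// CPU profile samples, metadata, stats and memory snapshots) into JSON trace
// events and writes them through |output|, applying the supplied filters.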
92 class JsonExporter {
93  public:
94   JsonExporter(const TraceStorage* storage,
95                OutputWriter* output,
96                ArgumentFilterPredicate argument_filter,
97                MetadataFilterPredicate metadata_filter,
98                LabelFilterPredicate label_filter)
99       : storage_(storage),
100         args_builder_(storage_),
101         writer_(output, argument_filter, metadata_filter, label_filter) {}
102 
103   util::Status Export() {
104     util::Status status = MapUniquePidsAndTids();
105     if (!status.ok())
106       return status;
107 
108     status = ExportThreadNames();
109     if (!status.ok())
110       return status;
111 
112     status = ExportProcessNames();
113     if (!status.ok())
114       return status;
115 
116     status = ExportSlices();
117     if (!status.ok())
118       return status;
119 
120     status = ExportFlows();
121     if (!status.ok())
122       return status;
123 
124     status = ExportRawEvents();
125     if (!status.ok())
126       return status;
127 
128     status = ExportCpuProfileSamples();
129     if (!status.ok())
130       return status;
131 
132     status = ExportMetadata();
133     if (!status.ok())
134       return status;
135 
136     status = ExportStats();
137     if (!status.ok())
138       return status;
139 
140     status = ExportMemorySnapshots();
141     if (!status.ok())
142       return status;
143 
144     return util::OkStatus();
145   }
146 
147  private:
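  // Low-level JSON emitter: writes the trace header and footer, serializes
  // individual events, and buffers async events so they can be emitted in an
  // order that Catapult accepts.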
148   class TraceFormatWriter {
149    public:
150     TraceFormatWriter(OutputWriter* output,
151                       ArgumentFilterPredicate argument_filter,
152                       MetadataFilterPredicate metadata_filter,
153                       LabelFilterPredicate label_filter)
154         : output_(output),
155           argument_filter_(argument_filter),
156           metadata_filter_(metadata_filter),
157           label_filter_(label_filter),
158           first_event_(true) {
159       Json::StreamWriterBuilder b;
160       b.settings_["indentation"] = "";
161       writer_.reset(b.newStreamWriter());
162       WriteHeader();
163     }
164 
165     ~TraceFormatWriter() { WriteFooter(); }
166 
167     void WriteCommonEvent(const Json::Value& event) {
168       if (label_filter_ && !label_filter_("traceEvents"))
169         return;
170 
171       DoWriteEvent(event);
172     }
173 
174     void AddAsyncBeginEvent(const Json::Value& event) {
175       if (label_filter_ && !label_filter_("traceEvents"))
176         return;
177 
178       async_begin_events_.push_back(event);
179     }
180 
181     void AddAsyncInstantEvent(const Json::Value& event) {
182       if (label_filter_ && !label_filter_("traceEvents"))
183         return;
184 
185       async_instant_events_.push_back(event);
186     }
187 
188     void AddAsyncEndEvent(const Json::Value& event) {
189       if (label_filter_ && !label_filter_("traceEvents"))
190         return;
191 
192       async_end_events_.push_back(event);
193     }
194 
195     void SortAndEmitAsyncEvents() {
196       // Catapult doesn't handle out-of-order begin/end events well, especially
197       // when their timestamps are the same, but their order is incorrect. Since
198       // we process events sorted by begin timestamp, |async_begin_events_| and
199       // |async_instant_events_| are already sorted. We now only have to sort
200       // |async_end_events_| and merge-sort all events into a single sequence.
201 
202       // Sort |async_end_events_|. Note that we should order by ascending
203       // timestamp, but in reverse-stable order. This way, a child slice's end
204       // is emitted before its parent's end event, even if both end events have
205       // the same timestamp. To accomplish this, we perform a stable sort in
206       // descending order and later iterate via reverse iterators.
207       struct {
208         bool operator()(const Json::Value& a, const Json::Value& b) const {
209           return a["ts"].asInt64() > b["ts"].asInt64();
210         }
211       } CompareEvents;
212       std::stable_sort(async_end_events_.begin(), async_end_events_.end(),
213                        CompareEvents);
214 
215       // Merge sort by timestamp. If events share the same timestamp, prefer
216       // instant events, then end events, so that old slices close before new
217       // ones are opened, but instant events remain in their deepest nesting
218       // level.
219       auto instant_event_it = async_instant_events_.begin();
220       auto end_event_it = async_end_events_.rbegin();
221       auto begin_event_it = async_begin_events_.begin();
222 
223       auto has_instant_event = instant_event_it != async_instant_events_.end();
224       auto has_end_event = end_event_it != async_end_events_.rend();
225       auto has_begin_event = begin_event_it != async_begin_events_.end();
226 
227       auto emit_next_instant = [&instant_event_it, &has_instant_event, this]() {
228         DoWriteEvent(*instant_event_it);
229         instant_event_it++;
230         has_instant_event = instant_event_it != async_instant_events_.end();
231       };
232       auto emit_next_end = [&end_event_it, &has_end_event, this]() {
233         DoWriteEvent(*end_event_it);
234         end_event_it++;
235         has_end_event = end_event_it != async_end_events_.rend();
236       };
237       auto emit_next_begin = [&begin_event_it, &has_begin_event, this]() {
238         DoWriteEvent(*begin_event_it);
239         begin_event_it++;
240         has_begin_event = begin_event_it != async_begin_events_.end();
241       };
242 
243       auto emit_next_instant_or_end = [&instant_event_it, &end_event_it,
244                                        &emit_next_instant, &emit_next_end]() {
245         if ((*instant_event_it)["ts"].asInt64() <=
246             (*end_event_it)["ts"].asInt64()) {
247           emit_next_instant();
248         } else {
249           emit_next_end();
250         }
251       };
252       auto emit_next_instant_or_begin = [&instant_event_it, &begin_event_it,
253                                          &emit_next_instant,
254                                          &emit_next_begin]() {
255         if ((*instant_event_it)["ts"].asInt64() <=
256             (*begin_event_it)["ts"].asInt64()) {
257           emit_next_instant();
258         } else {
259           emit_next_begin();
260         }
261       };
262       auto emit_next_end_or_begin = [&end_event_it, &begin_event_it,
263                                      &emit_next_end, &emit_next_begin]() {
264         if ((*end_event_it)["ts"].asInt64() <=
265             (*begin_event_it)["ts"].asInt64()) {
266           emit_next_end();
267         } else {
268           emit_next_begin();
269         }
270       };
271 
272       // While we still have events in all iterators, consider each.
273       while (has_instant_event && has_end_event && has_begin_event) {
274         if ((*instant_event_it)["ts"].asInt64() <=
275             (*end_event_it)["ts"].asInt64()) {
276           emit_next_instant_or_begin();
277         } else {
278           emit_next_end_or_begin();
279         }
280       }
281 
282       // Only instant and end events left.
283       while (has_instant_event && has_end_event) {
284         emit_next_instant_or_end();
285       }
286 
287       // Only instant and begin events left.
288       while (has_instant_event && has_begin_event) {
289         emit_next_instant_or_begin();
290       }
291 
292       // Only end and begin events left.
293       while (has_end_event && has_begin_event) {
294         emit_next_end_or_begin();
295       }
296 
297       // Remaining instant events.
298       while (has_instant_event) {
299         emit_next_instant();
300       }
301 
302       // Remaining end events.
303       while (has_end_event) {
304         emit_next_end();
305       }
306 
307       // Remaining begin events.
308       while (has_begin_event) {
309         emit_next_begin();
310       }
311     }
312 
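    // Emits an "M" (metadata) phase event, e.g. a thread_name or process_name
    // record for the given pid/tid.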
313     void WriteMetadataEvent(const char* metadata_type,
314                             const char* metadata_value,
315                             uint32_t pid,
316                             uint32_t tid) {
317       if (label_filter_ && !label_filter_("traceEvents"))
318         return;
319 
320       std::ostringstream ss;
321       if (!first_event_)
322         ss << ",\n";
323 
324       Json::Value value;
325       value["ph"] = "M";
326       value["cat"] = "__metadata";
327       value["ts"] = 0;
328       value["name"] = metadata_type;
329       value["pid"] = Json::Int(pid);
330       value["tid"] = Json::Int(tid);
331 
332       Json::Value args;
333       args["name"] = metadata_value;
334       value["args"] = args;
335 
336       writer_->write(value, &ss);
337       output_->AppendString(ss.str());
338       first_event_ = false;
339     }
340 
341     void MergeMetadata(const Json::Value& value) {
342       for (const auto& member : value.getMemberNames()) {
343         metadata_[member] = value[member];
344       }
345     }
346 
347     void AppendTelemetryMetadataString(const char* key, const char* value) {
348       metadata_["telemetry"][key].append(value);
349     }
350 
351     void AppendTelemetryMetadataInt(const char* key, int64_t value) {
352       metadata_["telemetry"][key].append(Json::Int64(value));
353     }
354 
355     void AppendTelemetryMetadataBool(const char* key, bool value) {
356       metadata_["telemetry"][key].append(value);
357     }
358 
359     void SetTelemetryMetadataTimestamp(const char* key, int64_t value) {
360       metadata_["telemetry"][key] = static_cast<double>(value) / 1000.0;
361     }
362 
363     void SetStats(const char* key, int64_t value) {
364       metadata_["trace_processor_stats"][key] = Json::Int64(value);
365     }
366 
367     void SetStats(const char* key, const IndexMap& indexed_values) {
368       constexpr const char* kBufferStatsPrefix = "traced_buf_";
369 
370       // Stats for the same buffer should be grouped together in the JSON.
371       if (strncmp(kBufferStatsPrefix, key, strlen(kBufferStatsPrefix)) == 0) {
372         for (const auto& value : indexed_values) {
373           metadata_["trace_processor_stats"]["traced_buf"][value.first]
374                    [key + strlen(kBufferStatsPrefix)] =
375                        Json::Int64(value.second);
376         }
377         return;
378       }
379 
380       // Other indexed value stats are exported as array under their key.
381       for (const auto& value : indexed_values) {
382         metadata_["trace_processor_stats"][key][value.first] =
383             Json::Int64(value.second);
384       }
385     }
386 
387     void AddSystemTraceData(const std::string& data) {
388       system_trace_data_ += data;
389     }
390 
391     void AddUserTraceData(const std::string& data) {
392       if (user_trace_data_.empty())
393         user_trace_data_ = "[";
394       user_trace_data_ += data;
395     }
396 
397    private:
398     void WriteHeader() {
399       if (!label_filter_)
400         output_->AppendString("{\"traceEvents\":[\n");
401     }
402 
403     void WriteFooter() {
404       SortAndEmitAsyncEvents();
405 
406       // Filter metadata entries.
407       if (metadata_filter_) {
408         for (const auto& member : metadata_.getMemberNames()) {
409           if (!metadata_filter_(member.c_str()))
410             metadata_[member] = kStrippedArgument;
411         }
412       }
413 
414       if ((!label_filter_ || label_filter_("traceEvents")) &&
415           !user_trace_data_.empty()) {
416         user_trace_data_ += "]";
417 
418         Json::CharReaderBuilder builder;
419         auto reader =
420             std::unique_ptr<Json::CharReader>(builder.newCharReader());
421         Json::Value result;
422         if (reader->parse(user_trace_data_.data(),
423                           user_trace_data_.data() + user_trace_data_.length(),
424                           &result, nullptr)) {
425           for (const auto& event : result) {
426             WriteCommonEvent(event);
427           }
428         } else {
429           PERFETTO_DLOG(
430               "can't parse legacy user json trace export, skipping. data: %s",
431               user_trace_data_.c_str());
432         }
433       }
434 
435       std::ostringstream ss;
436       if (!label_filter_)
437         ss << "]";
438 
439       if ((!label_filter_ || label_filter_("systemTraceEvents")) &&
440           !system_trace_data_.empty()) {
441         ss << ",\"systemTraceEvents\":\n";
442         writer_->write(Json::Value(system_trace_data_), &ss);
443       }
444 
445       if ((!label_filter_ || label_filter_("metadata")) && !metadata_.empty()) {
446         ss << ",\"metadata\":\n";
447         writer_->write(metadata_, &ss);
448       }
449 
450       if (!label_filter_)
451         ss << "}";
452 
453       output_->AppendString(ss.str());
454     }
455 
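    // Serializes one event, stripping or filtering its "args" first if an
    // argument filter is installed.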
456     void DoWriteEvent(const Json::Value& event) {
457       std::ostringstream ss;
458       if (!first_event_)
459         ss << ",\n";
460 
461       ArgumentNameFilterPredicate argument_name_filter;
462       bool strip_args =
463           argument_filter_ &&
464           !argument_filter_(event["cat"].asCString(), event["name"].asCString(),
465                             &argument_name_filter);
466       if ((strip_args || argument_name_filter) && event.isMember("args")) {
467         Json::Value event_copy = event;
468         if (strip_args) {
469           event_copy["args"] = kStrippedArgument;
470         } else {
471           auto& args = event_copy["args"];
472           for (const auto& member : event["args"].getMemberNames()) {
473             if (!argument_name_filter(member.c_str()))
474               args[member] = kStrippedArgument;
475           }
476         }
477         writer_->write(event_copy, &ss);
478       } else {
479         writer_->write(event, &ss);
480       }
481       first_event_ = false;
482 
483       output_->AppendString(ss.str());
484     }
485 
486     OutputWriter* output_;
487     ArgumentFilterPredicate argument_filter_;
488     MetadataFilterPredicate metadata_filter_;
489     LabelFilterPredicate label_filter_;
490 
491     std::unique_ptr<Json::StreamWriter> writer_;
492     bool first_event_;
493     Json::Value metadata_;
494     std::string system_trace_data_;
495     std::string user_trace_data_;
496     std::vector<Json::Value> async_begin_events_;
497     std::vector<Json::Value> async_instant_events_;
498     std::vector<Json::Value> async_end_events_;
499   };
500 
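  // Pre-converts every arg set in storage into a Json::Value so that args can
  // be looked up by arg set id while events are exported.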
501   class ArgsBuilder {
502    public:
503     explicit ArgsBuilder(const TraceStorage* storage)
504         : storage_(storage),
505           empty_value_(Json::objectValue),
506           nan_value_(Json::StaticString("NaN")),
507           inf_value_(Json::StaticString("Infinity")),
508           neg_inf_value_(Json::StaticString("-Infinity")) {
509       const auto& arg_table = storage_->arg_table();
510       uint32_t count = arg_table.row_count();
511       if (count == 0) {
512         args_sets_.resize(1, empty_value_);
513         return;
514       }
515       args_sets_.resize(arg_table.arg_set_id()[count - 1] + 1, empty_value_);
516 
517       for (uint32_t i = 0; i < count; ++i) {
518         ArgSetId set_id = arg_table.arg_set_id()[i];
519         const char* key = arg_table.key().GetString(i).c_str();
520         Variadic value = storage_->GetArgValue(i);
521         AppendArg(set_id, key, VariadicToJson(value));
522       }
523       PostprocessArgs();
524     }
525 
526     const Json::Value& GetArgs(ArgSetId set_id) const {
527       // If |set_id| was empty and added to the storage last, it may not be in
528       // args_sets_.
529       if (set_id >= args_sets_.size())
530         return empty_value_;
531       return args_sets_[set_id];
532     }
533 
534    private:
535     Json::Value VariadicToJson(Variadic variadic) {
536       switch (variadic.type) {
537         case Variadic::kInt:
538           return Json::Int64(variadic.int_value);
539         case Variadic::kUint:
540           return Json::UInt64(variadic.uint_value);
541         case Variadic::kString:
542           return GetNonNullString(storage_, variadic.string_value);
543         case Variadic::kReal:
544           if (std::isnan(variadic.real_value)) {
545             return nan_value_;
546           } else if (std::isinf(variadic.real_value) &&
547                      variadic.real_value > 0) {
548             return inf_value_;
549           } else if (std::isinf(variadic.real_value) &&
550                      variadic.real_value < 0) {
551             return neg_inf_value_;
552           } else {
553             return variadic.real_value;
554           }
555         case Variadic::kPointer:
556           return base::Uint64ToHexString(variadic.pointer_value);
557         case Variadic::kBool:
558           return variadic.bool_value;
559         case Variadic::kJson:
560           Json::CharReaderBuilder b;
561           auto reader = std::unique_ptr<Json::CharReader>(b.newCharReader());
562 
563           Json::Value result;
564           std::string v = GetNonNullString(storage_, variadic.json_value);
565           reader->parse(v.data(), v.data() + v.length(), &result, nullptr);
566           return result;
567       }
568       PERFETTO_FATAL("Not reached");  // For gcc.
569     }
570 
571     void AppendArg(ArgSetId set_id,
572                    const std::string& key,
573                    const Json::Value& value) {
574       Json::Value* target = &args_sets_[set_id];
575       for (base::StringSplitter parts(key, '.'); parts.Next();) {
576         if (PERFETTO_UNLIKELY(!target->isNull() && !target->isObject())) {
577           PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
578                         key.c_str(),
579                         args_sets_[set_id].toStyledString().c_str());
580           return;
581         }
582         std::string key_part = parts.cur_token();
583         size_t bracketpos = key_part.find('[');
584         if (bracketpos == key_part.npos) {  // A single item
585           target = &(*target)[key_part];
586         } else {  // A list item
587           target = &(*target)[key_part.substr(0, bracketpos)];
588           while (bracketpos != key_part.npos) {
589             // We constructed this string from an int earlier in trace_processor
590             // so it shouldn't be possible for this (or the StringToUInt32
591             // below) to fail.
592             std::string s =
593                 key_part.substr(bracketpos + 1, key_part.find(']', bracketpos) -
594                                                     bracketpos - 1);
595             if (PERFETTO_UNLIKELY(!target->isNull() && !target->isArray())) {
596               PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
597                             key.c_str(),
598                             args_sets_[set_id].toStyledString().c_str());
599               return;
600             }
601             base::Optional<uint32_t> index = base::StringToUInt32(s);
602             if (PERFETTO_UNLIKELY(!index)) {
603               PERFETTO_ELOG("Expected to be able to extract index from %s",
604                             key_part.c_str());
605               return;
606             }
607             target = &(*target)[index.value()];
608             bracketpos = key_part.find('[', bracketpos + 1);
609           }
610         }
611       }
612       *target = value;
613     }
614 
615     void PostprocessArgs() {
616       for (Json::Value& args : args_sets_) {
617         // Move all fields from "debug" key to upper level.
618         if (args.isMember("debug")) {
619           Json::Value debug = args["debug"];
620           args.removeMember("debug");
621           for (const auto& member : debug.getMemberNames()) {
622             args[member] = debug[member];
623           }
624         }
625 
626         // Rename source fields.
627         if (args.isMember("task")) {
628           if (args["task"].isMember("posted_from")) {
629             Json::Value posted_from = args["task"]["posted_from"];
630             args["task"].removeMember("posted_from");
631             if (posted_from.isMember("function_name")) {
632               args["src_func"] = posted_from["function_name"];
633               args["src_file"] = posted_from["file_name"];
634             } else if (posted_from.isMember("file_name")) {
635               args["src"] = posted_from["file_name"];
636             }
637           }
638           if (args["task"].empty())
639             args.removeMember("task");
640         }
641       }
642     }
643 
644     const TraceStorage* storage_;
645     std::vector<Json::Value> args_sets_;
646     const Json::Value empty_value_;
647     const Json::Value nan_value_;
648     const Json::Value inf_value_;
649     const Json::Value neg_inf_value_;
650   };
651 
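  // Assigns the pid/tid values used in the exported JSON to each upid/utid,
  // substituting synthetic ids when several processes or threads reuse the
  // same pid/tid.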
652   util::Status MapUniquePidsAndTids() {
653     const auto& process_table = storage_->process_table();
654     for (UniquePid upid = 0; upid < process_table.row_count(); upid++) {
655       uint32_t exported_pid = process_table.pid()[upid];
656       auto it_and_inserted =
657           exported_pids_to_upids_.emplace(exported_pid, upid);
658       if (!it_and_inserted.second) {
659         exported_pid = NextExportedPidOrTidForDuplicates();
660         it_and_inserted = exported_pids_to_upids_.emplace(exported_pid, upid);
661       }
662       upids_to_exported_pids_.emplace(upid, exported_pid);
663     }
664 
665     const auto& thread_table = storage_->thread_table();
666     for (UniqueTid utid = 0; utid < thread_table.row_count(); utid++) {
667       uint32_t exported_pid = 0;
668       base::Optional<UniquePid> upid = thread_table.upid()[utid];
669       if (upid) {
670         auto exported_pid_it = upids_to_exported_pids_.find(*upid);
671         PERFETTO_DCHECK(exported_pid_it != upids_to_exported_pids_.end());
672         exported_pid = exported_pid_it->second;
673       }
674 
675       uint32_t exported_tid = thread_table.tid()[utid];
676       auto it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
677           std::make_pair(exported_pid, exported_tid), utid);
678       if (!it_and_inserted.second) {
679         exported_tid = NextExportedPidOrTidForDuplicates();
680         it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
681             std::make_pair(exported_pid, exported_tid), utid);
682       }
683       utids_to_exported_pids_and_tids_.emplace(
684           utid, std::make_pair(exported_pid, exported_tid));
685     }
686 
687     return util::OkStatus();
688   }
689 
690   util::Status ExportThreadNames() {
691     const auto& thread_table = storage_->thread_table();
692     for (UniqueTid utid = 0; utid < thread_table.row_count(); ++utid) {
693       auto opt_name = thread_table.name()[utid];
694       if (!opt_name.is_null()) {
695         const char* thread_name = GetNonNullString(storage_, opt_name);
696         auto pid_and_tid = UtidToPidAndTid(utid);
697         writer_.WriteMetadataEvent("thread_name", thread_name,
698                                    pid_and_tid.first, pid_and_tid.second);
699       }
700     }
701     return util::OkStatus();
702   }
703 
704   util::Status ExportProcessNames() {
705     const auto& process_table = storage_->process_table();
706     for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
707       auto opt_name = process_table.name()[upid];
708       if (!opt_name.is_null()) {
709         const char* process_name = GetNonNullString(storage_, opt_name);
710         writer_.WriteMetadataEvent("process_name", process_name,
711                                    UpidToPid(upid), /*tid=*/0);
712       }
713     }
714     return util::OkStatus();
715   }
716 
717   util::Status ExportSlices() {
718     const auto& slices = storage_->slice_table();
719     for (uint32_t i = 0; i < slices.row_count(); ++i) {
720       // Skip slices with empty category - these are ftrace/system slices that
721       // were also imported into the raw table and will be exported from there
722       // by trace_to_text.
723       // TODO(b/153609716): Add a src column or do_not_export flag instead.
724       auto cat = slices.category().GetString(i);
725       if (cat.c_str() == nullptr || cat == "binder")
726         continue;
727 
728       Json::Value event;
729       event["ts"] = Json::Int64(slices.ts()[i] / 1000);
730       event["cat"] = GetNonNullString(storage_, slices.category()[i]);
731       event["name"] = GetNonNullString(storage_, slices.name()[i]);
732       event["pid"] = 0;
733       event["tid"] = 0;
734 
735       base::Optional<UniqueTid> legacy_utid;
736 
737       event["args"] =
738           args_builder_.GetArgs(slices.arg_set_id()[i]);  // Makes a copy.
739       if (event["args"].isMember(kLegacyEventArgsKey)) {
740 
741         if (event["args"][kLegacyEventArgsKey].isMember(
742                 kLegacyEventPassthroughUtidKey)) {
743           legacy_utid =
744               event["args"][kLegacyEventArgsKey][kLegacyEventPassthroughUtidKey]
745                   .asUInt();
746         }
747 
748         event["args"].removeMember(kLegacyEventArgsKey);
749       }
750 
751       // To prevent duplicate export of slices, only export slices on descriptor
752       // or chrome tracks (i.e. TrackEvent slices). Slices on other tracks may
753       // also be present as raw events and handled by trace_to_text. Only add
754       // more track types here if they are not already covered by trace_to_text.
755       TrackId track_id = slices.track_id()[i];
756 
757       const auto& track_table = storage_->track_table();
758 
759       uint32_t track_row = *track_table.id().IndexOf(track_id);
760       auto track_args_id = track_table.source_arg_set_id()[track_row];
761       const Json::Value* track_args = nullptr;
762       bool legacy_chrome_track = false;
763       bool is_child_track = false;
764       if (track_args_id) {
765         track_args = &args_builder_.GetArgs(*track_args_id);
766         legacy_chrome_track = (*track_args)["source"].asString() == "chrome";
767         is_child_track = track_args->isMember("parent_track_id");
768       }
769 
770       const auto& thread_track = storage_->thread_track_table();
771       const auto& process_track = storage_->process_track_table();
772       const auto& thread_slices = storage_->thread_slices();
773       const auto& virtual_track_slices = storage_->virtual_track_slices();
774 
775       int64_t duration_ns = slices.dur()[i];
776       int64_t thread_ts_ns = 0;
777       int64_t thread_duration_ns = 0;
778       int64_t thread_instruction_count = 0;
779       int64_t thread_instruction_delta = 0;
780 
781       base::Optional<uint32_t> thread_slice_row =
782           thread_slices.FindRowForSliceId(i);
783       if (thread_slice_row) {
784         thread_ts_ns = thread_slices.thread_timestamp_ns()[*thread_slice_row];
785         thread_duration_ns =
786             thread_slices.thread_duration_ns()[*thread_slice_row];
787         thread_instruction_count =
788             thread_slices.thread_instruction_counts()[*thread_slice_row];
789         thread_instruction_delta =
790             thread_slices.thread_instruction_deltas()[*thread_slice_row];
791       } else {
792         base::Optional<uint32_t> vtrack_slice_row =
793             virtual_track_slices.FindRowForSliceId(i);
794         if (vtrack_slice_row) {
795           thread_ts_ns =
796               virtual_track_slices.thread_timestamp_ns()[*vtrack_slice_row];
797           thread_duration_ns =
798               virtual_track_slices.thread_duration_ns()[*vtrack_slice_row];
799           thread_instruction_count =
800               virtual_track_slices
801                   .thread_instruction_counts()[*vtrack_slice_row];
802           thread_instruction_delta =
803               virtual_track_slices
804                   .thread_instruction_deltas()[*vtrack_slice_row];
805         }
806       }
807 
808       auto opt_thread_track_row = thread_track.id().IndexOf(TrackId{track_id});
809 
810       if (opt_thread_track_row && !is_child_track) {
811         // Synchronous (thread) slice or instant event.
812         UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
813         auto pid_and_tid = UtidToPidAndTid(utid);
814         event["pid"] = Json::Int(pid_and_tid.first);
815         event["tid"] = Json::Int(pid_and_tid.second);
816 
817         if (duration_ns == 0) {
818           // Use "I" instead of "i" phase for backwards-compat with old
819           // consumers.
820           event["ph"] = "I";
821           if (thread_ts_ns > 0) {
822             event["tts"] = Json::Int64(thread_ts_ns / 1000);
823           }
824           if (thread_instruction_count > 0) {
825             event["ticount"] = Json::Int64(thread_instruction_count);
826           }
827           event["s"] = "t";
828         } else {
829           if (duration_ns > 0) {
830             event["ph"] = "X";
831             event["dur"] = Json::Int64(duration_ns / 1000);
832           } else {
833             // If the slice didn't finish, the duration may be negative. Only
834             // write a begin event without an end event in this case.
835             event["ph"] = "B";
836           }
837           if (thread_ts_ns > 0) {
838             event["tts"] = Json::Int64(thread_ts_ns / 1000);
839             // Only write thread duration for completed events.
840             if (duration_ns > 0)
841               event["tdur"] = Json::Int64(thread_duration_ns / 1000);
842           }
843           if (thread_instruction_count > 0) {
844             event["ticount"] = Json::Int64(thread_instruction_count);
845             // Only write thread instruction delta for completed events.
846             if (duration_ns > 0)
847               event["tidelta"] = Json::Int64(thread_instruction_delta);
848           }
849         }
850         writer_.WriteCommonEvent(event);
851       } else if (is_child_track ||
852                  (legacy_chrome_track && track_args->isMember("source_id"))) {
853         // Async event slice.
854         auto opt_process_row = process_track.id().IndexOf(TrackId{track_id});
855         if (legacy_chrome_track) {
856           // Legacy async tracks are always process-associated and have args.
857           PERFETTO_DCHECK(opt_process_row);
858           PERFETTO_DCHECK(track_args);
859           uint32_t upid = process_track.upid()[*opt_process_row];
860           uint32_t exported_pid = UpidToPid(upid);
861           event["pid"] = Json::Int(exported_pid);
862           event["tid"] =
863               Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
864                                     : exported_pid);
865 
866           // Preserve original event IDs for legacy tracks. This is so that e.g.
867           // memory dump IDs show up correctly in the JSON trace.
868           PERFETTO_DCHECK(track_args->isMember("source_id"));
869           PERFETTO_DCHECK(track_args->isMember("source_id_is_process_scoped"));
870           PERFETTO_DCHECK(track_args->isMember("source_scope"));
871           uint64_t source_id =
872               static_cast<uint64_t>((*track_args)["source_id"].asInt64());
873           std::string source_scope = (*track_args)["source_scope"].asString();
874           if (!source_scope.empty())
875             event["scope"] = source_scope;
876           bool source_id_is_process_scoped =
877               (*track_args)["source_id_is_process_scoped"].asBool();
878           if (source_id_is_process_scoped) {
879             event["id2"]["local"] = base::Uint64ToHexString(source_id);
880           } else {
881             // Some legacy importers don't understand "id2" fields, so we use
882             // the "usually" global "id" field instead. This works as long as
883             // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
884             // "LOCAL_ID_PHASES" in catapult.
885             event["id"] = base::Uint64ToHexString(source_id);
886           }
887         } else {
888           if (opt_thread_track_row) {
889             UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
890             auto pid_and_tid = UtidToPidAndTid(utid);
891             event["pid"] = Json::Int(pid_and_tid.first);
892             event["tid"] = Json::Int(pid_and_tid.second);
893             event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
894           } else if (opt_process_row) {
895             uint32_t upid = process_track.upid()[*opt_process_row];
896             uint32_t exported_pid = UpidToPid(upid);
897             event["pid"] = Json::Int(exported_pid);
898             event["tid"] =
899                 Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
900                                       : exported_pid);
901             event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
902           } else {
903             if (legacy_utid) {
904               auto pid_and_tid = UtidToPidAndTid(*legacy_utid);
905               event["pid"] = Json::Int(pid_and_tid.first);
906               event["tid"] = Json::Int(pid_and_tid.second);
907             }
908 
909             // Some legacy importers don't understand "id2" fields, so we use
910             // the "usually" global "id" field instead. This works as long as
911             // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
912             // "LOCAL_ID_PHASES" in catapult.
913             event["id"] = base::Uint64ToHexString(track_id.value);
914           }
915         }
916 
917         if (thread_ts_ns > 0) {
918           event["tts"] = Json::Int64(thread_ts_ns / 1000);
919           event["use_async_tts"] = Json::Int(1);
920         }
921         if (thread_instruction_count > 0) {
922           event["ticount"] = Json::Int64(thread_instruction_count);
923           event["use_async_tts"] = Json::Int(1);
924         }
925 
926         if (duration_ns == 0) {  // Instant async event.
927           event["ph"] = "n";
928           writer_.AddAsyncInstantEvent(event);
929         } else {  // Async start and end.
930           event["ph"] = "b";
931           writer_.AddAsyncBeginEvent(event);
932           // If the slice didn't finish, the duration may be negative. Don't
933           // write the end event in this case.
934           if (duration_ns > 0) {
935             event["ph"] = "e";
936             event["ts"] = Json::Int64((slices.ts()[i] + duration_ns) / 1000);
937             if (thread_ts_ns > 0) {
938               event["tts"] =
939                   Json::Int64((thread_ts_ns + thread_duration_ns) / 1000);
940             }
941             if (thread_instruction_count > 0) {
942               event["ticount"] = Json::Int64(
943                   (thread_instruction_count + thread_instruction_delta));
944             }
945             event["args"].clear();
946             writer_.AddAsyncEndEvent(event);
947           }
948         }
949       } else {
950         // Global or process-scoped instant event.
951         PERFETTO_DCHECK(legacy_chrome_track || !is_child_track);
952         if (duration_ns != 0) {
953           // We don't support exporting slices on the default global or process
954           // track to JSON (JSON only supports instant events on these tracks).
955           PERFETTO_DLOG(
956               "skipping non-instant slice on global or process track");
957         } else {
958           // Use "I" instead of "i" phase for backwards-compat with old
959           // consumers.
960           event["ph"] = "I";
961 
962           auto opt_process_row = process_track.id().IndexOf(TrackId{track_id});
963           if (opt_process_row.has_value()) {
964             uint32_t upid = process_track.upid()[*opt_process_row];
965             uint32_t exported_pid = UpidToPid(upid);
966             event["pid"] = Json::Int(exported_pid);
967             event["tid"] =
968                 Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
969                                       : exported_pid);
970             event["s"] = "p";
971           } else {
972             event["s"] = "g";
973           }
974           writer_.WriteCommonEvent(event);
975         }
976       }
977     }
978     return util::OkStatus();
979   }
980 
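  // Builds a flow start ("s") or finish ("f") event anchored to |slice_id|.
  // Returns nullopt if the slice is not on a thread track, since catapult only
  // supports flow events attached to thread-track slices.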
981   base::Optional<Json::Value> CreateFlowEventV1(uint32_t flow_id,
982                                                 SliceId slice_id,
983                                                 std::string name,
984                                                 std::string cat,
985                                                 Json::Value args,
986                                                 bool flow_begin) {
987     const auto& slices = storage_->slice_table();
988     const auto& thread_tracks = storage_->thread_track_table();
989 
990     auto opt_slice_idx = slices.id().IndexOf(slice_id);
991     if (!opt_slice_idx)
992       return base::nullopt;
993     uint32_t slice_idx = opt_slice_idx.value();
994 
995     TrackId track_id = storage_->slice_table().track_id()[slice_idx];
996     auto opt_thread_track_idx = thread_tracks.id().IndexOf(track_id);
997     // catapult only supports flow events attached to thread-track slices
998     if (!opt_thread_track_idx)
999       return base::nullopt;
1000 
1001     UniqueTid utid = thread_tracks.utid()[opt_thread_track_idx.value()];
1002     auto pid_and_tid = UtidToPidAndTid(utid);
1003     Json::Value event;
1004     event["id"] = flow_id;
1005     event["pid"] = Json::Int(pid_and_tid.first);
1006     event["tid"] = Json::Int(pid_and_tid.second);
1007     event["cat"] = cat;
1008     event["name"] = name;
1009     event["ph"] = (flow_begin ? "s" : "f");
1010     event["ts"] = Json::Int64(slices.ts()[slice_idx] / 1000);
1011     if (!flow_begin) {
1012       event["bp"] = "e";
1013     }
1014     event["args"] = std::move(args);
1015     return std::move(event);
1016   }
1017 
1018   util::Status ExportFlows() {
1019     const auto& flow_table = storage_->flow_table();
1020     const auto& slice_table = storage_->slice_table();
1021     for (uint32_t i = 0; i < flow_table.row_count(); i++) {
1022       SliceId slice_out = flow_table.slice_out()[i];
1023       SliceId slice_in = flow_table.slice_in()[i];
1024       uint32_t arg_set_id = flow_table.arg_set_id()[i];
1025 
1026       std::string cat;
1027       std::string name;
1028       auto args = args_builder_.GetArgs(arg_set_id);
1029       if (arg_set_id != kInvalidArgSetId) {
1030         cat = args["cat"].asString();
1031         name = args["name"].asString();
1032         // Don't export these args since they are only used for this export and
1033         // weren't part of the original event.
1034         args.removeMember("name");
1035         args.removeMember("cat");
1036       } else {
1037         auto opt_slice_out_idx = slice_table.id().IndexOf(slice_out);
1038         PERFETTO_DCHECK(opt_slice_out_idx.has_value());
1039         StringId cat_id = slice_table.category()[opt_slice_out_idx.value()];
1040         StringId name_id = slice_table.name()[opt_slice_out_idx.value()];
1041         cat = GetNonNullString(storage_, cat_id);
1042         name = GetNonNullString(storage_, name_id);
1043       }
1044 
1045       auto out_event = CreateFlowEventV1(i, slice_out, name, cat, args,
1046                                          /* flow_begin = */ true);
1047       auto in_event = CreateFlowEventV1(i, slice_in, name, cat, std::move(args),
1048                                         /* flow_begin = */ false);
1049 
1050       if (out_event && in_event) {
1051         writer_.WriteCommonEvent(out_event.value());
1052         writer_.WriteCommonEvent(in_event.value());
1053       }
1054     }
1055     return util::OkStatus();
1056   }
1057 
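  // Rebuilds a JSON trace event from a raw table row whose legacy parameters
  // (category, name, phase, ids, thread timing) were stored in its arg set
  // during import.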
1058   Json::Value ConvertLegacyRawEventToJson(uint32_t index) {
1059     const auto& events = storage_->raw_table();
1060 
1061     Json::Value event;
1062     event["ts"] = Json::Int64(events.ts()[index] / 1000);
1063 
1064     UniqueTid utid = static_cast<UniqueTid>(events.utid()[index]);
1065     auto pid_and_tid = UtidToPidAndTid(utid);
1066     event["pid"] = Json::Int(pid_and_tid.first);
1067     event["tid"] = Json::Int(pid_and_tid.second);
1068 
1069     // Raw legacy events store all other params in the arg set. Make a copy of
1070     // the converted args here, parse, and then remove the legacy params.
1071     event["args"] = args_builder_.GetArgs(events.arg_set_id()[index]);
1072     const Json::Value& legacy_args = event["args"][kLegacyEventArgsKey];
1073 
1074     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventCategoryKey));
1075     event["cat"] = legacy_args[kLegacyEventCategoryKey];
1076 
1077     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventNameKey));
1078     event["name"] = legacy_args[kLegacyEventNameKey];
1079 
1080     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventPhaseKey));
1081     event["ph"] = legacy_args[kLegacyEventPhaseKey];
1082 
1083     // Object snapshot events are supposed to have a mandatory "snapshot" arg,
1084     // which may be removed in trace processor if it is empty.
1085     if (legacy_args[kLegacyEventPhaseKey] == "O" &&
1086         !event["args"].isMember("snapshot")) {
1087       event["args"]["snapshot"] = Json::Value(Json::objectValue);
1088     }
1089 
1090     if (legacy_args.isMember(kLegacyEventDurationNsKey))
1091       event["dur"] = legacy_args[kLegacyEventDurationNsKey].asInt64() / 1000;
1092 
1093     if (legacy_args.isMember(kLegacyEventThreadTimestampNsKey)) {
1094       event["tts"] =
1095           legacy_args[kLegacyEventThreadTimestampNsKey].asInt64() / 1000;
1096     }
1097 
1098     if (legacy_args.isMember(kLegacyEventThreadDurationNsKey)) {
1099       event["tdur"] =
1100           legacy_args[kLegacyEventThreadDurationNsKey].asInt64() / 1000;
1101     }
1102 
1103     if (legacy_args.isMember(kLegacyEventThreadInstructionCountKey))
1104       event["ticount"] = legacy_args[kLegacyEventThreadInstructionCountKey];
1105 
1106     if (legacy_args.isMember(kLegacyEventThreadInstructionDeltaKey))
1107       event["tidelta"] = legacy_args[kLegacyEventThreadInstructionDeltaKey];
1108 
1109     if (legacy_args.isMember(kLegacyEventUseAsyncTtsKey))
1110       event["use_async_tts"] = legacy_args[kLegacyEventUseAsyncTtsKey];
1111 
1112     if (legacy_args.isMember(kLegacyEventUnscopedIdKey)) {
1113       event["id"] = base::Uint64ToHexString(
1114           legacy_args[kLegacyEventUnscopedIdKey].asUInt64());
1115     }
1116 
1117     if (legacy_args.isMember(kLegacyEventGlobalIdKey)) {
1118       event["id2"]["global"] = base::Uint64ToHexString(
1119           legacy_args[kLegacyEventGlobalIdKey].asUInt64());
1120     }
1121 
1122     if (legacy_args.isMember(kLegacyEventLocalIdKey)) {
1123       event["id2"]["local"] = base::Uint64ToHexString(
1124           legacy_args[kLegacyEventLocalIdKey].asUInt64());
1125     }
1126 
1127     if (legacy_args.isMember(kLegacyEventIdScopeKey))
1128       event["scope"] = legacy_args[kLegacyEventIdScopeKey];
1129 
1130     event["args"].removeMember(kLegacyEventArgsKey);
1131 
1132     return event;
1133   }
1134 
1135   util::Status ExportRawEvents() {
1136     base::Optional<StringId> raw_legacy_event_key_id =
1137         storage_->string_pool().GetId("track_event.legacy_event");
1138     base::Optional<StringId> raw_legacy_system_trace_event_id =
1139         storage_->string_pool().GetId("chrome_event.legacy_system_trace");
1140     base::Optional<StringId> raw_legacy_user_trace_event_id =
1141         storage_->string_pool().GetId("chrome_event.legacy_user_trace");
1142     base::Optional<StringId> raw_chrome_metadata_event_id =
1143         storage_->string_pool().GetId("chrome_event.metadata");
1144 
1145     const auto& events = storage_->raw_table();
1146     for (uint32_t i = 0; i < events.row_count(); ++i) {
1147       if (raw_legacy_event_key_id &&
1148           events.name()[i] == *raw_legacy_event_key_id) {
1149         Json::Value event = ConvertLegacyRawEventToJson(i);
1150         writer_.WriteCommonEvent(event);
1151       } else if (raw_legacy_system_trace_event_id &&
1152                  events.name()[i] == *raw_legacy_system_trace_event_id) {
1153         Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
1154         PERFETTO_DCHECK(args.isMember("data"));
1155         writer_.AddSystemTraceData(args["data"].asString());
1156       } else if (raw_legacy_user_trace_event_id &&
1157                  events.name()[i] == *raw_legacy_user_trace_event_id) {
1158         Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
1159         PERFETTO_DCHECK(args.isMember("data"));
1160         writer_.AddUserTraceData(args["data"].asString());
1161       } else if (raw_chrome_metadata_event_id &&
1162                  events.name()[i] == *raw_chrome_metadata_event_id) {
1163         Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
1164         writer_.MergeMetadata(args);
1165       }
1166     }
1167     return util::OkStatus();
1168   }
1169 
1170   util::Status ExportCpuProfileSamples() {
1171     const tables::CpuProfileStackSampleTable& samples =
1172         storage_->cpu_profile_stack_sample_table();
1173     for (uint32_t i = 0; i < samples.row_count(); ++i) {
1174       Json::Value event;
1175       event["ts"] = Json::Int64(samples.ts()[i] / 1000);
1176 
1177       UniqueTid utid = static_cast<UniqueTid>(samples.utid()[i]);
1178       auto pid_and_tid = UtidToPidAndTid(utid);
1179       event["pid"] = Json::Int(pid_and_tid.first);
1180       event["tid"] = Json::Int(pid_and_tid.second);
1181 
1182       event["ph"] = "n";
1183       event["cat"] = "disabled-by-default-cpu_profiler";
1184       event["name"] = "StackCpuSampling";
1185       event["s"] = "t";
1186 
1187       // Add a dummy thread timestamp to this event to match the format of
1188       // instant events. Useful in the UI to view args of a selected group of
1189       // samples.
1190       event["tts"] = Json::Int64(1);
1191 
1192       // "n"-phase events are nestable async events which get tied together with
1193       // their id, so we need to give each one a unique ID as we only
1194       // want the samples to show up on their own track in the trace-viewer but
1195       // not nested together.
1196       static size_t g_id_counter = 0;
1197       event["id"] = base::Uint64ToHexString(++g_id_counter);
1198 
1199       const auto& callsites = storage_->stack_profile_callsite_table();
1200       const auto& frames = storage_->stack_profile_frame_table();
1201       const auto& mappings = storage_->stack_profile_mapping_table();
1202 
1203       std::vector<std::string> callstack;
1204       base::Optional<CallsiteId> opt_callsite_id = samples.callsite_id()[i];
1205 
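      // Walk the callsite chain from the sampled leaf frame up to the root,
      // formatting each frame as "symbol - mapping [build id]". Frames without
      // symbol data fall back to the hex-formatted relative PC.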
1206       while (opt_callsite_id) {
1207         CallsiteId callsite_id = *opt_callsite_id;
1208         uint32_t callsite_row = *callsites.id().IndexOf(callsite_id);
1209 
1210         FrameId frame_id = callsites.frame_id()[callsite_row];
1211         uint32_t frame_row = *frames.id().IndexOf(frame_id);
1212 
1213         MappingId mapping_id = frames.mapping()[frame_row];
1214         uint32_t mapping_row = *mappings.id().IndexOf(mapping_id);
1215 
1216         NullTermStringView symbol_name;
1217         auto opt_symbol_set_id = frames.symbol_set_id()[frame_row];
1218         if (opt_symbol_set_id) {
1219           symbol_name = storage_->GetString(
1220               storage_->symbol_table().name()[*opt_symbol_set_id]);
1221         }
1222 
1223         char frame_entry[1024];
1224         snprintf(frame_entry, sizeof(frame_entry), "%s - %s [%s]\n",
1225                  (symbol_name.empty()
1226                       ? base::Uint64ToHexString(
1227                             static_cast<uint64_t>(frames.rel_pc()[frame_row]))
1228                             .c_str()
1229                       : symbol_name.c_str()),
1230                  GetNonNullString(storage_, mappings.name()[mapping_row]),
1231                  GetNonNullString(storage_, mappings.build_id()[mapping_row]));
1232 
1233         callstack.emplace_back(frame_entry);
1234 
1235         opt_callsite_id = callsites.parent_id()[callsite_row];
1236       }
1237 
1238       std::string merged_callstack;
1239       for (auto entry = callstack.rbegin(); entry != callstack.rend();
1240            ++entry) {
1241         merged_callstack += *entry;
1242       }
1243 
1244       event["args"]["frames"] = merged_callstack;
1245       event["args"]["process_priority"] = samples.process_priority()[i];
1246 
1247       // TODO(oysteine): Used for backwards compatibility with the memlog
1248       // pipeline, should remove once we've switched to looking directly at the
1249       // tid.
1250       event["args"]["thread_id"] = Json::Int(pid_and_tid.second);
1251 
1252       writer_.WriteCommonEvent(event);
1253     }
1254 
1255     return util::OkStatus();
1256   }
1257 
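  // Translates trace-level metadata rows (benchmark names, descriptions,
  // timestamps, story tags) into the telemetry metadata section of the output.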
1258   util::Status ExportMetadata() {
1259     const auto& trace_metadata = storage_->metadata_table();
1260     const auto& keys = trace_metadata.name();
1261     const auto& int_values = trace_metadata.int_value();
1262     const auto& str_values = trace_metadata.str_value();
1263 
1264     // Create a mapping from key string ids to keys.
1265     std::unordered_map<StringId, metadata::KeyId> key_map;
1266     for (uint32_t i = 0; i < metadata::kNumKeys; ++i) {
1267       auto id = *storage_->string_pool().GetId(metadata::kNames[i]);
1268       key_map[id] = static_cast<metadata::KeyId>(i);
1269     }
1270 
1271     for (uint32_t pos = 0; pos < trace_metadata.row_count(); pos++) {
1272       // Cast away from enum type, as otherwise -Wswitch-enum will demand an
1273       // exhaustive list of cases, even if there's a default case.
1274       metadata::KeyId key = key_map[keys[pos]];
1275       switch (static_cast<size_t>(key)) {
1276         case metadata::benchmark_description:
1277           writer_.AppendTelemetryMetadataString(
1278               "benchmarkDescriptions", str_values.GetString(pos).c_str());
1279           break;
1280 
1281         case metadata::benchmark_name:
1282           writer_.AppendTelemetryMetadataString(
1283               "benchmarks", str_values.GetString(pos).c_str());
1284           break;
1285 
1286         case metadata::benchmark_start_time_us:
1287           writer_.SetTelemetryMetadataTimestamp("benchmarkStart",
1288                                                 *int_values[pos]);
1289           break;
1290 
1291         case metadata::benchmark_had_failures:
1292           writer_.AppendTelemetryMetadataBool("hadFailures", *int_values[pos]);
1293           break;
1294 
1295         case metadata::benchmark_label:
1296           writer_.AppendTelemetryMetadataString(
1297               "labels", str_values.GetString(pos).c_str());
1298           break;
1299 
1300         case metadata::benchmark_story_name:
1301           writer_.AppendTelemetryMetadataString(
1302               "stories", str_values.GetString(pos).c_str());
1303           break;
1304 
1305         case metadata::benchmark_story_run_index:
1306           writer_.AppendTelemetryMetadataInt("storysetRepeats",
1307                                              *int_values[pos]);
1308           break;
1309 
1310         case metadata::benchmark_story_run_time_us:
1311           writer_.SetTelemetryMetadataTimestamp("traceStart", *int_values[pos]);
1312           break;
1313 
1314         case metadata::benchmark_story_tags:  // repeated
1315           writer_.AppendTelemetryMetadataString(
1316               "storyTags", str_values.GetString(pos).c_str());
1317           break;
1318 
1319         default:
1320           PERFETTO_DLOG("Ignoring metadata key %zu", static_cast<size_t>(key));
1321           break;
1322       }
1323     }
1324     return util::OkStatus();
1325   }
1326 
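  // Copies trace processor's internal stats into the exported JSON; single
  // valued stats are written directly, indexed stats as per-index maps.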
1327   util::Status ExportStats() {
1328     const auto& stats = storage_->stats();
1329 
1330     for (size_t idx = 0; idx < stats::kNumKeys; idx++) {
1331       if (stats::kTypes[idx] == stats::kSingle) {
1332         writer_.SetStats(stats::kNames[idx], stats[idx].value);
1333       } else {
1334         PERFETTO_DCHECK(stats::kTypes[idx] == stats::kIndexed);
1335         writer_.SetStats(stats::kNames[idx], stats[idx].indexed_values);
1336       }
1337     }
1338 
1339     return util::OkStatus();
1340   }
1341 
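  // Converts each row of the memory snapshot table into memory-infra dump
  // events ("ph": "v", "cat": "disabled-by-default-memory-infra"). Per
  // snapshot it emits OS dump events (process_totals and vm_regions built
  // from process counters and the smaps table) and chrome dump events
  // (allocator nodes and ownership edges). As a rough, illustrative sketch of
  // an OS dump event (field values are made up):
  //   {
  //     "ph": "v", "cat": "disabled-by-default-memory-infra", "id": "0x1",
  //     "ts": 123, "pid": 42, "tid": -1, "name": "periodic_interval",
  //     "args": { "dumps": {
  //       "level_of_detail": "detailed",
  //       "process_totals": { "private_footprint_bytes": "1f40" },
  //       "process_mmaps": { "vm_regions": [ ... ] } } }
  //   }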
1342   util::Status ExportMemorySnapshots() {
1343     const auto& memory_snapshots = storage_->memory_snapshot_table();
1344     base::Optional<StringId> private_footprint_id =
1345         storage_->string_pool().GetId("chrome.private_footprint_kb");
1346     base::Optional<StringId> peak_resident_set_id =
1347         storage_->string_pool().GetId("chrome.peak_resident_set_kb");
1348 
1349     for (uint32_t memory_index = 0; memory_index < memory_snapshots.row_count();
1350          ++memory_index) {
1351       Json::Value event_base;
1352 
1353       event_base["ph"] = "v";
1354       event_base["cat"] = "disabled-by-default-memory-infra";
1355       auto snapshot_id = memory_snapshots.id()[memory_index].value;
1356       event_base["id"] = base::Uint64ToHexString(snapshot_id);
1357       int64_t snapshot_ts = memory_snapshots.timestamp()[memory_index];
1358       event_base["ts"] = Json::Int64(snapshot_ts / 1000);
1359       // TODO(crbug:1116359): Add dump type to the snapshot proto
1360       // to properly fill event_base["name"]
1361       event_base["name"] = "periodic_interval";
1362       event_base["args"]["dumps"]["level_of_detail"] = GetNonNullString(
1363           storage_, memory_snapshots.detail_level()[memory_index]);
1364 
1365       // Export OS dump events for processes with relevant data.
1366       const auto& process_table = storage_->process_table();
1367       for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
1368         Json::Value event =
1369             FillInProcessEventDetails(event_base, process_table.pid()[upid]);
1370         Json::Value& totals = event["args"]["dumps"]["process_totals"];
1371 
1372         const auto& process_counters = storage_->process_counter_track_table();
1373 
1374         for (uint32_t counter_index = 0;
1375              counter_index < process_counters.row_count(); ++counter_index) {
1376           if (process_counters.upid()[counter_index] != upid)
1377             continue;
1378           TrackId track_id = process_counters.id()[counter_index];
1379           if (private_footprint_id && (process_counters.name()[counter_index] ==
1380                                        private_footprint_id)) {
1381             totals["private_footprint_bytes"] = base::Uint64ToHexStringNoPrefix(
1382                 GetCounterValue(track_id, snapshot_ts));
1383           } else if (peak_resident_set_id &&
1384                      (process_counters.name()[counter_index] ==
1385                       peak_resident_set_id)) {
1386             totals["peak_resident_set_size"] = base::Uint64ToHexStringNoPrefix(
1387                 GetCounterValue(track_id, snapshot_ts));
1388           }
1389         }
1390 
1391         auto process_args_id = process_table.arg_set_id()[upid];
1392         if (process_args_id) {
1393           const Json::Value* process_args =
1394               &args_builder_.GetArgs(process_args_id);
1395           if (process_args->isMember("is_peak_rss_resettable")) {
1396             totals["is_peak_rss_resettable"] =
1397                 (*process_args)["is_peak_rss_resettable"];
1398           }
1399         }
1400 
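        // Emit one vm_regions entry per smaps row belonging to this process
        // and snapshot timestamp. The short keys mirror the memory-infra
        // vm_regions format and map 1:1 onto the smaps columns used below,
        // e.g. "mf" = mapped file name, "sa" = start address, "sz" = size and
        // "bs" = byte stats (private/shared clean & dirty, proportional,
        // swap).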
1401         Json::Value& smaps =
1402             event["args"]["dumps"]["process_mmaps"]["vm_regions"];
1403         const auto& smaps_table = storage_->profiler_smaps_table();
1404         for (uint32_t smaps_index = 0; smaps_index < smaps_table.row_count();
1405              ++smaps_index) {
1406           if (smaps_table.upid()[smaps_index] != upid)
1407             continue;
1408           if (smaps_table.ts()[smaps_index] != snapshot_ts)
1409             continue;
1410           Json::Value region;
1411           region["mf"] =
1412               GetNonNullString(storage_, smaps_table.file_name()[smaps_index]);
1413           region["pf"] =
1414               Json::Int64(smaps_table.protection_flags()[smaps_index]);
1415           region["sa"] = base::Uint64ToHexStringNoPrefix(
1416               static_cast<uint64_t>(smaps_table.start_address()[smaps_index]));
1417           region["sz"] = base::Uint64ToHexStringNoPrefix(
1418               static_cast<uint64_t>(smaps_table.size_kb()[smaps_index]));
1419           region["ts"] =
1420               Json::Int64(smaps_table.module_timestamp()[smaps_index]);
1421           region["id"] = GetNonNullString(
1422               storage_, smaps_table.module_debugid()[smaps_index]);
1423           region["df"] = GetNonNullString(
1424               storage_, smaps_table.module_debug_path()[smaps_index]);
1425           region["bs"]["pc"] =
1426               base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(
1427                   smaps_table.private_clean_resident_kb()[smaps_index]));
1428           region["bs"]["pd"] =
1429               base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(
1430                   smaps_table.private_dirty_kb()[smaps_index]));
1431           region["bs"]["pss"] =
1432               base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(
1433                   smaps_table.proportional_resident_kb()[smaps_index]));
1434           region["bs"]["sc"] =
1435               base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(
1436                   smaps_table.shared_clean_resident_kb()[smaps_index]));
1437           region["bs"]["sd"] =
1438               base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(
1439                   smaps_table.shared_dirty_resident_kb()[smaps_index]));
1440           region["bs"]["sw"] = base::Uint64ToHexStringNoPrefix(
1441               static_cast<uint64_t>(smaps_table.swap_kb()[smaps_index]));
1442           smaps.append(region);
1443         }
1444 
1445         if (!totals.empty() || !smaps.empty())
1446           writer_.WriteCommonEvent(event);
1447       }
1448 
1449       // Export chrome dump events for process snapshots in the current
1450       // memory snapshot.
1451       const auto& process_snapshots = storage_->process_memory_snapshot_table();
1452 
1453       for (uint32_t process_index = 0;
1454            process_index < process_snapshots.row_count(); ++process_index) {
1455         if (process_snapshots.snapshot_id()[process_index].value != snapshot_id)
1456           continue;
1457 
1458         auto process_snapshot_id = process_snapshots.id()[process_index].value;
1459         UniquePid upid = process_snapshots.upid()[process_index];
1460         Json::Value event =
1461             FillInProcessEventDetails(event_base, UpidToPid(upid));
1462 
1463         const auto& snapshot_nodes = storage_->memory_snapshot_node_table();
1464 
1465         for (uint32_t node_index = 0; node_index < snapshot_nodes.row_count();
1466              ++node_index) {
1467           if (snapshot_nodes.process_snapshot_id()[node_index].value !=
1468               process_snapshot_id) {
1469             continue;
1470           }
1471           const char* path =
1472               GetNonNullString(storage_, snapshot_nodes.path()[node_index]);
1473           event["args"]["dumps"]["allocators"][path]["guid"] =
1474               base::Uint64ToHexStringNoPrefix(
1475                   static_cast<uint64_t>(snapshot_nodes.id()[node_index].value));
1476           if (snapshot_nodes.size()[node_index]) {
1477             AddAttributeToMemoryNode(&event, path, "size",
1478                                      snapshot_nodes.size()[node_index],
1479                                      "bytes");
1480           }
1481           if (snapshot_nodes.effective_size()[node_index]) {
1482             AddAttributeToMemoryNode(
1483                 &event, path, "effective_size",
1484                 snapshot_nodes.effective_size()[node_index], "bytes");
1485           }
1486 
1487           auto node_args_id = snapshot_nodes.arg_set_id()[node_index];
1488           if (!node_args_id)
1489             continue;
1490           const Json::Value* node_args =
1491               &args_builder_.GetArgs(node_args_id.value());
1492           for (const auto& arg_name : node_args->getMemberNames()) {
1493             const Json::Value& arg_value = (*node_args)[arg_name]["value"];
1494             if (arg_value.empty())
1495               continue;
1496             if (arg_value.isString()) {
1497               AddAttributeToMemoryNode(&event, path, arg_name,
1498                                        arg_value.asString());
1499             } else if (arg_value.isInt64()) {
1500               Json::Value unit = (*node_args)[arg_name]["unit"];
1501               if (unit.empty())
1502                 unit = "unknown";
1503               AddAttributeToMemoryNode(&event, path, arg_name,
1504                                        arg_value.asInt64(), unit.asString());
1505             }
1506           }
1507         }
1508 
1509         const auto& snapshot_edges = storage_->memory_snapshot_edge_table();
1510 
1511         for (uint32_t edge_index = 0; edge_index < snapshot_edges.row_count();
1512              ++edge_index) {
1513           SnapshotNodeId source_node_id =
1514               snapshot_edges.source_node_id()[edge_index];
1515           uint32_t source_node_row =
1516               *snapshot_nodes.id().IndexOf(source_node_id);
1517 
1518           if (snapshot_nodes.process_snapshot_id()[source_node_row].value !=
1519               process_snapshot_id) {
1520             continue;
1521           }
1522           Json::Value edge;
1523           edge["source"] = base::Uint64ToHexStringNoPrefix(
1524               snapshot_edges.source_node_id()[edge_index].value);
1525           edge["target"] = base::Uint64ToHexStringNoPrefix(
1526               snapshot_edges.target_node_id()[edge_index].value);
1527           edge["importance"] =
1528               Json::Int(snapshot_edges.importance()[edge_index]);
1529           edge["type"] = "ownership";
1530           event["args"]["dumps"]["allocators_graph"].append(edge);
1531         }
1532         writer_.WriteCommonEvent(event);
1533       }
1534     }
1535     return util::OkStatus();
1536   }
1537 
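  // Maps a trace processor UniquePid to the pid emitted in the JSON (reused
  // pids are remapped to substitute values; see the comment on
  // next_exported_pid_or_tid_for_duplicates_ below).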
1538   uint32_t UpidToPid(UniquePid upid) {
1539     auto pid_it = upids_to_exported_pids_.find(upid);
1540     PERFETTO_DCHECK(pid_it != upids_to_exported_pids_.end());
1541     return pid_it->second;
1542   }
1543 
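  // Same as UpidToPid, but for threads: returns the exported {pid, tid} pair
  // for a UniqueTid.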
1544   std::pair<uint32_t, uint32_t> UtidToPidAndTid(UniqueTid utid) {
1545     auto pid_and_tid_it = utids_to_exported_pids_and_tids_.find(utid);
1546     PERFETTO_DCHECK(pid_and_tid_it != utids_to_exported_pids_and_tids_.end());
1547     return pid_and_tid_it->second;
1548   }
1549 
1550   uint32_t NextExportedPidOrTidForDuplicates() {
1551     // Ensure that the exported substitute value does not collide with a valid
1552     // pid/tid. Such a collision would be very unlikely in practice.
1553     while (IsValidPidOrTid(next_exported_pid_or_tid_for_duplicates_))
1554       next_exported_pid_or_tid_for_duplicates_--;
1555     return next_exported_pid_or_tid_for_duplicates_--;
1556   }
1557 
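  // Linear scan over the process and thread tables to check whether a
  // candidate substitute value collides with a real pid or tid in the trace.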
1558   bool IsValidPidOrTid(uint32_t pid_or_tid) {
1559     const auto& process_table = storage_->process_table();
1560     for (UniquePid upid = 0; upid < process_table.row_count(); upid++) {
1561       if (process_table.pid()[upid] == pid_or_tid)
1562         return true;
1563     }
1564 
1565     const auto& thread_table = storage_->thread_table();
1566     for (UniqueTid utid = 0; utid < thread_table.row_count(); utid++) {
1567       if (thread_table.tid()[utid] == pid_or_tid)
1568         return true;
1569     }
1570 
1571     return false;
1572   }
1573 
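  // Returns a copy of the per-snapshot base event stamped with the given pid;
  // "tid" is set to -1.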
1574   Json::Value FillInProcessEventDetails(const Json::Value& event,
1575                                         uint32_t pid) {
1576     Json::Value output = event;
1577     output["pid"] = Json::Int(pid);
1578     output["tid"] = Json::Int(-1);
1579     return output;
1580   }
1581 
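  // Adds a "scalar" attribute under args.dumps.allocators[path].attrs[key];
  // the value is encoded as a hex string without a 0x prefix.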
1582   void AddAttributeToMemoryNode(Json::Value* event,
1583                                 const std::string& path,
1584                                 const std::string& key,
1585                                 int64_t value,
1586                                 const std::string& units) {
1587     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
1588         base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(value));
1589     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
1590         "scalar";
1591     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
1592         units;
1593   }
1594 
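  // String-valued counterpart of the above; writes the attribute with type
  // "string" and an optional unit.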
1595   void AddAttributeToMemoryNode(Json::Value* event,
1596                                 const std::string& path,
1597                                 const std::string& key,
1598                                 const std::string& value,
1599                                 const std::string& units = "") {
1600     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
1601         value;
1602     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
1603         "string";
1604     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
1605         units;
1606   }
1607 
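  // Returns the value of the counter on |track_id| sampled exactly at
  // timestamp |ts|, or 0 if no such sample exists.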
1608   uint64_t GetCounterValue(TrackId track_id, int64_t ts) {
1609     const auto& counter_table = storage_->counter_table();
1610     auto begin = counter_table.ts().begin();
1611     auto end = counter_table.ts().end();
1612     PERFETTO_DCHECK(counter_table.ts().IsSorted() &&
1613                     counter_table.ts().IsColumnType<int64_t>());
1614     // The timestamp column is sorted, so we can binary search for a matching
1615     // timestamp. Note that we don't use RowMap operations like FilterInto()
1616     // here because they bloat trace processor's binary size in Chrome too much.
1617     auto it = std::lower_bound(begin, end, ts,
1618                                [](const SqlValue& value, int64_t expected_ts) {
1619                                  return value.AsLong() < expected_ts;
1620                                });
1621     for (; it < end; ++it) {
1622       if ((*it).AsLong() != ts)
1623         break;
1624       if (counter_table.track_id()[it.row()].value == track_id.value)
1625         return static_cast<uint64_t>(counter_table.value()[it.row()]);
1626     }
1627     return 0;
1628   }
1629 
1630   const TraceStorage* storage_;
1631   ArgsBuilder args_builder_;
1632   TraceFormatWriter writer_;
1633 
1634   // If a pid/tid is duplicated between two or more different processes/threads
1635   // (pid/tid reuse), we export the subsequent occurrences with substitute
1636   // pids/tids that are visibly different from regular pids/tids - counting
1637   // down from uint32_t max.
1638   uint32_t next_exported_pid_or_tid_for_duplicates_ =
1639       std::numeric_limits<uint32_t>::max();
1640 
1641   std::map<UniquePid, uint32_t> upids_to_exported_pids_;
1642   std::map<uint32_t, UniquePid> exported_pids_to_upids_;
1643   std::map<UniqueTid, std::pair<uint32_t, uint32_t>>
1644       utids_to_exported_pids_and_tids_;
1645   std::map<std::pair<uint32_t, uint32_t>, UniqueTid>
1646       exported_pids_and_tids_to_utids_;
1647 };
1648 
1649 #endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1650 
1651 }  // namespace
1652 
1653 OutputWriter::OutputWriter() = default;
1654 OutputWriter::~OutputWriter() = default;
1655 
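// Public entry point: exports the contents of |storage| as legacy Chrome JSON
// through |output|, applying the optional argument/metadata/label filters.
// When the JSON build flag is disabled this returns an error status instead.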
1656 util::Status ExportJson(const TraceStorage* storage,
1657                         OutputWriter* output,
1658                         ArgumentFilterPredicate argument_filter,
1659                         MetadataFilterPredicate metadata_filter,
1660                         LabelFilterPredicate label_filter) {
1661 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1662   JsonExporter exporter(storage, output, std::move(argument_filter),
1663                         std::move(metadata_filter), std::move(label_filter));
1664   return exporter.Export();
1665 #else
1666   perfetto::base::ignore_result(storage);
1667   perfetto::base::ignore_result(output);
1668   perfetto::base::ignore_result(argument_filter);
1669   perfetto::base::ignore_result(metadata_filter);
1670   perfetto::base::ignore_result(label_filter);
1671   return util::ErrStatus("JSON support is not compiled in this build");
1672 #endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1673 }
1674 
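// Convenience overload that unwraps the TraceStorage from a
// TraceProcessorStorage instance and forwards to the overload above.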
1675 util::Status ExportJson(TraceProcessorStorage* tp,
1676                         OutputWriter* output,
1677                         ArgumentFilterPredicate argument_filter,
1678                         MetadataFilterPredicate metadata_filter,
1679                         LabelFilterPredicate label_filter) {
1680   const TraceStorage* storage = reinterpret_cast<TraceProcessorStorageImpl*>(tp)
1681                                     ->context()
1682                                     ->storage.get();
1683   return ExportJson(storage, output, argument_filter, metadata_filter,
1684                     label_filter);
1685 }
1686 
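// Convenience overload that writes to a FILE* via FileWriter, with no filters.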
1687 util::Status ExportJson(const TraceStorage* storage, FILE* output) {
1688   FileWriter writer(output);
1689   return ExportJson(storage, &writer, nullptr, nullptr, nullptr);
1690 }
1691 
1692 }  // namespace json
1693 }  // namespace trace_processor
1694 }  // namespace perfetto
1695 
1696