// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/profile-generator.h"

#include <algorithm>

#include "src/codegen/source-position.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/profiler-stats.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"

namespace v8 {
namespace internal {

void SourcePositionTable::SetPosition(int pc_offset, int line,
                                      int inlining_id) {
  DCHECK_GE(pc_offset, 0);
  DCHECK_GT(line, 0);  // The 1-based number of the source line.
  // It's possible that we map multiple source positions to a pc_offset in
  // optimized code. Usually these map to the same line, so there is no
  // difference here as we only store line number and not line/col in the form
  // of a script offset. Ignore any subsequent sets to the same offset.
  if (!pc_offsets_to_lines_.empty() &&
      pc_offsets_to_lines_.back().pc_offset == pc_offset) {
    return;
  }
  // Check that we are inserting in ascending order, so that the vector remains
  // sorted.
  DCHECK(pc_offsets_to_lines_.empty() ||
         pc_offsets_to_lines_.back().pc_offset < pc_offset);
  if (pc_offsets_to_lines_.empty() ||
      pc_offsets_to_lines_.back().line_number != line ||
      pc_offsets_to_lines_.back().inlining_id != inlining_id) {
    pc_offsets_to_lines_.push_back({pc_offset, line, inlining_id});
  }
}

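// Returns the source line active at |pc_offset|. The table is kept sorted
// by pc_offset, so binary-search for the last entry at or before the given
// offset.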
int SourcePositionTable::GetSourceLineNumber(int pc_offset) const {
  if (pc_offsets_to_lines_.empty()) {
    return v8::CpuProfileNode::kNoLineNumberInfo;
  }
  auto it = std::lower_bound(
      pc_offsets_to_lines_.begin(), pc_offsets_to_lines_.end(),
      SourcePositionTuple{pc_offset, 0, SourcePosition::kNotInlined});
  if (it != pc_offsets_to_lines_.begin()) --it;
  return it->line_number;
}

int SourcePositionTable::GetInliningId(int pc_offset) const {
  if (pc_offsets_to_lines_.empty()) {
    return SourcePosition::kNotInlined;
  }
  auto it = std::lower_bound(
      pc_offsets_to_lines_.begin(), pc_offsets_to_lines_.end(),
      SourcePositionTuple{pc_offset, 0, SourcePosition::kNotInlined});
  if (it != pc_offsets_to_lines_.begin()) --it;
  return it->inlining_id;
}

void SourcePositionTable::print() const {
  base::OS::Print(" - source position table at %p\n", this);
  for (const SourcePositionTuple& pos_info : pc_offsets_to_lines_) {
    base::OS::Print("    %d --> line_number: %d inlining_id: %d\n",
                    pos_info.pc_offset, pos_info.line_number,
                    pos_info.inlining_id);
  }
}

const char* const CodeEntry::kWasmResourceNamePrefix = "wasm ";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";

const char* const CodeEntry::kProgramEntryName = "(program)";
const char* const CodeEntry::kIdleEntryName = "(idle)";
const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";
const char* const CodeEntry::kRootEntryName = "(root)";

base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
    CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
    CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
    CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry,
                          CodeEntry::UnresolvedEntryCreateTrait>::type
    CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::RootEntryCreateTrait>::type
    CodeEntry::kRootEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
  return new CodeEntry(CodeEventListener::FUNCTION_TAG,
                       CodeEntry::kProgramEntryName);
}

CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
  return new CodeEntry(CodeEventListener::FUNCTION_TAG,
                       CodeEntry::kIdleEntryName);
}

CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
  return new CodeEntry(CodeEventListener::BUILTIN_TAG,
                       CodeEntry::kGarbageCollectorEntryName);
}

CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
  return new CodeEntry(CodeEventListener::FUNCTION_TAG,
                       CodeEntry::kUnresolvedFunctionName);
}

CodeEntry* CodeEntry::RootEntryCreateTrait::Create() {
  return new CodeEntry(CodeEventListener::FUNCTION_TAG,
                       CodeEntry::kRootEntryName);
}

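// Hash on the script id and start position when the entry is backed by a
// script; otherwise fall back to the name, resource name and line number.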
uint32_t CodeEntry::GetHash() const {
  uint32_t hash = ComputeUnseededHash(tag());
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    hash ^= ComputeUnseededHash(static_cast<uint32_t>(script_id_));
    hash ^= ComputeUnseededHash(static_cast<uint32_t>(position_));
  } else {
    hash ^= ComputeUnseededHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
    hash ^= ComputeUnseededHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
    hash ^= ComputeUnseededHash(line_number_);
  }
  return hash;
}

bool CodeEntry::IsSameFunctionAs(const CodeEntry* entry) const {
  if (this == entry) return true;
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    return script_id_ == entry->script_id_ && position_ == entry->position_;
  }
  return name_ == entry->name_ && resource_name_ == entry->resource_name_ &&
         line_number_ == entry->line_number_;
}

void CodeEntry::SetBuiltinId(Builtins::Name id) {
  bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
  bit_field_ = BuiltinIdField::update(bit_field_, id);
}

int CodeEntry::GetSourceLine(int pc_offset) const {
  if (line_info_) return line_info_->GetSourceLineNumber(pc_offset);
  return v8::CpuProfileNode::kNoLineNumberInfo;
}

void CodeEntry::SetInlineStacks(
    std::unordered_set<std::unique_ptr<CodeEntry>, Hasher, Equals>
        inline_entries,
    std::unordered_map<int, std::vector<CodeEntryAndLineNumber>>
        inline_stacks) {
  EnsureRareData()->inline_entries_ = std::move(inline_entries);
  rare_data_->inline_stacks_ = std::move(inline_stacks);
}

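// Returns the stack of frames inlined at |pc_offset|, or nullptr when the
// offset does not fall inside inlined code.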
const std::vector<CodeEntryAndLineNumber>* CodeEntry::GetInlineStack(
    int pc_offset) const {
  if (!line_info_) return nullptr;

  int inlining_id = line_info_->GetInliningId(pc_offset);
  if (inlining_id == SourcePosition::kNotInlined) return nullptr;
  DCHECK(rare_data_);

  auto it = rare_data_->inline_stacks_.find(inlining_id);
  return it != rare_data_->inline_stacks_.end() ? &it->second : nullptr;
}

void CodeEntry::set_deopt_info(
    const char* deopt_reason, int deopt_id,
    std::vector<CpuProfileDeoptFrame> inlined_frames) {
  DCHECK(!has_deopt_info());
  RareData* rare_data = EnsureRareData();
  rare_data->deopt_reason_ = deopt_reason;
  rare_data->deopt_id_ = deopt_id;
  rare_data->deopt_inlined_frames_ = std::move(inlined_frames);
}

void CodeEntry::FillFunctionInfo(SharedFunctionInfo shared) {
  if (!shared.script().IsScript()) return;
  Script script = Script::cast(shared.script());
  set_script_id(script.id());
  set_position(shared.StartPosition());
  if (shared.optimization_disabled()) {
    set_bailout_reason(GetBailoutReason(shared.disable_optimization_reason()));
  }
}

CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = rare_data_->deopt_reason_;
  DCHECK_NE(kNoDeoptimizationId, rare_data_->deopt_id_);
  if (rare_data_->deopt_inlined_frames_.empty()) {
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, static_cast<size_t>(std::max(0, position()))}));
  } else {
    info.stack = rare_data_->deopt_inlined_frames_;
  }
  return info;
}

CodeEntry::RareData* CodeEntry::EnsureRareData() {
  if (!rare_data_) {
    rare_data_.reset(new RareData());
  }
  return rare_data_.get();
}

void CodeEntry::print() const {
  base::OS::Print("CodeEntry: at %p\n", this);

  base::OS::Print(" - name: %s\n", name_);
  base::OS::Print(" - resource_name: %s\n", resource_name_);
  base::OS::Print(" - line_number: %d\n", line_number_);
  base::OS::Print(" - column_number: %d\n", column_number_);
  base::OS::Print(" - script_id: %d\n", script_id_);
  base::OS::Print(" - position: %d\n", position_);

  if (line_info_) {
    line_info_->print();
  }

  if (rare_data_) {
    base::OS::Print(" - deopt_reason: %s\n", rare_data_->deopt_reason_);
    base::OS::Print(" - bailout_reason: %s\n", rare_data_->bailout_reason_);
    base::OS::Print(" - deopt_id: %d\n", rare_data_->deopt_id_);

    if (!rare_data_->inline_stacks_.empty()) {
      base::OS::Print(" - inline stacks:\n");
      for (auto it = rare_data_->inline_stacks_.begin();
           it != rare_data_->inline_stacks_.end(); it++) {
        base::OS::Print("    inlining_id: [%d]\n", it->first);
        for (const auto& e : it->second) {
          base::OS::Print("     %s --> %d\n", e.code_entry->name(),
                          e.line_number);
        }
      }
    } else {
      base::OS::Print(" - inline stacks: (empty)\n");
    }

    if (!rare_data_->deopt_inlined_frames_.empty()) {
      base::OS::Print(" - deopt inlined frames:\n");
      for (const CpuProfileDeoptFrame& frame :
           rare_data_->deopt_inlined_frames_) {
        base::OS::Print("script_id: %d position: %zu\n", frame.script_id,
                        frame.position);
      }
    } else {
      base::OS::Print(" - deopt inlined frames: (empty)\n");
    }
  }
  base::OS::Print("\n");
}

void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}

ProfileNode* ProfileNode::FindChild(CodeEntry* entry, int line_number) {
  auto map_entry = children_.find({entry, line_number});
  return map_entry != children_.end() ? map_entry->second : nullptr;
}

ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry, int line_number) {
  auto map_entry = children_.find({entry, line_number});
  if (map_entry == children_.end()) {
    ProfileNode* node = new ProfileNode(tree_, entry, this, line_number);
    children_[{entry, line_number}] = node;
    children_list_.push_back(node);
    return node;
  } else {
    return map_entry->second;
  }
}

void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment the hit counter for the given source line, adding a new entry
  // if the line has not been seen before.
  auto map_entry = line_ticks_.find(src_line);
  if (map_entry == line_ticks_.end()) {
    line_ticks_[src_line] = 1;
  } else {
    line_ticks_[src_line]++;
  }
}

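// Copies the per-line hit counts into |entries|. Returns false when the
// caller-provided buffer is missing or too small to hold all recorded lines.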
bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == nullptr || length == 0) return false;

  unsigned line_count = static_cast<unsigned>(line_ticks_.size());

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  for (auto p = line_ticks_.begin(); p != line_ticks_.end(); p++, entry++) {
    entry->line = p->first;
    entry->hit_count = p->second;
  }

  return true;
}

void ProfileNode::Print(int indent) const {
  int line_number = line_number_ != 0 ? line_number_ : entry_->line_number();
  base::OS::Print("%5u %*s %s:%d %d %d #%d", self_ticks_, indent, "",
                  entry_->name(), line_number, source_type(),
                  entry_->script_id(), id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (const CpuProfileDeoptInfo& info : deopt_infos_) {
    base::OS::Print(
        "%*s;;; deopted at script_id: %d position: %zu with reason '%s'.\n",
        indent + 10, "", info.stack[0].script_id, info.stack[0].position,
        info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;;     Inline point: script_id %d position: %zu.\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  const char* bailout_reason = entry_->bailout_reason();
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (auto child : children_) {
    child.second->Print(indent + 2);
  }
}

class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};

ProfileTree::ProfileTree(Isolate* isolate)
    : next_node_id_(1),
      root_(new ProfileNode(this, CodeEntry::root_entry(), nullptr)),
      isolate_(isolate) {}

ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}

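// Walks |path| from its end (the root frame) down to the leaf, creating
// missing nodes along the way, and optionally attributes a tick to the
// resulting leaf node.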
ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                         int src_line, bool update_stats) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = nullptr;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (*it == nullptr) continue;
    last_entry = *it;
    node = node->FindOrAddChild(*it, v8::CpuProfileNode::kNoLineNumberInfo);
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}

ProfileNode* ProfileTree::AddPathFromEnd(const ProfileStackTrace& path,
                                         int src_line, bool update_stats,
                                         ProfilingMode mode) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = nullptr;
  int parent_line_number = v8::CpuProfileNode::kNoLineNumberInfo;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (it->code_entry == nullptr) continue;
    last_entry = it->code_entry;
    node = node->FindOrAddChild(it->code_entry, parent_line_number);
    parent_line_number = mode == ProfilingMode::kCallerLineNumbers
                             ? it->line_number
                             : v8::CpuProfileNode::kNoLineNumberInfo;
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}

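// Tracks how far a node's children have been visited during the
// non-recursive depth-first traversal below.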
class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  V8_INLINE ProfileNode* current_child() {
    return node->children()->at(child_idx_);
  }
  V8_INLINE bool has_current_child() {
    return child_idx_ < static_cast<int>(node->children()->size());
  }
  V8_INLINE void next_child() { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};

// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  std::vector<Position> stack;
  stack.emplace_back(root_);
  while (stack.size() > 0) {
    Position& current = stack.back();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.emplace_back(current.current_child());
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.size() > 1) {
        Position& parent = stack[stack.size() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.pop_back();
    }
  }
}

using v8::tracing::TracedValue;

std::atomic<uint32_t> CpuProfile::last_id_;

CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
                       CpuProfilingOptions options)
    : title_(title),
      options_(options),
      start_time_(base::TimeTicks::HighResolutionNow()),
      top_down_(profiler->isolate()),
      profiler_(profiler),
      streaming_next_sample_(0),
      id_(++last_id_) {
  // The startTime timestamp is not converted to Perfetto's clock domain and
  // will get out of sync with other timestamps Perfetto knows about, including
  // the automatic trace event "ts" timestamp. startTime is included for
  // backward compatibility with the tracing protocol but the value of "ts"
  // should be used instead (it is recorded nearly immediately after).
  auto value = TracedValue::Create();
  value->SetDouble("startTime", start_time_.since_origin().InMicroseconds());
  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "Profile", id_, "data", std::move(value));
}

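// Decides whether a sample taken at |source_sampling_interval| should be
// recorded, downsampling the source to this profile's requested interval.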
bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
  DCHECK_GE(source_sampling_interval, base::TimeDelta());

  // If the sampling source's sampling interval is 0, record as many samples
  // as possible irrespective of the profile's sampling interval. Manually
  // taken samples (via CollectSample) fall into this case as well.
  if (source_sampling_interval.IsZero()) return true;

  next_sample_delta_ -= source_sampling_interval;
  if (next_sample_delta_ <= base::TimeDelta()) {
    next_sample_delta_ =
        base::TimeDelta::FromMicroseconds(options_.sampling_interval_us());
    return true;
  }
  return false;
}

void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const ProfileStackTrace& path, int src_line,
                         bool update_stats, base::TimeDelta sampling_interval) {
  if (!CheckSubsample(sampling_interval)) return;

  ProfileNode* top_frame_node =
      top_down_.AddPathFromEnd(path, src_line, update_stats, options_.mode());

  bool should_record_sample =
      !timestamp.IsNull() && timestamp >= start_time_ &&
      (options_.max_samples() == CpuProfilingOptions::kNoSampleLimit ||
       samples_.size() < options_.max_samples());

  if (should_record_sample)
    samples_.push_back({top_frame_node, timestamp, src_line});

  const int kSamplesFlushCount = 100;
  const int kNodesFlushCount = 10;
  if (samples_.size() - streaming_next_sample_ >= kSamplesFlushCount ||
      top_down_.pending_nodes_count() >= kNodesFlushCount) {
    StreamPendingTraceEvents();
  }
}

namespace {

void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
  const CodeEntry* entry = node->entry();
  value->BeginDictionary("callFrame");
  value->SetString("functionName", entry->name());
  if (*entry->resource_name()) {
    value->SetString("url", entry->resource_name());
  }
  value->SetInteger("scriptId", entry->script_id());
  if (entry->line_number()) {
    value->SetInteger("lineNumber", entry->line_number() - 1);
  }
  if (entry->column_number()) {
    value->SetInteger("columnNumber", entry->column_number() - 1);
  }
  value->EndDictionary();
  value->SetInteger("id", node->id());
  if (node->parent()) {
    value->SetInteger("parent", node->parent()->id());
  }
  const char* deopt_reason = entry->bailout_reason();
  if (deopt_reason && deopt_reason[0] && strcmp(deopt_reason, "no reason")) {
    value->SetString("deoptReason", deopt_reason);
  }
}

}  // namespace

void CpuProfile::StreamPendingTraceEvents() {
  std::vector<const ProfileNode*> pending_nodes = top_down_.TakePendingNodes();
  if (pending_nodes.empty() && samples_.empty()) return;
  auto value = TracedValue::Create();

  if (!pending_nodes.empty() || streaming_next_sample_ != samples_.size()) {
    value->BeginDictionary("cpuProfile");
    if (!pending_nodes.empty()) {
      value->BeginArray("nodes");
      for (auto node : pending_nodes) {
        value->BeginDictionary();
        BuildNodeValue(node, value.get());
        value->EndDictionary();
      }
      value->EndArray();
    }
    if (streaming_next_sample_ != samples_.size()) {
      value->BeginArray("samples");
      for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
        value->AppendInteger(samples_[i].node->id());
      }
      value->EndArray();
    }
    value->EndDictionary();
  }
  if (streaming_next_sample_ != samples_.size()) {
    // timeDeltas are computed within CLOCK_MONOTONIC. However, trace event
    // "ts" timestamps are converted to CLOCK_BOOTTIME by Perfetto. To get
    // absolute timestamps in CLOCK_BOOTTIME from timeDeltas, add them to
    // the "ts" timestamp from the initial "Profile" trace event sent by
    // CpuProfile::CpuProfile().
    //
    // Note that if the system is suspended and resumed while samples_ is
    // captured, timeDeltas derived after resume will not be convertible to
    // correct CLOCK_BOOTTIME time values (for instance, producing
    // CLOCK_BOOTTIME time values in the middle of the suspended period).
    value->BeginArray("timeDeltas");
    base::TimeTicks lastTimestamp =
        streaming_next_sample_ ? samples_[streaming_next_sample_ - 1].timestamp
                               : start_time();
    for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
      value->AppendInteger(static_cast<int>(
          (samples_[i].timestamp - lastTimestamp).InMicroseconds()));
      lastTimestamp = samples_[i].timestamp;
    }
    value->EndArray();
    bool has_non_zero_lines =
        std::any_of(samples_.begin() + streaming_next_sample_, samples_.end(),
                    [](const SampleInfo& sample) { return sample.line != 0; });
    if (has_non_zero_lines) {
      value->BeginArray("lines");
      for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
        value->AppendInteger(samples_[i].line);
      }
      value->EndArray();
    }
    streaming_next_sample_ = samples_.size();
  }

  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "ProfileChunk", id_, "data", std::move(value));
}

void CpuProfile::FinishProfile() {
  end_time_ = base::TimeTicks::HighResolutionNow();
  StreamPendingTraceEvents();
  auto value = TracedValue::Create();
  // The endTime timestamp is not converted to Perfetto's clock domain and will
  // get out of sync with other timestamps Perfetto knows about, including the
  // automatic trace event "ts" timestamp. endTime is included for backward
  // compatibility with the tracing protocol: its presence in "data" is used by
  // devtools to identify the last ProfileChunk but the value of "ts" should be
  // used instead (it is recorded nearly immediately after).
  value->SetDouble("endTime", end_time_.since_origin().InMicroseconds());
  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "ProfileChunk", id_, "data", std::move(value));
}

void CpuProfile::Print() const {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
  ProfilerStats::Instance()->Print();
  ProfilerStats::Instance()->Clear();
}

CodeMap::CodeMap() = default;

CodeMap::~CodeMap() { Clear(); }

void CodeMap::Clear() {
  // First clean the free list as it's otherwise impossible to tell
  // the slot type.
  unsigned free_slot = free_list_head_;
  while (free_slot != kNoFreeSlot) {
    unsigned next_slot = code_entries_[free_slot].next_free_slot;
    code_entries_[free_slot].entry = nullptr;
    free_slot = next_slot;
  }
  for (auto slot : code_entries_) delete slot.entry;

  code_entries_.clear();
  code_map_.clear();
  free_list_head_ = kNoFreeSlot;
}

void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  ClearCodesInRange(addr, addr + size);
  unsigned index = AddCodeEntry(addr, entry);
  code_map_.emplace(addr, CodeEntryMapInfo{index, size});
}

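// Removes all entries that overlap the address range [start, end), deleting
// the ones that are no longer in use.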
void CodeMap::ClearCodesInRange(Address start, Address end) {
  auto left = code_map_.upper_bound(start);
  if (left != code_map_.begin()) {
    --left;
    if (left->first + left->second.size <= start) ++left;
  }
  auto right = left;
  for (; right != code_map_.end() && right->first < end; ++right) {
    if (!entry(right->second.index)->used()) {
      DeleteCodeEntry(right->second.index);
    }
  }
  code_map_.erase(left, right);
}

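// Finds the entry whose code range covers |addr|, optionally reporting the
// start address of that range via |out_instruction_start|.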
CodeEntry* CodeMap::FindEntry(Address addr, Address* out_instruction_start) {
  auto it = code_map_.upper_bound(addr);
  if (it == code_map_.begin()) return nullptr;
  --it;
  Address start_address = it->first;
  Address end_address = start_address + it->second.size;
  CodeEntry* ret = addr < end_address ? entry(it->second.index) : nullptr;
  DCHECK(!ret || (addr >= start_address && addr < end_address));
  if (ret && out_instruction_start) *out_instruction_start = start_address;
  return ret;
}

void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  auto it = code_map_.find(from);
  if (it == code_map_.end()) return;
  CodeEntryMapInfo info = it->second;
  code_map_.erase(it);
  DCHECK(from + info.size <= to || to + info.size <= from);
  ClearCodesInRange(to, to + info.size);
  code_map_.emplace(to, info);
}

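// Stores |entry| in a slot of code_entries_, reusing a slot from the free
// list when one is available, and returns the slot index.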
unsigned CodeMap::AddCodeEntry(Address start, CodeEntry* entry) {
  if (free_list_head_ == kNoFreeSlot) {
    code_entries_.push_back(CodeEntrySlotInfo{entry});
    return static_cast<unsigned>(code_entries_.size()) - 1;
  }
  unsigned index = free_list_head_;
  free_list_head_ = code_entries_[index].next_free_slot;
  code_entries_[index].entry = entry;
  return index;
}

void CodeMap::DeleteCodeEntry(unsigned index) {
  delete code_entries_[index].entry;
  code_entries_[index].next_free_slot = free_list_head_;
  free_list_head_ = index;
}

void CodeMap::Print() {
  for (const auto& pair : code_map_) {
    base::OS::Print("%p %5d %s\n", reinterpret_cast<void*>(pair.first),
                    pair.second.size, entry(pair.second.index)->name());
  }
}

CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
    : profiler_(nullptr), current_profiles_semaphore_(1) {}

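// Starts a profile with the given title unless one with that title is
// already running or kMaxSimultaneousProfiles would be exceeded.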
CpuProfilingStatus CpuProfilesCollection::StartProfiling(
    const char* title, CpuProfilingOptions options) {
  current_profiles_semaphore_.Wait();

  if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();

    return CpuProfilingStatus::kErrorTooManyProfilers;
  }
  for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
    if (strcmp(profile->title(), title) == 0) {
      // Ignore attempts to start a profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return kAlreadyStarted to force it to collect a sample.
      return CpuProfilingStatus::kAlreadyStarted;
    }
  }
  current_profiles_.emplace_back(new CpuProfile(profiler_, title, options));
  current_profiles_semaphore_.Signal();
  return CpuProfilingStatus::kStarted;
}

CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const bool empty_title = (title[0] == '\0');
  CpuProfile* profile = nullptr;
  current_profiles_semaphore_.Wait();

  auto it = std::find_if(current_profiles_.rbegin(), current_profiles_.rend(),
                         [&](const std::unique_ptr<CpuProfile>& p) {
                           return empty_title || strcmp(p->title(), title) == 0;
                         });

  if (it != current_profiles_.rend()) {
    (*it)->FinishProfile();
    profile = it->get();
    finished_profiles_.push_back(std::move(*it));
    // Convert reverse iterator to matching forward iterator.
    current_profiles_.erase(--(it.base()));
  }

  current_profiles_semaphore_.Signal();
  return profile;
}

bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.size() != 1) return false;
  return title[0] == '\0' || strcmp(current_profiles_[0]->title(), title) == 0;
}

void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  auto pos =
      std::find_if(finished_profiles_.begin(), finished_profiles_.end(),
                   [&](const std::unique_ptr<CpuProfile>& finished_profile) {
                     return finished_profile.get() == profile;
                   });
  DCHECK(pos != finished_profiles_.end());
  finished_profiles_.erase(pos);
}

namespace {

int64_t GreatestCommonDivisor(int64_t a, int64_t b) {
  return b ? GreatestCommonDivisor(b, a % b) : a;
}

}  // namespace

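// Returns the greatest common divisor of the sampling intervals of all
// current profiles, each first snapped up to a multiple of the profiler's
// base sampling interval.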
base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
  DCHECK(profiler_);

  int64_t base_sampling_interval_us =
      profiler_->sampling_interval().InMicroseconds();
  if (base_sampling_interval_us == 0) return base::TimeDelta();

  int64_t interval_us = 0;
  for (const auto& profile : current_profiles_) {
    // Snap the profile's requested sampling interval to the next multiple of
    // the base sampling interval.
    int64_t profile_interval_us =
        std::max<int64_t>(
            (profile->sampling_interval_us() + base_sampling_interval_us - 1) /
                base_sampling_interval_us,
            1) *
        base_sampling_interval_us;
    interval_us = GreatestCommonDivisor(interval_us, profile_interval_us);
  }
  return base::TimeDelta::FromMicroseconds(interval_us);
}

void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
    bool update_stats, base::TimeDelta sampling_interval) {
  // As starting / stopping profiles is rare relative to this method, we don't
  // bother minimizing the duration of lock holding, e.g. by copying the
  // contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
    profile->AddPath(timestamp, path, src_line, update_stats,
                     sampling_interval);
  }
  current_profiles_semaphore_.Signal();
}

}  // namespace internal
}  // namespace v8