1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_V8_PROFILER_H_
6 #define V8_V8_PROFILER_H_
7 
8 #include <limits.h>
9 
10 #include <memory>
11 #include <unordered_set>
12 #include <vector>
13 
14 #include "v8-local-handle.h"       // NOLINT(build/include_directory)
15 #include "v8-message.h"            // NOLINT(build/include_directory)
16 #include "v8-persistent-handle.h"  // NOLINT(build/include_directory)
17 
18 /**
19  * Profiler support for the V8 JavaScript engine.
20  */
21 namespace v8 {
22 
23 class HeapGraphNode;
24 struct HeapStatsUpdate;
25 class Object;
26 
27 using NativeObject = void*;
28 using SnapshotObjectId = uint32_t;
29 
/**
 * A single stack frame recorded as part of deoptimization information:
 * the script the frame belongs to and a position within that script.
 */
struct CpuProfileDeoptFrame {
  /** Id of the script containing this frame. */
  int script_id;
  /** Position of the frame within the script source. */
  size_t position;
};
34 
35 namespace internal {
36 class CpuProfile;
37 }  // namespace internal
38 
39 }  // namespace v8
40 
41 #ifdef V8_OS_WIN
42 template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
43 #endif
44 
45 namespace v8 {
46 
/** Describes one deoptimization event observed while profiling. */
struct V8_EXPORT CpuProfileDeoptInfo {
  /** A pointer to a static string owned by v8. */
  const char* deopt_reason;
  /** The stack of frames at the deoptimization site. */
  std::vector<CpuProfileDeoptFrame> stack;
};
52 
53 }  // namespace v8
54 
55 #ifdef V8_OS_WIN
56 template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
57 #endif
58 
59 namespace v8 {
60 
/**
 * CpuProfileNode represents a node in a call graph.
 */
class V8_EXPORT CpuProfileNode {
 public:
  /** A (source line, sample count) pair for per-line tick information. */
  struct LineTick {
    /** The 1-based number of the source line where the function originates. */
    int line;

    /** The count of samples associated with the source line. */
    unsigned int hit_count;
  };

  // An annotation hinting at the source of a CpuProfileNode.
  enum SourceType {
    // User-supplied script with associated resource information.
    kScript = 0,
    // Native scripts and provided builtins.
    kBuiltin = 1,
    // Callbacks into native code.
    kCallback = 2,
    // VM-internal functions or state.
    kInternal = 3,
    // A node that failed to symbolize.
    kUnresolved = 4,
  };

  /** Returns function name (empty string for anonymous functions). */
  Local<String> GetFunctionName() const;

  /**
   * Returns function name (empty string for anonymous functions).
   * The string ownership is *not* passed to the caller. It stays valid until
   * profile is deleted. The function is thread safe.
   */
  const char* GetFunctionNameStr() const;

  /** Returns id of the script where function is located. */
  int GetScriptId() const;

  /** Returns resource name for script from where the function originates. */
  Local<String> GetScriptResourceName() const;

  /**
   * Returns resource name for script from where the function originates.
   * The string ownership is *not* passed to the caller. It stays valid until
   * profile is deleted. The function is thread safe.
   */
  const char* GetScriptResourceNameStr() const;

  /**
   * Returns true if the script from where the function originates is flagged
   * as being shared cross-origin.
   */
  bool IsScriptSharedCrossOrigin() const;

  /**
   * Returns the number, 1-based, of the line where the function originates.
   * kNoLineNumberInfo if no line number information is available.
   */
  int GetLineNumber() const;

  /**
   * Returns 1-based number of the column where the function originates.
   * kNoColumnNumberInfo if no column number information is available.
   */
  int GetColumnNumber() const;

  /**
   * Returns the number of the function's source lines that collect the
   * samples.
   */
  unsigned int GetHitLineCount() const;

  /** Returns the set of source lines that collect the samples.
   *  The caller allocates buffer and responsible for releasing it.
   *  True if all available entries are copied, otherwise false.
   *  The function copies nothing if buffer is not large enough.
   */
  bool GetLineTicks(LineTick* entries, unsigned int length) const;

  /** Returns bailout reason for the function
    * if the optimization was disabled for it.
    */
  const char* GetBailoutReason() const;

  /**
    * Returns the count of samples where the function was currently executing.
    */
  unsigned GetHitCount() const;

  /** Returns id of the node. The id is unique within the tree. */
  unsigned GetNodeId() const;

  /**
   * Gets the type of the source which the node was captured from.
   */
  SourceType GetSourceType() const;

  /** Returns child nodes count of the node. */
  int GetChildrenCount() const;

  /** Retrieves a child node by index. */
  const CpuProfileNode* GetChild(int index) const;

  /** Retrieves the ancestor node, or null if the root. */
  const CpuProfileNode* GetParent() const;

  /** Retrieves deopt infos for the node. */
  const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;

  /** Sentinel values for missing line/column information. */
  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
174 
175 
/**
 * CpuProfile contains a CPU profile in a form of top-down call tree
 * (from main() down to functions that do all the work).
 */
class V8_EXPORT CpuProfile {
 public:
  /** Returns CPU profile title. */
  Local<String> GetTitle() const;

  /** Returns the root node of the top down call tree. */
  const CpuProfileNode* GetTopDownRoot() const;

  /**
   * Returns number of samples recorded. The samples are not recorded unless
   * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true.
   */
  int GetSamplesCount() const;

  /**
   * Returns profile node corresponding to the top frame of the sample at
   * the given index.
   */
  const CpuProfileNode* GetSample(int index) const;

  /**
   * Returns the timestamp of the sample. The timestamp is the number of
   * microseconds since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetSampleTimestamp(int index) const;

  /**
   * Returns time when the profile recording was started (in microseconds)
   * since some unspecified starting point.
   */
  int64_t GetStartTime() const;

  /**
   * Returns time when the profile recording was stopped (in microseconds)
   * since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetEndTime() const;

  /**
   * Deletes the profile and removes it from CpuProfiler's list.
   * All pointers to nodes previously returned become invalid.
   */
  void Delete();
};
226 
// Determines how line numbers are attributed to nodes in the profile tree.
enum CpuProfilingMode {
  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
  // (from the root to a leaf) will have line numbers that point to the start
  // line of the function, rather than the line of the callsite of the child.
  kLeafNodeLineNumbers,
  // In the resulting CpuProfile tree, nodes are separated based on the line
  // number of their callsite in their parent.
  kCallerLineNumbers,
};
236 
// Determines how names are derived for functions sampled.
enum CpuProfilingNamingMode {
  // Use the immediate name of functions at compilation time.
  kStandardNaming,
  // Use more verbose naming for functions without names, inferred from scope
  // where possible.
  kDebugNaming,
};
245 
// Determines when the CpuProfiler's internal logging is enabled.
enum CpuProfilingLoggingMode {
  // Enables logging when a profile is active, and disables logging when all
  // profiles are detached.
  kLazyLogging,
  // Enables logging for the lifetime of the CpuProfiler. Calls to
  // StartRecording are faster, at the expense of runtime overhead.
  kEagerLogging,
};
254 
// Enum for returning profiling status. Once StartProfiling is called,
// we want to return to clients whether the profiling was able to start
// correctly, or return a descriptive error.
enum class CpuProfilingStatus {
  // Profiling started successfully.
  kStarted,
  // A profile with this title was already started (see StartProfiling:
  // duplicate titles are silently ignored).
  kAlreadyStarted,
  // Profiling could not start: too many profilers are already active.
  kErrorTooManyProfilers
};
263 
264 /**
265  * Delegate for when max samples reached and samples are discarded.
266  */
267 class V8_EXPORT DiscardedSamplesDelegate {
268  public:
DiscardedSamplesDelegate()269   DiscardedSamplesDelegate() {}
270 
271   virtual ~DiscardedSamplesDelegate() = default;
272   virtual void Notify() = 0;
273 };
274 
/**
 * Optional profiling attributes.
 */
class V8_EXPORT CpuProfilingOptions {
 public:
  // Indicates that the sample buffer size should not be explicitly limited.
  static const unsigned kNoSampleLimit = UINT_MAX;

  /**
   * \param mode Type of computation of stack frame line numbers.
   * \param max_samples The maximum number of samples that should be recorded by
   *                    the profiler. Samples obtained after this limit will be
   *                    discarded.
   * \param sampling_interval_us controls the profile-specific target
   *                             sampling interval. The provided sampling
   *                             interval will be snapped to the next lowest
   *                             non-zero multiple of the profiler's sampling
   *                             interval, set via SetSamplingInterval(). If
   *                             zero, the sampling interval will be equal to
   *                             the profiler's sampling interval.
   * \param filter_context If specified, profiles will only contain frames
   *                       using this context. Other frames will be elided.
   */
  CpuProfilingOptions(
      CpuProfilingMode mode = kLeafNodeLineNumbers,
      unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
      MaybeLocal<Context> filter_context = MaybeLocal<Context>());

  /** Line-number attribution mode selected for this profile. */
  CpuProfilingMode mode() const { return mode_; }
  /** Maximum number of samples to record; kNoSampleLimit means unlimited. */
  unsigned max_samples() const { return max_samples_; }
  /** Profile-specific sampling interval; 0 means use the profiler's. */
  int sampling_interval_us() const { return sampling_interval_us_; }

 private:
  friend class internal::CpuProfile;

  // True if a filter context was supplied at construction time.
  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
  void* raw_filter_context() const;

  CpuProfilingMode mode_;
  unsigned max_samples_;
  int sampling_interval_us_;
  // Persistent handle to the context used for frame filtering, if any.
  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
};
318 
/**
 * Interface for controlling CPU profiling. Instance of the
 * profiler can be created using v8::CpuProfiler::New method.
 */
class V8_EXPORT CpuProfiler {
 public:
  /**
   * Creates a new CPU profiler for the |isolate|. The isolate must be
   * initialized. The profiler object must be disposed after use by calling
   * |Dispose| method.
   */
  static CpuProfiler* New(Isolate* isolate,
                          CpuProfilingNamingMode = kDebugNaming,
                          CpuProfilingLoggingMode = kLazyLogging);

  /**
   * Synchronously collect current stack sample in all profilers attached to
   * the |isolate|. The call does not affect number of ticks recorded for
   * the current top node.
   */
  static void CollectSample(Isolate* isolate);

  /**
   * Disposes the CPU profiler object.
   */
  void Dispose();

  /**
   * Changes default CPU profiler sampling interval to the specified number
   * of microseconds. Default interval is 1000us. This method must be called
   * when there are no profiles being recorded.
   */
  void SetSamplingInterval(int us);

  /**
   * Sets whether or not the profiler should prioritize consistency of sample
   * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
   * may result in greater variance in sample timings from the platform's
   * scheduler. Defaults to enabled. This method must be called when there are
   * no profiles being recorded.
   */
  void SetUsePreciseSampling(bool);

  /**
   * Starts collecting a CPU profile. Title may be an empty string. Several
   * profiles may be collected at once. Attempts to start collecting several
   * profiles with the same title are silently ignored.
   */
  CpuProfilingStatus StartProfiling(
      Local<String> title, CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  /**
   * Starts profiling with the same semantics as above, except with expanded
   * parameters.
   *
   * |record_samples| parameter controls whether individual samples should
   * be recorded in addition to the aggregated tree.
   *
   * |max_samples| controls the maximum number of samples that should be
   * recorded by the profiler. Samples obtained after this limit will be
   * discarded.
   */
  CpuProfilingStatus StartProfiling(
      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
  /**
   * The same as StartProfiling above, but the CpuProfilingMode defaults to
   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
   * profiler.
   */
  CpuProfilingStatus StartProfiling(Local<String> title,
                                    bool record_samples = false);

  /**
   * Stops collecting CPU profile with a given title and returns it.
   * If the title given is empty, finishes the last profile started.
   */
  CpuProfile* StopProfiling(Local<String> title);

  /**
   * Generate more detailed source positions to code objects. This results in
   * better results when mapping profiling samples to script source.
   */
  static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);

 private:
  // Instances are created via New() and destroyed via Dispose(); copying
  // is suppressed by declaring (without defining) these special members.
  CpuProfiler();
  ~CpuProfiler();
  CpuProfiler(const CpuProfiler&);
  CpuProfiler& operator=(const CpuProfiler&);
};
411 
/**
 * HeapSnapshotEdge represents a directed connection between heap
 * graph nodes: from retainers to retained nodes.
 */
class V8_EXPORT HeapGraphEdge {
 public:
  enum Type {
    kContextVariable = 0,  // A variable from a function context.
    kElement = 1,          // An element of an array.
    kProperty = 2,         // A named object property.
    kInternal = 3,         // A link that can't be accessed from JS,
                           // thus, its name isn't a real property name
                           // (e.g. parts of a ConsString).
    kHidden = 4,           // A link that is needed for proper sizes
                           // calculation, but may be hidden from user.
    kShortcut = 5,         // A link that must not be followed during
                           // sizes calculation.
    kWeak = 6              // A weak reference (ignored by the GC).
  };

  /** Returns edge type (see HeapGraphEdge::Type). */
  Type GetType() const;

  /**
   * Returns edge name. This can be a variable name, an element index, or
   * a property name.
   */
  Local<Value> GetName() const;

  /** Returns origin node. */
  const HeapGraphNode* GetFromNode() const;

  /** Returns destination node. */
  const HeapGraphNode* GetToNode() const;
};
447 
448 
/**
 * HeapGraphNode represents a node in a heap graph.
 */
class V8_EXPORT HeapGraphNode {
 public:
  enum Type {
    kHidden = 0,         // Hidden node, may be filtered when shown to user.
    kArray = 1,          // An array of elements.
    kString = 2,         // A string.
    kObject = 3,         // A JS object (except for arrays and strings).
    kCode = 4,           // Compiled code.
    kClosure = 5,        // Function closure.
    kRegExp = 6,         // RegExp.
    kHeapNumber = 7,     // Number stored in the heap.
    kNative = 8,         // Native object (not from V8 heap).
    kSynthetic = 9,      // Synthetic object, usually used for grouping
                         // snapshot items together.
    kConsString = 10,    // Concatenated string. A pair of pointers to strings.
    kSlicedString = 11,  // Sliced string. A fragment of another string.
    kSymbol = 12,        // A Symbol (ES6).
    kBigInt = 13         // BigInt.
  };

  /** Returns node type (see HeapGraphNode::Type). */
  Type GetType() const;

  /**
   * Returns node name. Depending on node's type this can be the name
   * of the constructor (for objects), the name of the function (for
   * closures), string value, or an empty string (for compiled code).
   */
  Local<String> GetName() const;

  /**
   * Returns node id. For the same heap object, the id remains the same
   * across all snapshots.
   */
  SnapshotObjectId GetId() const;

  /** Returns node's own size, in bytes. */
  size_t GetShallowSize() const;

  /** Returns child nodes count of the node. */
  int GetChildrenCount() const;

  /** Retrieves a child by index. */
  const HeapGraphEdge* GetChild(int index) const;
};
497 
498 
/**
 * An interface for exporting data from V8, using a "push" model.
 */
class V8_EXPORT OutputStream {
 public:
  enum WriteResult {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~OutputStream() = default;
  /** Notify about the end of stream. */
  virtual void EndOfStream() = 0;
  /** Get preferred output chunk size. Called only once. Defaults to 1024. */
  virtual int GetChunkSize() { return 1024; }
  /**
   * Writes the next chunk of snapshot data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
  /**
   * Writes the next chunk of heap stats data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   * The default implementation aborts immediately; override to consume
   * heap stats (see HeapProfiler::GetHeapStats).
   */
  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
    return kAbort;
  }
};
528 
/**
 * HeapSnapshots record the state of the JS heap at some moment.
 */
class V8_EXPORT HeapSnapshot {
 public:
  enum SerializationFormat {
    kJSON = 0  // See format description near 'Serialize' method.
  };

  /** Returns the root node of the heap graph. */
  const HeapGraphNode* GetRoot() const;

  /** Returns a node by its id. */
  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;

  /** Returns total nodes count in the snapshot. */
  int GetNodesCount() const;

  /** Returns a node by index. */
  const HeapGraphNode* GetNode(int index) const;

  /** Returns a max seen JS object Id. */
  SnapshotObjectId GetMaxSnapshotJSObjectId() const;

  /**
   * Deletes the snapshot and removes it from HeapProfiler's list.
   * All pointers to nodes, edges and paths previously returned become
   * invalid.
   */
  void Delete();

  /**
   * Prepare a serialized representation of the snapshot. The result
   * is written into the stream provided in chunks of specified size.
   * The total length of the serialized snapshot is unknown in
   * advance, it can be roughly equal to JS heap size (that means,
   * it can be really big - tens of megabytes).
   *
   * For the JSON format, heap contents are represented as an object
   * with the following structure:
   *
   *  {
   *    snapshot: {
   *      title: "...",
   *      uid: nnn,
   *      meta: { meta-info },
   *      node_count: nnn,
   *      edge_count: nnn
   *    },
   *    nodes: [nodes array],
   *    edges: [edges array],
   *    strings: [strings array]
   *  }
   *
   * Nodes reference strings, other nodes, and edges by their indexes
   * in corresponding arrays.
   */
  void Serialize(OutputStream* stream,
                 SerializationFormat format = kJSON) const;
};
589 
590 
/**
 * An interface for reporting progress and controlling long-running
 * activities.
 */
class V8_EXPORT ActivityControl {
 public:
  enum ControlOption {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~ActivityControl() = default;
  /**
   * Notify about current progress. The activity can be stopped by
   * returning kAbort as the callback result.
   */
  virtual ControlOption ReportProgressValue(int done, int total) = 0;
};
608 
/**
 * AllocationProfile is a sampled profile of allocations done by the program.
 * This is structured as a call-graph.
 */
class V8_EXPORT AllocationProfile {
 public:
  struct Allocation {
    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;
  };

  /**
   * Represents a node in the call-graph.
   */
  struct Node {
    /**
     * Name of the function. May be empty for anonymous functions or if the
     * script corresponding to this function has been unloaded.
     */
    Local<String> name;

    /**
     * Name of the script containing the function. May be empty if the script
     * name is not available, or if the script has been unloaded.
     */
    Local<String> script_name;

    /**
     * id of the script where the function is located. May be equal to
     * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
     */
    int script_id;

    /**
     * Start position of the function in the script.
     */
    int start_position;

    /**
     * 1-indexed line number where the function starts. May be
     * kNoLineNumberInfo if no line number information is available.
     */
    int line_number;

    /**
     * 1-indexed column number where the function starts. May be
     * kNoColumnNumberInfo if no column number information is available.
     */
    int column_number;

    /**
     * Unique id of the node.
     */
    uint32_t node_id;

    /**
     * List of callees called from this node for which we have sampled
     * allocations. The lifetime of the children is scoped to the containing
     * AllocationProfile.
     */
    std::vector<Node*> children;

    /**
     * List of self allocations done by this node in the call-graph.
     */
    std::vector<Allocation> allocations;
  };

  /**
   * Represent a single sample recorded for an allocation.
   */
  struct Sample {
    /**
     * id of the node in the profile tree.
     */
    uint32_t node_id;

    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;

    /**
     * Unique time-ordered id of the allocation sample. Can be used to track
     * what samples were added or removed between two snapshots.
     */
    uint64_t sample_id;
  };

  /**
   * Returns the root node of the call-graph. The root node corresponds to an
   * empty JS call-stack. The lifetime of the returned Node* is scoped to the
   * containing AllocationProfile.
   */
  virtual Node* GetRootNode() = 0;

  /** Returns the samples recorded for this profile. */
  virtual const std::vector<Sample>& GetSamples() = 0;

  virtual ~AllocationProfile() = default;

  /** Sentinel values for missing line/column information. */
  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
723 
/**
 * An object graph consisting of embedder objects and V8 objects.
 * Edges of the graph are strong references between the objects.
 * The embedder can build this graph during heap snapshot generation
 * to include the embedder objects in the heap snapshot.
 * Usage:
 * 1) Define derived class of EmbedderGraph::Node for embedder objects.
 * 2) Set the build embedder graph callback on the heap profiler using
 *    HeapProfiler::AddBuildEmbedderGraphCallback.
 * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
 *    node1 to node2.
 * 4) To represent references from/to V8 object, construct V8 nodes using
 *    graph->V8Node(value).
 */
class V8_EXPORT EmbedderGraph {
 public:
  class Node {
   public:
    /**
     * Detachedness specifies whether an object is attached or detached from
     * the main application state. While unknown in general, there may be
     * objects that specifically know their state. V8 passes this information
     * along in the snapshot. Users of the snapshot may use it to annotate
     * the object graph.
     */
    enum class Detachedness : uint8_t {
      kUnknown = 0,
      kAttached = 1,
      kDetached = 2,
    };

    Node() = default;
    virtual ~Node() = default;
    /** Returns the name to show for this node in the snapshot. */
    virtual const char* Name() = 0;
    /** Returns the size to report for this node, in bytes. */
    virtual size_t SizeInBytes() = 0;
    /**
     * The corresponding V8 wrapper node if not null.
     * During heap snapshot generation the embedder node and the V8 wrapper
     * node will be merged into one node to simplify retaining paths.
     */
    virtual Node* WrapperNode() { return nullptr; }
    /** True if this node should be treated as a root of the graph. */
    virtual bool IsRootNode() { return false; }
    /** Must return true for non-V8 nodes. */
    virtual bool IsEmbedderNode() { return true; }
    /**
     * Optional name prefix. It is used in Chrome for tagging detached nodes.
     */
    virtual const char* NamePrefix() { return nullptr; }

    /**
     * Returns the NativeObject that can be used for querying the
     * |HeapSnapshot|.
     */
    virtual NativeObject GetNativeObject() { return nullptr; }

    /**
     * Detachedness state of a given object. While unknown in general, there
     * may be objects that specifically know their state. V8 passes this
     * information along in the snapshot. Users of the snapshot may use it to
     * annotate the object graph.
     */
    virtual Detachedness GetDetachedness() { return Detachedness::kUnknown; }

    Node(const Node&) = delete;
    Node& operator=(const Node&) = delete;
  };

  /**
   * Returns a node corresponding to the given V8 value. Ownership is not
   * transferred. The result pointer is valid while the graph is alive.
   */
  virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;

  /**
   * Adds the given node to the graph and takes ownership of the node.
   * Returns a raw pointer to the node that is valid while the graph is alive.
   */
  virtual Node* AddNode(std::unique_ptr<Node> node) = 0;

  /**
   * Adds an edge that represents a strong reference from the given
   * node |from| to the given node |to|. The nodes must be added to the graph
   * before calling this function.
   *
   * If name is nullptr, the edge will have auto-increment indexes, otherwise
   * it will be named accordingly.
   */
  virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;

  virtual ~EmbedderGraph() = default;
};
815 
816 /**
817  * Interface for controlling heap profiling. Instance of the
818  * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
819  */
820 class V8_EXPORT HeapProfiler {
821  public:
822   enum SamplingFlags {
823     kSamplingNoFlags = 0,
824     kSamplingForceGC = 1 << 0,
825   };
826 
827   /**
828    * Callback function invoked during heap snapshot generation to retrieve
829    * the embedder object graph. The callback should use graph->AddEdge(..) to
830    * add references between the objects.
831    * The callback must not trigger garbage collection in V8.
832    */
833   typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
834                                              v8::EmbedderGraph* graph,
835                                              void* data);
836 
837   /**
838    * Callback function invoked during heap snapshot generation to retrieve
839    * the detachedness state of an object referenced by a TracedReference.
840    *
841    * The callback takes Local<Value> as parameter to allow the embedder to
842    * unpack the TracedReference into a Local and reuse that Local for different
843    * purposes.
844    */
845   using GetDetachednessCallback = EmbedderGraph::Node::Detachedness (*)(
846       v8::Isolate* isolate, const v8::Local<v8::Value>& v8_value,
847       uint16_t class_id, void* data);
848 
849   /** Returns the number of snapshots taken. */
850   int GetSnapshotCount();
851 
852   /** Returns a snapshot by index. */
853   const HeapSnapshot* GetHeapSnapshot(int index);
854 
855   /**
856    * Returns SnapshotObjectId for a heap object referenced by |value| if
857    * it has been seen by the heap profiler, kUnknownObjectId otherwise.
858    */
859   SnapshotObjectId GetObjectId(Local<Value> value);
860 
861   /**
862    * Returns SnapshotObjectId for a native object referenced by |value| if it
863    * has been seen by the heap profiler, kUnknownObjectId otherwise.
864    */
865   SnapshotObjectId GetObjectId(NativeObject value);
866 
867   /**
868    * Returns heap object with given SnapshotObjectId if the object is alive,
869    * otherwise empty handle is returned.
870    */
871   Local<Value> FindObjectById(SnapshotObjectId id);
872 
873   /**
874    * Clears internal map from SnapshotObjectId to heap object. The new objects
875    * will not be added into it unless a heap snapshot is taken or heap object
876    * tracking is kicked off.
877    */
878   void ClearObjectIds();
879 
  /**
   * A constant for an invalid SnapshotObjectId. GetObjectId will return it
   * when the heap profiler cannot find an id for the object passed as a
   * parameter. HeapSnapshot::GetNodeById will always return NULL for such
   * an id.
   */
885   static const SnapshotObjectId kUnknownObjectId = 0;
886 
887   /**
888    * Callback interface for retrieving user friendly names of global objects.
889    */
890   class ObjectNameResolver {
891    public:
892     /**
893      * Returns name to be used in the heap snapshot for given node. Returned
894      * string must stay alive until snapshot collection is completed.
895      */
896     virtual const char* GetName(Local<Object> object) = 0;
897 
898    protected:
899     virtual ~ObjectNameResolver() = default;
900   };
901 
902   /**
903    * Takes a heap snapshot and returns it.
904    */
905   const HeapSnapshot* TakeHeapSnapshot(
906       ActivityControl* control = nullptr,
907       ObjectNameResolver* global_object_name_resolver = nullptr,
908       bool treat_global_objects_as_roots = true,
909       bool capture_numeric_value = false);
910 
911   /**
912    * Starts tracking of heap objects population statistics. After calling
913    * this method, all heap objects relocations done by the garbage collector
914    * are being registered.
915    *
916    * |track_allocations| parameter controls whether stack trace of each
917    * allocation in the heap will be recorded and reported as part of
918    * HeapSnapshot.
919    */
920   void StartTrackingHeapObjects(bool track_allocations = false);
921 
922   /**
923    * Adds a new time interval entry to the aggregated statistics array. The
924    * time interval entry contains information on the current heap objects
925    * population size. The method also updates aggregated statistics and
926    * reports updates for all previous time intervals via the OutputStream
927    * object. Updates on each time interval are provided as a stream of the
928    * HeapStatsUpdate structure instances.
929    * If |timestamp_us| is supplied, timestamp of the new entry will be written
930    * into it. The return value of the function is the last seen heap object Id.
931    *
932    * StartTrackingHeapObjects must be called before the first call to this
933    * method.
934    */
935   SnapshotObjectId GetHeapStats(OutputStream* stream,
936                                 int64_t* timestamp_us = nullptr);
937 
938   /**
939    * Stops tracking of heap objects population statistics, cleans up all
940    * collected data. StartHeapObjectsTracking must be called again prior to
941    * calling GetHeapStats next time.
942    */
943   void StopTrackingHeapObjects();
944 
945   /**
946    * Starts gathering a sampling heap profile. A sampling heap profile is
947    * similar to tcmalloc's heap profiler and Go's mprof. It samples object
948    * allocations and builds an online 'sampling' heap profile. At any point in
949    * time, this profile is expected to be a representative sample of objects
950    * currently live in the system. Each sampled allocation includes the stack
951    * trace at the time of allocation, which makes this really useful for memory
952    * leak detection.
953    *
954    * This mechanism is intended to be cheap enough that it can be used in
955    * production with minimal performance overhead.
956    *
957    * Allocations are sampled using a randomized Poisson process. On average, one
958    * allocation will be sampled every |sample_interval| bytes allocated. The
959    * |stack_depth| parameter controls the maximum number of stack frames to be
960    * captured on each allocation.
961    *
962    * NOTE: This is a proof-of-concept at this point. Right now we only sample
963    * newspace allocations. Support for paged space allocation (e.g. pre-tenured
964    * objects, large objects, code objects, etc.) and native allocations
965    * doesn't exist yet, but is anticipated in the future.
966    *
967    * Objects allocated before the sampling is started will not be included in
968    * the profile.
969    *
970    * Returns false if a sampling heap profiler is already running.
971    */
972   bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
973                                  int stack_depth = 16,
974                                  SamplingFlags flags = kSamplingNoFlags);
975 
976   /**
977    * Stops the sampling heap profile and discards the current profile.
978    */
979   void StopSamplingHeapProfiler();
980 
981   /**
982    * Returns the sampled profile of allocations allocated (and still live) since
983    * StartSamplingHeapProfiler was called. The ownership of the pointer is
984    * transferred to the caller. Returns nullptr if sampling heap profiler is not
985    * active.
986    */
987   AllocationProfile* GetAllocationProfile();
988 
989   /**
990    * Deletes all snapshots taken. All previously returned pointers to
991    * snapshots and their contents become invalid after this call.
992    */
993   void DeleteAllHeapSnapshots();
994 
995   void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
996                                      void* data);
997   void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
998                                         void* data);
999 
1000   void SetGetDetachednessCallback(GetDetachednessCallback callback, void* data);
1001 
1002   /**
1003    * Default value of persistent handle class ID. Must not be used to
1004    * define a class. Can be used to reset a class of a persistent
1005    * handle.
1006    */
1007   static const uint16_t kPersistentHandleNoClassId = 0;
1008 
 private:
  // Construction, destruction, and copying are all private (copy operations
  // are declared but intentionally left undefined), so client code can
  // neither instantiate nor copy a HeapProfiler directly.
  HeapProfiler();
  ~HeapProfiler();
  HeapProfiler(const HeapProfiler&);
  HeapProfiler& operator=(const HeapProfiler&);
};
1015 
1016 /**
1017  * A struct for exporting HeapStats data from V8, using "push" model.
1018  * See HeapProfiler::GetHeapStats.
1019  */
1020 struct HeapStatsUpdate {
HeapStatsUpdateHeapStatsUpdate1021   HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
1022     : index(index), count(count), size(size) { }
1023   uint32_t index;  // Index of the time interval that was changed.
1024   uint32_t count;  // New value of count field for the interval with this index.
1025   uint32_t size;  // New value of size field for the interval with this index.
1026 };
1027 
// X-macro list of code-event kinds. Invoked below with V(Name) to generate
// the enumerators of CodeEventType.
#define CODE_EVENTS_LIST(V) \
  V(Builtin)                \
  V(Callback)               \
  V(Eval)                   \
  V(Function)               \
  V(InterpretedFunction)    \
  V(Handler)                \
  V(BytecodeHandler)        \
  V(LazyCompile)            \
  V(RegExp)                 \
  V(Script)                 \
  V(Stub)                   \
  V(Relocation)
1041 
1042 /**
1043  * Note that this enum may be extended in the future. Please include a default
1044  * case if this enum is used in a switch statement.
1045  */
1046 enum CodeEventType {
1047   kUnknownType = 0
1048 #define V(Name) , k##Name##Type
1049   CODE_EVENTS_LIST(V)
1050 #undef V
1051 };
1052 
1053 /**
1054  * Representation of a code creation event
1055  */
1056 class V8_EXPORT CodeEvent {
1057  public:
1058   uintptr_t GetCodeStartAddress();
1059   size_t GetCodeSize();
1060   Local<String> GetFunctionName();
1061   Local<String> GetScriptName();
1062   int GetScriptLine();
1063   int GetScriptColumn();
1064   /**
1065    * NOTE (mmarchini): We can't allocate objects in the heap when we collect
1066    * existing code, and both the code type and the comment are not stored in the
1067    * heap, so we return those as const char*.
1068    */
1069   CodeEventType GetCodeType();
1070   const char* GetComment();
1071 
1072   static const char* GetCodeEventTypeName(CodeEventType code_event_type);
1073 
1074   uintptr_t GetPreviousCodeStartAddress();
1075 };
1076 
1077 /**
1078  * Interface to listen to code creation and code relocation events.
1079  */
1080 class V8_EXPORT CodeEventHandler {
1081  public:
1082   /**
1083    * Creates a new listener for the |isolate|. The isolate must be initialized.
1084    * The listener object must be disposed after use by calling |Dispose| method.
1085    * Multiple listeners can be created for the same isolate.
1086    */
1087   explicit CodeEventHandler(Isolate* isolate);
1088   virtual ~CodeEventHandler();
1089 
1090   /**
1091    * Handle is called every time a code object is created or moved. Information
1092    * about each code event will be available through the `code_event`
1093    * parameter.
1094    *
1095    * When the CodeEventType is kRelocationType, the code for this CodeEvent has
1096    * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
1097    */
1098   virtual void Handle(CodeEvent* code_event) = 0;
1099 
1100   /**
1101    * Call `Enable()` to starts listening to code creation and code relocation
1102    * events. These events will be handled by `Handle()`.
1103    */
1104   void Enable();
1105 
1106   /**
1107    * Call `Disable()` to stop listening to code creation and code relocation
1108    * events.
1109    */
1110   void Disable();
1111 
1112  private:
1113   CodeEventHandler();
1114   CodeEventHandler(const CodeEventHandler&);
1115   CodeEventHandler& operator=(const CodeEventHandler&);
1116   void* internal_listener_;
1117 };
1118 
1119 }  // namespace v8
1120 
1121 
1122 #endif  // V8_V8_PROFILER_H_
1123