1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
7 
#include <stdint.h>

#include <vector>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
14 
15 namespace base {
16 namespace trace_event {
17 
18 // AllocationContextTracker is a thread-local object. Its main purpose is to
19 // keep track of a pseudo stack of trace events. Chrome has been instrumented
20 // with lots of `TRACE_EVENT` macros. These trace events push their name to a
21 // thread-local stack when they go into scope, and pop when they go out of
22 // scope, if all of the following conditions have been met:
23 //
24 //  * A trace is being recorded.
25 //  * The category of the event is enabled in the trace config.
26 //  * Heap profiling is enabled (with the `--enable-heap-profiling` flag).
27 //
28 // This means that allocations that occur before tracing is started will not
29 // have backtrace information in their context.
30 //
31 // AllocationContextTracker also keeps track of some thread state not related to
32 // trace events. See |AllocationContext|.
33 //
34 // A thread-local instance of the context tracker is initialized lazily when it
35 // is first accessed. This might be because a trace event pushed or popped, or
36 // because `GetContextSnapshot()` was called when an allocation occurred
37 class BASE_EXPORT AllocationContextTracker {
38  public:
39   enum class CaptureMode : int32_t {
40     DISABLED,      // Don't capture anything
41     PSEUDO_STACK,  // Backtrace has trace events
42     MIXED_STACK,   // Backtrace has trace events + from
43                    // HeapProfilerScopedStackFrame
44     NATIVE_STACK,  // Backtrace has full native backtraces from stack unwinding
45   };
46 
47   // Stack frame constructed from trace events in codebase.
48   struct BASE_EXPORT PseudoStackFrame {
49     const char* trace_event_category;
50     const char* trace_event_name;
51 
52     bool operator==(const PseudoStackFrame& other) const {
53       return trace_event_category == other.trace_event_category &&
54              trace_event_name == other.trace_event_name;
55     }
56   };
57 
58   // Globally sets capturing mode.
59   // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
60   static void SetCaptureMode(CaptureMode mode);
61 
62   // Returns global capturing mode.
capture_mode()63   inline static CaptureMode capture_mode() {
64     // A little lag after heap profiling is enabled or disabled is fine, it is
65     // more important that the check is as cheap as possible when capturing is
66     // not enabled, so do not issue a memory barrier in the fast path.
67     if (subtle::NoBarrier_Load(&capture_mode_) ==
68             static_cast<int32_t>(CaptureMode::DISABLED))
69       return CaptureMode::DISABLED;
70 
71     // In the slow path, an acquire load is required to pair with the release
72     // store in |SetCaptureMode|. This is to ensure that the TLS slot for
73     // the thread-local allocation context tracker has been initialized if
74     // |capture_mode| returns something other than DISABLED.
75     return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
76   }
77 
78   // Returns the thread-local instance, creating one if necessary. Returns
79   // always a valid instance, unless it is called re-entrantly, in which case
80   // returns nullptr in the nested calls.
81   static AllocationContextTracker* GetInstanceForCurrentThread();
82 
83   // Set the thread name in the AllocationContextTracker of the current thread
84   // if capture is enabled.
85   static void SetCurrentThreadName(const char* name);
86 
87   // Starts and ends a new ignore scope between which the allocations are
88   // ignored by the heap profiler. GetContextSnapshot() returns false when
89   // allocations are ignored.
begin_ignore_scope()90   void begin_ignore_scope() { ignore_scope_depth_++; }
end_ignore_scope()91   void end_ignore_scope() {
92     if (ignore_scope_depth_)
93       ignore_scope_depth_--;
94   }
95 
96   // Pushes and pops a frame onto the thread-local pseudo stack.
97   // TODO(ssid): Change PseudoStackFrame to const char*. Only event name is
98   // used.
99   void PushPseudoStackFrame(PseudoStackFrame stack_frame);
100   void PopPseudoStackFrame(PseudoStackFrame stack_frame);
101 
102   // Pushes and pops a native stack frame onto thread local tracked stack.
103   void PushNativeStackFrame(const void* pc);
104   void PopNativeStackFrame(const void* pc);
105 
106   // Push and pop current task's context. A stack is used to support nested
107   // tasks and the top of the stack will be used in allocation context.
108   void PushCurrentTaskContext(const char* context);
109   void PopCurrentTaskContext(const char* context);
110 
111   // Returns most recent task context added by ScopedTaskExecutionTracker.
TaskContext()112   const char* TaskContext() const {
113     return task_contexts_.empty() ? nullptr : task_contexts_.back();
114   }
115 
116   // Fills a snapshot of the current thread-local context. Doesn't fill and
117   // returns false if allocations are being ignored.
118   bool GetContextSnapshot(AllocationContext* snapshot);
119 
120   ~AllocationContextTracker();
121 
122  private:
123   AllocationContextTracker();
124 
125   static subtle::Atomic32 capture_mode_;
126 
127   // The pseudo stack where frames are |TRACE_EVENT| names or inserted PCs.
128   std::vector<StackFrame> tracked_stack_;
129 
130   // The thread name is used as the first entry in the pseudo stack.
131   const char* thread_name_;
132 
133   // Stack of tasks' contexts. Context serves as a different dimension than
134   // pseudo stack to cluster allocations.
135   std::vector<const char*> task_contexts_;
136 
137   uint32_t ignore_scope_depth_;
138 
139   DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker);
140 };
141 
142 }  // namespace trace_event
143 }  // namespace base
144 
145 #endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
146