// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_context_tracker.h"

#include <algorithm>
#include <iterator>

#include "base/atomicops.h"
#include "base/debug/debugging_buildflags.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/stack_trace.h"
#include "base/no_destructor.h"
#include "base/stl_util.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "build/build_config.h"

#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include <sys/prctl.h>
#endif

namespace base {
namespace trace_event {

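// The capture mode is stored in an atomic so that it can be read on the hot
// allocation path without taking a lock; see GetContextSnapshot().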
subtle::Atomic32 AllocationContextTracker::capture_mode_ =
    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);

namespace {

const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
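// Sentinel stored in the TLS slot while a tracker is being constructed, so
// that allocations made re-entrantly during construction do not recurse into
// GetInstanceForCurrentThread().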
AllocationContextTracker* const kInitializingSentinel =
    reinterpret_cast<AllocationContextTracker*>(-1);

// This function is added to the TLS slot to clean up the instance when the
// thread exits.
void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
  delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
}

ThreadLocalStorage::Slot& AllocationContextTrackerTLS() {
  static NoDestructor<ThreadLocalStorage::Slot> tls_alloc_ctx_tracker(
      &DestructAllocationContextTracker);
  return *tls_alloc_ctx_tracker;
}

// Cannot call ThreadIdNameManager::GetName because it holds a lock; if that
// lock is already held by ThreadIdNameManager when the current allocation
// happens, calling it would deadlock. Instead, get the thread name from the
// kernel if available, or return a string containing the thread id. This
// function intentionally leaks the allocated strings, since they are used to
// tag allocations even after the thread dies.
const char* GetAndLeakThreadName() {
  char name[16];
#if defined(OS_LINUX) || defined(OS_ANDROID)
  // If the thread name is not set, try to get it from prctl. The thread name
  // might not be set in cases where the thread started before heap profiling
  // was enabled.
  int err = prctl(PR_GET_NAME, name);
  if (!err) {
    return strdup(name);
  }
#endif  // defined(OS_LINUX) || defined(OS_ANDROID)

  // Use the tid if we don't have a thread name.
  snprintf(name, sizeof(name), "%lu",
           static_cast<unsigned long>(PlatformThread::CurrentId()));
  return strdup(name);
}

}  // namespace

// static
AllocationContextTracker*
AllocationContextTracker::GetInstanceForCurrentThread() {
  AllocationContextTracker* tracker = static_cast<AllocationContextTracker*>(
      AllocationContextTrackerTLS().Get());
  if (tracker == kInitializingSentinel)
    return nullptr;  // Re-entrancy case.

  if (!tracker) {
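    // Mark the slot as initializing before calling new: the allocation made
    // by new can itself re-enter GetInstanceForCurrentThread(), which then
    // returns early instead of recursing.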
    AllocationContextTrackerTLS().Set(kInitializingSentinel);
    tracker = new AllocationContextTracker();
    AllocationContextTrackerTLS().Set(tracker);
  }

  return tracker;
}

AllocationContextTracker::AllocationContextTracker()
    : thread_name_(nullptr), ignore_scope_depth_(0) {
  tracked_stack_.reserve(kMaxStackDepth);
  task_contexts_.reserve(kMaxTaskDepth);
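  // Prime the task context stack with a sentinel entry so that allocations
  // made outside of any tracked task are attributed to "UntrackedTask".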
  task_contexts_.push_back("UntrackedTask");
}

AllocationContextTracker::~AllocationContextTracker() = default;

// static
void AllocationContextTracker::SetCurrentThreadName(const char* name) {
  if (name && capture_mode() != CaptureMode::DISABLED) {
    GetInstanceForCurrentThread()->thread_name_ = name;
  }
}

// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
  // Release ordering ensures that when a thread observes a non-DISABLED
  // |capture_mode_| through an acquire load, the TLS slot has been
  // initialized.
  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
}

void AllocationContextTracker::PushPseudoStackFrame(
    AllocationContextTracker::PseudoStackFrame stack_frame) {
  // Impose a limit on the height to verify that every push is popped, because
  // in practice the pseudo stack never grows higher than ~20 frames.
  if (tracked_stack_.size() < kMaxStackDepth) {
    tracked_stack_.push_back(
        StackFrame::FromTraceEventName(stack_frame.trace_event_name));
  } else {
    NOTREACHED();
  }
}

void AllocationContextTracker::PopPseudoStackFrame(
    AllocationContextTracker::PseudoStackFrame stack_frame) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the frame was never pushed, so it is possible that pop is called
  // on an empty stack.
  if (tracked_stack_.empty())
    return;

  tracked_stack_.pop_back();
}

void AllocationContextTracker::PushNativeStackFrame(const void* pc) {
  if (tracked_stack_.size() < kMaxStackDepth)
    tracked_stack_.push_back(StackFrame::FromProgramCounter(pc));
  else
    NOTREACHED();
}

void AllocationContextTracker::PopNativeStackFrame(const void* pc) {
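  // Guard for stack underflow, mirroring PopPseudoStackFrame above: if
  // tracing started while this frame was already on the real stack, the
  // matching push was never recorded, so an empty stack is not an error.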
  if (tracked_stack_.empty())
    return;

  DCHECK_EQ(pc, tracked_stack_.back().value);
  tracked_stack_.pop_back();
}

void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
  DCHECK(context);
  if (task_contexts_.size() < kMaxTaskDepth)
    task_contexts_.push_back(context);
  else
    NOTREACHED();
}

void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the context was never pushed, so it is possible that pop is called
  // on an empty stack. Note that the stack always contains the sentinel
  // "UntrackedTask" entry at the bottom, so it is never truly empty.
  if (task_contexts_.size() == 1)
    return;

  DCHECK_EQ(context, task_contexts_.back())
      << "Encountered an unmatched context end";
  task_contexts_.pop_back();
}

bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
  if (ignore_scope_depth_)
    return false;

  CaptureMode mode = static_cast<CaptureMode>(
      subtle::NoBarrier_Load(&capture_mode_));

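  // |backtrace| is a write cursor into the fixed-size frame array of |ctx|;
  // frames are appended bottom-first, starting with the thread name.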
  auto* backtrace = std::begin(ctx->backtrace.frames);
  auto* backtrace_end = std::end(ctx->backtrace.frames);

  if (!thread_name_) {
    // Ignore the string allocation made by GetAndLeakThreadName to avoid
    // reentrancy.
    ignore_scope_depth_++;
    thread_name_ = GetAndLeakThreadName();
    ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
    DCHECK(thread_name_);
    ignore_scope_depth_--;
  }

  // Add the thread name as the first entry in the pseudo stack.
  if (thread_name_) {
    *backtrace++ = StackFrame::FromThreadName(thread_name_);
  }

  switch (mode) {
    case CaptureMode::DISABLED:
      {
        break;
      }
    case CaptureMode::PSEUDO_STACK:
    case CaptureMode::MIXED_STACK:
      {
        for (const StackFrame& stack_frame : tracked_stack_) {
          if (backtrace == backtrace_end)
            break;
          *backtrace++ = stack_frame;
        }
        break;
      }
    case CaptureMode::NATIVE_STACK:
      {
// Backtrace contract requires us to return bottom frames, i.e.
// from main() and up. Stack unwinding produces top frames, i.e.
// from this point and up until main(). We intentionally request
// kMaxFrameCount + 1 frames, so that we know if there are more frames
// than our backtrace capacity.
#if !defined(OS_NACL)  // We don't build base/debug/stack_trace.cc for NaCl.
#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
        const void* frames[Backtrace::kMaxFrameCount + 1];
        static_assert(base::size(frames) >= Backtrace::kMaxFrameCount,
                      "not requesting enough frames to fill Backtrace");
        size_t frame_count =
            CFIBacktraceAndroid::GetInitializedInstance()->Unwind(
                frames, base::size(frames));
#elif BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
        const void* frames[Backtrace::kMaxFrameCount + 1];
        static_assert(base::size(frames) >= Backtrace::kMaxFrameCount,
                      "not requesting enough frames to fill Backtrace");
        size_t frame_count = debug::TraceStackFramePointers(
            frames, base::size(frames),
            1 /* exclude this function from the trace */);
#else
        // Fall back to capturing the stack with base::debug::StackTrace,
        // which is likely slower, but more reliable.
        base::debug::StackTrace stack_trace(Backtrace::kMaxFrameCount + 1);
        size_t frame_count = 0u;
        const void* const* frames = stack_trace.Addresses(&frame_count);
#endif

        // If there are too many frames, keep the ones furthest from main().
        size_t backtrace_capacity = backtrace_end - backtrace;
        int32_t starting_frame_index = frame_count;
        if (frame_count > backtrace_capacity) {
          starting_frame_index = backtrace_capacity - 1;
          *backtrace++ = StackFrame::FromTraceEventName("<truncated>");
        }
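        // The unwinders above return frames innermost-first; copy them in
        // reverse so the stored backtrace runs from main() outward, matching
        // the Backtrace contract described above.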
        for (int32_t i = starting_frame_index - 1; i >= 0; --i) {
          const void* frame = frames[i];
          *backtrace++ = StackFrame::FromProgramCounter(frame);
        }
#endif  // !defined(OS_NACL)
        break;
      }
  }

  ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);

  ctx->type_name = TaskContext();

  return true;
}

}  // namespace trace_event
}  // namespace base