1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/trace_event/trace_log.h"
6
7 #include <algorithm>
8 #include <cmath>
9 #include <limits>
10 #include <memory>
11 #include <unordered_set>
12 #include <utility>
13
14 #include "base/base_switches.h"
15 #include "base/bind.h"
16 #include "base/command_line.h"
17 #include "base/debug/leak_annotations.h"
18 #include "base/location.h"
19 #include "base/macros.h"
20 #include "base/memory/ptr_util.h"
21 #include "base/memory/ref_counted_memory.h"
22 #include "base/message_loop/message_loop_current.h"
23 #include "base/no_destructor.h"
24 #include "base/process/process.h"
25 #include "base/process/process_metrics.h"
26 #include "base/stl_util.h"
27 #include "base/strings/string_piece.h"
28 #include "base/strings/string_split.h"
29 #include "base/strings/string_tokenizer.h"
30 #include "base/strings/stringprintf.h"
31 #include "base/system/sys_info.h"
32 #include "base/task/post_task.h"
33 #include "base/task/thread_pool.h"
34 #include "base/threading/platform_thread.h"
35 #include "base/threading/sequenced_task_runner_handle.h"
36 #include "base/threading/thread_id_name_manager.h"
37 #include "base/threading/thread_task_runner_handle.h"
38 #include "base/time/time.h"
39 #include "base/trace_event/event_name_filter.h"
40 #include "base/trace_event/heap_profiler.h"
41 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
42 #include "base/trace_event/heap_profiler_event_filter.h"
43 #include "base/trace_event/memory_dump_manager.h"
44 #include "base/trace_event/memory_dump_provider.h"
45 #include "base/trace_event/process_memory_dump.h"
46 #include "base/trace_event/thread_instruction_count.h"
47 #include "base/trace_event/trace_buffer.h"
48 #include "base/trace_event/trace_event.h"
49 #include "build/build_config.h"
50
51 #if defined(OS_WIN)
52 #include "base/trace_event/trace_event_etw_export_win.h"
53 #endif
54
55 #if defined(OS_ANDROID)
56 #include "base/debug/elf_reader.h"
57
58 // The linker assigns the virtual address of the start of the current library
59 // to this symbol.
60 extern char __executable_start;
61 #endif
62
63 namespace base {
64 namespace trace_event {
65
66 namespace {
67
68 // Controls the number of trace events we will buffer in-memory
69 // before throwing them away.
70 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
71
72 const size_t kTraceEventVectorBigBufferChunks =
73 512000000 / kTraceBufferChunkSize;
74 static_assert(
75 kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
76 "Too many big buffer chunks");
77 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
78 static_assert(
79 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
80 "Too many vector buffer chunks");
81 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
82
83 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
84 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
85
86 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
87 const int kThreadFlushTimeoutMs = 3000;
88
89 TraceLog* g_trace_log_for_testing = nullptr;
90
91 #define MAX_TRACE_EVENT_FILTERS 32
92
93 // List of TraceEventFilter objects from the most recent tracing session.
94 std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
95 static auto* filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
96 return *filters;
97 }
98
99 ThreadTicks ThreadNow() {
100 return ThreadTicks::IsSupported()
101 ? base::subtle::ThreadTicksNowIgnoringOverride()
102 : ThreadTicks();
103 }
104
105 ThreadInstructionCount ThreadInstructionNow() {
106 return ThreadInstructionCount::IsSupported() ? ThreadInstructionCount::Now()
107 : ThreadInstructionCount();
108 }
109
110 template <typename T>
111 void InitializeMetadataEvent(TraceEvent* trace_event,
112 int thread_id,
113 const char* metadata_name,
114 const char* arg_name,
115 const T& value) {
116 if (!trace_event)
117 return;
118
119 TraceArguments args(arg_name, value);
120 base::TimeTicks now = TRACE_TIME_TICKS_NOW();
121 ThreadTicks thread_now = ThreadNow();
122 ThreadInstructionCount thread_instruction_count = ThreadInstructionNow();
123 trace_event->Reset(thread_id, now, thread_now, thread_instruction_count,
124 TRACE_EVENT_PHASE_METADATA,
125 CategoryRegistry::kCategoryMetadata->state_ptr(),
126 metadata_name,
127 trace_event_internal::kGlobalScope, // scope
128 trace_event_internal::kNoId, // id
129 trace_event_internal::kNoId, // bind_id
130 &args, TRACE_EVENT_FLAG_NONE);
131 }
132
133 class AutoThreadLocalBoolean {
134 public:
135 explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
136 : thread_local_boolean_(thread_local_boolean) {
137 DCHECK(!thread_local_boolean_->Get());
138 thread_local_boolean_->Set(true);
139 }
140 ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
141
142 private:
143 ThreadLocalBoolean* thread_local_boolean_;
144 DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
145 };
146
147 // Use this function instead of the TraceEventHandle constructor to keep the
148 // overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
149 void MakeHandle(uint32_t chunk_seq,
150 size_t chunk_index,
151 size_t event_index,
152 TraceEventHandle* handle) {
153 DCHECK(chunk_seq);
154 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
155 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
156 DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
157 handle->chunk_seq = chunk_seq;
158 handle->chunk_index = static_cast<uint16_t>(chunk_index);
159 handle->event_index = static_cast<uint16_t>(event_index);
160 }
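
// The handle written above packs (chunk_seq, chunk_index, event_index) so that
// GetEventByHandle() can later locate the event directly, without searching
// the buffer.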
161
162 template <typename Function>
163 void ForEachCategoryFilter(const unsigned char* category_group_enabled,
164 Function filter_fn) {
165 const TraceCategory* category =
166 CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
167 uint32_t filter_bitmap = category->enabled_filters();
168 for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
169 if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
170 filter_fn(GetCategoryGroupFilters()[index].get());
171 }
172 }
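
// Worked example (illustrative): if a category's enabled_filters() bitmap is
// 0b101, the loop above invokes the filters stored at indices 0 and 2 of
// GetCategoryGroupFilters(), assuming both slots hold non-null filters.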
173
174 // The fallback argument filtering function filters away every argument.
175 bool DefaultIsTraceEventArgsWhitelisted(
176 const char* category_group_name,
177 const char* event_name,
178 base::trace_event::ArgumentNameFilterPredicate* arg_name_filter) {
179 return false;
180 }
181
182 } // namespace
183
184 // A helper class that allows a lock to be acquired in the middle of a scope
185 // and releases it at the end of the scope if it was acquired.
186 class TraceLog::OptionalAutoLock {
187 public:
188 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}
189
190 ~OptionalAutoLock() {
191 if (locked_)
192 lock_->Release();
193 }
194
195 void EnsureAcquired() EXCLUSIVE_LOCK_FUNCTION(lock_) {
196 if (!locked_) {
197 lock_->Acquire();
198 locked_ = true;
199 } else {
200 lock_->AssertAcquired();
201 }
202 }
203
204 private:
205 Lock* lock_;
206 bool locked_;
207 DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
208 };
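
// Usage sketch (illustrative only): construct the OptionalAutoLock up front and
// acquire the lock only on the paths that actually need it; the destructor
// releases it if it was acquired, mirroring the pattern in
// AddTraceEventWithThreadIdAndTimestamp():
//
//   OptionalAutoLock lock(&lock_);
//   if (!thread_local_event_buffer) {
//     lock.EnsureAcquired();
//     trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
//   }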
209
210 class TraceLog::ThreadLocalEventBuffer
211 : public MessageLoopCurrent::DestructionObserver,
212 public MemoryDumpProvider {
213 public:
214 explicit ThreadLocalEventBuffer(TraceLog* trace_log);
215 ~ThreadLocalEventBuffer() override;
216
217 TraceEvent* AddTraceEvent(TraceEventHandle* handle);
218
219 TraceEvent* GetEventByHandle(TraceEventHandle handle) {
220 if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
221 handle.chunk_index != chunk_index_) {
222 return nullptr;
223 }
224
225 return chunk_->GetEventAt(handle.event_index);
226 }
227
228 int generation() const { return generation_; }
229
230 private:
231 // MessageLoopCurrent::DestructionObserver
232 void WillDestroyCurrentMessageLoop() override;
233
234 // MemoryDumpProvider implementation.
235 bool OnMemoryDump(const MemoryDumpArgs& args,
236 ProcessMemoryDump* pmd) override;
237
238 void FlushWhileLocked();
239
240 void CheckThisIsCurrentBuffer() const {
241 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
242 }
243
244 // Since TraceLog is a leaky singleton, trace_log_ will always be valid
245 // as long as the thread exists.
246 TraceLog* trace_log_;
247 std::unique_ptr<TraceBufferChunk> chunk_;
248 size_t chunk_index_;
249 int generation_;
250
251 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
252 };
253
254 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
255 : trace_log_(trace_log),
256 chunk_index_(0),
257 generation_(trace_log->generation()) {
258 // ThreadLocalEventBuffer is created only if the thread has a message loop, so
259 // MessageLoopCurrent::Get() below won't be null.
260 MessageLoopCurrent::Get()->AddDestructionObserver(this);
261
262 // This is to report the local memory usage when memory-infra is enabled.
263 MemoryDumpManager::GetInstance()->RegisterDumpProvider(
264 this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get());
265
266 int thread_id = static_cast<int>(PlatformThread::CurrentId());
267
268 AutoLock lock(trace_log->lock_);
269 trace_log->thread_task_runners_[thread_id] = ThreadTaskRunnerHandle::Get();
270 }
271
272 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
273 CheckThisIsCurrentBuffer();
274 MessageLoopCurrent::Get()->RemoveDestructionObserver(this);
275 MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
276
277 {
278 AutoLock lock(trace_log_->lock_);
279 FlushWhileLocked();
280
281 int thread_id = static_cast<int>(PlatformThread::CurrentId());
282 trace_log_->thread_task_runners_.erase(thread_id);
283 }
284 trace_log_->thread_local_event_buffer_.Set(nullptr);
285 }
286
287 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
288 TraceEventHandle* handle) {
289 CheckThisIsCurrentBuffer();
290
291 if (chunk_ && chunk_->IsFull()) {
292 AutoLock lock(trace_log_->lock_);
293 FlushWhileLocked();
294 chunk_.reset();
295 }
296 if (!chunk_) {
297 AutoLock lock(trace_log_->lock_);
298 chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
299 trace_log_->CheckIfBufferIsFullWhileLocked();
300 }
301 if (!chunk_)
302 return nullptr;
303
304 size_t event_index;
305 TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
306 if (trace_event && handle)
307 MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
308
309 return trace_event;
310 }
311
312 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
313 delete this;
314 }
315
316 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
317 ProcessMemoryDump* pmd) {
318 if (!chunk_)
319 return true;
320 std::string dump_base_name = StringPrintf(
321 "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
322 TraceEventMemoryOverhead overhead;
323 chunk_->EstimateTraceMemoryOverhead(&overhead);
324 overhead.DumpInto(dump_base_name.c_str(), pmd);
325 return true;
326 }
327
328 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
329 if (!chunk_)
330 return;
331
332 trace_log_->lock_.AssertAcquired();
333 if (trace_log_->CheckGeneration(generation_)) {
334 // Return the chunk to the buffer only if the generation matches.
335 trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
336 }
337 // Otherwise this method may be called from the destructor, or TraceLog will
338 // find the generation mismatch and delete this buffer soon.
339 }
340
341 void TraceLog::SetAddTraceEventOverrides(
342 const AddTraceEventOverrideFunction& add_event_override,
343 const OnFlushFunction& on_flush_override,
344 const UpdateDurationFunction& update_duration_override) {
345 add_trace_event_override_.store(add_event_override);
346 on_flush_override_.store(on_flush_override);
347 update_duration_override_.store(update_duration_override);
348 }
349
350 struct TraceLog::RegisteredAsyncObserver {
351 explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
352 : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
353 ~RegisteredAsyncObserver() = default;
354
355 WeakPtr<AsyncEnabledStateObserver> observer;
356 scoped_refptr<SequencedTaskRunner> task_runner;
357 };
358
359 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
360
361 TraceLogStatus::~TraceLogStatus() = default;
362
363 // static
364 TraceLog* TraceLog::GetInstance() {
365 static base::NoDestructor<TraceLog> instance;
366 return instance.get();
367 }
368
369 // static
370 void TraceLog::ResetForTesting() {
371 if (!g_trace_log_for_testing)
372 return;
373 {
374 AutoLock lock(g_trace_log_for_testing->lock_);
375 CategoryRegistry::ResetForTesting();
376 }
377 g_trace_log_for_testing->~TraceLog();
378 new (g_trace_log_for_testing) TraceLog;
379 }
380
381 TraceLog::TraceLog()
382 : enabled_modes_(0),
383 num_traces_recorded_(0),
384 process_sort_index_(0),
385 process_id_hash_(0),
386 process_id_(base::kNullProcessId),
387 trace_options_(kInternalRecordUntilFull),
388 trace_config_(TraceConfig()),
389 thread_shared_chunk_index_(0),
390 generation_(0),
391 use_worker_thread_(false),
392 filter_factory_for_testing_(nullptr) {
393 CategoryRegistry::Initialize();
394
395 #if defined(OS_NACL) // NaCl shouldn't expose the process id.
396 SetProcessID(0);
397 #else
398 SetProcessID(static_cast<int>(GetCurrentProcId()));
399 #endif
400
401 // Linux renderer processes and Android O processes are not allowed to read
402 // the "proc/stat" file; see crbug.com/788870.
403 #if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
404 process_creation_time_ = Process::Current().CreationTime();
405 #else
406 // Use an approximate time when the creation time is not available.
407 process_creation_time_ = TRACE_TIME_NOW();
408 #endif
409
410 logged_events_.reset(CreateTraceBuffer());
411
412 MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
413 nullptr);
414 g_trace_log_for_testing = this;
415 }
416
417 TraceLog::~TraceLog() = default;
418
419 void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
420 // A ThreadLocalEventBuffer needs a message loop with a task runner:
421 // - to know when the thread exits;
422 // - to handle the final flush.
423 // For a thread without a message loop, or one whose message loop may be
424 // blocked, trace events are added into the main buffer directly.
425 if (thread_blocks_message_loop_.Get() || !MessageLoopCurrent::IsSet() ||
426 !ThreadTaskRunnerHandle::IsSet()) {
427 return;
428 }
429 HEAP_PROFILER_SCOPED_IGNORE;
430 auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
431 if (thread_local_event_buffer &&
432 !CheckGeneration(thread_local_event_buffer->generation())) {
433 delete thread_local_event_buffer;
434 thread_local_event_buffer = nullptr;
435 }
436 if (!thread_local_event_buffer) {
437 thread_local_event_buffer = new ThreadLocalEventBuffer(this);
438 thread_local_event_buffer_.Set(thread_local_event_buffer);
439 }
440 }
441
442 bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
443 ProcessMemoryDump* pmd) {
444 // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
445 // (crbug.com/499731).
446 TraceEventMemoryOverhead overhead;
447 overhead.Add(TraceEventMemoryOverhead::kOther, sizeof(*this));
448 {
449 AutoLock lock(lock_);
450 if (logged_events_)
451 logged_events_->EstimateTraceMemoryOverhead(&overhead);
452
453 for (auto& metadata_event : metadata_events_)
454 metadata_event->EstimateTraceMemoryOverhead(&overhead);
455 }
456 overhead.AddSelf();
457 overhead.DumpInto("tracing/main_trace_log", pmd);
458 return true;
459 }
460
461 const unsigned char* TraceLog::GetCategoryGroupEnabled(
462 const char* category_group) {
463 TraceLog* tracelog = GetInstance();
464 if (!tracelog) {
465 DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
466 return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
467 }
468 TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
469 if (!category) {
470 // Slow path: in the case of a new category we have to repeat the check
471 // holding the lock, as multiple threads might have reached this point
472 // at the same time.
473 auto category_initializer = [](TraceCategory* category) {
474 TraceLog::GetInstance()->UpdateCategoryState(category);
475 };
476 AutoLock lock(tracelog->lock_);
477 CategoryRegistry::GetOrCreateCategoryLocked(
478 category_group, category_initializer, &category);
479 }
480 DCHECK(category->state_ptr());
481 return category->state_ptr();
482 }
483
484 const char* TraceLog::GetCategoryGroupName(
485 const unsigned char* category_group_enabled) {
486 return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
487 ->name();
488 }
489
490 void TraceLog::UpdateCategoryState(TraceCategory* category) {
491 lock_.AssertAcquired();
492 DCHECK(category->is_valid());
493 unsigned char state_flags = 0;
494 if (enabled_modes_ & RECORDING_MODE &&
495 trace_config_.IsCategoryGroupEnabled(category->name())) {
496 state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
497 }
498
499 // TODO(primiano): this is a temporary workaround for catapult:#2341,
500 // to guarantee that metadata events are always added even if the category
501 // filter is "-*". See crbug.com/618054 for more details and long-term fix.
502 if (enabled_modes_ & RECORDING_MODE &&
503 category == CategoryRegistry::kCategoryMetadata) {
504 state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
505 }
506
507 #if defined(OS_WIN)
508 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
509 category->name())) {
510 state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
511 }
512 #endif
513
514 uint32_t enabled_filters_bitmap = 0;
515 int index = 0;
516 for (const auto& event_filter : enabled_event_filters_) {
517 if (event_filter.IsCategoryGroupEnabled(category->name())) {
518 state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
519 DCHECK(GetCategoryGroupFilters()[index]);
520 enabled_filters_bitmap |= 1 << index;
521 }
522 if (index++ >= MAX_TRACE_EVENT_FILTERS) {
523 NOTREACHED();
524 break;
525 }
526 }
527 category->set_enabled_filters(enabled_filters_bitmap);
528 category->set_state(state_flags);
529 }
530
531 void TraceLog::UpdateCategoryRegistry() {
532 lock_.AssertAcquired();
533 CreateFiltersForTraceConfig();
534 for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
535 UpdateCategoryState(&category);
536 }
537 }
538
539 void TraceLog::CreateFiltersForTraceConfig() {
540 if (!(enabled_modes_ & FILTERING_MODE))
541 return;
542
543 // Filters were already added and tracing could be enabled. The filter list
544 // cannot be changed while trace events are using it.
545 if (GetCategoryGroupFilters().size())
546 return;
547
548 for (auto& filter_config : enabled_event_filters_) {
549 if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
550 NOTREACHED()
551 << "Too many trace event filters installed in the current session";
552 break;
553 }
554
555 std::unique_ptr<TraceEventFilter> new_filter;
556 const std::string& predicate_name = filter_config.predicate_name();
557 if (predicate_name == EventNameFilter::kName) {
558 auto whitelist = std::make_unique<std::unordered_set<std::string>>();
559 CHECK(filter_config.GetArgAsSet("event_name_whitelist", &*whitelist));
560 new_filter = std::make_unique<EventNameFilter>(std::move(whitelist));
561 } else if (predicate_name == HeapProfilerEventFilter::kName) {
562 new_filter = std::make_unique<HeapProfilerEventFilter>();
563 } else {
564 if (filter_factory_for_testing_)
565 new_filter = filter_factory_for_testing_(predicate_name);
566 CHECK(new_filter) << "Unknown trace filter " << predicate_name;
567 }
568 GetCategoryGroupFilters().push_back(std::move(new_filter));
569 }
570 }
571
572 void TraceLog::SetEnabled(const TraceConfig& trace_config,
573 uint8_t modes_to_enable) {
574 DCHECK(trace_config.process_filter_config().IsEnabled(process_id_));
575
576 AutoLock lock(lock_);
577
578 // Can't enable tracing when Flush() is in progress.
579 DCHECK(!flush_task_runner_);
580
581 InternalTraceOptions new_options =
582 GetInternalOptionsFromTraceConfig(trace_config);
583
584 InternalTraceOptions old_options = trace_options();
585
586 if (dispatching_to_observers_) {
587 // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
588 DLOG(ERROR)
589 << "Cannot manipulate TraceLog::Enabled state from an observer.";
590 return;
591 }
592
593 // Clear all filters from the previous tracing session. These filters are not
594 // cleared at the end of tracing because some threads that hit a trace event
595 // while tracing is being disabled could still be using them.
596 if (!enabled_modes_)
597 GetCategoryGroupFilters().clear();
598
599 // Update trace config for recording.
600 const bool already_recording = enabled_modes_ & RECORDING_MODE;
601 if (modes_to_enable & RECORDING_MODE) {
602 if (already_recording) {
603 trace_config_.Merge(trace_config);
604 } else {
605 trace_config_ = trace_config;
606 }
607 }
608
609 // Update event filters only if filtering was not enabled.
610 if (modes_to_enable & FILTERING_MODE && enabled_event_filters_.empty()) {
611 DCHECK(!trace_config.event_filters().empty());
612 enabled_event_filters_ = trace_config.event_filters();
613 }
614 // Keep the |trace_config_| updated with only enabled filters in case anyone
615 // tries to read it using |GetCurrentTraceConfig| (even if filters are
616 // empty).
617 trace_config_.SetEventFilters(enabled_event_filters_);
618
619 enabled_modes_ |= modes_to_enable;
620 UpdateCategoryRegistry();
621
622 // Do not notify observers or create trace buffer if only enabled for
623 // filtering or if recording was already enabled.
624 if (!(modes_to_enable & RECORDING_MODE) || already_recording)
625 return;
626
627 // Discard events if new trace options are different. Reducing trace buffer
628 // size is not supported while already recording, so only replace trace
629 // buffer if we were not already recording.
630 if (new_options != old_options ||
631 (trace_config_.GetTraceBufferSizeInEvents() && !already_recording)) {
632 subtle::NoBarrier_Store(&trace_options_, new_options);
633 UseNextTraceBuffer();
634 }
635
636 num_traces_recorded_++;
637
638 UpdateCategoryRegistry();
639
640 dispatching_to_observers_ = true;
641 {
642 // Notify observers outside of the thread events lock, so they can trigger
643 // trace events.
644 AutoUnlock unlock(lock_);
645 AutoLock lock2(observers_lock_);
646 for (EnabledStateObserver* observer : enabled_state_observers_)
647 observer->OnTraceLogEnabled();
648 for (const auto& it : async_observers_) {
649 it.second.task_runner->PostTask(
650 FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
651 it.second.observer));
652 }
653 }
654 dispatching_to_observers_ = false;
655 }
656
657 void TraceLog::SetArgumentFilterPredicate(
658 const ArgumentFilterPredicate& argument_filter_predicate) {
659 AutoLock lock(lock_);
660 DCHECK(!argument_filter_predicate.is_null());
661 // Replace the existing argument filter.
662 argument_filter_predicate_ = argument_filter_predicate;
663 }
664
665 ArgumentFilterPredicate TraceLog::GetArgumentFilterPredicate() const {
666 AutoLock lock(lock_);
667 return argument_filter_predicate_;
668 }
669
670 void TraceLog::SetMetadataFilterPredicate(
671 const MetadataFilterPredicate& metadata_filter_predicate) {
672 AutoLock lock(lock_);
673 DCHECK(!metadata_filter_predicate.is_null());
674 // Replace the existing metadata filter predicate.
675 metadata_filter_predicate_ = metadata_filter_predicate;
676 }
677
678 MetadataFilterPredicate TraceLog::GetMetadataFilterPredicate() const {
679 AutoLock lock(lock_);
680 return metadata_filter_predicate_;
681 }
682
683 TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
684 const TraceConfig& config) {
685 InternalTraceOptions ret = config.IsArgumentFilterEnabled()
686 ? kInternalEnableArgumentFilter
687 : kInternalNone;
688 switch (config.GetTraceRecordMode()) {
689 case RECORD_UNTIL_FULL:
690 return ret | kInternalRecordUntilFull;
691 case RECORD_CONTINUOUSLY:
692 return ret | kInternalRecordContinuously;
693 case ECHO_TO_CONSOLE:
694 return ret | kInternalEchoToConsole;
695 case RECORD_AS_MUCH_AS_POSSIBLE:
696 return ret | kInternalRecordAsMuchAsPossible;
697 }
698 NOTREACHED();
699 return kInternalNone;
700 }
701
702 TraceConfig TraceLog::GetCurrentTraceConfig() const {
703 AutoLock lock(lock_);
704 return trace_config_;
705 }
706
707 void TraceLog::SetDisabled() {
708 AutoLock lock(lock_);
709 SetDisabledWhileLocked(RECORDING_MODE);
710 }
711
712 void TraceLog::SetDisabled(uint8_t modes_to_disable) {
713 AutoLock lock(lock_);
714 SetDisabledWhileLocked(modes_to_disable);
715 }
716
717 void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
718 lock_.AssertAcquired();
719
720 if (!(enabled_modes_ & modes_to_disable))
721 return;
722
723 if (dispatching_to_observers_) {
724 // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
725 DLOG(ERROR)
726 << "Cannot manipulate TraceLog::Enabled state from an observer.";
727 return;
728 }
729
730 bool is_recording_mode_disabled =
731 (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
732 enabled_modes_ &= ~modes_to_disable;
733
734 if (modes_to_disable & FILTERING_MODE)
735 enabled_event_filters_.clear();
736
737 if (modes_to_disable & RECORDING_MODE)
738 trace_config_.Clear();
739
740 UpdateCategoryRegistry();
741
742 // Add metadata events and notify observers only if recording mode has just
743 // been disabled.
744 if (!is_recording_mode_disabled)
745 return;
746
747 AddMetadataEventsWhileLocked();
748
749 // Remove metadata events so they will not get added to a subsequent trace.
750 metadata_events_.clear();
751
752 dispatching_to_observers_ = true;
753 {
754 // Release trace events lock, so observers can trigger trace events.
755 AutoUnlock unlock(lock_);
756 AutoLock lock2(observers_lock_);
757 for (auto* it : enabled_state_observers_)
758 it->OnTraceLogDisabled();
759 for (const auto& it : async_observers_) {
760 it.second.task_runner->PostTask(
761 FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
762 it.second.observer));
763 }
764 }
765 dispatching_to_observers_ = false;
766 }
767
768 int TraceLog::GetNumTracesRecorded() {
769 AutoLock lock(lock_);
770 return (enabled_modes_ & RECORDING_MODE) ? num_traces_recorded_ : -1;
771 }
772
773 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
774 AutoLock lock(observers_lock_);
775 enabled_state_observers_.push_back(listener);
776 }
777
778 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
779 AutoLock lock(observers_lock_);
780 enabled_state_observers_.erase(
781 std::remove(enabled_state_observers_.begin(),
782 enabled_state_observers_.end(), listener),
783 enabled_state_observers_.end());
784 }
785
786 void TraceLog::AddOwnedEnabledStateObserver(
787 std::unique_ptr<EnabledStateObserver> listener) {
788 AutoLock lock(observers_lock_);
789 enabled_state_observers_.push_back(listener.get());
790 owned_enabled_state_observer_copy_.push_back(std::move(listener));
791 }
792
793 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
794 AutoLock lock(observers_lock_);
795 return Contains(enabled_state_observers_, listener);
796 }
797
798 void TraceLog::AddAsyncEnabledStateObserver(
799 WeakPtr<AsyncEnabledStateObserver> listener) {
800 AutoLock lock(observers_lock_);
801 async_observers_.emplace(listener.get(), RegisteredAsyncObserver(listener));
802 }
803
804 void TraceLog::RemoveAsyncEnabledStateObserver(
805 AsyncEnabledStateObserver* listener) {
806 AutoLock lock(observers_lock_);
807 async_observers_.erase(listener);
808 }
809
810 bool TraceLog::HasAsyncEnabledStateObserver(
811 AsyncEnabledStateObserver* listener) const {
812 AutoLock lock(observers_lock_);
813 return Contains(async_observers_, listener);
814 }
815
816 TraceLogStatus TraceLog::GetStatus() const {
817 AutoLock lock(lock_);
818 TraceLogStatus result;
819 result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
820 result.event_count = static_cast<uint32_t>(logged_events_->Size());
821 return result;
822 }
823
824 bool TraceLog::BufferIsFull() const {
825 AutoLock lock(lock_);
826 return logged_events_->IsFull();
827 }
828
829 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
830 TraceEventHandle* handle,
831 bool check_buffer_is_full) {
832 lock_.AssertAcquired();
833
834 if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
835 logged_events_->ReturnChunk(thread_shared_chunk_index_,
836 std::move(thread_shared_chunk_));
837 }
838
839 if (!thread_shared_chunk_) {
840 thread_shared_chunk_ =
841 logged_events_->GetChunk(&thread_shared_chunk_index_);
842 if (check_buffer_is_full)
843 CheckIfBufferIsFullWhileLocked();
844 }
845 if (!thread_shared_chunk_)
846 return nullptr;
847
848 size_t event_index;
849 TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
850 if (trace_event && handle) {
851 MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
852 event_index, handle);
853 }
854 return trace_event;
855 }
856
857 void TraceLog::CheckIfBufferIsFullWhileLocked() {
858 lock_.AssertAcquired();
859 if (logged_events_->IsFull()) {
860 if (buffer_limit_reached_timestamp_.is_null()) {
861 buffer_limit_reached_timestamp_ = OffsetNow();
862 }
863 SetDisabledWhileLocked(RECORDING_MODE);
864 }
865 }
866
867 // Flush() works as follows:
868 // 1. Flush() is called on thread A, whose task runner is saved in
869 //    flush_task_runner_;
870 // 2. If thread_task_runners_ is not empty, thread A posts a task to each of
871 //    them to flush the thread-local buffers; otherwise, finish the flush;
872 // 3. FlushCurrentThread() deletes the thread-local event buffer:
873 //    - The last batch of events of the thread is flushed into the main buffer;
874 //    - The thread's task runner is removed from thread_task_runners_;
875 //      if it was the last one, finish the flush;
876 // 4. If any thread hasn't finished its flush in time, finish the flush.
877 void TraceLog::Flush(const TraceLog::OutputCallback& cb,
878 bool use_worker_thread) {
879 FlushInternal(cb, use_worker_thread, false);
880 }
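
// Minimal caller-side sketch (illustrative, not part of this file): tracing is
// typically disabled first and then flushed; the callback may run several
// times, with |has_more_events| == false on the final invocation.
//
//   TraceLog* log = TraceLog::GetInstance();
//   log->SetDisabled();
//   log->Flush(BindRepeating(
//       [](const scoped_refptr<RefCountedString>& json, bool has_more_events) {
//         // Append json->data() to the output; done when !has_more_events.
//       }));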
881
882 void TraceLog::CancelTracing(const OutputCallback& cb) {
883 SetDisabled();
884 FlushInternal(cb, false, true);
885 }
886
887 void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
888 bool use_worker_thread,
889 bool discard_events) {
890 use_worker_thread_ = use_worker_thread;
891 if (IsEnabled()) {
892 // Can't flush when tracing is enabled because otherwise PostTask would
893 // - generate more trace events;
894 // - deschedule the calling thread on some platforms causing inaccurate
895 // timing of the trace events.
896 scoped_refptr<RefCountedString> empty_result = new RefCountedString;
897 if (!cb.is_null())
898 cb.Run(empty_result, false);
899 LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
900 return;
901 }
902
903 int gen = generation();
904 // Copy of thread_task_runners_ to be used without locking.
905 std::vector<scoped_refptr<SingleThreadTaskRunner>> task_runners;
906 {
907 AutoLock lock(lock_);
908 DCHECK(!flush_task_runner_);
909 flush_task_runner_ = SequencedTaskRunnerHandle::IsSet()
910 ? SequencedTaskRunnerHandle::Get()
911 : nullptr;
912 DCHECK(thread_task_runners_.empty() || flush_task_runner_);
913 flush_output_callback_ = cb;
914
915 if (thread_shared_chunk_) {
916 logged_events_->ReturnChunk(thread_shared_chunk_index_,
917 std::move(thread_shared_chunk_));
918 }
919
920 for (const auto& it : thread_task_runners_)
921 task_runners.push_back(it.second);
922 }
923
924 if (!task_runners.empty()) {
925 for (auto& task_runner : task_runners) {
926 task_runner->PostTask(
927 FROM_HERE, BindOnce(&TraceLog::FlushCurrentThread, Unretained(this),
928 gen, discard_events));
929 }
930 flush_task_runner_->PostDelayedTask(
931 FROM_HERE,
932 BindOnce(&TraceLog::OnFlushTimeout, Unretained(this), gen,
933 discard_events),
934 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
935 return;
936 }
937
938 FinishFlush(gen, discard_events);
939 }
940
941 // Usually it runs on a different thread.
942 void TraceLog::ConvertTraceEventsToTraceFormat(
943 std::unique_ptr<TraceBuffer> logged_events,
944 const OutputCallback& flush_output_callback,
945 const ArgumentFilterPredicate& argument_filter_predicate) {
946 if (flush_output_callback.is_null())
947 return;
948
949 HEAP_PROFILER_SCOPED_IGNORE;
950 // The callback needs to be called at least once even if there are no events,
951 // to let the caller know that the flush has completed.
952 scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
953 const size_t kReserveCapacity = kTraceEventBufferSizeInBytes * 5 / 4;
954 json_events_str_ptr->data().reserve(kReserveCapacity);
955 while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
956 for (size_t j = 0; j < chunk->size(); ++j) {
957 size_t size = json_events_str_ptr->size();
958 if (size > kTraceEventBufferSizeInBytes) {
959 flush_output_callback.Run(json_events_str_ptr, true);
960 json_events_str_ptr = new RefCountedString();
961 json_events_str_ptr->data().reserve(kReserveCapacity);
962 } else if (size) {
963 json_events_str_ptr->data().append(",\n");
964 }
965 chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
966 argument_filter_predicate);
967 }
968 }
969 flush_output_callback.Run(json_events_str_ptr, false);
970 }
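
// Note on the output produced above: events are serialized as comma-separated
// JSON objects and delivered in chunks of roughly kTraceEventBufferSizeInBytes;
// every chunk except the last is passed to the callback with
// has_more_events == true, and the final (possibly empty) chunk with false.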
971
972 void TraceLog::FinishFlush(int generation, bool discard_events) {
973 std::unique_ptr<TraceBuffer> previous_logged_events;
974 OutputCallback flush_output_callback;
975 ArgumentFilterPredicate argument_filter_predicate;
976
977 if (!CheckGeneration(generation))
978 return;
979
980 {
981 AutoLock lock(lock_);
982
983 previous_logged_events.swap(logged_events_);
984 UseNextTraceBuffer();
985 thread_task_runners_.clear();
986
987 flush_task_runner_ = nullptr;
988 flush_output_callback = flush_output_callback_;
989 flush_output_callback_.Reset();
990
991 if (trace_options() & kInternalEnableArgumentFilter) {
992 // If argument filtering is activated and there is no filtering predicate,
993 // use the safe default filtering predicate.
994 if (argument_filter_predicate_.is_null()) {
995 argument_filter_predicate =
996 base::BindRepeating(&DefaultIsTraceEventArgsWhitelisted);
997 } else {
998 argument_filter_predicate = argument_filter_predicate_;
999 }
1000 }
1001 }
1002
1003 if (discard_events) {
1004 if (!flush_output_callback.is_null()) {
1005 scoped_refptr<RefCountedString> empty_result = new RefCountedString;
1006 flush_output_callback.Run(empty_result, false);
1007 }
1008 return;
1009 }
1010
1011 if (use_worker_thread_) {
1012 base::ThreadPool::PostTask(
1013 FROM_HERE,
1014 {MayBlock(), TaskPriority::BEST_EFFORT,
1015 TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
1016 BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
1017 std::move(previous_logged_events), flush_output_callback,
1018 argument_filter_predicate));
1019 return;
1020 }
1021
1022 ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
1023 flush_output_callback,
1024 argument_filter_predicate);
1025 }
1026
1027 // Run in each thread holding a local event buffer.
1028 void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
1029 {
1030 AutoLock lock(lock_);
1031 if (!CheckGeneration(generation) || !flush_task_runner_) {
1032 // This is late. The corresponding flush has finished.
1033 return;
1034 }
1035 }
1036
1037 // This will flush the thread local buffer.
1038 delete thread_local_event_buffer_.Get();
1039
1040 auto on_flush_override = on_flush_override_.load(std::memory_order_relaxed);
1041 if (on_flush_override) {
1042 on_flush_override();
1043 }
1044
1045 // Scheduler uses TRACE_EVENT macros when posting a task, which can lead
1046 // to acquiring a tracing lock. Given that posting a task requires grabbing
1047 // a scheduler lock, we need to post this task outside tracing lock to avoid
1048 // deadlocks.
1049 scoped_refptr<SequencedTaskRunner> cached_flush_task_runner;
1050 {
1051 AutoLock lock(lock_);
1052 cached_flush_task_runner = flush_task_runner_;
1053 if (!CheckGeneration(generation) || !flush_task_runner_ ||
1054 !thread_task_runners_.empty())
1055 return;
1056 }
1057 cached_flush_task_runner->PostTask(
1058 FROM_HERE, BindOnce(&TraceLog::FinishFlush, Unretained(this), generation,
1059 discard_events));
1060 }
1061
1062 void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
1063 {
1064 AutoLock lock(lock_);
1065 if (!CheckGeneration(generation) || !flush_task_runner_) {
1066 // Flush has finished before timeout.
1067 return;
1068 }
1069
1070 LOG(WARNING)
1071 << "The following threads haven't finished flush in time. "
1072 "If this happens stably for some thread, please call "
1073 "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
1074 "the thread to avoid its trace events from being lost.";
1075 for (const auto& it : thread_task_runners_) {
1076 LOG(WARNING) << "Thread: "
1077 << ThreadIdNameManager::GetInstance()->GetName(it.first);
1078 }
1079 }
1080 FinishFlush(generation, discard_events);
1081 }
1082
1083 void TraceLog::UseNextTraceBuffer() {
1084 logged_events_.reset(CreateTraceBuffer());
1085 subtle::NoBarrier_AtomicIncrement(&generation_, 1);
1086 thread_shared_chunk_.reset();
1087 thread_shared_chunk_index_ = 0;
1088 }
1089
1090 bool TraceLog::ShouldAddAfterUpdatingState(
1091 char phase,
1092 const unsigned char* category_group_enabled,
1093 const char* name,
1094 unsigned long long id,
1095 int thread_id,
1096 TraceArguments* args) {
1097 if (!*category_group_enabled)
1098 return false;
1099
1100 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
1101 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1102 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1103 if (thread_is_in_trace_event_.Get())
1104 return false;
1105
1106 DCHECK(name);
1107
1108 // Check and update the current thread name only if the event is for the
1109 // current thread to avoid locks in most cases.
1110 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1111 const char* new_name =
1112 ThreadIdNameManager::GetInstance()->GetNameForCurrentThread();
1113 // Check if the thread name has been set or changed since the previous
1114 // call (if any), but don't bother if the new name is empty. Note this will
1115 // not detect a thread name change within the same char* buffer address: we
1116 // favor common case performance over corner case correctness.
1117 static auto* current_thread_name = new ThreadLocalPointer<const char>();
1118 if (new_name != current_thread_name->Get() && new_name && *new_name) {
1119 current_thread_name->Set(new_name);
1120
1121 AutoLock thread_info_lock(thread_info_lock_);
1122
1123 auto existing_name = thread_names_.find(thread_id);
1124 if (existing_name == thread_names_.end()) {
1125 // This is a new thread id, and a new name.
1126 thread_names_[thread_id] = new_name;
1127 } else {
1128 // This is a thread id that we've seen before, but potentially with a
1129 // new name.
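// The stored value becomes a comma-separated list of every name seen for this
// id, e.g. "WorkerThread,CompositorTileWorker" (names here are illustrative).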
1130 std::vector<StringPiece> existing_names = base::SplitStringPiece(
1131 existing_name->second, ",", base::KEEP_WHITESPACE,
1132 base::SPLIT_WANT_NONEMPTY);
1133 if (!Contains(existing_names, new_name)) {
1134 if (!existing_names.empty())
1135 existing_name->second.push_back(',');
1136 existing_name->second.append(new_name);
1137 }
1138 }
1139 }
1140 }
1141
1142 #if defined(OS_WIN)
1143 // This is done sooner rather than later, to avoid creating the event and
1144 // acquiring the lock, which is not needed for ETW as it's already threadsafe.
1145 if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT) {
1146 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
1147 args);
1148 }
1149 #endif // OS_WIN
1150 return true;
1151 }
1152
1153 TraceEventHandle TraceLog::AddTraceEvent(
1154 char phase,
1155 const unsigned char* category_group_enabled,
1156 const char* name,
1157 const char* scope,
1158 unsigned long long id,
1159 TraceArguments* args,
1160 unsigned int flags) {
1161 int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1162 base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1163 return AddTraceEventWithThreadIdAndTimestamp(
1164 phase, category_group_enabled, name, scope, id,
1165 trace_event_internal::kNoId, // bind_id
1166 thread_id, now, args, flags);
1167 }
1168
1169 TraceEventHandle TraceLog::AddTraceEventWithBindId(
1170 char phase,
1171 const unsigned char* category_group_enabled,
1172 const char* name,
1173 const char* scope,
1174 unsigned long long id,
1175 unsigned long long bind_id,
1176 TraceArguments* args,
1177 unsigned int flags) {
1178 int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1179 base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1180 return AddTraceEventWithThreadIdAndTimestamp(
1181 phase, category_group_enabled, name, scope, id, bind_id, thread_id, now,
1182 args, flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
1183 }
1184
1185 TraceEventHandle TraceLog::AddTraceEventWithProcessId(
1186 char phase,
1187 const unsigned char* category_group_enabled,
1188 const char* name,
1189 const char* scope,
1190 unsigned long long id,
1191 int process_id,
1192 TraceArguments* args,
1193 unsigned int flags) {
1194 base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1195 return AddTraceEventWithThreadIdAndTimestamp(
1196 phase, category_group_enabled, name, scope, id,
1197 trace_event_internal::kNoId, // bind_id
1198 process_id, now, args, flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
1199 }
1200
1201 // Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
1202 // with kNoId as bind_id
1203 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1204 char phase,
1205 const unsigned char* category_group_enabled,
1206 const char* name,
1207 const char* scope,
1208 unsigned long long id,
1209 int thread_id,
1210 const TimeTicks& timestamp,
1211 TraceArguments* args,
1212 unsigned int flags) {
1213 return AddTraceEventWithThreadIdAndTimestamp(
1214 phase, category_group_enabled, name, scope, id,
1215 trace_event_internal::kNoId, // bind_id
1216 thread_id, timestamp, args, flags);
1217 }
1218
1219 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1220 char phase,
1221 const unsigned char* category_group_enabled,
1222 const char* name,
1223 const char* scope,
1224 unsigned long long id,
1225 unsigned long long bind_id,
1226 int thread_id,
1227 const TimeTicks& timestamp,
1228 TraceArguments* args,
1229 unsigned int flags) NO_THREAD_SAFETY_ANALYSIS {
1230 TraceEventHandle handle = {0, 0, 0};
1231 if (!ShouldAddAfterUpdatingState(phase, category_group_enabled, name, id,
1232 thread_id, args)) {
1233 return handle;
1234 }
1235 DCHECK(!timestamp.is_null());
1236
1237 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1238
1239 // Flow bind_ids don't have scopes, so we need to mangle in-process ones to
1240 // avoid collisions.
1241 bool has_flow =
1242 flags & (TRACE_EVENT_FLAG_FLOW_OUT | TRACE_EVENT_FLAG_FLOW_IN);
1243 if (has_flow && (flags & TRACE_EVENT_FLAG_HAS_LOCAL_ID))
1244 bind_id = MangleEventId(bind_id);
1245
1246 TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
1247 ThreadTicks thread_now = ThreadNow();
1248 ThreadInstructionCount thread_instruction_now = ThreadInstructionNow();
1249
1250 ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
1251 if (*category_group_enabled & RECORDING_MODE) {
1252 // |thread_local_event_buffer_| can be null if the current thread doesn't
1253 // have a message loop or the message loop is blocked.
1254 InitializeThreadLocalEventBufferIfSupported();
1255 thread_local_event_buffer = thread_local_event_buffer_.Get();
1256 }
1257
1258 if (*category_group_enabled & RECORDING_MODE) {
1259 auto trace_event_override =
1260 add_trace_event_override_.load(std::memory_order_relaxed);
1261 if (trace_event_override) {
1262 TraceEvent new_trace_event(
1263 thread_id, offset_event_timestamp, thread_now, thread_instruction_now,
1264 phase, category_group_enabled, name, scope, id, bind_id, args, flags);
1265
1266 trace_event_override(
1267 &new_trace_event,
1268 /*thread_will_flush=*/thread_local_event_buffer != nullptr, &handle);
1269 return handle;
1270 }
1271 }
1272
1273 std::string console_message;
1274 std::unique_ptr<TraceEvent> filtered_trace_event;
1275 bool disabled_by_filters = false;
1276 if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
1277 auto new_trace_event = std::make_unique<TraceEvent>(
1278 thread_id, offset_event_timestamp, thread_now, thread_instruction_now,
1279 phase, category_group_enabled, name, scope, id, bind_id, args, flags);
1280
1281 disabled_by_filters = true;
1282 ForEachCategoryFilter(
1283 category_group_enabled, [&new_trace_event, &disabled_by_filters](
1284 TraceEventFilter* trace_event_filter) {
1285 if (trace_event_filter->FilterTraceEvent(*new_trace_event))
1286 disabled_by_filters = false;
1287 });
1288 if (!disabled_by_filters)
1289 filtered_trace_event = std::move(new_trace_event);
1290 }
1291
1292 // If enabled for recording, the event should be added only if one of the
1293 // filters accepts it, or the category is not enabled for filtering.
1294 if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
1295 !disabled_by_filters) {
1296 OptionalAutoLock lock(&lock_);
1297
1298 TraceEvent* trace_event = nullptr;
1299 if (thread_local_event_buffer) {
1300 trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
1301 } else {
1302 lock.EnsureAcquired();
1303 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
1304 }
1305
1306 // NO_THREAD_SAFETY_ANALYSIS: Conditional locking above.
1307 if (trace_event) {
1308 if (filtered_trace_event) {
1309 *trace_event = std::move(*filtered_trace_event);
1310 } else {
1311 trace_event->Reset(thread_id, offset_event_timestamp, thread_now,
1312 thread_instruction_now, phase,
1313 category_group_enabled, name, scope, id, bind_id,
1314 args, flags);
1315 }
1316
1317 #if defined(OS_ANDROID)
1318 trace_event->SendToATrace();
1319 #endif
1320 }
1321
1322 if (trace_options() & kInternalEchoToConsole) {
1323 console_message = EventToConsoleMessage(
1324 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
1325 timestamp, trace_event);
1326 }
1327 }
1328
1329 if (!console_message.empty())
1330 LOG(ERROR) << console_message;
1331
1332 return handle;
1333 }
1334
1335 void TraceLog::AddMetadataEvent(const unsigned char* category_group_enabled,
1336 const char* name,
1337 TraceArguments* args,
1338 unsigned int flags) {
1339 HEAP_PROFILER_SCOPED_IGNORE;
1340 int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1341 ThreadTicks thread_now = ThreadNow();
1342 TimeTicks now = OffsetNow();
1343 ThreadInstructionCount thread_instruction_now = ThreadInstructionNow();
1344 AutoLock lock(lock_);
1345 auto trace_event = std::make_unique<TraceEvent>(
1346 thread_id, now, thread_now, thread_instruction_now,
1347 TRACE_EVENT_PHASE_METADATA, category_group_enabled, name,
1348 trace_event_internal::kGlobalScope, // scope
1349 trace_event_internal::kNoId, // id
1350 trace_event_internal::kNoId, // bind_id
1351 args, flags);
1352 metadata_events_.push_back(std::move(trace_event));
1353 }
1354
1355 // May be called when a COMPLETE event ends and the unfinished event has been
1356 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
1357 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
1358 const TimeTicks& timestamp,
1359 TraceEvent* trace_event) {
1360 HEAP_PROFILER_SCOPED_IGNORE;
1361 AutoLock thread_info_lock(thread_info_lock_);
1362
1363 // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
1364 // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
1365 DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
1366
1367 TimeDelta duration;
1368 int thread_id =
1369 trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
1370 if (phase == TRACE_EVENT_PHASE_END) {
1371 duration = timestamp - thread_event_start_times_[thread_id].top();
1372 thread_event_start_times_[thread_id].pop();
1373 }
1374
1375 std::string thread_name = thread_names_[thread_id];
1376 if (thread_colors_.find(thread_name) == thread_colors_.end()) {
1377 size_t next_color = (thread_colors_.size() % 6) + 1;
1378 thread_colors_[thread_name] = next_color;
1379 }
1380
1381 std::ostringstream log;
1382 log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
1383 thread_colors_[thread_name]);
1384
1385 size_t depth = 0;
1386 auto it = thread_event_start_times_.find(thread_id);
1387 if (it != thread_event_start_times_.end())
1388 depth = it->second.size();
1389
1390 for (size_t i = 0; i < depth; ++i)
1391 log << "| ";
1392
1393 if (trace_event)
1394 trace_event->AppendPrettyPrinted(&log);
1395 if (phase == TRACE_EVENT_PHASE_END)
1396 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1397
1398 log << "\x1b[0;m";
1399
1400 if (phase == TRACE_EVENT_PHASE_BEGIN)
1401 thread_event_start_times_[thread_id].push(timestamp);
1402
1403 return log.str();
1404 }
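
// Example of a console line assembled above (illustrative): for a nested END
// event on a thread named "CrBrowserMain" the output looks roughly like
//   CrBrowserMain: | | DoTask (1.234 ms)
// where the ANSI color escapes are omitted and each "| " marks one level of
// event nesting on that thread.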
1405
1406 void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
1407 const char* name,
1408 TraceEventHandle handle) {
1409 const char* category_name = GetCategoryGroupName(category_group_enabled);
1410 ForEachCategoryFilter(
1411 category_group_enabled,
1412 [name, category_name](TraceEventFilter* trace_event_filter) {
1413 trace_event_filter->EndEvent(category_name, name);
1414 });
1415 }
1416
1417 void TraceLog::UpdateTraceEventDuration(
1418 const unsigned char* category_group_enabled,
1419 const char* name,
1420 TraceEventHandle handle) {
1421 char category_group_enabled_local = *category_group_enabled;
1422 if (!category_group_enabled_local)
1423 return;
1424
1425 UpdateTraceEventDurationExplicit(
1426 category_group_enabled, name, handle,
1427 static_cast<int>(base::PlatformThread::CurrentId()),
1428 /*explicit_timestamps=*/false, OffsetNow(), ThreadNow(),
1429 ThreadInstructionNow());
1430 }
1431
1432 void TraceLog::UpdateTraceEventDurationExplicit(
1433 const unsigned char* category_group_enabled,
1434 const char* name,
1435 TraceEventHandle handle,
1436 int thread_id,
1437 bool explicit_timestamps,
1438 const TimeTicks& now,
1439 const ThreadTicks& thread_now,
1440 ThreadInstructionCount thread_instruction_now) {
1441 char category_group_enabled_local = *category_group_enabled;
1442 if (!category_group_enabled_local)
1443 return;
1444
1445 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
1446 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1447 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1448 if (thread_is_in_trace_event_.Get())
1449 return;
1450 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1451
1452 #if defined(OS_WIN)
1453 // Generate an ETW event that marks the end of a complete event.
1454 if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
1455 TraceEventETWExport::AddCompleteEndEvent(name);
1456 #endif // OS_WIN
1457
1458 if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
1459 auto update_duration_override =
1460 update_duration_override_.load(std::memory_order_relaxed);
1461 if (update_duration_override) {
1462 update_duration_override(category_group_enabled, name, handle, thread_id,
1463 explicit_timestamps, now, thread_now,
1464 thread_instruction_now);
1465 return;
1466 }
1467 }
1468
1469 std::string console_message;
1470 if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
1471 OptionalAutoLock lock(&lock_);
1472
1473 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
1474 if (trace_event) {
1475 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
1476
1477 trace_event->UpdateDuration(now, thread_now, thread_instruction_now);
1478 #if defined(OS_ANDROID)
1479 trace_event->SendToATrace();
1480 #endif
1481 }
1482
1483 if (trace_options() & kInternalEchoToConsole) {
1484 console_message =
1485 EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
1486 }
1487 }
1488
1489 if (!console_message.empty())
1490 LOG(ERROR) << console_message;
1491
1492 if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING)
1493 EndFilteredEvent(category_group_enabled, name, handle);
1494 }
1495
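// XORs an event id with a per-process hash (set up in SetProcessID() below) so
// that ids chosen independently by different processes are unlikely to collide
// when their traces are merged.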
1496 uint64_t TraceLog::MangleEventId(uint64_t id) {
1497 return id ^ process_id_hash_;
1498 }
1499
1500 template <typename T>
1501 void TraceLog::AddMetadataEventWhileLocked(int thread_id,
1502 const char* metadata_name,
1503 const char* arg_name,
1504 const T& value) {
1505 auto trace_event_override =
1506 add_trace_event_override_.load(std::memory_order_relaxed);
1507 if (trace_event_override) {
1508 TraceEvent trace_event;
1509 InitializeMetadataEvent(&trace_event, thread_id, metadata_name, arg_name,
1510 value);
1511 trace_event_override(&trace_event, /*thread_will_flush=*/true, nullptr);
1512 } else {
1513 InitializeMetadataEvent(
1514 AddEventToThreadSharedChunkWhileLocked(nullptr, false), thread_id,
1515 metadata_name, arg_name, value);
1516 }
1517 }
1518
1519 void TraceLog::AddMetadataEventsWhileLocked() {
1520 lock_.AssertAcquired();
1521
1522 auto trace_event_override =
1523 add_trace_event_override_.load(std::memory_order_relaxed);
1524
1525 // Move metadata added by |AddMetadataEvent| into the trace log.
1526 if (trace_event_override) {
1527 while (!metadata_events_.empty()) {
1528 trace_event_override(metadata_events_.back().get(),
1529 /*thread_will_flush=*/true, nullptr);
1530 metadata_events_.pop_back();
1531 }
1532 } else {
1533 while (!metadata_events_.empty()) {
1534 TraceEvent* event =
1535 AddEventToThreadSharedChunkWhileLocked(nullptr, false);
1536 *event = std::move(*metadata_events_.back());
1537 metadata_events_.pop_back();
1538 }
1539 }
1540
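// In addition to the queued metadata above, emit the standard process-level
// records (cpu count, sort index, name, labels, uptime) and per-thread records
// (sort indices, names), plus an overflow marker if the buffer filled up.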
1541 #if !defined(OS_NACL) // NaCl shouldn't expose the process id.
1542 AddMetadataEventWhileLocked(0, "num_cpus", "number",
1543 base::SysInfo::NumberOfProcessors());
1544 #endif
1545
1546 int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1547 if (process_sort_index_ != 0) {
1548 AddMetadataEventWhileLocked(current_thread_id, "process_sort_index",
1549 "sort_index", process_sort_index_);
1550 }
1551
1552 if (!process_name_.empty()) {
1553 AddMetadataEventWhileLocked(current_thread_id, "process_name", "name",
1554 process_name_);
1555 }
1556
1557 TimeDelta process_uptime = TRACE_TIME_NOW() - process_creation_time_;
1558 AddMetadataEventWhileLocked(current_thread_id, "process_uptime_seconds",
1559 "uptime", process_uptime.InSeconds());
1560
1561 #if defined(OS_ANDROID)
1562 AddMetadataEventWhileLocked(current_thread_id, "chrome_library_address",
1563 "start_address",
1564 base::StringPrintf("%p", &__executable_start));
1565 base::debug::ElfBuildIdBuffer build_id;
1566 size_t build_id_length =
1567 base::debug::ReadElfBuildId(&__executable_start, true, build_id);
1568 if (build_id_length > 0) {
1569 AddMetadataEventWhileLocked(current_thread_id, "chrome_library_module",
1570 "id", std::string(build_id));
1571 }
1572 #endif
1573
1574 if (!process_labels_.empty()) {
1575 std::vector<base::StringPiece> labels;
1576 for (const auto& it : process_labels_)
1577 labels.push_back(it.second);
1578 AddMetadataEventWhileLocked(current_thread_id, "process_labels", "labels",
1579 base::JoinString(labels, ","));
1580 }
1581
1582 // Thread sort indices.
1583 for (const auto& it : thread_sort_indices_) {
1584 if (it.second == 0)
1585 continue;
1586 AddMetadataEventWhileLocked(it.first, "thread_sort_index", "sort_index",
1587 it.second);
1588 }
1589
1590 // TODO(ssid): Stop emitting and tracking thread names when perfetto is
1591 // enabled and after crbug/978093 is fixed. The JSON exporter will emit
1592 // thread names from thread descriptors.
1593 AutoLock thread_info_lock(thread_info_lock_);
1594 for (const auto& it : thread_names_) {
1595 if (it.second.empty())
1596 continue;
1597 AddMetadataEventWhileLocked(it.first, "thread_name", "name", it.second);
1598 }
1599
1600 // If the buffer filled up, add a metadata record to report when it happened.
1601 if (!buffer_limit_reached_timestamp_.is_null()) {
1602 AddMetadataEventWhileLocked(current_thread_id, "trace_buffer_overflowed",
1603 "overflowed_at_ts",
1604 buffer_limit_reached_timestamp_);
1605 }
1606 }
1607
1608 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
1609 return GetEventByHandleInternal(handle, nullptr);
1610 }
1611
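// A TraceEventHandle identifies an event by (chunk_seq, chunk_index,
// event_index). The lookup tries the calling thread's local buffer first, then
// falls back to the thread-shared chunk and the main logged_events_ buffer,
// taking |lock| only when the fallback path is needed.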
1612 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
1613 OptionalAutoLock* lock)
1614 NO_THREAD_SAFETY_ANALYSIS {
1615 if (!handle.chunk_seq)
1616 return nullptr;
1617
1618 DCHECK(handle.chunk_seq);
1619 DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
1620 DCHECK(handle.event_index <= TraceBufferChunk::kTraceBufferChunkSize - 1);
1621
1622 if (thread_local_event_buffer_.Get()) {
1623 TraceEvent* trace_event =
1624 thread_local_event_buffer_.Get()->GetEventByHandle(handle);
1625 if (trace_event)
1626 return trace_event;
1627 }
1628
1629 // The event is no longer under the control of the thread local buffer.
1630 // Try to get the event from the main buffer with a lock.
1631 // NO_THREAD_SAFETY_ANALYSIS: runtime-dependent locking here.
1632 if (lock)
1633 lock->EnsureAcquired();
1634
1635 if (thread_shared_chunk_ &&
1636 handle.chunk_index == thread_shared_chunk_index_) {
1637 return handle.chunk_seq == thread_shared_chunk_->seq()
1638 ? thread_shared_chunk_->GetEventAt(handle.event_index)
1639 : nullptr;
1640 }
1641
1642 return logged_events_->GetEventByHandle(handle);
1643 }
1644
1645 void TraceLog::SetProcessID(int process_id) {
1646 process_id_ = process_id;
1647 // Create an FNV hash from the process ID for XORing.
1648 // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
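// This is a single FNV-1a-style step (xor with the offset basis, then multiply
// by the prime) applied to the whole pid rather than the byte-by-byte FNV
// loop; it only needs to spread the pid bits for MangleEventId().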
1649 const unsigned long long kOffsetBasis = 14695981039346656037ull;
1650 const unsigned long long kFnvPrime = 1099511628211ull;
1651 const unsigned long long pid = static_cast<unsigned long long>(process_id_);
1652 process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
1653 }
1654
1655 void TraceLog::SetProcessSortIndex(int sort_index) {
1656 AutoLock lock(lock_);
1657 process_sort_index_ = sort_index;
1658 }
1659
1660 void TraceLog::UpdateProcessLabel(int label_id,
1661 const std::string& current_label) {
1662 if (!current_label.length())
1663 return RemoveProcessLabel(label_id);
1664
1665 AutoLock lock(lock_);
1666 process_labels_[label_id] = current_label;
1667 }
1668
1669 void TraceLog::RemoveProcessLabel(int label_id) {
1670 AutoLock lock(lock_);
1671 process_labels_.erase(label_id);
1672 }
1673
1674 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
1675 AutoLock lock(lock_);
1676 thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
1677 }
1678
1679 void TraceLog::SetTimeOffset(TimeDelta offset) {
1680 time_offset_ = offset;
1681 }
1682
1683 size_t TraceLog::GetObserverCountForTest() const {
1684 return enabled_state_observers_.size();
1685 }
1686
1687 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
1688 thread_blocks_message_loop_.Set(true);
1689 // This will flush the thread local buffer.
1690 delete thread_local_event_buffer_.Get();
1691 }
1692
1693 TraceBuffer* TraceLog::CreateTraceBuffer() {
1694 HEAP_PROFILER_SCOPED_IGNORE;
1695 InternalTraceOptions options = trace_options();
1696 const size_t config_buffer_chunks =
1697 trace_config_.GetTraceBufferSizeInEvents() / kTraceBufferChunkSize;
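// Ring buffers are used for continuous recording and for ECHO_TO_CONSOLE (the
// latter with a much smaller default size); otherwise a plain vector buffer is
// created, sized larger when RECORD_AS_MUCH_AS_POSSIBLE is set. A non-zero
// buffer size in the TraceConfig overrides the default chunk counts.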
1698 if (options & kInternalRecordContinuously) {
1699 return TraceBuffer::CreateTraceBufferRingBuffer(
1700 config_buffer_chunks > 0 ? config_buffer_chunks
1701 : kTraceEventRingBufferChunks);
1702 }
1703 if (options & kInternalEchoToConsole) {
1704 return TraceBuffer::CreateTraceBufferRingBuffer(
1705 config_buffer_chunks > 0 ? config_buffer_chunks
1706 : kEchoToConsoleTraceEventBufferChunks);
1707 }
1708 if (options & kInternalRecordAsMuchAsPossible) {
1709 return TraceBuffer::CreateTraceBufferVectorOfSize(
1710 config_buffer_chunks > 0 ? config_buffer_chunks
1711 : kTraceEventVectorBigBufferChunks);
1712 }
1713 return TraceBuffer::CreateTraceBufferVectorOfSize(
1714 config_buffer_chunks > 0 ? config_buffer_chunks
1715 : kTraceEventVectorBufferChunks);
1716 }
1717
1718 #if defined(OS_WIN)
1719 void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
1720 // Go through each category and set/clear the ETW bit depending on whether the
1721 // category is enabled.
1722 for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
1723 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
1724 category.name())) {
1725 category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
1726 } else {
1727 category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
1728 }
1729 }
1730 }
1731 #endif // defined(OS_WIN)
1732
1733 void TraceLog::SetTraceBufferForTesting(
1734 std::unique_ptr<TraceBuffer> trace_buffer) {
1735 AutoLock lock(lock_);
1736 logged_events_ = std::move(trace_buffer);
1737 }
1738
1739 void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
1740 TraceEventMemoryOverhead* overhead) {
1741 overhead->Add(TraceEventMemoryOverhead::kConvertableToTraceFormat,
1742 sizeof(*this));
1743 }
1744
1745 } // namespace trace_event
1746 } // namespace base
1747
1748 namespace trace_event_internal {
1749
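// Thin forwarding wrappers around the TraceLog singleton. The TRACE_EVENT_*
// macros reach these functions through the TRACE_EVENT_API_* macro definitions
// in trace_event.h.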
1750 base::trace_event::TraceEventHandle AddTraceEvent(
1751 char phase,
1752 const unsigned char* category_group_enabled,
1753 const char* name,
1754 const char* scope,
1755 unsigned long long id,
1756 base::trace_event::TraceArguments* args,
1757 unsigned int flags) {
1758 return base::trace_event::TraceLog::GetInstance()->AddTraceEvent(
1759 phase, category_group_enabled, name, scope, id, args, flags);
1760 }
1761
1762 base::trace_event::TraceEventHandle AddTraceEventWithBindId(
1763 char phase,
1764 const unsigned char* category_group_enabled,
1765 const char* name,
1766 const char* scope,
1767 unsigned long long id,
1768 unsigned long long bind_id,
1769 base::trace_event::TraceArguments* args,
1770 unsigned int flags) {
1771 return base::trace_event::TraceLog::GetInstance()->AddTraceEventWithBindId(
1772 phase, category_group_enabled, name, scope, id, bind_id, args, flags);
1773 }
1774
1775 base::trace_event::TraceEventHandle AddTraceEventWithProcessId(
1776 char phase,
1777 const unsigned char* category_group_enabled,
1778 const char* name,
1779 const char* scope,
1780 unsigned long long id,
1781 int process_id,
1782 base::trace_event::TraceArguments* args,
1783 unsigned int flags) {
1784 return base::trace_event::TraceLog::GetInstance()->AddTraceEventWithProcessId(
1785 phase, category_group_enabled, name, scope, id, process_id, args, flags);
1786 }
1787
1788 base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
1789 char phase,
1790 const unsigned char* category_group_enabled,
1791 const char* name,
1792 const char* scope,
1793 unsigned long long id,
1794 int thread_id,
1795 const base::TimeTicks& timestamp,
1796 base::trace_event::TraceArguments* args,
1797 unsigned int flags) {
1798 return base::trace_event::TraceLog::GetInstance()
1799 ->AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
1800 name, scope, id, thread_id,
1801 timestamp, args, flags);
1802 }
1803
1804 base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
1805 char phase,
1806 const unsigned char* category_group_enabled,
1807 const char* name,
1808 const char* scope,
1809 unsigned long long id,
1810 unsigned long long bind_id,
1811 int thread_id,
1812 const base::TimeTicks& timestamp,
1813 base::trace_event::TraceArguments* args,
1814 unsigned int flags) {
1815 return base::trace_event::TraceLog::GetInstance()
1816 ->AddTraceEventWithThreadIdAndTimestamp(
1817 phase, category_group_enabled, name, scope, id, bind_id, thread_id,
1818 timestamp, args, flags);
1819 }
1820
1821 void AddMetadataEvent(const unsigned char* category_group_enabled,
1822 const char* name,
1823 base::trace_event::TraceArguments* args,
1824 unsigned int flags) {
1825 return base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
1826 category_group_enabled, name, args, flags);
1827 }
1828
1829 int GetNumTracesRecorded() {
1830 return base::trace_event::TraceLog::GetInstance()->GetNumTracesRecorded();
1831 }
1832
1833 void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
1834 const char* name,
1835 base::trace_event::TraceEventHandle handle) {
1836 return base::trace_event::TraceLog::GetInstance()->UpdateTraceEventDuration(
1837 category_group_enabled, name, handle);
1838 }
1839
1840 void UpdateTraceEventDurationExplicit(
1841 const unsigned char* category_group_enabled,
1842 const char* name,
1843 base::trace_event::TraceEventHandle handle,
1844 int thread_id,
1845 bool explicit_timestamps,
1846 const base::TimeTicks& now,
1847 const base::ThreadTicks& thread_now,
1848 base::trace_event::ThreadInstructionCount thread_instruction_now) {
1849 return base::trace_event::TraceLog::GetInstance()
1850 ->UpdateTraceEventDurationExplicit(category_group_enabled, name, handle,
1851 thread_id, explicit_timestamps, now,
1852 thread_now, thread_instruction_now);
1853 }
1854
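// ScopedTraceBinaryEfficient records a COMPLETE event spanning its own
// lifetime: the constructor adds the event with the begin timestamp and the
// destructor fills in the duration. It is typically reached through the
// TRACE_EVENT_BINARY_EFFICIENT0 macro rather than instantiated directly.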
1855 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
1856 const char* category_group,
1857 const char* name) {
1858 // The single atom works because for now the category_group can only be "gpu".
1859 DCHECK_EQ(strcmp(category_group, "gpu"), 0);
1860 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
1861 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
1862 category_group, atomic, category_group_enabled_);
1863 name_ = name;
1864 if (*category_group_enabled_) {
1865 event_handle_ =
1866 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
1867 TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
1868 trace_event_internal::kGlobalScope, // scope
1869 trace_event_internal::kNoId, // id
1870 static_cast<int>(base::PlatformThread::CurrentId()), // thread_id
1871 TRACE_TIME_TICKS_NOW(), nullptr, TRACE_EVENT_FLAG_NONE);
1872 }
1873 }
1874
1875 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
1876 if (*category_group_enabled_) {
1877 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
1878 event_handle_);
1879 }
1880 }
1881
1882 } // namespace trace_event_internal
1883