// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <inttypes.h>
#include <stdio.h>

#include <algorithm>
#include <memory>
#include <utility>

#include "base/allocator/buildflags.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
#include "base/memory/ptr_util.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_util.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"

#if BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif

#endif  // defined(OS_ANDROID)

namespace base {
namespace trace_event {

namespace {

MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;

// Temporary trampoline (until the scheduler is moved out of this file) that
// adapts the |request_dump_function| passed to Initialize() to the callback
// signature expected by MemoryDumpScheduler.
// TODO(primiano): remove this.
void DoGlobalDumpWithoutCallback(
    MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail) {
  global_dump_fn.Run(dump_type, level_of_detail);
}

}  // namespace

// static
constexpr const char* MemoryDumpManager::kTraceCategory;

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
    MallocDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif

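// Returns the testing instance if CreateInstanceForTesting() has been called;
// otherwise lazily creates and returns the leaky process-wide singleton.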
// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_memory_dump_manager_for_testing)
    return g_memory_dump_manager_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

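// Creates a fresh, caller-owned instance for tests and registers it so that
// GetInstance() returns it until it is destroyed.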
// static
std::unique_ptr<MemoryDumpManager>
MemoryDumpManager::CreateInstanceForTesting() {
  DCHECK(!g_memory_dump_manager_for_testing);
  std::unique_ptr<MemoryDumpManager> instance(new MemoryDumpManager());
  g_memory_dump_manager_for_testing = instance.get();
  return instance;
}

MemoryDumpManager::MemoryDumpManager()
    : is_coordinator_(false),
      tracing_process_id_(kInvalidTracingProcessId),
      dumper_registrations_ignored_for_testing_(false) {}

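// Note: |dump_thread_| is stopped outside of |lock_| before being reset,
// which avoids joining the thread while holding a lock that its in-flight
// dump tasks may themselves need to acquire.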
MemoryDumpManager::~MemoryDumpManager() {
  Thread* dump_thread = nullptr;
  {
    AutoLock lock(lock_);
    if (dump_thread_) {
      dump_thread = dump_thread_.get();
    }
  }
  if (dump_thread) {
    dump_thread->Stop();
  }
  AutoLock lock(lock_);
  dump_thread_.reset();
  g_memory_dump_manager_for_testing = nullptr;
}

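// Hooks up the embedder-provided global-dump function and registers the dump
// providers that are always available in this process.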
void MemoryDumpManager::Initialize(
    RequestGlobalDumpFunction request_dump_function,
    bool is_coordinator) {
  {
    AutoLock lock(lock_);
    DCHECK(!request_dump_function.is_null());
    DCHECK(!can_request_global_dumps());
    request_dump_function_ = request_dump_function;
    is_coordinator_ = is_coordinator;
  }

// Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif

#if defined(OS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                       nullptr);
#endif
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  // Set |dumps_on_single_thread_task_runner| to true because all providers
  // without a task runner are run on the dump thread.
  MemoryDumpProvider::Options options;
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
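
// A typical registration from a provider's constructor might look like the
// sketch below ("FooDumpProvider" is a hypothetical provider, not part of
// this file):
//
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       this, "FooDumpProvider", ThreadTaskRunnerHandle::Get());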

void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  DCHECK(task_runner);
  options.dumps_on_single_thread_task_runner = false;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options) {
  if (dumper_registrations_ignored_for_testing_)
    return;

  // Only a handful of MDPs are required to compute the memory metrics. These
  // have small enough performance overhead that it is reasonable to run them
  // in the background while the user is doing other things. Those MDPs are
  // 'allowed in background mode'.
  bool allowed_in_background_mode = IsMemoryDumpProviderInAllowlist(name);

  scoped_refptr<MemoryDumpProviderInfo> mdpinfo = new MemoryDumpProviderInfo(
      mdp, name, std::move(task_runner), options, allowed_in_background_mode);

  {
    AutoLock lock(lock_);
    bool already_registered = !dump_providers_.insert(mdpinfo).second;
    // This actually happens in some tests which don't have a clean tear-down
    // path for RenderThreadImpl::Init().
    if (already_registered)
      return;
  }
}

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}

void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
    std::unique_ptr<MemoryDumpProvider> mdp) {
  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}
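
// Illustrative sketch of handing a provider over for asynchronous deletion
// (|foo_provider_| and FooDumpProvider are hypothetical):
//
//   std::unique_ptr<FooDumpProvider> provider = std::move(foo_provider_);
//   MemoryDumpManager::GetInstance()->UnregisterAndDeleteDumpProviderSoon(
//       std::move(provider));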

void MemoryDumpManager::UnregisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    bool take_mdp_ownership_and_delete_async) {
  std::unique_ptr<MemoryDumpProvider> owned_mdp;
  if (take_mdp_ownership_and_delete_async)
    owned_mdp.reset(mdp);

  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if ((*mdp_iter)->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;  // Not registered / already unregistered.

  if (take_mdp_ownership_and_delete_async) {
    // The MDP will be deleted at the same time as its MDPInfo struct, that is
    // either:
    // - At the end of this function, if no dump is in progress.
    // - In ContinueAsyncProcessDump(), when the MDPInfo is removed from
    //   |pending_dump_providers|.
    DCHECK(!(*mdp_iter)->owned_dump_provider);
    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
  } else {
    // If you hit this DCHECK, your dump provider has a bug.
    // Unregistration of a MemoryDumpProvider is safe only if:
    // - The MDP has specified a sequenced task runner affinity AND the
    //   unregistration happens on the same task runner, so that the MDP cannot
    //   unregister while it is in the middle of an OnMemoryDump() call.
    // - The MDP has NOT specified a task runner affinity and its ownership is
    //   transferred via UnregisterAndDeleteDumpProviderSoon().
    // In all other cases, it is not possible to guarantee that the
    // unregistration will not race with OnMemoryDump() calls.
    DCHECK((*mdp_iter)->task_runner &&
           (*mdp_iter)->task_runner->RunsTasksInCurrentSequence())
        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
        << "unregister itself in a racy way. Please file a crbug.";
  }

  // The MDPInfo instance can still be referenced by the
  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
  // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
  // to just skip it, without actually invoking the |mdp|, which might be
  // destroyed by the caller soon after this method returns.
  (*mdp_iter)->disabled = true;
  dump_providers_.erase(mdp_iter);
}

bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
    MemoryDumpProvider* provider) {
  AutoLock lock(lock_);

  for (const auto& info : dump_providers_) {
    if (info->dump_provider == provider)
      return true;
  }
  return false;
}

scoped_refptr<SequencedTaskRunner>
MemoryDumpManager::GetDumpThreadTaskRunner() {
  base::AutoLock lock(lock_);
  return GetOrCreateBgTaskRunnerLocked();
}

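// Lazily creates the background "MemoryInfra" thread on first use. Must be
// called while holding |lock_|.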
scoped_refptr<base::SequencedTaskRunner>
MemoryDumpManager::GetOrCreateBgTaskRunnerLocked() {
  lock_.AssertAcquired();

  if (dump_thread_)
    return dump_thread_->task_runner();

  dump_thread_ = std::make_unique<Thread>("MemoryInfra");
  bool started = dump_thread_->Start();
  CHECK(started);

  return dump_thread_->task_runner();
}

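// Creates the dump for the current process, fanning out OnMemoryDump() calls
// to all registered providers; |callback| is eventually invoked on the thread
// that called this method.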
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          ProcessMemoryDumpCallback callback) {
  char guid_str[20];
  snprintf(guid_str, sizeof(guid_str), "0x%" PRIx64, args.dump_guid);
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
                                    TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
                                    TRACE_STR_COPY(guid_str));
  // If the argument filter is enabled, then only background-mode dumps should
  // be allowed. If the trace config passed for a background tracing session is
  // missing the allowed-modes argument, crash here instead of creating
  // unexpected dumps.
  if (TraceLog::GetInstance()
          ->GetCurrentTraceConfig()
          .IsArgumentFilterEnabled()) {
    CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
  }

  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);

    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
        args, dump_providers_, std::move(callback),
        GetOrCreateBgTaskRunnerLocked()));
  }

  // Start the process dump. This involves task runner hops as specified by the
  // MemoryDumpProvider(s) in RegisterDumpProvider().
  ContinueAsyncProcessDump(pmd_async_state.release());
}

// Invokes OnMemoryDump() on all MDPs that are next in the pending list and run
// on the current sequenced task runner. If the next MDP does not run on the
// current sequenced task runner, switches to that task runner and continues.
// All OnMemoryDump() invocations are linearized. |lock_| is used in these
// functions purely to ensure consistency w.r.t. (un)registrations of
// |dump_providers_|.
void MemoryDumpManager::ContinueAsyncProcessDump(
    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // In theory |owned_pmd_async_state| should be a unique_ptr. The only reason
  // why it isn't is the corner-case logic around |did_post_task| below, which
  // needs to take back ownership of the |pmd_async_state| when the PostTask()
  // fails. Unfortunately, PostTask() destroys its unique_ptr arguments upon
  // failure to prevent accidental leaks. Using a unique_ptr would prevent us
  // from skipping the hop and moving on. Hence the manual naked -> unique ptr
  // juggling.
  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
  owned_pmd_async_state = nullptr;

  while (!pmd_async_state->pending_dump_providers.empty()) {
    // Read MemoryDumpProviderInfo thread safety considerations in
    // memory_dump_manager.h when accessing |mdpinfo| fields.
    MemoryDumpProviderInfo* mdpinfo =
        pmd_async_state->pending_dump_providers.back().get();

    // If we are in background mode, we should invoke only the providers
    // allowed in background mode. Ignore other providers and continue.
    if (pmd_async_state->req_args.level_of_detail ==
            MemoryDumpLevelOfDetail::BACKGROUND &&
        !mdpinfo->allowed_in_background_mode) {
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    // If the dump provider did not specify a task runner affinity, dump on
    // |dump_thread_|.
    scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
    if (!task_runner) {
      DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
      task_runner = pmd_async_state->dump_thread_task_runner;
      DCHECK(task_runner);
    }

    // If |RunsTasksInCurrentSequence()| is true then no PostTask is
    // required since we are on the right SequencedTaskRunner.
    if (task_runner->RunsTasksInCurrentSequence()) {
      InvokeOnMemoryDump(mdpinfo, pmd_async_state->process_memory_dump.get());
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    bool did_post_task = task_runner->PostTask(
        FROM_HERE,
        BindOnce(&MemoryDumpManager::ContinueAsyncProcessDump, Unretained(this),
                 Unretained(pmd_async_state.get())));

    if (did_post_task) {
      // Ownership is transferred to the posted task.
      ignore_result(pmd_async_state.release());
      return;
    }

    // PostTask usually fails only if the process or thread is being shut down,
    // so the dump provider is disabled here. However, unbound dump providers
    // are not disabled, since the |dump_thread_| is controlled by the MDM.
    if (mdpinfo->task_runner) {
      // A locked access is required to R/W |disabled| (for the
      // UnregisterAndDeleteDumpProviderSoon() case).
      AutoLock lock(lock_);
      mdpinfo->disabled = true;
    }

    // PostTask failed. Ignore the dump provider and continue.
    pmd_async_state->pending_dump_providers.pop_back();
  }

  FinishAsyncProcessDump(std::move(pmd_async_state));
}

// This function is called on the right task runner for the current MDP. It is
// either the task runner specified by the MDP or |dump_thread_task_runner| if
// the MDP did not specify a task runner. Invokes the dump provider's
// OnMemoryDump() (unless disabled).
void MemoryDumpManager::InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
                                           ProcessMemoryDump* pmd) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(!mdpinfo->task_runner ||
         mdpinfo->task_runner->RunsTasksInCurrentSequence());

  TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
               "dump_provider.name", mdpinfo->name);

  // Do not add any other TRACE_EVENT macro (or function that might have them)
  // below this point. Under some rare circumstances, they can re-initialize
  // and invalidate the current ThreadLocalEventBuffer MDP, making the
  // |mdpinfo->disabled| check below susceptible to TOCTTOU bugs
  // (https://crbug.com/763365).

  bool is_thread_bound;
  {
    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);

    // Unregister the dump provider if it failed too many times consecutively.
    if (!mdpinfo->disabled &&
        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
      mdpinfo->disabled = true;
      DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
                  << "\". Dump failed multiple times consecutively.";
    }
    if (mdpinfo->disabled)
      return;

    is_thread_bound = mdpinfo->task_runner != nullptr;
  }  // AutoLock lock(lock_);

  // Invoke the dump provider.

  // A stack-allocated copy of the dump provider name is useful for debugging
  // crashes that happen when a dump is invoked after a |dump_provider| was
  // unregistered in an unsafe way.
  char provider_name_for_debugging[16];
  strncpy(provider_name_for_debugging, mdpinfo->name,
          sizeof(provider_name_for_debugging) - 1);
  provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
  base::debug::Alias(provider_name_for_debugging);

  ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
  CHECK(!is_thread_bound ||
        !*(static_cast<volatile bool*>(&mdpinfo->disabled)));
  bool dump_successful =
      mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
  mdpinfo->consecutive_failures =
      dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
}

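// Hops back to the thread that requested the dump (if not already on it) and
// runs the completion callback with the finished ProcessMemoryDump.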
void MemoryDumpManager::FinishAsyncProcessDump(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(pmd_async_state->pending_dump_providers.empty());
  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
        pmd_async_state->callback_task_runner;
    callback_task_runner->PostTask(
        FROM_HERE, BindOnce(&MemoryDumpManager::FinishAsyncProcessDump,
                            Unretained(this), std::move(pmd_async_state)));
    return;
  }

  TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinishAsyncProcessDump");

  if (!pmd_async_state->callback.is_null()) {
    std::move(pmd_async_state->callback)
        .Run(true /* success */, dump_guid,
             std::move(pmd_async_state->process_memory_dump));
  }

  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                  TRACE_ID_LOCAL(dump_guid));
}

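// Called when tracing starts: configures MemoryDumpScheduler from the trace
// config's PERIODIC_INTERVAL triggers. Periodic dumps are started only in the
// coordinator process.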
void MemoryDumpManager::SetupForTracing(
    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
  AutoLock lock(lock_);

  // At this point we must have the ability to request global dumps.
  DCHECK(can_request_global_dumps());

  MemoryDumpScheduler::Config periodic_config;
  for (const auto& trigger : memory_dump_config.triggers) {
    if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
      if (periodic_config.triggers.empty()) {
        periodic_config.callback =
            BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
                          MemoryDumpType::PERIODIC_INTERVAL);
      }
      periodic_config.triggers.push_back(
          {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
    }
  }

  // Only the coordinator process triggers periodic memory dumps.
  if (is_coordinator_ && !periodic_config.triggers.empty()) {
    MemoryDumpScheduler::GetInstance()->Start(periodic_config,
                                              GetOrCreateBgTaskRunnerLocked());
  }
}

void MemoryDumpManager::TeardownForTracing() {
  // There might be a memory dump in progress while this happens. Therefore,
  // ensure that the MDM state which depends on the tracing enabled / disabled
  // state is always accessed by the dumping methods holding the |lock_|.
  AutoLock lock(lock_);

  MemoryDumpScheduler::GetInstance()->Stop();
}

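// Snapshots the registered providers in reverse order (they are consumed
// back-to-front via pop_back()) and creates the ProcessMemoryDump that all of
// them will fill in.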
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
    ProcessMemoryDumpCallback callback,
    scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
    : req_args(req_args),
      callback(std::move(callback)),
      callback_task_runner(ThreadTaskRunnerHandle::Get()),
      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
  pending_dump_providers.reserve(dump_providers.size());
  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
  MemoryDumpArgs args = {req_args.level_of_detail, req_args.determinism,
                         req_args.dump_guid};
  process_memory_dump = std::make_unique<ProcessMemoryDump>(args);
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
    default;

}  // namespace trace_event
}  // namespace base