1 // Copyright 2018 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/task/sequence_manager/sequence_manager_impl.h"
6 
7 #include <queue>
8 #include <vector>
9 
10 #include "base/bind.h"
11 #include "base/bit_cast.h"
12 #include "base/compiler_specific.h"
13 #include "base/debug/crash_logging.h"
14 #include "base/debug/stack_trace.h"
15 #include "base/json/json_writer.h"
16 #include "base/logging.h"
17 #include "base/memory/ptr_util.h"
18 #include "base/no_destructor.h"
19 #include "base/optional.h"
20 #include "base/rand_util.h"
21 #include "base/ranges/algorithm.h"
22 #include "base/task/sequence_manager/real_time_domain.h"
23 #include "base/task/sequence_manager/task_time_observer.h"
24 #include "base/task/sequence_manager/thread_controller_impl.h"
25 #include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
26 #include "base/task/sequence_manager/work_queue.h"
27 #include "base/task/sequence_manager/work_queue_sets.h"
28 #include "base/threading/thread_id_name_manager.h"
29 #include "base/threading/thread_local.h"
30 #include "base/time/default_tick_clock.h"
31 #include "base/time/tick_clock.h"
32 #include "base/trace_event/base_tracing.h"
33 #include "build/build_config.h"
34 
35 namespace base {
36 namespace sequence_manager {
37 namespace {
38 
39 base::ThreadLocalPointer<internal::SequenceManagerImpl>*
GetTLSSequenceManagerImpl()40 GetTLSSequenceManagerImpl() {
41   static NoDestructor<ThreadLocalPointer<internal::SequenceManagerImpl>>
42       lazy_tls_ptr;
43   return lazy_tls_ptr.get();
44 }
45 
46 class TracedBaseValue : public trace_event::ConvertableToTraceFormat {
47  public:
TracedBaseValue(Value value)48   explicit TracedBaseValue(Value value) : value_(std::move(value)) {}
49   ~TracedBaseValue() override = default;
50 
AppendAsTraceFormat(std::string * out) const51   void AppendAsTraceFormat(std::string* out) const override {
52     if (!value_.is_none()) {
53       std::string tmp;
54       JSONWriter::Write(value_, &tmp);
55       *out += tmp;
56     } else {
57       *out += "{}";
58     }
59   }
60 
61  private:
62   base::Value value_;
63 };
64 
65 }  // namespace
66 
CreateSequenceManagerOnCurrentThread(SequenceManager::Settings settings)67 std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread(
68     SequenceManager::Settings settings) {
69   return internal::SequenceManagerImpl::CreateOnCurrentThread(
70       std::move(settings));
71 }
72 
CreateSequenceManagerOnCurrentThreadWithPump(std::unique_ptr<MessagePump> message_pump,SequenceManager::Settings settings)73 std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThreadWithPump(
74     std::unique_ptr<MessagePump> message_pump,
75     SequenceManager::Settings settings) {
76   std::unique_ptr<SequenceManager> sequence_manager =
77       internal::SequenceManagerImpl::CreateUnbound(std::move(settings));
78   sequence_manager->BindToMessagePump(std::move(message_pump));
79   return sequence_manager;
80 }
81 
CreateUnboundSequenceManager(SequenceManager::Settings settings)82 std::unique_ptr<SequenceManager> CreateUnboundSequenceManager(
83     SequenceManager::Settings settings) {
84   return internal::SequenceManagerImpl::CreateUnbound(std::move(settings));
85 }
86 
87 namespace internal {
88 
// Shorthand for the enum controlling whether wall/CPU time is recorded for a
// task (used by the task-timing helpers below).
using TimeRecordingPolicy =
    base::sequence_manager::TaskQueue::TaskTiming::TimeRecordingPolicy;
91 
92 namespace {
93 
// Tasks running at least this long are considered "long" for tracing
// purposes (the consuming code is elsewhere in this file).
constexpr TimeDelta kLongTaskTraceEventThreshold =
    TimeDelta::FromMilliseconds(50);
// Proportion of tasks which will record thread time for metrics.
const double kTaskSamplingRateForRecordingCPUTime = 0.01;
// Proportion of SequenceManagers which will record thread time for each task,
// enabling advanced metrics.
const double kThreadSamplingRateForRecordingCPUTime = 0.0001;

// Magic value to protect against memory corruption and bail out
// early when detected.
constexpr int kMemoryCorruptionSentinelValue = 0xdeadbeef;
105 
ReclaimMemoryFromQueue(internal::TaskQueueImpl * queue,std::map<TimeDomain *,TimeTicks> * time_domain_now)106 void ReclaimMemoryFromQueue(internal::TaskQueueImpl* queue,
107                             std::map<TimeDomain*, TimeTicks>* time_domain_now) {
108   TimeDomain* time_domain = queue->GetTimeDomain();
109   if (time_domain_now->find(time_domain) == time_domain_now->end())
110     time_domain_now->insert(std::make_pair(time_domain, time_domain->Now()));
111   queue->ReclaimMemory(time_domain_now->at(time_domain));
112   queue->delayed_work_queue()->RemoveAllCanceledTasksFromFront();
113   queue->immediate_work_queue()->RemoveAllCanceledTasksFromFront();
114 }
115 
InitializeMetricRecordingSettings(bool randomised_sampling_enabled)116 SequenceManager::MetricRecordingSettings InitializeMetricRecordingSettings(
117     bool randomised_sampling_enabled) {
118   if (!randomised_sampling_enabled)
119     return SequenceManager::MetricRecordingSettings(0);
120   bool records_cpu_time_for_each_task =
121       base::RandDouble() < kThreadSamplingRateForRecordingCPUTime;
122   return SequenceManager::MetricRecordingSettings(
123       records_cpu_time_for_each_task ? 1
124                                      : kTaskSamplingRateForRecordingCPUTime);
125 }
126 
127 // Writes |address| in hexadecimal ("0x11223344") form starting from |output|
128 // and moving backwards in memory. Returns a pointer to the first digit of the
129 // result. Does *not* NUL-terminate the number.
130 #if !defined(OS_NACL)
// Writes |address| as "0x..." (uppercase hex) ending at |output| and growing
// towards lower addresses. Returns a pointer to the leading '0'. Does *not*
// NUL-terminate the number. The caller must guarantee enough room below
// |output| (at most 2 + 2 * sizeof(uintptr_t) characters).
char* PrependHexAddress(char* output, const void* address) {
  static const char kHexChars[] = "0123456789ABCDEF";
  uintptr_t remaining = reinterpret_cast<uintptr_t>(address);
  // Emit at least one digit even for a null address ("0x0").
  do {
    *output-- = kHexChars[remaining & 0xF];
    remaining >>= 4;
  } while (remaining != 0);
  *output-- = 'x';
  *output = '0';
  return output;
}
142 #endif  // !defined(OS_NACL)
143 
144 }  // namespace
145 
// RAII handle representing pending native (non-SequenceManager) work at a
// given priority. While alive, the priority sits in |pending_native_work|,
// which the task-selection code consults to decide whether to yield to the
// native loop.
class SequenceManagerImpl::NativeWorkHandleImpl final
    : public NativeWorkHandle {
 public:
  NativeWorkHandleImpl(SequenceManagerImpl* sequence_manager,
                       TaskQueue::QueuePriority priority)
      : sequence_manager_(sequence_manager->GetWeakPtr()), priority_(priority) {
    TRACE_EVENT_NESTABLE_ASYNC_BEGIN1("sequence_manager", "NativeWork", this,
                                      "priority",
                                      TaskQueue::PriorityToString(priority_));
    sequence_manager_->main_thread_only().pending_native_work.insert(priority_);
  }

  ~NativeWorkHandleImpl() final {
    TRACE_EVENT_NESTABLE_ASYNC_END0("sequence_manager", "NativeWork", this);
    // The SequenceManager may be gone by now; |sequence_manager_| is a
    // WeakPtr, so destruction after the manager is a no-op.
    if (!sequence_manager_)
      return;
    // Sample the effective priority before erasing our entry so we can tell
    // whether removing it changed the overall pending-native-work priority.
    TaskQueue::QueuePriority prev_priority = effective_priority();
    sequence_manager_->main_thread_only().pending_native_work.erase(priority_);
    // We should always have at least one instance of pending native work. By
    // default it is of the lowest priority, which doesn't cause SequenceManager
    // to yield.
    DCHECK_GE(sequence_manager_->main_thread_only().pending_native_work.size(),
              1u);
    // If the effective priority dropped, the scheduler may now run work it
    // was previously yielding for, so kick it.
    if (prev_priority != effective_priority())
      sequence_manager_->ScheduleWork();
  }

  // The first element of the ordered |pending_native_work| set — i.e. the
  // strongest currently-pending native priority.
  TaskQueue::QueuePriority effective_priority() const {
    return *sequence_manager_->main_thread_only().pending_native_work.begin();
  }

 private:
  WeakPtr<SequenceManagerImpl> sequence_manager_;
  const TaskQueue::QueuePriority priority_;
};
181 
182 // static
SequenceManagerImpl* SequenceManagerImpl::GetCurrent() {
  // Set in CompleteInitializationOnBoundThread() and cleared in the
  // destructor; null when no SequenceManagerImpl owns this thread.
  return GetTLSSequenceManagerImpl()->Get();
}
186 
SequenceManagerImpl::SequenceManagerImpl(
    std::unique_ptr<internal::ThreadController> controller,
    SequenceManager::Settings settings)
    // NOTE: initializer order is load-bearing: |associated_thread_| is read
    // from |controller| before it is moved into |controller_|, and
    // |settings_| must be initialized before the members derived from it.
    : associated_thread_(controller->GetAssociatedThread()),
      controller_(std::move(controller)),
      settings_(std::move(settings)),
      metric_recording_settings_(InitializeMetricRecordingSettings(
          settings_.randomised_sampling_enabled)),
      add_queue_time_to_tasks_(settings_.add_queue_time_to_tasks),

      empty_queues_to_reload_(associated_thread_),
      memory_corruption_sentinel_(kMemoryCorruptionSentinelValue),
      main_thread_only_(associated_thread_, settings_) {
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
  main_thread_only().selector.SetTaskQueueSelectorObserver(this);

  // Schedule the first periodic memory reclaim.
  main_thread_only().next_time_to_reclaim_memory =
      settings_.clock->NowTicks() + kReclaimMemoryInterval;

  RegisterTimeDomain(main_thread_only().real_time_domain.get());

  controller_->SetSequencedTaskSource(this);
}
211 
SequenceManagerImpl::~SequenceManagerImpl() {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);

#if defined(OS_IOS)
  // On iOS the UI pump was attached in BindToMessagePump(); detach it before
  // tearing the rest down.
  if (settings_.message_loop_type == MessagePumpType::UI &&
      associated_thread_->IsBound()) {
    controller_->DetachFromMessagePump();
  }
#endif

  // Make sure no Task is running as given that RunLoop does not support the
  // Delegate being destroyed from a Task and
  // ThreadControllerWithMessagePumpImpl does not support being destroyed from a
  // Task. If we are using a ThreadControllerImpl (i.e. no pump) destruction is
  // fine
  DCHECK(!controller_->GetBoundMessagePump() ||
         main_thread_only().task_execution_stack.empty());

  // Unregister every remaining queue from the selector before clearing the
  // bookkeeping containers below.
  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
    main_thread_only().selector.RemoveQueue(queue);
    queue->UnregisterTaskQueue();
  }

  // TODO(altimin): restore default task runner automatically when
  // ThreadController is destroyed.
  controller_->RestoreDefaultTaskRunner();

  main_thread_only().active_queues.clear();
  main_thread_only().queues_to_gracefully_shutdown.clear();
  main_thread_only().selector.SetTaskQueueSelectorObserver(nullptr);

  // In some tests a NestingObserver may not have been registered.
  if (main_thread_only().nesting_observer_registered_)
    controller_->RemoveNestingObserver(this);

  // Let interested parties have one last shot at accessing this.
  for (auto& observer : main_thread_only().destruction_observers)
    observer.WillDestroyCurrentMessageLoop();

  // OK, now make it so that no one can find us.
  if (GetMessagePump()) {
    DCHECK_EQ(this, GetTLSSequenceManagerImpl()->Get());
    GetTLSSequenceManagerImpl()->Set(nullptr);
  }
}
259 
SequenceManagerImpl::MainThreadOnly::MainThreadOnly(
    const scoped_refptr<AssociatedThreadId>& associated_thread,
    const SequenceManager::Settings& settings)
    : selector(associated_thread, settings),
      real_time_domain(new internal::RealTimeDomain()) {
  // Seed the per-manager RNG only when sampling is enabled; presumably
  // consumed by the task-sampling logic elsewhere in this file — confirm at
  // the use sites.
  if (settings.randomised_sampling_enabled) {
    random_generator = std::mt19937_64(RandUint64());
    uniform_distribution = std::uniform_real_distribution<double>(0.0, 1.0);
  }
}

SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default;
272 
273 // static
274 std::unique_ptr<ThreadControllerImpl>
CreateThreadControllerImplForCurrentThread(const TickClock * clock)275 SequenceManagerImpl::CreateThreadControllerImplForCurrentThread(
276     const TickClock* clock) {
277   auto* sequence_manager = GetTLSSequenceManagerImpl()->Get();
278   return ThreadControllerImpl::Create(sequence_manager, clock);
279 }
280 
281 // static
CreateOnCurrentThread(SequenceManager::Settings settings)282 std::unique_ptr<SequenceManagerImpl> SequenceManagerImpl::CreateOnCurrentThread(
283     SequenceManager::Settings settings) {
284   auto thread_controller =
285       CreateThreadControllerImplForCurrentThread(settings.clock);
286   std::unique_ptr<SequenceManagerImpl> manager(new SequenceManagerImpl(
287       std::move(thread_controller), std::move(settings)));
288   manager->BindToCurrentThread();
289   return manager;
290 }
291 
292 // static
CreateUnbound(SequenceManager::Settings settings)293 std::unique_ptr<SequenceManagerImpl> SequenceManagerImpl::CreateUnbound(
294     SequenceManager::Settings settings) {
295   auto thread_controller =
296       ThreadControllerWithMessagePumpImpl::CreateUnbound(settings);
297   return WrapUnique(new SequenceManagerImpl(std::move(thread_controller),
298                                             std::move(settings)));
299 }
300 
// Hands |pump| to the thread controller, finishes thread-local setup, and on
// Android/iOS attaches to the platform-native run loop when applicable.
void SequenceManagerImpl::BindToMessagePump(std::unique_ptr<MessagePump> pump) {
  controller_->BindToCurrentThread(std::move(pump));
  CompleteInitializationOnBoundThread();

  // On Android attach to the native loop when there is one.
#if defined(OS_ANDROID)
  if (settings_.message_loop_type == MessagePumpType::UI ||
      settings_.message_loop_type == MessagePumpType::JAVA) {
    controller_->AttachToMessagePump();
  }
#endif

  // On iOS attach to the native loop when there is one.
#if defined(OS_IOS)
  if (settings_.message_loop_type == MessagePumpType::UI) {
    controller_->AttachToMessagePump();
  }
#endif
}
320 
// Binds this SequenceManager (without a pump) to the calling thread.
void SequenceManagerImpl::BindToCurrentThread() {
  associated_thread_->BindToCurrentThread();
  CompleteInitializationOnBoundThread();
}
325 
// Binds this SequenceManager and the given |pump| to the calling thread.
void SequenceManagerImpl::BindToCurrentThread(
    std::unique_ptr<MessagePump> pump) {
  associated_thread_->BindToCurrentThread();
  BindToMessagePump(std::move(pump));
}
331 
332 scoped_refptr<SequencedTaskRunner>
GetTaskRunnerForCurrentTask()333 SequenceManagerImpl::GetTaskRunnerForCurrentTask() {
334   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
335   if (main_thread_only().task_execution_stack.empty())
336     return nullptr;
337   return main_thread_only()
338       .task_execution_stack.back()
339       .pending_task.task_runner;
340 }
341 
// Final setup once a thread is known: registers for nesting notifications
// and, when driven by a message pump, publishes this manager in TLS.
void SequenceManagerImpl::CompleteInitializationOnBoundThread() {
  controller_->AddNestingObserver(this);
  main_thread_only().nesting_observer_registered_ = true;
  if (GetMessagePump()) {
    DCHECK(!GetTLSSequenceManagerImpl()->Get())
        << "Can't register a second SequenceManagerImpl on the same thread.";
    GetTLSSequenceManagerImpl()->Set(this);
  }
}
351 
// Adds |time_domain| to the set consulted for delayed work and tells it which
// SequenceManager now owns it.
void SequenceManagerImpl::RegisterTimeDomain(TimeDomain* time_domain) {
  main_thread_only().time_domains.insert(time_domain);
  time_domain->OnRegisterWithSequenceManager(this);
}
356 
// Removes |time_domain| from the set; the domain itself is not notified here.
void SequenceManagerImpl::UnregisterTimeDomain(TimeDomain* time_domain) {
  main_thread_only().time_domains.erase(time_domain);
}
360 
// Accessor for the built-in wall-clock time domain.
TimeDomain* SequenceManagerImpl::GetRealTimeDomain() const {
  return main_thread_only().real_time_domain.get();
}
364 
365 std::unique_ptr<internal::TaskQueueImpl>
CreateTaskQueueImpl(const TaskQueue::Spec & spec)366 SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
367   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
368   TimeDomain* time_domain = spec.time_domain
369                                 ? spec.time_domain
370                                 : main_thread_only().real_time_domain.get();
371   DCHECK(main_thread_only().time_domains.find(time_domain) !=
372          main_thread_only().time_domains.end());
373   std::unique_ptr<internal::TaskQueueImpl> task_queue =
374       std::make_unique<internal::TaskQueueImpl>(this, time_domain, spec);
375   main_thread_only().active_queues.insert(task_queue.get());
376   main_thread_only().selector.AddQueue(task_queue.get());
377   return task_queue;
378 }
379 
// Thread-safe setter; stored with a relaxed (no-barrier) atomic write since
// it is read from other threads via GetAddQueueTimeToTasks().
void SequenceManagerImpl::SetAddQueueTimeToTasks(bool enable) {
  base::subtle::NoBarrier_Store(&add_queue_time_to_tasks_, enable ? 1 : 0);
}
383 
// Thread-safe getter paired with SetAddQueueTimeToTasks() above.
bool SequenceManagerImpl::GetAddQueueTimeToTasks() {
  return base::subtle::NoBarrier_Load(&add_queue_time_to_tasks_);
}
387 
// Sets the (single, main-thread-only) observer; pass null to clear.
void SequenceManagerImpl::SetObserver(Observer* observer) {
  main_thread_only().observer = observer;
}
391 
// Takes ownership of |task_queue| and parks it in the graceful-shutdown map,
// keyed by its raw pointer, until it can be torn down safely.
void SequenceManagerImpl::ShutdownTaskQueueGracefully(
    std::unique_ptr<internal::TaskQueueImpl> task_queue) {
  main_thread_only().queues_to_gracefully_shutdown[task_queue.get()] =
      std::move(task_queue);
}
397 
// Fully unregisters |task_queue|: removes it from the selector, stops new
// posts, and defers its deletion until it is safe.
void SequenceManagerImpl::UnregisterTaskQueueImpl(
    std::unique_ptr<internal::TaskQueueImpl> task_queue) {
  TRACE_EVENT1("sequence_manager", "SequenceManagerImpl::UnregisterTaskQueue",
               "queue_name", task_queue->GetName());
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  main_thread_only().selector.RemoveQueue(task_queue.get());

  // After UnregisterTaskQueue returns no new tasks can be posted.
  // It's important to call it first to avoid race condition between removing
  // the task queue from various lists here and adding it to the same lists
  // when posting a task.
  task_queue->UnregisterTaskQueue();

  // Add |task_queue| to |main_thread_only().queues_to_delete| so we can prevent
  // it from being freed while any of our structures hold a raw pointer to
  // it.
  main_thread_only().active_queues.erase(task_queue.get());
  main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
}
418 
// Hands |task_queue| an atomic flag which, when set, causes
// ReloadEmptyWorkQueues() to invoke ReloadEmptyImmediateWorkQueue on it.
AtomicFlagSet::AtomicFlag
SequenceManagerImpl::GetFlagToRequestReloadForEmptyQueue(
    TaskQueueImpl* task_queue) {
  return empty_queues_to_reload_.AddFlag(BindRepeating(
      &TaskQueueImpl::ReloadEmptyImmediateWorkQueue, Unretained(task_queue)));
}
425 
void SequenceManagerImpl::ReloadEmptyWorkQueues() const {
  // There are two cases where a queue needs reloading.  First, it might be
  // completely empty and we've just posted a task (this method handles that
  // case). Secondly if the work queue becomes empty when calling
  // WorkQueue::TakeTaskFromWorkQueue (handled there).
  //
  // Invokes callbacks created by GetFlagToRequestReloadForEmptyQueue above.
  empty_queues_to_reload_.RunActiveCallbacks();
}
435 
MoveReadyDelayedTasksToWorkQueues(LazyNow * lazy_now)436 void SequenceManagerImpl::MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now) {
437   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
438                "SequenceManagerImpl::MoveReadyDelayedTasksToWorkQueues");
439 
440   for (TimeDomain* time_domain : main_thread_only().time_domains) {
441     if (time_domain == main_thread_only().real_time_domain.get()) {
442       time_domain->MoveReadyDelayedTasksToWorkQueues(lazy_now);
443     } else {
444       LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
445       time_domain->MoveReadyDelayedTasksToWorkQueues(&time_domain_lazy_now);
446     }
447   }
448 }
449 
// NestingObserver: a nested RunLoop has started on this thread.
void SequenceManagerImpl::OnBeginNestedRunLoop() {
  main_thread_only().nesting_depth++;
  if (main_thread_only().observer)
    main_thread_only().observer->OnBeginNestedRunLoop();
}
455 
// NestingObserver: a nested RunLoop has exited on this thread.
void SequenceManagerImpl::OnExitNestedRunLoop() {
  main_thread_only().nesting_depth--;
  DCHECK_GE(main_thread_only().nesting_depth, 0);
  if (main_thread_only().nesting_depth == 0) {
    // While we were nested some non-nestable tasks may have been deferred.
    // Each one is re-inserted at the *front* of its original work queue.
    // Because we drain |non_nestable_task_queue| from the back (most recently
    // deferred first), each front-insertion lands in front of the newer
    // tasks already requeued, restoring the tasks' original FIFO order.
    while (!main_thread_only().non_nestable_task_queue.empty()) {
      internal::TaskQueueImpl::DeferredNonNestableTask& non_nestable_task =
          main_thread_only().non_nestable_task_queue.back();
      auto* const task_queue = non_nestable_task.task_queue;
      task_queue->RequeueDeferredNonNestableTask(std::move(non_nestable_task));
      main_thread_only().non_nestable_task_queue.pop_back();
    }
  }
  if (main_thread_only().observer)
    main_thread_only().observer->OnExitNestedRunLoop();
}
474 
// Asks the thread controller to schedule a DoWork pass.
void SequenceManagerImpl::ScheduleWork() {
  controller_->ScheduleWork();
}
478 
// Forwards the next delayed wake-up time to the thread controller.
void SequenceManagerImpl::SetNextDelayedDoWork(LazyNow* lazy_now,
                                               TimeTicks run_time) {
  controller_->SetNextDelayedDoWork(lazy_now, run_time);
}
483 
484 namespace {
485 
// Maps a queue priority to the static trace-event name used for a task run
// at that priority. kQueuePriorityCount is a sentinel, not a real priority.
const char* RunTaskTraceNameForPriority(TaskQueue::QueuePriority priority) {
  switch (priority) {
    case TaskQueue::QueuePriority::kControlPriority:
      return "RunControlPriorityTask";
    case TaskQueue::QueuePriority::kHighestPriority:
      return "RunHighestPriorityTask";
    case TaskQueue::QueuePriority::kVeryHighPriority:
      return "RunVeryHighPriorityTask";
    case TaskQueue::QueuePriority::kHighPriority:
      return "RunHighPriorityTask";
    case TaskQueue::QueuePriority::kNormalPriority:
      return "RunNormalPriorityTask";
    case TaskQueue::QueuePriority::kLowPriority:
      return "RunLowPriorityTask";
    case TaskQueue::QueuePriority::kBestEffortPriority:
      return "RunBestEffortPriorityTask";
    case TaskQueue::QueuePriority::kQueuePriorityCount:
      NOTREACHED();
      return nullptr;
  }
}
507 
508 }  // namespace
509 
// Public entry point for task selection: picks the next task (if any) and
// opens the trace-event pair that DidRunTask() later closes.
Task* SequenceManagerImpl::SelectNextTask(SelectTaskOption option) {
  Task* task = SelectNextTaskImpl(option);
  if (!task)
    return nullptr;

  // SelectNextTaskImpl pushed the chosen task onto the execution stack; it
  // is therefore the stack's top entry.
  ExecutingTask& executing_task =
      *main_thread_only().task_execution_stack.rbegin();

  // It's important that there are no active trace events here which will
  // terminate before we finish executing the task.
  TRACE_EVENT_BEGIN1("sequence_manager",
                     RunTaskTraceNameForPriority(executing_task.priority),
                     "task_type", executing_task.task_type);
  TRACE_EVENT_BEGIN0("sequence_manager", executing_task.task_queue_name);

  return task;
}
527 
528 #if DCHECK_IS_ON() && !defined(OS_NACL)
// Debug-only logging of the task about to run, according to the
// task_execution_logging mode configured in Settings.
void SequenceManagerImpl::LogTaskDebugInfo(
    const WorkQueue* selected_work_queue) const {
  const Task* task = selected_work_queue->GetFrontTask();
  switch (settings_.task_execution_logging) {
    case Settings::TaskLogging::kNone:
      break;

    case Settings::TaskLogging::kEnabled:
      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << task->posted_from.ToString();
      break;

    case Settings::TaskLogging::kEnabledWithBacktrace: {
      // Build a backtrace of [posting PC, recorded task backtrace...],
      // trimming trailing null frames before printing.
      std::array<const void*, PendingTask::kTaskBacktraceLength + 1> task_trace;
      task_trace[0] = task->posted_from.program_counter();
      ranges::copy(task->task_backtrace, task_trace.begin() + 1);
      size_t length = 0;
      while (length < task_trace.size() && task_trace[length])
        ++length;
      if (length == 0)
        break;
      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << debug::StackTrace(task_trace.data(), length);
      break;
    }

    case Settings::TaskLogging::kReorderedOnly: {
      // Only log when this task ran ahead of lower-priority tasks that were
      // posted earlier (i.e. an observable reordering happened).
      std::vector<const Task*> skipped_tasks;
      main_thread_only().selector.CollectSkippedOverLowerPriorityTasks(
          selected_work_queue, &skipped_tasks);

      if (skipped_tasks.empty())
        break;

      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << task->posted_from.ToString();

      for (const Task* skipped_task : skipped_tasks) {
        LOG(INFO) << "# (skipped over) "
                  << static_cast<uint64_t>(skipped_task->enqueue_order()) << " "
                  << skipped_task->posted_from.ToString();
      }
    }
  }
}
580 #endif  // DCHECK_IS_ON() && !defined(OS_NACL)
581 
// Core selection loop: returns the next runnable task, or null when there is
// nothing eligible (no work, or we should yield to pending native work). On
// success the task has been pushed onto the execution stack and observers
// have been notified via NotifyWillProcessTask.
Task* SequenceManagerImpl::SelectNextTaskImpl(SelectTaskOption option) {
  // Memory-corruption canary (see kMemoryCorruptionSentinelValue).
  CHECK(Validate());

  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::SelectNextTask");

  // Pull in freshly-posted immediate work and any delayed tasks now due.
  ReloadEmptyWorkQueues();
  LazyNow lazy_now(controller_->GetClock());
  MoveReadyDelayedTasksToWorkQueues(&lazy_now);

  // If we sampled now, check if it's time to reclaim memory next time we go
  // idle.
  if (lazy_now.has_value() &&
      lazy_now.Now() >= main_thread_only().next_time_to_reclaim_memory) {
    main_thread_only().memory_reclaim_scheduled = true;
  }

  while (true) {
    internal::WorkQueue* work_queue =
        main_thread_only().selector.SelectWorkQueueToService(option);
    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
        TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
        this,
        AsValueWithSelectorResultForTracing(work_queue,
                                            /* force_verbose */ false));

    if (!work_queue)
      return nullptr;

    // If the head task was canceled, remove it and run the selector again.
    if (UNLIKELY(work_queue->RemoveAllCanceledTasksFromFront()))
      continue;

    if (UNLIKELY(work_queue->GetFrontTask()->nestable ==
                     Nestable::kNonNestable &&
                 main_thread_only().nesting_depth > 0)) {
      // Defer non-nestable work. NOTE these tasks can be arbitrarily delayed so
      // the additional delay should not be a problem.
      // Note because we don't delete queues while nested, it's perfectly OK to
      // store the raw pointer for |queue| here.
      internal::TaskQueueImpl::DeferredNonNestableTask deferred_task{
          work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
          work_queue->queue_type()};
      main_thread_only().non_nestable_task_queue.push_back(
          std::move(deferred_task));
      continue;
    }

    // Yield to the native loop if pending native work outranks this task.
    if (UNLIKELY(!ShouldRunTaskOfPriority(
            work_queue->task_queue()->GetQueuePriority()))) {
      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                   "SequenceManager.YieldToNative");
      return nullptr;
    }

#if DCHECK_IS_ON() && !defined(OS_NACL)
    LogTaskDebugInfo(work_queue);
#endif  // DCHECK_IS_ON() && !defined(OS_NACL)

    main_thread_only().task_execution_stack.emplace_back(
        work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
        InitializeTaskTiming(work_queue->task_queue()));

    ExecutingTask& executing_task =
        *main_thread_only().task_execution_stack.rbegin();
    NotifyWillProcessTask(&executing_task, &lazy_now);

    return &executing_task.pending_task;
  }
}
653 
// A task may run only if its priority is at least as strong as the strongest
// pending native work (the first element of the ordered set).
bool SequenceManagerImpl::ShouldRunTaskOfPriority(
    TaskQueue::QueuePriority priority) const {
  return priority <= *main_thread_only().pending_native_work.begin();
}
658 
// Called after a task returned: closes the trace-event pair opened by
// SelectNextTask (in reverse order), notifies observers, and pops the task
// off the execution stack.
void SequenceManagerImpl::DidRunTask() {
  LazyNow lazy_now(controller_->GetClock());
  ExecutingTask& executing_task =
      *main_thread_only().task_execution_stack.rbegin();

  TRACE_EVENT_END0("sequence_manager", executing_task.task_queue_name);
  TRACE_EVENT_END0("sequence_manager",
                   RunTaskTraceNameForPriority(executing_task.priority));

  NotifyDidProcessTask(&executing_task, &lazy_now);
  main_thread_only().task_execution_stack.pop_back();

  // Queue cleanup is deferred while nested run loops are active.
  if (main_thread_only().nesting_depth == 0)
    CleanUpQueues();
}
674 
// Returns how long until the next task is due: TimeDelta() when immediate
// work is ready, a positive delay for delayed-only work, or TimeDelta::Max()
// when there is nothing pending.
TimeDelta SequenceManagerImpl::DelayTillNextTask(
    LazyNow* lazy_now,
    SelectTaskOption option) const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  if (auto priority =
          main_thread_only().selector.GetHighestPendingPriority(option)) {
    // If the selector has non-empty queues we trivially know there is immediate
    // work to be done. However we may want to yield to native work if it is
    // more important.
    if (UNLIKELY(!ShouldRunTaskOfPriority(*priority)))
      return GetDelayTillNextDelayedTask(lazy_now, option);
    return TimeDelta();
  }

  // There may be some incoming immediate work which we haven't accounted for.
  // NB ReloadEmptyWorkQueues involves a memory barrier, so it's fastest to not
  // do this always.
  ReloadEmptyWorkQueues();

  if (auto priority =
          main_thread_only().selector.GetHighestPendingPriority(option)) {
    if (UNLIKELY(!ShouldRunTaskOfPriority(*priority)))
      return GetDelayTillNextDelayedTask(lazy_now, option);
    return TimeDelta();
  }

  // Otherwise we need to find the shortest delay, if any.  NB we don't need to
  // call MoveReadyDelayedTasksToWorkQueues because it's assumed
  // DelayTillNextTask will return TimeDelta() if the delayed task is due to
  // run now.
  return GetDelayTillNextDelayedTask(lazy_now, option);
}
708 
GetDelayTillNextDelayedTask(LazyNow * lazy_now,SelectTaskOption option) const709 TimeDelta SequenceManagerImpl::GetDelayTillNextDelayedTask(
710     LazyNow* lazy_now,
711     SelectTaskOption option) const {
712   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
713 
714   if (option == SelectTaskOption::kSkipDelayedTask)
715     return TimeDelta::Max();
716 
717   TimeDelta delay_till_next_task = TimeDelta::Max();
718   for (TimeDomain* time_domain : main_thread_only().time_domains) {
719     Optional<TimeDelta> delay = time_domain->DelayTillNextTask(lazy_now);
720     if (!delay)
721       continue;
722 
723     if (*delay < delay_till_next_task)
724       delay_till_next_task = *delay;
725   }
726   return delay_till_next_task;
727 }
728 
HasPendingHighResolutionTasks()729 bool SequenceManagerImpl::HasPendingHighResolutionTasks() {
730   for (TimeDomain* time_domain : main_thread_only().time_domains) {
731     if (time_domain->has_pending_high_resolution_tasks())
732       return true;
733   }
734   return false;
735 }
736 
OnSystemIdle()737 bool SequenceManagerImpl::OnSystemIdle() {
738   bool have_work_to_do = false;
739   for (TimeDomain* time_domain : main_thread_only().time_domains) {
740     if (time_domain->MaybeFastForwardToNextTask(
741             controller_->ShouldQuitRunLoopWhenIdle())) {
742       have_work_to_do = true;
743     }
744   }
745   if (!have_work_to_do)
746     MaybeReclaimMemory();
747   return have_work_to_do;
748 }
749 
// Forwards the about-to-queue notification to the thread controller, which
// receives the task and the name of the queue it is destined for.
void SequenceManagerImpl::WillQueueTask(Task* pending_task,
                                        const char* task_queue_name) {
  controller_->WillQueueTask(pending_task, task_queue_name);
}
754 
InitializeTaskTiming(internal::TaskQueueImpl * task_queue)755 TaskQueue::TaskTiming SequenceManagerImpl::InitializeTaskTiming(
756     internal::TaskQueueImpl* task_queue) {
757   bool records_wall_time =
758       ShouldRecordTaskTiming(task_queue) == TimeRecordingPolicy::DoRecord;
759   bool records_thread_time = records_wall_time && ShouldRecordCPUTimeForTask();
760   return TaskQueue::TaskTiming(records_wall_time, records_thread_time);
761 }
762 
ShouldRecordTaskTiming(const internal::TaskQueueImpl * task_queue)763 TimeRecordingPolicy SequenceManagerImpl::ShouldRecordTaskTiming(
764     const internal::TaskQueueImpl* task_queue) {
765   if (task_queue->RequiresTaskTiming())
766     return TimeRecordingPolicy::DoRecord;
767   if (main_thread_only().nesting_depth == 0 &&
768       main_thread_only().task_time_observers.might_have_observers()) {
769     return TimeRecordingPolicy::DoRecord;
770   }
771   return TimeRecordingPolicy::DoNotRecord;
772 }
773 
// Pre-task bookkeeping: records crash keys, flags quiescence-monitored
// queues, starts task timing when required, then fans out WillProcessTask
// notifications to the manager-level task observers, the queue's own
// observers and (only at top nesting level, for timed tasks) the task time
// observers. Notification order is deliberate and must be preserved.
void SequenceManagerImpl::NotifyWillProcessTask(ExecutingTask* executing_task,
                                                LazyNow* time_before_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyWillProcessTaskObservers");

  RecordCrashKeys(executing_task->pending_task);

  if (executing_task->task_queue->GetQuiescenceMonitored())
    main_thread_only().task_was_run_on_quiescence_monitored_queue = true;

  TimeRecordingPolicy recording_policy =
      ShouldRecordTaskTiming(executing_task->task_queue);
  if (recording_policy == TimeRecordingPolicy::DoRecord)
    executing_task->task_timing.RecordTaskStart(time_before_task);

  // Queues can opt out of all observer notifications.
  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  // Computed once so every observer sees a consistent answer.
  const bool was_blocked_or_low_priority =
      executing_task->task_queue->WasBlockedOrLowPriority(
          executing_task->pending_task.enqueue_order());

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers) {
      observer.WillProcessTask(executing_task->pending_task,
                               was_blocked_or_low_priority);
    }
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyWillProcessTask");
    executing_task->task_queue->NotifyWillProcessTask(
        executing_task->pending_task, was_blocked_or_low_priority);
  }

  // Time observers and OnTaskStarted only apply to timed tasks.
  if (recording_policy != TimeRecordingPolicy::DoRecord)
    return;

  // Task time observers are not notified inside nested run loops.
  if (main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers)
      observer.WillProcessTask(executing_task->task_timing.start_time());
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskStarted");
    executing_task->task_queue->OnTaskStarted(executing_task->pending_task,
                                              executing_task->task_timing);
  }
}
829 
// Post-task bookkeeping: completes task timing and fans out DidProcessTask
// notifications to the queue, the task observers and (at top nesting level)
// the task time observers, then emits a "LongTask" trace event for unusually
// long tasks. Notification order mirrors NotifyWillProcessTask and must be
// preserved.
void SequenceManagerImpl::NotifyDidProcessTask(ExecutingTask* executing_task,
                                               LazyNow* time_after_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyDidProcessTaskObservers");
  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  TaskQueue::TaskTiming& task_timing = executing_task->task_timing;

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskCompleted");
    // Only tasks whose wall time was recorded in NotifyWillProcessTask get
    // the completion callback; OnTaskCompleted receives the mutable timing
    // record and the post-task clock.
    if (task_timing.has_wall_time()) {
      executing_task->task_queue->OnTaskCompleted(
          executing_task->pending_task, &task_timing, time_after_task);
    }
  }

  TimeRecordingPolicy recording_policy =
      ShouldRecordTaskTiming(executing_task->task_queue);
  // Record end time ASAP to avoid bias due to the overhead of observers.
  if (recording_policy == TimeRecordingPolicy::DoRecord)
    task_timing.RecordTaskEnd(time_after_task);

  // Task time observers are skipped inside nested run loops.
  if (task_timing.has_wall_time() && main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers) {
      observer.DidProcessTask(task_timing.start_time(), task_timing.end_time());
    }
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers)
      observer.DidProcessTask(executing_task->pending_task);
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyDidProcessTask");
    executing_task->task_queue->NotifyDidProcessTask(
        executing_task->pending_task);
  }

  // TODO(altimin): Move this back to blink.
  // Emit a trace event for long tasks at top nesting level so they are
  // visible in traces without extra instrumentation.
  if (task_timing.has_wall_time() &&
      recording_policy == TimeRecordingPolicy::DoRecord &&
      task_timing.wall_duration() > kLongTaskTraceEventThreshold &&
      main_thread_only().nesting_depth == 0) {
    TRACE_EVENT_INSTANT1("blink", "LongTask", TRACE_EVENT_SCOPE_THREAD,
                         "duration", task_timing.wall_duration().InSecondsF());
  }
}
885 
// Sets how many tasks the controller may run per work batch. Must be >= 1.
void SequenceManagerImpl::SetWorkBatchSize(int work_batch_size) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK_GE(work_batch_size, 1);
  controller_->SetWorkBatchSize(work_batch_size);
}
891 
// Forwards the timer-slack policy to the thread controller.
void SequenceManagerImpl::SetTimerSlack(TimerSlack timer_slack) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  controller_->SetTimerSlack(timer_slack);
}
896 
// Registers an observer notified around every task. Main thread only.
void SequenceManagerImpl::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_observers.AddObserver(task_observer);
}
901 
// Unregisters a previously added task observer. Main thread only.
void SequenceManagerImpl::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_observers.RemoveObserver(task_observer);
}
906 
// Registers an observer of per-task start/end times. Main thread only.
void SequenceManagerImpl::AddTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_time_observers.AddObserver(task_time_observer);
}
912 
// Unregisters a previously added task time observer. Main thread only.
void SequenceManagerImpl::RemoveTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_time_observers.RemoveObserver(task_time_observer);
}
918 
GetAndClearSystemIsQuiescentBit()919 bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
920   bool task_was_run =
921       main_thread_only().task_was_run_on_quiescence_monitored_queue;
922   main_thread_only().task_was_run_on_quiescence_monitored_queue = false;
923   return !task_was_run;
924 }
925 
// Hands out the next monotonically increasing enqueue order.
EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
  return enqueue_order_generator_.GenerateNext();
}
929 
// Wraps the state snapshot in a ConvertableToTraceFormat so trace macros can
// serialize it lazily, only if the trace is actually consumed.
std::unique_ptr<trace_event::ConvertableToTraceFormat>
SequenceManagerImpl::AsValueWithSelectorResultForTracing(
    internal::WorkQueue* selected_work_queue,
    bool force_verbose) const {
  return std::make_unique<TracedBaseValue>(
      AsValueWithSelectorResult(selected_work_queue, force_verbose));
}
937 
// Builds a dictionary snapshot of the manager's state (queues in all three
// lifetime stages, selector state, time domains) for tracing and debugging.
// |selected_work_queue| may be null when no queue was selected.
Value SequenceManagerImpl::AsValueWithSelectorResult(
    internal::WorkQueue* selected_work_queue,
    bool force_verbose) const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  // Sampled once so all queues report against the same timestamp.
  TimeTicks now = NowTicks();
  Value state(Value::Type::DICTIONARY);
  Value active_queues(Value::Type::LIST);
  for (auto* const queue : main_thread_only().active_queues)
    active_queues.Append(queue->AsValue(now, force_verbose));
  state.SetKey("active_queues", std::move(active_queues));
  Value shutdown_queues(Value::Type::LIST);
  for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
    shutdown_queues.Append(pair.first->AsValue(now, force_verbose));
  state.SetKey("queues_to_gracefully_shutdown", std::move(shutdown_queues));
  Value queues_to_delete(Value::Type::LIST);
  for (const auto& pair : main_thread_only().queues_to_delete)
    queues_to_delete.Append(pair.first->AsValue(now, force_verbose));
  state.SetKey("queues_to_delete", std::move(queues_to_delete));
  state.SetKey("selector", main_thread_only().selector.AsValue());
  if (selected_work_queue) {
    state.SetStringKey("selected_queue",
                       selected_work_queue->task_queue()->GetName());
    state.SetStringKey("work_queue_name", selected_work_queue->name());
  }
  // NOTE(review): dereferences pending_native_work.begin() without an
  // emptiness check — presumably the set always contains a sentinel entry;
  // confirm against the member's initialization.
  state.SetStringKey("native_work_priority",
                     TaskQueue::PriorityToString(
                         *main_thread_only().pending_native_work.begin()));
  Value time_domains(Value::Type::LIST);
  for (auto* time_domain : main_thread_only().time_domains)
    time_domains.Append(time_domain->AsValue());
  state.SetKey("time_domains", std::move(time_domains));
  return state;
}
971 
// Called when |queue| transitions to enabled; wakes the thread only if the
// queue actually has runnable work.
void SequenceManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK(queue->IsQueueEnabled());
  // Only schedule DoWork if there's something to do.
  if (queue->HasTaskToRunImmediately() && !queue->BlockedByFence())
    ScheduleWork();
}
979 
MaybeReclaimMemory()980 void SequenceManagerImpl::MaybeReclaimMemory() {
981   if (!main_thread_only().memory_reclaim_scheduled)
982     return;
983 
984   TRACE_EVENT0("sequence_manager", "SequenceManagerImpl::MaybeReclaimMemory");
985   ReclaimMemory();
986 
987   // To avoid performance regressions we only want to do this every so often.
988   main_thread_only().next_time_to_reclaim_memory =
989       NowTicks() + kReclaimMemoryInterval;
990   main_thread_only().memory_reclaim_scheduled = false;
991 }
992 
ReclaimMemory()993 void SequenceManagerImpl::ReclaimMemory() {
994   std::map<TimeDomain*, TimeTicks> time_domain_now;
995   for (auto* const queue : main_thread_only().active_queues)
996     ReclaimMemoryFromQueue(queue, &time_domain_now);
997   for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
998     ReclaimMemoryFromQueue(pair.first, &time_domain_now);
999 }
1000 
// Destroys gracefully-shutting-down queues that have fully drained and
// releases queues marked for deletion. Invoked once the run-loop nesting
// depth returns to zero.
void SequenceManagerImpl::CleanUpQueues() {
  for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin();
       it != main_thread_only().queues_to_gracefully_shutdown.end();) {
    if (it->first->IsEmpty()) {
      UnregisterTaskQueueImpl(std::move(it->second));
      main_thread_only().active_queues.erase(it->first);
      // Post-increment keeps the iterator valid across erase().
      main_thread_only().queues_to_gracefully_shutdown.erase(it++);
    } else {
      ++it;
    }
  }
  main_thread_only().queues_to_delete.clear();
}
1014 
RemoveAllCanceledTasksFromFrontOfWorkQueues()1015 void SequenceManagerImpl::RemoveAllCanceledTasksFromFrontOfWorkQueues() {
1016   for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
1017     queue->delayed_work_queue()->RemoveAllCanceledTasksFromFront();
1018     queue->immediate_work_queue()->RemoveAllCanceledTasksFromFront();
1019   }
1020 }
1021 
// Returns a weak pointer to this manager; invalidated on destruction.
WeakPtr<SequenceManagerImpl> SequenceManagerImpl::GetWeakPtr() {
  return weak_factory_.GetWeakPtr();
}
1025 
SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner)1026 void SequenceManagerImpl::SetDefaultTaskRunner(
1027     scoped_refptr<SingleThreadTaskRunner> task_runner) {
1028   controller_->SetDefaultTaskRunner(task_runner);
1029 }
1030 
// Returns the clock the controller schedules against.
const TickClock* SequenceManagerImpl::GetTickClock() const {
  return controller_->GetClock();
}
1034 
// Current time as reported by the controller's clock.
TimeTicks SequenceManagerImpl::NowTicks() const {
  return controller_->GetClock()->NowTicks();
}
1038 
ShouldRecordCPUTimeForTask()1039 bool SequenceManagerImpl::ShouldRecordCPUTimeForTask() {
1040   DCHECK(ThreadTicks::IsSupported() ||
1041          !metric_recording_settings_.records_cpu_time_for_some_tasks());
1042   return metric_recording_settings_.records_cpu_time_for_some_tasks() &&
1043          main_thread_only().uniform_distribution(
1044              main_thread_only().random_generator) <
1045              metric_recording_settings_
1046                  .task_sampling_rate_for_recording_cpu_time;
1047 }
1048 
// Read-only access to the metric recording configuration.
const SequenceManager::MetricRecordingSettings&
SequenceManagerImpl::GetMetricRecordingSettings() const {
  return metric_recording_settings_;
}
1053 
1054 // TODO(altimin): Ensure that this removes all pending tasks.
DeletePendingTasks()1055 void SequenceManagerImpl::DeletePendingTasks() {
1056   DCHECK(main_thread_only().task_execution_stack.empty())
1057       << "Tasks should be deleted outside RunLoop";
1058 
1059   for (TaskQueueImpl* task_queue : main_thread_only().active_queues)
1060     task_queue->DeletePendingTasks();
1061   for (const auto& it : main_thread_only().queues_to_gracefully_shutdown)
1062     it.first->DeletePendingTasks();
1063   for (const auto& it : main_thread_only().queues_to_delete)
1064     it.first->DeletePendingTasks();
1065 }
1066 
HasTasks()1067 bool SequenceManagerImpl::HasTasks() {
1068   DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
1069   RemoveAllCanceledTasksFromFrontOfWorkQueues();
1070 
1071   for (TaskQueueImpl* task_queue : main_thread_only().active_queues) {
1072     if (task_queue->HasTasks())
1073       return true;
1074   }
1075   for (const auto& it : main_thread_only().queues_to_gracefully_shutdown) {
1076     if (it.first->HasTasks())
1077       return true;
1078   }
1079   for (const auto& it : main_thread_only().queues_to_delete) {
1080     if (it.first->HasTasks())
1081       return true;
1082   }
1083   return false;
1084 }
1085 
// Message-pump type this manager was configured with.
MessagePumpType SequenceManagerImpl::GetType() const {
  return settings_.message_loop_type;
}
1089 
// Toggles whether the controller may run application tasks.
void SequenceManagerImpl::SetTaskExecutionAllowed(bool allowed) {
  controller_->SetTaskExecutionAllowed(allowed);
}
1093 
// Whether the controller currently permits running application tasks.
bool SequenceManagerImpl::IsTaskExecutionAllowed() const {
  return controller_->IsTaskExecutionAllowed();
}
1097 
#if defined(OS_IOS)
// iOS-only hook: forwards the attach request to the thread controller.
void SequenceManagerImpl::AttachToMessagePump() {
  controller_->AttachToMessagePump();
}
#endif
1103 
// Test-only: true when no work queue has a runnable task after reloading
// pending immediate work and purging canceled tasks.
bool SequenceManagerImpl::IsIdleForTesting() {
  ReloadEmptyWorkQueues();
  RemoveAllCanceledTasksFromFrontOfWorkQueues();
  return !main_thread_only().selector.GetHighestPendingPriority().has_value();
}
1109 
GetPendingTaskCountForTesting() const1110 size_t SequenceManagerImpl::GetPendingTaskCountForTesting() const {
1111   size_t total = 0;
1112   for (internal::TaskQueueImpl* task_queue : main_thread_only().active_queues) {
1113     total += task_queue->GetNumberOfPendingTasks();
1114   }
1115   return total;
1116 }
1117 
// Creates a TaskQueue backed by a freshly registered TaskQueueImpl.
scoped_refptr<TaskQueue> SequenceManagerImpl::CreateTaskQueue(
    const TaskQueue::Spec& spec) {
  return WrapRefCounted(new TaskQueue(CreateTaskQueueImpl(spec), spec));
}
1122 
DescribeAllPendingTasks() const1123 std::string SequenceManagerImpl::DescribeAllPendingTasks() const {
1124   Value value = AsValueWithSelectorResult(nullptr, /* force_verbose */ true);
1125   std::string result;
1126   JSONWriter::Write(value, &result);
1127   return result;
1128 }
1129 
// Creates a scoped handle that marks native work of |priority| as pending
// for the lifetime of the returned object.
std::unique_ptr<NativeWorkHandle> SequenceManagerImpl::OnNativeWorkPending(
    TaskQueue::QueuePriority priority) {
  return std::make_unique<NativeWorkHandleImpl>(this, priority);
}
1134 
// Registers an observer to be notified when this manager is destroyed.
void SequenceManagerImpl::AddDestructionObserver(
    CurrentThread::DestructionObserver* destruction_observer) {
  main_thread_only().destruction_observers.AddObserver(destruction_observer);
}
1139 
// Unregisters a previously added destruction observer.
void SequenceManagerImpl::RemoveDestructionObserver(
    CurrentThread::DestructionObserver* destruction_observer) {
  main_thread_only().destruction_observers.RemoveObserver(destruction_observer);
}
1144 
SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner)1145 void SequenceManagerImpl::SetTaskRunner(
1146     scoped_refptr<SingleThreadTaskRunner> task_runner) {
1147   controller_->SetDefaultTaskRunner(task_runner);
1148 }
1149 
// Returns the controller's current default task runner.
scoped_refptr<SingleThreadTaskRunner> SequenceManagerImpl::GetTaskRunner() {
  return controller_->GetDefaultTaskRunner();
}
1153 
// Whether the calling thread is the one this manager is bound to.
bool SequenceManagerImpl::IsBoundToCurrentThread() const {
  return associated_thread_->IsBoundToCurrentThread();
}
1157 
// The message pump the controller is bound to, if any.
MessagePump* SequenceManagerImpl::GetMessagePump() const {
  return controller_->GetBoundMessagePump();
}
1161 
// True when the configured message-pump type matches |type|.
bool SequenceManagerImpl::IsType(MessagePumpType type) const {
  return settings_.message_loop_type == type;
}
1165 
// Checks the in-object sentinel to detect memory corruption of this
// instance. NOINLINE — presumably so the check remains an identifiable frame
// in crash stacks; confirm before changing.
NOINLINE bool SequenceManagerImpl::Validate() {
  return memory_corruption_sentinel_ == kMemoryCorruptionSentinelValue;
}
1169 
// Allocates the crash key used to report async task-posting stacks. May only
// be called once; a no-op on NaCl, where crash keys are unavailable.
void SequenceManagerImpl::EnableCrashKeys(const char* async_stack_crash_key) {
  DCHECK(!main_thread_only().async_stack_crash_key);
#if !defined(OS_NACL)
  main_thread_only().async_stack_crash_key = debug::AllocateCrashKeyString(
      async_stack_crash_key, debug::CrashKeySize::Size64);
  // The formatting buffer must exactly match the crash key capacity, since
  // RecordCrashKeys formats directly into it.
  static_assert(sizeof(main_thread_only().async_stack_buffer) ==
                    static_cast<size_t>(debug::CrashKeySize::Size64),
                "Async stack buffer size must match crash key size.");
#endif  // OS_NACL
}
1180 
// Writes the posting locations of |pending_task| into the async-stack crash
// key so crash reports can show where the current task came from.
void SequenceManagerImpl::RecordCrashKeys(const PendingTask& pending_task) {
#if !defined(OS_NACL)
  // SetCrashKeyString is a no-op even if the crash key is null, but we'd still
  // have to construct the StringPiece that is passed in.
  if (!main_thread_only().async_stack_crash_key)
    return;

  // Write the async stack trace onto a crash key as whitespace-delimited hex
  // addresses. These will be symbolized by the crash reporting system. With
  // 63 characters we can fit the address of the task that posted the current
  // task and its predecessor. Avoid HexEncode since it incurs a memory
  // allocation and snprintf because it's about 3.5x slower on Android than
  // this.
  //
  // See
  // https://chromium.googlesource.com/chromium/src/+/master/docs/debugging_with_crash_keys.md
  // for instructions for symbolizing these crash keys.
  //
  // TODO(skyostil): Find a way to extract the destination function address
  // from the task.
  // Format right-to-left into the tail of the buffer: newest address first,
  // then a separating space, then the older address.
  size_t max_size = main_thread_only().async_stack_buffer.size();
  char* const buffer = &main_thread_only().async_stack_buffer[0];
  char* const buffer_end = &buffer[max_size - 1];
  char* pos = buffer_end;
  // Leave space for the NUL terminator.
  pos = PrependHexAddress(pos - 1, pending_task.task_backtrace[0]);
  *(--pos) = ' ';
  pos = PrependHexAddress(pos - 1, pending_task.posted_from.program_counter());
  // Formatting must never run off the front of the buffer.
  DCHECK_GE(pos, buffer);
  debug::SetCrashKeyString(main_thread_only().async_stack_crash_key,
                           StringPiece(pos, buffer_end - pos));
#endif  // OS_NACL
}
1214 
currently_executing_task_queue() const1215 internal::TaskQueueImpl* SequenceManagerImpl::currently_executing_task_queue()
1216     const {
1217   if (main_thread_only().task_execution_stack.empty())
1218     return nullptr;
1219   return main_thread_only().task_execution_stack.rbegin()->task_queue;
1220 }
1221 
// Out-of-line definition of the static constexpr member, required for
// ODR-uses under pre-C++17 inline-variable semantics.
constexpr TimeDelta SequenceManagerImpl::kReclaimMemoryInterval;
1223 
1224 }  // namespace internal
1225 }  // namespace sequence_manager
1226 }  // namespace base
1227