1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/task/sequence_manager/work_queue.h"
6
7 #include "base/task/sequence_manager/sequence_manager_impl.h"
8 #include "base/task/sequence_manager/work_queue_sets.h"
9
10 namespace base {
11 namespace sequence_manager {
12 namespace internal {
13
// Constructs a work queue owned by |task_queue|. |name| is used only for
// diagnostics/trace output; |queue_type| distinguishes the immediate queue
// from the delayed one (see TakeTaskFromWorkQueue for where this matters).
WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
                     const char* name,
                     QueueType queue_type)
    : task_queue_(task_queue), name_(name), queue_type_(queue_type) {}
18
AsValueInto(TimeTicks now,trace_event::TracedValue * state) const19 void WorkQueue::AsValueInto(TimeTicks now,
20 trace_event::TracedValue* state) const {
21 for (const Task& task : tasks_) {
22 TaskQueueImpl::TaskAsValueInto(task, now, state);
23 }
24 }
25
WorkQueue::~WorkQueue() {
  // The queue must be detached from its WorkQueueSets before destruction.
  // NOTE: the stream operands (including work_queue_sets_->GetName()) are
  // only evaluated when the check fires, i.e. when work_queue_sets_ is
  // non-null, so the dereference is safe.
  DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
                            << work_queue_sets_->GetName() << " : " << name_;
}
30
GetFrontTask() const31 const Task* WorkQueue::GetFrontTask() const {
32 if (tasks_.empty())
33 return nullptr;
34 return &tasks_.front();
35 }
36
GetBackTask() const37 const Task* WorkQueue::GetBackTask() const {
38 if (tasks_.empty())
39 return nullptr;
40 return &tasks_.back();
41 }
42
BlockedByFence() const43 bool WorkQueue::BlockedByFence() const {
44 if (!fence_)
45 return false;
46
47 // If the queue is empty then any future tasks will have a higher enqueue
48 // order and will be blocked. The queue is also blocked if the head is past
49 // the fence.
50 return tasks_.empty() || tasks_.front().enqueue_order() >= fence_;
51 }
52
// Stores the front task's enqueue order in |enqueue_order| and returns true.
// Returns false (leaving |enqueue_order| untouched) if the queue is empty or
// blocked by a fence.
bool WorkQueue::GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const {
  if (tasks_.empty() || BlockedByFence())
    return false;
  // Quick sanity check: tasks must be ordered by ascending enqueue order.
  DCHECK_LE(tasks_.front().enqueue_order(), tasks_.back().enqueue_order())
      << task_queue_->GetName() << " : " << work_queue_sets_->GetName() << " : "
      << name_;
  *enqueue_order = tasks_.front().enqueue_order();
  return true;
}
63
// Appends |task| to the back of the queue and, if the queue transitioned
// from empty to non-empty (and is not behind a fence), notifies
// |work_queue_sets_| so the queue becomes eligible for selection.
void WorkQueue::Push(Task task) {
  bool was_empty = tasks_.empty();
#ifndef NDEBUG
  DCHECK(task.enqueue_order_set());
#endif

  // Make sure the |enqueue_order()| is monotonically increasing.
  DCHECK(was_empty || tasks_.back().enqueue_order() < task.enqueue_order());

  // Amortized O(1).
  tasks_.push_back(std::move(task));

  // Only the empty -> non-empty transition is interesting to WorkQueueSets;
  // a queue that already had tasks is already registered.
  if (!was_empty)
    return;

  // If we hit the fence, pretend to WorkQueueSets that we're empty.
  if (work_queue_sets_ && !BlockedByFence())
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}
83
// Snapshots whether |work_queue| was empty at construction so the destructor
// can decide whether an OnTaskPushedToEmptyQueue notification is required.
WorkQueue::TaskPusher::TaskPusher(WorkQueue* work_queue)
    : work_queue_(work_queue), was_empty_(work_queue->Empty()) {}
86
// Move constructor. The moved-from pusher is disarmed by nulling its
// |work_queue_| so that only one TaskPusher performs the destructor-time
// notification.
WorkQueue::TaskPusher::TaskPusher(TaskPusher&& other)
    : work_queue_(other.work_queue_), was_empty_(other.was_empty_) {
  other.work_queue_ = nullptr;
}
91
// Appends |*task| to the wrapped queue WITHOUT notifying WorkQueueSets;
// the notification (if any) is deferred to ~TaskPusher, which makes batched
// pushes cheaper than repeated WorkQueue::Push calls.
void WorkQueue::TaskPusher::Push(Task* task) {
  DCHECK(work_queue_);

#ifndef NDEBUG
  DCHECK(task->enqueue_order_set());
#endif

  // Make sure the |enqueue_order()| is monotonically increasing.
  DCHECK(work_queue_->tasks_.empty() ||
         work_queue_->tasks_.back().enqueue_order() < task->enqueue_order());

  // Amortized O(1).
  work_queue_->tasks_.push_back(std::move(*task));
}
106
// Performs the deferred notification: if the queue went from empty to
// non-empty during this pusher's lifetime (and isn't fenced off), tell
// WorkQueueSets. A moved-from pusher (null |work_queue_|) does nothing.
WorkQueue::TaskPusher::~TaskPusher() {
  // If |work_queue_| became non empty and it isn't blocked by a fence then we
  // must notify |work_queue_->work_queue_sets_|.
  if (was_empty_ && work_queue_ && !work_queue_->Empty() &&
      work_queue_->work_queue_sets_ && !work_queue_->BlockedByFence()) {
    work_queue_->work_queue_sets_->OnTaskPushedToEmptyQueue(work_queue_);
  }
}
115
CreateTaskPusher()116 WorkQueue::TaskPusher WorkQueue::CreateTaskPusher() {
117 return TaskPusher(this);
118 }
119
// Reposts a non-nestable |task| at the FRONT of the queue (used when a task
// could not run in a nested loop and must retain its original priority).
// Notifies WorkQueueSets about the head change unless a fence hides it.
void WorkQueue::PushNonNestableTaskToFront(Task task) {
  DCHECK(task.nestable == Nestable::kNonNestable);

  bool was_empty = tasks_.empty();
  bool was_blocked = BlockedByFence();
#ifndef NDEBUG
  DCHECK(task.enqueue_order_set());
#endif

  if (!was_empty) {
    // Make sure the |enqueue_order| is monotonically increasing.
    DCHECK_LE(task.enqueue_order(), tasks_.front().enqueue_order())
        << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
        << " : " << name_;
  }

  // Amortized O(1).
  tasks_.push_front(std::move(task));

  if (!work_queue_sets_)
    return;

  // Pretend to WorkQueueSets that nothing has changed if we're blocked.
  if (BlockedByFence())
    return;

  // Pushing task to front may unblock the fence: the new head has a lower
  // enqueue order than the old one, so it may sit before the fence.
  if (was_empty || was_blocked) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
  } else {
    work_queue_sets_->OnQueuesFrontTaskChanged(this);
  }
}
153
// Refills this (empty) work queue from the owning TaskQueueImpl's immediate
// incoming queue, notifying WorkQueueSets if tasks became available.
void WorkQueue::TakeImmediateIncomingQueueTasks() {
  // Only valid to call on an empty queue; the reload replaces |tasks_|.
  DCHECK(tasks_.empty());

  task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
  if (tasks_.empty())
    return;

  // If we hit the fence, pretend to WorkQueueSets that we're empty.
  if (work_queue_sets_ && !BlockedByFence())
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}
165
// Pops and returns the front task. If that empties an immediate queue, it is
// eagerly reloaded from the incoming queue before WorkQueueSets is told about
// the new head, so selection sees the refreshed state in one step.
Task WorkQueue::TakeTaskFromWorkQueue() {
  DCHECK(work_queue_sets_);
  DCHECK(!tasks_.empty());

  Task pending_task = std::move(tasks_.front());
  tasks_.pop_front();
  // NB immediate tasks have a different pipeline to delayed ones.
  if (tasks_.empty()) {
    // NB delayed tasks are inserted via Push, so don't need to reload those.
    if (queue_type_ == QueueType::kImmediate) {
      // Short-circuit the queue reload so that OnPopMinQueueInSet does the
      // right thing.
      task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
    }
    // Since the queue is empty, now is a good time to consider reducing its
    // capacity if we're wasting memory.
    tasks_.MaybeShrinkQueue();
  }

  DCHECK(work_queue_sets_);
#if DCHECK_IS_ON()
  // If diagnostics are on it's possible task queues are being selected at
  // random so we can't use the (slightly) more efficient OnPopMinQueueInSet.
  work_queue_sets_->OnQueuesFrontTaskChanged(this);
#else
  // OnPopMinQueueInSet calls GetFrontTaskEnqueueOrder which checks
  // BlockedByFence() so we don't need to here.
  work_queue_sets_->OnPopMinQueueInSet(this);
#endif
  task_queue_->TraceQueueSize();
  return pending_task;
}
198
// Drops canceled (or null-callback) tasks from the head of the queue.
// Returns true if anything was removed. An emptied immediate queue is
// reloaded, and WorkQueueSets is told about the head change when this queue
// is currently registered in a set (valid |heap_handle_|).
bool WorkQueue::RemoveAllCanceledTasksFromFront() {
  if (!work_queue_sets_)
    return false;
  bool task_removed = false;
  while (!tasks_.empty() &&
         (!tasks_.front().task || tasks_.front().task.IsCancelled())) {
    tasks_.pop_front();
    task_removed = true;
  }
  if (task_removed) {
    if (tasks_.empty()) {
      // NB delayed tasks are inserted via Push, so don't need to reload those.
      if (queue_type_ == QueueType::kImmediate) {
        // Short-circuit the queue reload so that OnPopMinQueueInSet does the
        // right thing.
        task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
      }
      // Since the queue is empty, now is a good time to consider reducing its
      // capacity if we're wasting memory.
      tasks_.MaybeShrinkQueue();
    }
    // If we have a valid |heap_handle_| (i.e. we're not blocked by a fence or
    // disabled) then |work_queue_sets_| needs to be told.
    if (heap_handle_.IsValid())
      work_queue_sets_->OnQueuesFrontTaskChanged(this);
    task_queue_->TraceQueueSize();
  }
  return task_removed;
}
228
// Attaches this queue to |work_queue_sets| (or detaches it when passed
// nullptr — see the destructor's DCHECK).
void WorkQueue::AssignToWorkQueueSets(WorkQueueSets* work_queue_sets) {
  work_queue_sets_ = work_queue_sets;
}
232
// Records which set within WorkQueueSets this queue belongs to.
void WorkQueue::AssignSetIndex(size_t work_queue_set_index) {
  work_queue_set_index_ = work_queue_set_index;
}
236
// Installs |fence| and returns whether the queue was ALREADY blocked by a
// previous fence (callers use this to decide on notifications). Fences may
// only move forward, except for the special blocking fence.
bool WorkQueue::InsertFenceImpl(EnqueueOrder fence) {
  DCHECK_NE(fence, 0u);
  DCHECK(fence >= fence_ || fence == EnqueueOrder::blocking_fence());
  bool was_blocked_by_fence = BlockedByFence();
  fence_ = fence;
  return was_blocked_by_fence;
}
244
// Installs |fence| WITHOUT notifying WorkQueueSets. Only legal when no fence
// is present or the existing fence already blocks the queue completely.
void WorkQueue::InsertFenceSilently(EnqueueOrder fence) {
  // Ensure that there is no fence present or a new one blocks queue completely.
  DCHECK(!fence_ || fence_ == EnqueueOrder::blocking_fence());
  InsertFenceImpl(fence);
}
250
// Installs |fence| and updates WorkQueueSets accordingly. Returns true when
// moving the fence forward unblocked previously-fenced tasks.
bool WorkQueue::InsertFence(EnqueueOrder fence) {
  bool was_blocked_by_fence = InsertFenceImpl(fence);
  // Not attached to a set: nobody to notify.
  if (!work_queue_sets_)
    return false;

  // Moving the fence forward may unblock some tasks.
  if (!tasks_.empty() && was_blocked_by_fence && !BlockedByFence()) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
    return true;
  }
  // Fence insertion may have blocked all tasks in this work queue.
  if (BlockedByFence())
    work_queue_sets_->OnQueueBlocked(this);
  return false;
}
266
// Clears any fence. Returns true when doing so made queued tasks runnable
// again (in which case WorkQueueSets is re-notified of the queue's head).
bool WorkQueue::RemoveFence() {
  bool was_blocked_by_fence = BlockedByFence();
  fence_ = EnqueueOrder::none();
  if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence) {
    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
    return true;
  }
  return false;
}
276
// Compares the front tasks of two non-empty, unfenced queues by enqueue
// order; the queue with the smaller (older) order should run first.
bool WorkQueue::ShouldRunBefore(const WorkQueue* other_queue) const {
  DCHECK(!tasks_.empty());
  DCHECK(!other_queue->tasks_.empty());
  EnqueueOrder enqueue_order;
  EnqueueOrder other_enqueue_order;
  // GetFrontTaskEnqueueOrder only fails for empty/fenced queues, which the
  // DCHECKs above and below rule out here.
  bool have_task = GetFrontTaskEnqueueOrder(&enqueue_order);
  bool have_other_task =
      other_queue->GetFrontTaskEnqueueOrder(&other_enqueue_order);
  DCHECK(have_task);
  DCHECK(have_other_task);
  return enqueue_order < other_enqueue_order;
}
289
// Gives the underlying deque a chance to release excess capacity.
void WorkQueue::MaybeShrinkQueue() {
  tasks_.MaybeShrinkQueue();
}
293
// Discards every queued task, then (if attached to a set and currently
// registered there) informs WorkQueueSets that the front task changed, which
// should also deregister this now-empty queue.
void WorkQueue::DeletePendingTasks() {
  tasks_.clear();

  if (work_queue_sets_ && heap_handle().IsValid())
    work_queue_sets_->OnQueuesFrontTaskChanged(this);
  // An empty queue must no longer be registered in any set.
  DCHECK(!heap_handle_.IsValid());
}
301
PopTaskForTesting()302 void WorkQueue::PopTaskForTesting() {
303 if (tasks_.empty())
304 return;
305 tasks_.pop_front();
306 }
307
CollectTasksOlderThan(EnqueueOrder reference,std::vector<const Task * > * result) const308 void WorkQueue::CollectTasksOlderThan(EnqueueOrder reference,
309 std::vector<const Task*>* result) const {
310 for (const Task& task : tasks_) {
311 if (task.enqueue_order() >= reference)
312 break;
313
314 result->push_back(&task);
315 }
316 }
317
318 } // namespace internal
319 } // namespace sequence_manager
320 } // namespace base
321