// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/sequence_manager/atomic_flag_set.h"

#include <utility>

#include "base/bits.h"
#include "base/callback.h"
#include "base/check_op.h"

namespace base {
namespace sequence_manager {
namespace internal {
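
// Implementation overview (a summary of the code below): flags are stored in
// Groups, each packing up to kNumFlags flags as bits of a single atomic
// size_t. Groups live on an owning doubly linked alloc list; Groups with at
// least one unallocated slot are additionally threaded onto a non-owning
// partially free list, so AddFlag() can find a free slot without scanning.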

AtomicFlagSet::AtomicFlagSet(
    scoped_refptr<AssociatedThreadId> associated_thread)
    : associated_thread_(std::move(associated_thread)) {}

AtomicFlagSet::~AtomicFlagSet() {
  DCHECK(!alloc_list_head_);
  DCHECK(!partially_free_list_head_);
}

AtomicFlagSet::AtomicFlag::AtomicFlag() = default;

AtomicFlagSet::AtomicFlag::~AtomicFlag() {
  ReleaseAtomicFlag();
}

AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlagSet* outer,
                                      Group* element,
                                      size_t flag_bit)
    : outer_(outer), group_(element), flag_bit_(flag_bit) {}

AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlag&& other)
    : outer_(other.outer_), group_(other.group_), flag_bit_(other.flag_bit_) {
  other.outer_ = nullptr;
  other.group_ = nullptr;
}

void AtomicFlagSet::AtomicFlag::SetActive(bool active) {
  DCHECK(group_);
  if (active) {
    // Release semantics are required to ensure that all memory accesses made
    // on this thread happen-before any others done on the thread running the
    // active callbacks.
    group_->flags.fetch_or(flag_bit_, std::memory_order_release);
  } else {
    // No operation is being performed based on the bit *not* being set (i.e.
    // state of other memory is irrelevant); hence no memory order is required
    // when unsetting it.
    group_->flags.fetch_and(~flag_bit_, std::memory_order_relaxed);
  }
}
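
// Note: the release store in SetActive(true) pairs with the acquire exchange
// in RunActiveCallbacks() below; together they ensure the callback observes
// writes made before the flag was set.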

void AtomicFlagSet::AtomicFlag::ReleaseAtomicFlag() {
  if (!group_)
    return;

  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
  SetActive(false);

  // If |group_| was full then add it to the partially free list.
  if (group_->IsFull())
    outer_->AddToPartiallyFreeList(group_);

  int index = Group::IndexOfFirstFlagSet(flag_bit_);
  DCHECK(!group_->flag_callbacks[index].is_null());
  group_->flag_callbacks[index] = RepeatingClosure();
  group_->allocated_flags &= ~flag_bit_;

  // If |group_| has become empty, delete it.
  if (group_->IsEmpty()) {
    outer_->RemoveFromPartiallyFreeList(group_);
    outer_->RemoveFromAllocList(group_);
  }

  outer_ = nullptr;
  group_ = nullptr;
}
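
// Design note: the SetActive(false) call above clears the bit in Group::flags
// before the slot's callback is released, so a slot handed out again by a
// later AddFlag() starts with its active bit unset.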

AtomicFlagSet::AtomicFlag AtomicFlagSet::AddFlag(RepeatingClosure callback) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  // Allocate a new Group if needed.
  if (!partially_free_list_head_) {
    AddToAllocList(std::make_unique<Group>());
    AddToPartiallyFreeList(alloc_list_head_.get());
  }

  DCHECK(partially_free_list_head_);
  Group* group = partially_free_list_head_;
  size_t first_unoccupied_index =
      static_cast<size_t>(group->FindFirstUnallocatedFlag());
  DCHECK(!group->flag_callbacks[first_unoccupied_index]);
  group->flag_callbacks[first_unoccupied_index] = std::move(callback);

  size_t flag_bit = size_t{1} << first_unoccupied_index;
  group->allocated_flags |= flag_bit;

  if (group->IsFull())
    RemoveFromPartiallyFreeList(group);

  return AtomicFlag(this, group, flag_bit);
}
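
// Example usage (an illustrative sketch; DoWork() and |associated_thread| are
// placeholders, not names from this file):
//
//   AtomicFlagSet set(associated_thread);
//   AtomicFlagSet::AtomicFlag flag =
//       set.AddFlag(base::BindRepeating(&DoWork));
//   flag.SetActive(true);       // May be called from any thread.
//   set.RunActiveCallbacks();   // On the associated thread; runs DoWork()
//                               // and resets the flag.
//
// |flag| must be released (e.g. destroyed) before |set|, per the DCHECKs in
// ~AtomicFlagSet().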

void AtomicFlagSet::RunActiveCallbacks() const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  for (Group* iter = alloc_list_head_.get(); iter; iter = iter->next.get()) {
    // Acquire semantics are required to guarantee that all memory side-effects
    // made by other threads that were allowed to perform operations are
    // synchronized with this thread before it returns from this method.
    size_t active_flags = std::atomic_exchange_explicit(
        &iter->flags, size_t{0}, std::memory_order_acquire);
    // This is O(number of bits set).
    while (active_flags) {
      int index = Group::IndexOfFirstFlagSet(active_flags);
      // Clear the flag.
      active_flags ^= size_t{1} << index;
      iter->flag_callbacks[index].Run();
    }
  }
}
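
// For example, if the exchange above returned active_flags == 0b1010, the
// loop runs the callback at index 1 (active_flags becomes 0b1000) and then
// the one at index 3 (active_flags becomes 0): two iterations for two set
// bits.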

AtomicFlagSet::Group::Group() = default;

AtomicFlagSet::Group::~Group() {
  DCHECK_EQ(allocated_flags, 0u);
  DCHECK(!partially_free_list_prev);
  DCHECK(!partially_free_list_next);
}

bool AtomicFlagSet::Group::IsFull() const {
  return (~allocated_flags) == 0u;
}

bool AtomicFlagSet::Group::IsEmpty() const {
  return allocated_flags == 0u;
}

int AtomicFlagSet::Group::FindFirstUnallocatedFlag() const {
  size_t unallocated_flags = ~allocated_flags;
  DCHECK_NE(unallocated_flags, 0u);
  int index = IndexOfFirstFlagSet(unallocated_flags);
  DCHECK_LT(index, kNumFlags);
  return index;
}
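
// For example, with allocated_flags == 0b0111, ~allocated_flags has its
// lowest set bit at position 3, so FindFirstUnallocatedFlag() returns 3.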

// static
int AtomicFlagSet::Group::IndexOfFirstFlagSet(size_t flag) {
  DCHECK_NE(flag, 0u);
  return bits::CountTrailingZeroBits(flag);
}
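
// For example, IndexOfFirstFlagSet(0b0100) returns 2: there are two trailing
// zero bits below the single set bit.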

void AtomicFlagSet::AddToAllocList(std::unique_ptr<Group> group) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  if (alloc_list_head_)
    alloc_list_head_->prev = group.get();

  group->next = std::move(alloc_list_head_);
  alloc_list_head_ = std::move(group);
}
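
// Ownership note: the alloc list owns the Groups via the unique_ptr |next|
// links (|alloc_list_head_| owns the first entry), while |prev| is a raw
// back-pointer. Move-assigning over the owning |next| pointer in
// RemoveFromAllocList() below is what actually deletes a removed Group.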

void AtomicFlagSet::RemoveFromAllocList(Group* group) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  if (group->next)
    group->next->prev = group->prev;

  if (group->prev) {
    group->prev->next = std::move(group->next);
  } else {
    alloc_list_head_ = std::move(group->next);
  }
}

void AtomicFlagSet::AddToPartiallyFreeList(Group* group) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK_NE(partially_free_list_head_, group);
  DCHECK(!group->partially_free_list_prev);
  DCHECK(!group->partially_free_list_next);
  if (partially_free_list_head_)
    partially_free_list_head_->partially_free_list_prev = group;

  group->partially_free_list_next = partially_free_list_head_;
  partially_free_list_head_ = group;
}
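
// Unlike the alloc list, the partially free list is non-owning:
// |partially_free_list_prev| and |partially_free_list_next| are raw pointers
// into Groups kept alive by the alloc list.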

void AtomicFlagSet::RemoveFromPartiallyFreeList(Group* group) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK(partially_free_list_head_);
  // Check |group| is in the list.
  DCHECK(partially_free_list_head_ == group || group->partially_free_list_prev);
  if (group->partially_free_list_next) {
    group->partially_free_list_next->partially_free_list_prev =
        group->partially_free_list_prev;
  }

  if (group->partially_free_list_prev) {
    group->partially_free_list_prev->partially_free_list_next =
        group->partially_free_list_next;
  } else {
    partially_free_list_head_ = group->partially_free_list_next;
  }

  group->partially_free_list_prev = nullptr;
  group->partially_free_list_next = nullptr;
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base