// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_NODE_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_NODE_H_

#include <atomic>
#include <memory>

#include "third_party/blink/renderer/platform/heap/process_heap.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/assertions.h"
#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"

namespace blink {

class CrossThreadPersistentRegion;
class PersistentRegion;

enum WeaknessPersistentConfiguration {
  kNonWeakPersistentConfiguration,
  kWeakPersistentConfiguration
};

enum CrossThreadnessPersistentConfiguration {
  kSingleThreadPersistentConfiguration,
  kCrossThreadPersistentConfiguration
};

template <CrossThreadnessPersistentConfiguration>
struct PersistentMutexTraits {
  struct [[maybe_unused]] Locker {};
  static void AssertAcquired() {}
};

template <>
struct PersistentMutexTraits<kCrossThreadPersistentConfiguration> {
  struct Locker {
    MutexLocker locker{ProcessHeap::CrossThreadPersistentMutex()};
  };
  static void AssertAcquired() {
#if DCHECK_IS_ON()
    ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
#endif
  }
};
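
// Example (illustrative sketch, not part of this header's API; `DoGuardedWork`
// is a hypothetical function): the traits let code that manipulates
// persistent nodes take the process-wide mutex generically, compiling to a
// no-op locker for the single-thread configuration:
//
//   template <CrossThreadnessPersistentConfiguration config>
//   void DoGuardedWork() {
//     typename PersistentMutexTraits<config>::Locker locker;
//     // For kCrossThreadPersistentConfiguration,
//     // ProcessHeap::CrossThreadPersistentMutex() is held here; for
//     // kSingleThreadPersistentConfiguration this is lock-free.
//   }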

class PersistentNode final {
  DISALLOW_NEW();

 public:
  PersistentNode() { DCHECK(IsUnused()); }

#if DCHECK_IS_ON()
  ~PersistentNode() {
    // If you hit this assert, it means that the thread finished
    // without clearing persistent handles that the thread created.
    // We don't enable the assert for the main thread because the
    // main thread finishes without clearing all persistent handles.
    DCHECK(IsMainThread() || IsUnused());
  }
#endif

  // It is dangerous to copy the PersistentNode because it breaks the
  // free list.
  PersistentNode& operator=(const PersistentNode& otherref) = delete;

  // Ideally the trace method should be virtual and automatically dispatch
  // to the most specific implementation. However, having a virtual method
  // on PersistentNode leads to overly eager template instantiation with
  // MSVC, which in turn leads to include cycles.
  // Instead we call the constructor with a TraceCallback which knows the
  // type of the most specific child and calls trace directly. See
  // TraceMethodDelegate in Visitor.h for how this is done.
  void TracePersistentNode(Visitor* visitor) const {
    DCHECK(!IsUnused());
    DCHECK(trace_);
    trace_(visitor, self_);
  }

  void Initialize(void* self, TraceCallback trace) {
    DCHECK(IsUnused());
    self_ = self;
    trace_ = trace;
  }

  void Reinitialize(void* self, TraceCallback trace) {
    self_ = self;
    trace_ = trace;
  }

  void SetFreeListNext(PersistentNode* node) {
    DCHECK(!node || node->IsUnused());
    self_ = node;
    trace_ = nullptr;
    DCHECK(IsUnused());
  }

  PersistentNode* FreeListNext() {
    DCHECK(IsUnused());
    PersistentNode* node = reinterpret_cast<PersistentNode*>(self_);
    DCHECK(!node || node->IsUnused());
    return node;
  }

  bool IsUnused() const { return !trace_; }

  void* Self() const { return self_; }

 private:
  // If this PersistentNode is in use:
  //   - self_ points to the corresponding Persistent handle.
  //   - trace_ points to the trace method.
  // If this PersistentNode is freed:
  //   - self_ points to the next freed PersistentNode.
  //   - trace_ is nullptr.
  void* self_ = nullptr;
  TraceCallback trace_ = nullptr;
};
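
// Example (illustrative only; `free_list_head` and `node` are hypothetical
// locals): a freed PersistentNode reuses self_ as the free-list link, so
// chaining nodes needs no extra storage. This mirrors what
// AllocateNode()/FreeNode() below do.
//
//   PersistentNode* head = free_list_head;        // may be nullptr
//   node->SetFreeListNext(head);  // node->Self() now stores the old head
//   head = node;                  // push onto the LIFO free list
//   PersistentNode* next = head->FreeListNext();  // pop follows the chain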

struct PersistentNodeSlots final {
  USING_FAST_MALLOC(PersistentNodeSlots);

 public:
  static constexpr int kSlotCount = 256;

  PersistentNodeSlots* next;
  PersistentNode slot[kSlotCount];
};

// Used by PersistentBase to manage a pointer to a thread heap persistent node.
// This class mostly passes accesses through, but provides an interface
// compatible with CrossThreadPersistentNodePtr.
template <ThreadAffinity affinity,
          WeaknessPersistentConfiguration weakness_configuration>
class PersistentNodePtr {
  STACK_ALLOCATED();

 public:
  PersistentNode* Get() const { return ptr_; }
  bool IsInitialized() const { return ptr_; }

  void Initialize(void* owner, TraceCallback);
  void Uninitialize();

  PersistentNodePtr& operator=(PersistentNodePtr&& other) {
    ptr_ = other.ptr_;
    other.ptr_ = nullptr;
    return *this;
  }

 private:
  PersistentNode* ptr_ = nullptr;
#if DCHECK_IS_ON()
  ThreadState* state_ = nullptr;
#endif
};
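
// Example (illustrative only; `Thing`, `TraceThing`, and `thing` are
// hypothetical): a Persistent handle drives this wrapper roughly as follows.
// The callback matches TraceCallback's shape and dispatches to the concrete
// type.
//
//   static void TraceThing(Visitor* visitor, void* self) {
//     static_cast<Thing*>(self)->Trace(visitor);
//   }
//
//   PersistentNodePtr<kMainThreadOnly, kNonWeakPersistentConfiguration> node;
//   node.Initialize(thing, TraceThing);  // registers with the thread heap
//   ...
//   node.Uninitialize();                 // must run on the same thread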

// Used by PersistentBase to manage a pointer to a cross-thread persistent node.
// It uses ProcessHeap::CrossThreadPersistentMutex() to protect most accesses,
// but can be polled to see whether it is initialized without the mutex.
template <WeaknessPersistentConfiguration weakness_configuration>
class CrossThreadPersistentNodePtr {
  STACK_ALLOCATED();

 public:
  PersistentNode* Get() const {
    PersistentMutexTraits<
        kCrossThreadPersistentConfiguration>::AssertAcquired();
    return ptr_.load(std::memory_order_relaxed);
  }
  bool IsInitialized() const { return ptr_.load(std::memory_order_acquire); }

  void Initialize(void* owner, TraceCallback);
  void Uninitialize();

  void ClearWithLockHeld();

  CrossThreadPersistentNodePtr& operator=(
      CrossThreadPersistentNodePtr&& other) {
    PersistentMutexTraits<
        kCrossThreadPersistentConfiguration>::AssertAcquired();
    PersistentNode* node = other.ptr_.load(std::memory_order_relaxed);
    ptr_.store(node, std::memory_order_relaxed);
    other.ptr_.store(nullptr, std::memory_order_relaxed);
    return *this;
  }

 private:
  // Access must either be protected by the cross-thread persistent mutex or
  // handle the fact that this may be changed concurrently (with a
  // release-store).
  std::atomic<PersistentNode*> ptr_{nullptr};
};
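
// Example (illustrative only; `owner` and `trace_callback` are placeholders):
// mutating operations must run with the process-wide mutex held, while
// IsInitialized() may be polled lock-free:
//
//   CrossThreadPersistentNodePtr<kNonWeakPersistentConfiguration> node;
//   {
//     PersistentMutexTraits<kCrossThreadPersistentConfiguration>::Locker
//         locker;
//     node.Initialize(owner, trace_callback);
//   }
//   bool live = node.IsInitialized();  // acquire-load, no mutex needed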

class PLATFORM_EXPORT PersistentRegionBase {
 public:
  ~PersistentRegionBase();

  inline PersistentNode* AllocateNode(void* self, TraceCallback trace);
  inline void FreeNode(PersistentNode* persistent_node);
  int NodesInUse() const;

 protected:
  using ShouldTraceCallback = bool (*)(Visitor*, PersistentNode*);

  void TraceNodesImpl(Visitor*, ShouldTraceCallback);

  void EnsureNodeSlots();

  PersistentNode* free_list_head_ = nullptr;
  PersistentNodeSlots* slots_ = nullptr;
#if DCHECK_IS_ON()
  size_t used_node_count_ = 0;
#endif
};
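
// Illustrative sketch (an assumption; EnsureNodeSlots() is defined in the
// corresponding .cc file): when the free list is empty, a fresh slab of
// kSlotCount nodes is prepended to the slab list and its slots are threaded
// onto the free list, roughly like this:
//
//   PersistentNodeSlots* slots = new PersistentNodeSlots;
//   slots->next = slots_;
//   slots_ = slots;
//   for (PersistentNode& node : slots->slot) {
//     node.SetFreeListNext(free_list_head_);
//     free_list_head_ = &node;
//   }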

inline PersistentNode* PersistentRegionBase::AllocateNode(void* self,
                                                          TraceCallback trace) {
#if DCHECK_IS_ON()
  ++used_node_count_;
#endif
  if (UNLIKELY(!free_list_head_))
    EnsureNodeSlots();
  DCHECK(free_list_head_);
  PersistentNode* node = free_list_head_;
  free_list_head_ = free_list_head_->FreeListNext();
  node->Initialize(self, trace);
  DCHECK(!node->IsUnused());
  return node;
}

inline void PersistentRegionBase::FreeNode(PersistentNode* persistent_node) {
#if DCHECK_IS_ON()
  DCHECK_GT(used_node_count_, 0u);
#endif
  persistent_node->SetFreeListNext(free_list_head_);
  free_list_head_ = persistent_node;
#if DCHECK_IS_ON()
  --used_node_count_;
#endif
}

class PLATFORM_EXPORT PersistentRegion final : public PersistentRegionBase {
  USING_FAST_MALLOC(PersistentRegion);

 public:
  inline void TraceNodes(Visitor*);

  // Clears the Persistent and then frees the node.
  void ReleaseNode(PersistentNode*);

  void PrepareForThreadStateTermination(ThreadState*);

 private:
  static constexpr bool ShouldTracePersistentNode(Visitor*, PersistentNode*) {
    return true;
  }
};

inline void PersistentRegion::TraceNodes(Visitor* visitor) {
  PersistentRegionBase::TraceNodesImpl(visitor, ShouldTracePersistentNode);
}

class PLATFORM_EXPORT CrossThreadPersistentRegion final
    : public PersistentRegionBase {
  USING_FAST_MALLOC(CrossThreadPersistentRegion);

 public:
  inline PersistentNode* AllocateNode(void* self, TraceCallback trace);
  inline void FreeNode(PersistentNode*);
  inline void TraceNodes(Visitor*);

  void PrepareForThreadStateTermination(ThreadState*);

#if defined(ADDRESS_SANITIZER)
  void UnpoisonCrossThreadPersistents();
#endif

 private:
  NO_SANITIZE_ADDRESS
  static bool ShouldTracePersistentNode(Visitor*, PersistentNode*);
};

inline PersistentNode* CrossThreadPersistentRegion::AllocateNode(
    void* self,
    TraceCallback trace) {
  PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
  return PersistentRegionBase::AllocateNode(self, trace);
}

inline void CrossThreadPersistentRegion::FreeNode(PersistentNode* node) {
  PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
  // PersistentBase::UninitializeSafe opportunistically checks for
  // uninitialized nodes to allow fast-path destruction of unused nodes. That
  // check is performed without taking the lock that is required for
  // processing a cross-thread node. After taking the lock, the condition
  // needs to be checked again to avoid double-freeing a node, because the
  // node may have been concurrently freed by the garbage collector on
  // another thread.
  if (!node)
    return;
  PersistentRegionBase::FreeNode(node);
}
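
// Example (illustrative sketch of the caller-side pattern described above;
// `node_ptr` is a hypothetical CrossThreadPersistentNodePtr member):
//
//   if (!node_ptr.IsInitialized())  // fast path, no lock taken
//     return;
//   {
//     PersistentMutexTraits<kCrossThreadPersistentConfiguration>::Locker
//         locker;
//     node_ptr.Uninitialize();  // FreeNode() re-checks for null under lock
//   }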

inline void CrossThreadPersistentRegion::TraceNodes(Visitor* visitor) {
  PersistentRegionBase::TraceNodesImpl(visitor, ShouldTracePersistentNode);
}

template <ThreadAffinity affinity,
          WeaknessPersistentConfiguration weakness_configuration>
void PersistentNodePtr<affinity, weakness_configuration>::Initialize(
    void* owner,
    TraceCallback trace_callback) {
  ThreadState* state = ThreadStateFor<affinity>::GetState();
  DCHECK(state->CheckThread());
  PersistentRegion* region =
      weakness_configuration == kWeakPersistentConfiguration
          ? state->GetWeakPersistentRegion()
          : state->GetPersistentRegion();
  ptr_ = region->AllocateNode(owner, trace_callback);
#if DCHECK_IS_ON()
  state_ = state;
#endif
}

template <ThreadAffinity affinity,
          WeaknessPersistentConfiguration weakness_configuration>
void PersistentNodePtr<affinity, weakness_configuration>::Uninitialize() {
  if (!ptr_)
    return;
  ThreadState* state = ThreadStateFor<affinity>::GetState();
  DCHECK(state->CheckThread());
#if DCHECK_IS_ON()
  DCHECK_EQ(state_, state)
      << "must be initialized and uninitialized on the same thread";
  state_ = nullptr;
#endif
  PersistentRegion* region =
      weakness_configuration == kWeakPersistentConfiguration
          ? state->GetWeakPersistentRegion()
          : state->GetPersistentRegion();
  state->FreePersistentNode(region, ptr_);
  ptr_ = nullptr;
}

template <WeaknessPersistentConfiguration weakness_configuration>
void CrossThreadPersistentNodePtr<weakness_configuration>::Initialize(
    void* owner,
    TraceCallback trace_callback) {
  PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
  CrossThreadPersistentRegion& region =
      weakness_configuration == kWeakPersistentConfiguration
          ? ProcessHeap::GetCrossThreadWeakPersistentRegion()
          : ProcessHeap::GetCrossThreadPersistentRegion();
  PersistentNode* node = region.AllocateNode(owner, trace_callback);
  ptr_.store(node, std::memory_order_release);
}

template <WeaknessPersistentConfiguration weakness_configuration>
void CrossThreadPersistentNodePtr<weakness_configuration>::Uninitialize() {
  PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
  CrossThreadPersistentRegion& region =
      weakness_configuration == kWeakPersistentConfiguration
          ? ProcessHeap::GetCrossThreadWeakPersistentRegion()
          : ProcessHeap::GetCrossThreadPersistentRegion();
  region.FreeNode(ptr_.load(std::memory_order_relaxed));
  ptr_.store(nullptr, std::memory_order_release);
}

template <WeaknessPersistentConfiguration weakness_configuration>
void CrossThreadPersistentNodePtr<weakness_configuration>::ClearWithLockHeld() {
  PersistentMutexTraits<kCrossThreadPersistentConfiguration>::AssertAcquired();
  CrossThreadPersistentRegion& region =
      weakness_configuration == kWeakPersistentConfiguration
          ? ProcessHeap::GetCrossThreadWeakPersistentRegion()
          : ProcessHeap::GetCrossThreadPersistentRegion();
  region.FreeNode(ptr_.load(std::memory_order_relaxed));
  ptr_.store(nullptr, std::memory_order_release);
}

}  // namespace blink

#endif  // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_PERSISTENT_NODE_H_