/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_HEAP_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_HEAP_H_

#include <limits>
#include <memory>

#include "base/macros.h"
#include "build/build_config.h"
#include "third_party/blink/renderer/platform/heap/finalizer_traits.h"
#include "third_party/blink/renderer/platform/heap/gc_info.h"
#include "third_party/blink/renderer/platform/heap/heap_page.h"
#include "third_party/blink/renderer/platform/heap/process_heap.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/heap/thread_state_statistics.h"
#include "third_party/blink/renderer/platform/heap/visitor.h"
#include "third_party/blink/renderer/platform/heap/worklist.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/assertions.h"
#include "third_party/blink/renderer/platform/wtf/forward.h"
#include "third_party/blink/renderer/platform/wtf/sanitizers.h"

namespace blink {

namespace incremental_marking_test {
class IncrementalMarkingScopeBase;
}  // namespace incremental_marking_test

namespace weakness_marking_test {
class EphemeronCallbacksCounter;
}  // namespace weakness_marking_test

class ConcurrentMarkingVisitor;
class ThreadHeapStatsCollector;
class PageBloomFilter;
class PagePool;
class ProcessHeapReporter;
class RegionTree;

using MarkingItem = TraceDescriptor;
using NotFullyConstructedItem = const void*;
using WeakTableItem = MarkingItem;

struct BackingStoreCallbackItem {
  const void* backing;
  MovingObjectCallback callback;
};

struct CustomCallbackItem {
  WeakCallback callback;
  const void* parameter;
};

using V8Reference = const TraceWrapperV8Reference<v8::Value>*;

// A segment size of 512 entries is necessary to avoid throughput regressions.
// Since the work list is currently a temporary object, this is not a problem.
using MarkingWorklist = Worklist<MarkingItem, 512 /* local entries */>;
using WriteBarrierWorklist = Worklist<HeapObjectHeader*, 64>;
using NotFullyConstructedWorklist =
    Worklist<NotFullyConstructedItem, 16 /* local entries */>;
using WeakCallbackWorklist =
    Worklist<CustomCallbackItem, 64 /* local entries */>;
// Using large local segments here (sized 256 entries) to avoid throughput
// regressions.
using MovableReferenceWorklist =
    Worklist<const MovableReference*, 256 /* local entries */>;
using WeakTableWorklist = Worklist<WeakTableItem, 16 /* local entries */>;
using BackingStoreCallbackWorklist =
    Worklist<BackingStoreCallbackItem, 16 /* local entries */>;
using V8ReferencesWorklist = Worklist<V8Reference, 16 /* local entries */>;
using NotSafeToConcurrentlyTraceWorklist =
    Worklist<MarkingItem, 64 /* local entries */>;

class PLATFORM_EXPORT HeapAllocHooks {
  STATIC_ONLY(HeapAllocHooks);

 public:
  // TODO(hajimehoshi): Pass a type name of the allocated object.
  typedef void AllocationHook(Address, size_t, const char*);
  typedef void FreeHook(Address);

  // Sets the allocation hook. Only one hook is supported at a time.
  static void SetAllocationHook(AllocationHook* hook) {
    CHECK(!allocation_hook_ || !hook);
    allocation_hook_ = hook;
  }

  // Sets the free hook. Only one hook is supported at a time.
  static void SetFreeHook(FreeHook* hook) {
    CHECK(!free_hook_ || !hook);
    free_hook_ = hook;
  }

  static void AllocationHookIfEnabled(Address address,
                                      size_t size,
                                      const char* type_name) {
    AllocationHook* allocation_hook = allocation_hook_;
    if (UNLIKELY(!!allocation_hook))
      allocation_hook(address, size, type_name);
  }

  static void FreeHookIfEnabled(Address address) {
    FreeHook* free_hook = free_hook_;
    if (UNLIKELY(!!free_hook))
      free_hook(address);
  }

 private:
  static AllocationHook* allocation_hook_;
  static FreeHook* free_hook_;
};
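
// Usage sketch for HeapAllocHooks (illustrative only; OnBlinkHeapAllocation
// and OnBlinkHeapFree are hypothetical client functions, not part of this
// header):
//
//   void OnBlinkHeapAllocation(Address address, size_t size, const char*) {
//     // Record the allocation in a client-side profiler.
//   }
//   void OnBlinkHeapFree(Address address) {
//     // Record the matching free.
//   }
//
//   HeapAllocHooks::SetAllocationHook(&OnBlinkHeapAllocation);
//   HeapAllocHooks::SetFreeHook(&OnBlinkHeapFree);
//   ...
//   // Passing nullptr unregisters a hook again.
//   HeapAllocHooks::SetAllocationHook(nullptr);
//   HeapAllocHooks::SetFreeHook(nullptr);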

class HeapCompact;
template <typename T>
class Member;
template <typename T>
class WeakMember;
template <typename T>
class UntracedMember;

namespace internal {

template <typename T, bool = NeedsAdjustPointer<T>::value>
class ObjectAliveTrait;

template <typename T>
class ObjectAliveTrait<T, false> {
  STATIC_ONLY(ObjectAliveTrait);

 public:
  static bool IsHeapObjectAlive(const T* object) {
    static_assert(sizeof(T), "T must be fully defined");
    return HeapObjectHeader::FromPayload(object)->IsMarked();
  }
};

template <typename T>
class ObjectAliveTrait<T, true> {
  STATIC_ONLY(ObjectAliveTrait);

 public:
  NO_SANITIZE_ADDRESS
  static bool IsHeapObjectAlive(const T* object) {
    static_assert(sizeof(T), "T must be fully defined");
    const HeapObjectHeader* header = object->GetHeapObjectHeader();
    if (header == BlinkGC::kNotFullyConstructedObject) {
      // Objects under construction are always alive.
      return true;
    }
    return header->IsMarked();
  }
};

template <typename T, typename = int>
struct IsGarbageCollectedContainer : std::false_type {};

template <typename T>
struct IsGarbageCollectedContainer<
    T,
    typename T::IsGarbageCollectedCollectionTypeMarker> : std::true_type {};

}  // namespace internal

class PLATFORM_EXPORT ThreadHeap {
  USING_FAST_MALLOC(ThreadHeap);

 public:
  explicit ThreadHeap(ThreadState*);
  ~ThreadHeap();

  template <typename T>
  static inline bool IsHeapObjectAlive(const T* object) {
    static_assert(sizeof(T), "T must be fully defined");
    // The strongification of collections relies on the fact that once a
    // collection has been strongified, there is no way that it can contain
    // non-live entries, so no entries will be removed. Since you can't set
    // the mark bit on a null pointer, that means that null pointers are
    // always 'alive'.
    if (!object)
      return true;
    // TODO(keishi): Some tests create CrossThreadPersistent on non-attached
    // threads.
    if (!ThreadState::Current())
      return true;
    DCHECK(&ThreadState::Current()->Heap() ==
           &PageFromObject(object)->Arena()->GetThreadState()->Heap());
    return internal::ObjectAliveTrait<T>::IsHeapObjectAlive(object);
  }
  template <typename T>
  static inline bool IsHeapObjectAlive(const Member<T>& member) {
    return IsHeapObjectAlive(member.Get());
  }
  template <typename T>
  static inline bool IsHeapObjectAlive(const WeakMember<T>& member) {
    return IsHeapObjectAlive(member.Get());
  }
  template <typename T>
  static inline bool IsHeapObjectAlive(const UntracedMember<T>& member) {
    return IsHeapObjectAlive(member.Get());
  }
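
  // Example use of IsHeapObjectAlive() from a weak callback (a sketch;
  // |weak_child_| stands for some WeakMember<T> field of a garbage-collected
  // class and is not part of this header):
  //
  //   if (!ThreadHeap::IsHeapObjectAlive(weak_child_))
  //     weak_child_ = nullptr;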

  MarkingWorklist* GetMarkingWorklist() const {
    return marking_worklist_.get();
  }

  WriteBarrierWorklist* GetWriteBarrierWorklist() const {
    return write_barrier_worklist_.get();
  }

  NotFullyConstructedWorklist* GetNotFullyConstructedWorklist() const {
    return not_fully_constructed_worklist_.get();
  }

  WeakCallbackWorklist* GetWeakCallbackWorklist() const {
    return weak_callback_worklist_.get();
  }

  MovableReferenceWorklist* GetMovableReferenceWorklist() const {
    return movable_reference_worklist_.get();
  }

  WeakTableWorklist* GetWeakTableWorklist() const {
    return weak_table_worklist_.get();
  }

  BackingStoreCallbackWorklist* GetBackingStoreCallbackWorklist() const {
    return backing_store_callback_worklist_.get();
  }

  V8ReferencesWorklist* GetV8ReferencesWorklist() const {
    return v8_references_worklist_.get();
  }

  NotSafeToConcurrentlyTraceWorklist* GetNotSafeToConcurrentlyTraceWorklist()
      const {
    return not_safe_to_concurrently_trace_worklist_.get();
  }
  // Registers an ephemeron table for fixed-point iteration.
  void RegisterWeakTable(void* container_object,
                         EphemeronCallback);

  // Heap compaction registration methods:

  // Checks whether we need to register |addr| as a backing store or as a slot
  // containing a reference to it.
  bool ShouldRegisterMovingAddress();

  RegionTree* GetRegionTree() { return region_tree_.get(); }

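  // Worked example for AllocationSizeFromSize() below (a sketch assuming a
  // 4-byte HeapObjectHeader and an 8-byte allocation granularity; the actual
  // constants are defined in heap_page.h): a request for 13 bytes becomes
  // 13 + 4 = 17 bytes, which is then rounded up to 24 bytes.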
  static inline size_t AllocationSizeFromSize(size_t size) {
    // Add space for header.
    size_t allocation_size = size + sizeof(HeapObjectHeader);
    // The allocation size calculation can overflow for large sizes.
    CHECK_GT(allocation_size, size);
    // Align size with allocation granularity.
    allocation_size = (allocation_size + kAllocationMask) & ~kAllocationMask;
    return allocation_size;
  }
  Address AllocateOnArenaIndex(ThreadState*,
                               size_t,
                               int arena_index,
                               uint32_t gc_info_index,
                               const char* type_name);
  template <typename T>
  static Address Allocate(size_t);

  void WeakProcessing(MarkingVisitor*);

  // Moves not fully constructed objects to the worklist of previously not
  // fully constructed objects. Such objects can be iterated using the Trace()
  // method and do not need to rely on conservative handling.
  void FlushNotFullyConstructedObjects();

  // Marks not fully constructed objects.
  void MarkNotFullyConstructedObjects(MarkingVisitor*);
  // Marks the transitive closure including ephemerons.
  bool AdvanceMarking(MarkingVisitor*, base::TimeTicks deadline);
  void VerifyMarking();

  // Returns true if concurrent markers will have work to steal.
  bool HasWorkForConcurrentMarking() const;
  // Returns true if the marker is done.
  bool AdvanceConcurrentMarking(ConcurrentMarkingVisitor*, base::TimeTicks);

  // Conservatively checks whether an address is a pointer into any of the
  // thread heaps. If so, marks the object pointed to as live.
  Address CheckAndMarkPointer(MarkingVisitor*, Address);

  // Visits remembered sets.
  void VisitRememberedSets(MarkingVisitor*);

  size_t ObjectPayloadSizeForTesting();
  void ResetAllocationPointForTesting();

  PagePool* GetFreePagePool() { return free_page_pool_.get(); }

  // This look-up uses the region search tree and a negative contains cache to
  // provide an efficient mapping from arbitrary addresses to the containing
  // heap-page if one exists.
  BasePage* LookupPageForAddress(ConstAddress);

  HeapCompact* Compaction();

  // Get one of the heap structures for this thread.
  // The thread heap is split into multiple heap parts based on object types
  // and object sizes.
  BaseArena* Arena(int arena_index) const {
    DCHECK_LE(0, arena_index);
    DCHECK_LT(arena_index, BlinkGC::kNumberOfArenas);
    return arenas_[arena_index];
  }

  static bool IsVectorArenaIndex(int arena_index) {
    return BlinkGC::kVectorArenaIndex == arena_index;
  }
  static bool IsNormalArenaIndex(int);

  void MakeConsistentForGC();
  // MakeConsistentForMutator() drops marks from marked objects and rebuilds
  // free lists. This is called after taking a snapshot and before resuming
  // the execution of mutators.
  void MakeConsistentForMutator();

  // Unmarks all objects in the entire heap. This is supposed to be called at
  // the beginning of a major GC.
  void Unmark();

  void Compact();

  bool AdvanceLazySweep(base::TimeTicks deadline);
  bool AdvanceConcurrentSweep(base::JobDelegate*);

  void PrepareForSweep(BlinkGC::CollectionType);
  void RemoveAllPages();
  void InvokeFinalizersOnSweptPages();
  void CompleteSweep();

  void CollectStatistics(ThreadState::Statistics* statistics);

  ThreadHeapStatsCollector* stats_collector() const {
    return heap_stats_collector_.get();
  }

#if defined(ADDRESS_SANITIZER)
  void PoisonUnmarkedObjects();
#endif

#if DCHECK_IS_ON()
  // Infrastructure to determine if an address is within one of the
  // address ranges for the Blink heap. If the address is in the Blink
  // heap, the containing heap page is returned.
  BasePage* FindPageFromAddress(Address);
  BasePage* FindPageFromAddress(const void* pointer) {
    return FindPageFromAddress(
        reinterpret_cast<Address>(const_cast<void*>(pointer)));
  }
#endif

  PageBloomFilter* page_bloom_filter() { return page_bloom_filter_.get(); }

  bool IsInLastAllocatedRegion(Address address) const;
  void SetLastAllocatedRegion(Address start, size_t length);

 private:
  struct LastAllocatedRegion {
    Address start = nullptr;
    size_t length = 0;
  };

  static int ArenaIndexForObjectSize(size_t);

  void SetupWorklists(bool);
  void DestroyMarkingWorklists(BlinkGC::StackState);
  void DestroyCompactionWorklists();

  bool InvokeEphemeronCallbacks(MarkingVisitor*, base::TimeTicks);

  bool FlushV8References(base::TimeTicks);

  ThreadState* thread_state_;
  std::unique_ptr<ThreadHeapStatsCollector> heap_stats_collector_;
  std::unique_ptr<RegionTree> region_tree_;
  std::unique_ptr<PageBloomFilter> page_bloom_filter_;
  std::unique_ptr<PagePool> free_page_pool_;
  std::unique_ptr<ProcessHeapReporter> process_heap_reporter_;

  // All objects on this worklist have been fully initialized and assigned a
  // trace callback for iterating the body of the object. This worklist should
  // contain almost all objects.
  std::unique_ptr<MarkingWorklist> marking_worklist_;

  // Objects on this worklist have been collected in the write barrier. The
  // worklist is kept separate from |marking_worklist_| to minimize the work
  // done on the path where a write barrier is executed.
  std::unique_ptr<WriteBarrierWorklist> write_barrier_worklist_;

  // Objects on this worklist were observed to be in construction (in their
  // constructor) and thus have been delayed for processing. They have not yet
  // been assigned a valid header and trace callback.
  std::unique_ptr<NotFullyConstructedWorklist> not_fully_constructed_worklist_;

  // Objects on this worklist were previously in construction but have been
  // moved here upon observing a safepoint, i.e., processing without stack. They
  // have not yet been assigned a valid header and trace callback but are fully
  // specified and can thus be iterated using the trace callback (which can be
  // looked up dynamically).
  std::unique_ptr<NotFullyConstructedWorklist>
      previously_not_fully_constructed_worklist_;

  // Worklist of weak callbacks accumulated for objects. Such callbacks are
  // processed after finishing marking objects.
  std::unique_ptr<WeakCallbackWorklist> weak_callback_worklist_;

  // This worklist remembers slots that are traced during the marking phases.
  // The mapping between the slots and the backing stores is created during
  // the atomic pause phase.
  std::unique_ptr<MovableReferenceWorklist> movable_reference_worklist_;

  // Worklist of ephemeron callbacks. Used to pass new callbacks from
  // MarkingVisitor to ThreadHeap.
  std::unique_ptr<WeakTableWorklist> weak_table_worklist_;

  // This worklist is used to pass backing store callbacks to HeapCompact.
  std::unique_ptr<BackingStoreCallbackWorklist>
      backing_store_callback_worklist_;

  // Worklist for storing the V8 references until ThreadHeap can flush them
  // to V8.
  std::unique_ptr<V8ReferencesWorklist> v8_references_worklist_;

  std::unique_ptr<NotSafeToConcurrentlyTraceWorklist>
      not_safe_to_concurrently_trace_worklist_;

  // No duplicates are allowed for ephemeron callbacks. Hence, we use a hash
  // map with the key being the HashTable.
  WTF::HashMap<const void*, EphemeronCallback> ephemeron_callbacks_;

  std::unique_ptr<HeapCompact> compaction_;

  LastAllocatedRegion last_allocated_region_;

  BaseArena* arenas_[BlinkGC::kNumberOfArenas];

  static ThreadHeap* main_thread_heap_;

  friend class incremental_marking_test::IncrementalMarkingScopeBase;
  template <typename T>
  friend class Member;
  friend class ThreadState;
  friend class weakness_marking_test::EphemeronCallbacksCounter;
};

template <typename T>
class GarbageCollected {
  IS_GARBAGE_COLLECTED_TYPE();

 public:
  using ParentMostGarbageCollectedType = T;

  // Must use MakeGarbageCollected.
  void* operator new(size_t) = delete;
  void* operator new[](size_t) = delete;
  // The garbage collector takes care of reclaiming the object. Also, a
  // virtual destructor requires an unambiguous, accessible 'operator delete'.
  void operator delete(void*) { NOTREACHED(); }
  void operator delete[](void*) = delete;

  template <typename Derived>
  static void* AllocateObject(size_t size) {
    return ThreadHeap::Allocate<GCInfoFoldedType<Derived>>(size);
  }

 protected:
  // This trait could in theory be moved to gc_info.h, but that would cause
  // significant memory bloat due to the huge number of ThreadHeap::Allocate<>
  // instantiations, which the linker is not able to fold.
  template <typename Derived>
  class GCInfoFolded {
    static constexpr bool is_virtual_destructor_at_base =
        std::has_virtual_destructor<ParentMostGarbageCollectedType>::value;
    static constexpr bool both_trivially_destructible =
        std::is_trivially_destructible<ParentMostGarbageCollectedType>::value &&
        std::is_trivially_destructible<Derived>::value;
    static constexpr bool has_custom_dispatch_at_base =
        internal::HasFinalizeGarbageCollectedObject<
            ParentMostGarbageCollectedType>::value;

   public:
    using Type = std::conditional_t<is_virtual_destructor_at_base ||
                                        both_trivially_destructible ||
                                        has_custom_dispatch_at_base,
                                    ParentMostGarbageCollectedType,
                                    Derived>;
  };

  template <typename Derived>
  using GCInfoFoldedType = typename GCInfoFolded<Derived>::Type;

  GarbageCollected() = default;

  DISALLOW_COPY_AND_ASSIGN(GarbageCollected);
};
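
// Usage sketch for GarbageCollected<T> (illustrative only; MyNode is a
// hypothetical client class and the Trace() signature follows visitor.h):
//
//   class MyNode final : public GarbageCollected<MyNode> {
//    public:
//     explicit MyNode(int id) : id_(id) {}
//     void Trace(Visitor* visitor) const { visitor->Trace(next_); }
//
//    private:
//     const int id_;
//     Member<MyNode> next_;
//   };
//
//   // Instances must be created through MakeGarbageCollected() (see below);
//   // the regular operator new is deleted.
//   MyNode* node = MakeGarbageCollected<MyNode>(1);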

// Used for passing custom sizes to MakeGarbageCollected.
struct AdditionalBytes {
  explicit AdditionalBytes(size_t bytes) : value(bytes) {}
  const size_t value;
};

template <typename T>
struct MakeGarbageCollectedTrait {
  template <typename... Args>
  static T* Call(Args&&... args) {
    static_assert(WTF::IsGarbageCollectedType<T>::value,
                  "T needs to be a garbage collected object");
    static_assert(
        std::is_trivially_destructible<T>::value ||
            std::has_virtual_destructor<T>::value || std::is_final<T>::value ||
            internal::IsGarbageCollectedContainer<T>::value ||
            internal::HasFinalizeGarbageCollectedObject<T>::value,
        "Finalized GarbageCollected class should either have a virtual "
        "destructor or be marked as final");
    static_assert(!IsGarbageCollectedMixin<T>::value ||
                      sizeof(T) <= kLargeObjectSizeThreshold,
                  "GarbageCollectedMixin may not be a large object");
    void* memory = T::template AllocateObject<T>(sizeof(T));
    HeapObjectHeader* header = HeapObjectHeader::FromPayload(memory);
    // Placement new, as the regular operator new() is deleted.
    T* object = ::new (memory) T(std::forward<Args>(args)...);
    header->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
    return object;
  }

  template <typename... Args>
  static T* Call(AdditionalBytes additional_bytes, Args&&... args) {
    static_assert(WTF::IsGarbageCollectedType<T>::value,
                  "T needs to be a garbage collected object");
    static_assert(
        std::is_trivially_destructible<T>::value ||
            std::has_virtual_destructor<T>::value || std::is_final<T>::value ||
            internal::IsGarbageCollectedContainer<T>::value ||
            internal::HasFinalizeGarbageCollectedObject<T>::value,
        "Finalized GarbageCollected class should either have a virtual "
        "destructor or be marked as final.");
    const size_t size = sizeof(T) + additional_bytes.value;
    if (IsGarbageCollectedMixin<T>::value) {
      // Ban large mixins so we can use PageFromObject() on them.
      CHECK_GE(kLargeObjectSizeThreshold, size)
          << "GarbageCollectedMixin may not be a large object";
    }
    void* memory = T::template AllocateObject<T>(size);
    HeapObjectHeader* header = HeapObjectHeader::FromPayload(memory);
    // Placement new, as the regular operator new() is deleted.
    T* object = ::new (memory) T(std::forward<Args>(args)...);
    header->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
    return object;
  }
};

// Default MakeGarbageCollected: constructs an instance of T, which is a
// garbage collected type.
template <typename T, typename... Args>
T* MakeGarbageCollected(Args&&... args) {
  return MakeGarbageCollectedTrait<T>::Call(std::forward<Args>(args)...);
}

// Constructs an instance of T, which is a garbage collected type. This special
// version takes a size, which enables constructing objects with additional
// inline storage.
template <typename T, typename... Args>
T* MakeGarbageCollected(AdditionalBytes additional_bytes, Args&&... args) {
  return MakeGarbageCollectedTrait<T>::Call(additional_bytes,
                                            std::forward<Args>(args)...);
}
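
// Usage sketch for MakeGarbageCollected() (illustrative only; MyNode and
// MyBuffer are hypothetical client classes):
//
//   // Regular construction; arguments are forwarded to the constructor.
//   MyNode* node = MakeGarbageCollected<MyNode>(42);
//
//   // Construction with extra inline storage appended after the object,
//   // e.g. for a trailing array managed by MyBuffer itself.
//   MyBuffer* buffer =
//       MakeGarbageCollected<MyBuffer>(AdditionalBytes(256), /*capacity=*/256);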

// Assigning class types to their arenas.
//
// We use sized arenas for most 'normal' objects to improve memory locality.
// It seems that objects of the same type are likely to be accessed together,
// which means that we want to group objects by type. That's one reason
// why we provide dedicated arenas for popular types (e.g., Node, CSSValue),
// but it's not practical to prepare dedicated arenas for all types.
// Thus we group objects by their sizes, hoping that this will approximately
// group objects by their types.
//

inline int ThreadHeap::ArenaIndexForObjectSize(size_t size) {
  if (size < 64) {
    if (size < 32)
      return BlinkGC::kNormalPage1ArenaIndex;
    return BlinkGC::kNormalPage2ArenaIndex;
  }
  if (size < 128)
    return BlinkGC::kNormalPage3ArenaIndex;
  return BlinkGC::kNormalPage4ArenaIndex;
}

inline bool ThreadHeap::IsNormalArenaIndex(int index) {
  return index >= BlinkGC::kNormalPage1ArenaIndex &&
         index <= BlinkGC::kNormalPage4ArenaIndex;
}

inline Address ThreadHeap::AllocateOnArenaIndex(ThreadState* state,
                                                size_t size,
                                                int arena_index,
                                                uint32_t gc_info_index,
                                                const char* type_name) {
  DCHECK(state->IsAllocationAllowed());
  DCHECK_NE(arena_index, BlinkGC::kLargeObjectArenaIndex);
  NormalPageArena* arena = static_cast<NormalPageArena*>(Arena(arena_index));
  Address address =
      arena->AllocateObject(AllocationSizeFromSize(size), gc_info_index);
  HeapAllocHooks::AllocationHookIfEnabled(address, size, type_name);
  return address;
}

template <typename T>
Address ThreadHeap::Allocate(size_t size) {
  ThreadState* state = ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
  const char* type_name = WTF_HEAP_PROFILER_TYPE_NAME(T);
  return state->Heap().AllocateOnArenaIndex(
      state, size, ThreadHeap::ArenaIndexForObjectSize(size),
      GCInfoTrait<T>::Index(), type_name);
}

inline bool ThreadHeap::IsInLastAllocatedRegion(Address address) const {
  return last_allocated_region_.start <= address &&
         address <
             (last_allocated_region_.start + last_allocated_region_.length);
}

inline void ThreadHeap::SetLastAllocatedRegion(Address start, size_t length) {
  last_allocated_region_.start = start;
  last_allocated_region_.length = length;
}

template <typename T>
void Visitor::HandleWeakCell(const WeakCallbackInfo&, const void* object) {
  WeakMember<T>* weak_member =
      reinterpret_cast<WeakMember<T>*>(const_cast<void*>(object));
  if (weak_member->Get()) {
    if (weak_member->IsHashTableDeletedValue()) {
      // This can happen when weak fields are deleted while incremental marking
      // is running. Deleted values need to be preserved to avoid reviving
      // objects in containers.
      return;
    }
    if (!ThreadHeap::IsHeapObjectAlive(weak_member->Get()))
      weak_member->Clear();
  }
}

class PLATFORM_EXPORT WeakCallbackInfo final {
 public:
  template <typename T>
  bool IsHeapObjectAlive(const T*) const;
  template <typename T>
  bool IsHeapObjectAlive(const WeakMember<T>&) const;
  template <typename T>
  bool IsHeapObjectAlive(const UntracedMember<T>&) const;

 private:
  WeakCallbackInfo() = default;
  friend class ThreadHeap;
};

template <typename T>
bool WeakCallbackInfo::IsHeapObjectAlive(const T* object) const {
  return ThreadHeap::IsHeapObjectAlive(object);
}

template <typename T>
bool WeakCallbackInfo::IsHeapObjectAlive(
    const WeakMember<T>& weak_member) const {
  return ThreadHeap::IsHeapObjectAlive(weak_member);
}

template <typename T>
bool WeakCallbackInfo::IsHeapObjectAlive(
    const UntracedMember<T>& untraced_member) const {
  return ThreadHeap::IsHeapObjectAlive(untraced_member.Get());
}

}  // namespace blink

#endif  // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_HEAP_H_