// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_COMPACT_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_COMPACT_H_

#include <memory>

#include "base/memory/ptr_util.h"
#include "third_party/blink/renderer/platform/heap/blink_gc.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/hash_map.h"
#include "third_party/blink/renderer/platform/wtf/hash_set.h"
#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"

#include <bitset>
#include <utility>

// Compaction-specific debug switches:

// Emit debug info during compaction.
#define DEBUG_HEAP_COMPACTION 0

// Emit stats on freelist occupancy.
// 0 - disabled, 1 - minimal, 2 - verbose.
#define DEBUG_HEAP_FREELIST 0

namespace blink {

class NormalPageArena;
class BasePage;
class ThreadState;
class ThreadHeap;

// HeapCompact coordinates the optional compaction pass of a Blink GC cycle:
// it decides whether the upcoming GC should compact (ShouldCompact()), tracks
// which arenas are eligible (a bitmask over BlinkGC arena indices), records
// object relocations performed by the sweep-compaction pass (Relocate()), and
// fixes up movable slots via the private MovableObjectFixups helper.
//
// Lifecycle per GC: ShouldCompact() -> Initialize() -> (AddCompactingPage() /
// Relocate() / FilterNonLiveSlots() as the GC proceeds) ->
// FinishedArenaCompaction() -> Finish(), or Cancel() to abort mid-way.
// NOTE(review): threading constraints are not visible in this header —
// presumably all calls happen on the owning thread; confirm against the .cc.
class PLATFORM_EXPORT HeapCompact final {
 public:
  // Returns |true| if the ongoing GC may compact the given arena/sub-heap.
  // Only the contiguous range [kVectorArenaIndex, kHashTableArenaIndex] of
  // backing-store arenas is considered compactable.
  static bool IsCompactableArena(int arena_index) {
    return arena_index >= BlinkGC::kVectorArenaIndex &&
           arena_index <= BlinkGC::kHashTableArenaIndex;
  }

  explicit HeapCompact(ThreadHeap*);
  ~HeapCompact();

  // Returns true if compaction can and should be used for the provided
  // parameters (stack state, marking type, and the reason the GC was
  // triggered).
  bool ShouldCompact(BlinkGC::StackState,
                     BlinkGC::MarkingType,
                     BlinkGC::GCReason);

  // Compaction should be performed as part of the ongoing GC, initialize
  // the heap compaction pass.
  void Initialize(ThreadState*);

  // Returns true if the ongoing GC will perform compaction.
  bool IsCompacting() const { return do_compact_; }

  // Returns true if the ongoing GC will perform compaction for the given
  // heap arena (i.e. compaction is on and the arena's bit is set in
  // |compactable_arenas_|).
  bool IsCompactingArena(int arena_index) const {
    return do_compact_ && (compactable_arenas_ & (0x1u << arena_index));
  }

  // See |Heap::ShouldRegisterMovingAddress()| documentation.
  bool ShouldRegisterMovingAddress();

  // Slots that are not contained within live objects are filtered. This can
  // happen when the write barrier for in-payload objects triggers but the
  // outer backing store does not survive the marking phase because all its
  // referents die before being reached by the marker.
  void FilterNonLiveSlots();

  // Finishes compaction and clears internal state.
  void Finish();

  // Cancels compaction after slots may have been recorded already.
  void Cancel();

  // Perform any relocation post-processing after having completed compacting
  // the given arena. The number of pages that were freed together with the
  // total size (in bytes) of freed heap storage, are passed in as arguments.
  void FinishedArenaCompaction(NormalPageArena*,
                               size_t freed_pages,
                               size_t freed_size);

  // Register the heap page as containing live objects that will all be
  // compacted. Registration happens as part of making the arenas ready
  // for a GC.
  void AddCompactingPage(BasePage*);

  // Notify heap compaction that the object at |from| has been relocated to
  // |to|. (Called by the sweep compaction pass.)
  void Relocate(Address from, Address to);

  // Enables compaction for the next garbage collection if technically
  // possible, bypassing the freelist-size heuristics.
  void EnableCompactionForNextGCForTesting() { force_for_next_gc_ = true; }

  // Returns true if one or more vector arenas are being compacted.
  bool IsCompactingVectorArenasForTesting() const {
    return IsCompactingArena(BlinkGC::kVectorArenaIndex);
  }

  // Number of slot fixups performed by the last compaction (test-only).
  size_t LastFixupCountForTesting() const {
    return last_fixup_count_for_testing_;
  }

 private:
  // Tracks movable backing-store objects and the slots referring to them;
  // defined in the implementation file.
  class MovableObjectFixups;

  // Freelist size threshold that must be exceeded before compaction
  // should be considered.
  static const size_t kFreeListSizeThreshold = 512 * 1024;

  // Sample the amount of fragmentation and heap memory currently residing
  // on the freelists of the arenas we're able to compact. The computed
  // numbers will be subsequently used to determine if a heap compaction
  // is in order (see ShouldCompact()).
  void UpdateHeapResidency();

  // Lazily creates and returns the fixups helper for this compaction.
  MovableObjectFixups& Fixups();

  // Back-pointer to the heap being compacted; not owned.
  ThreadHeap* const heap_;
  std::unique_ptr<MovableObjectFixups> fixups_;

  // Set to |true| when a compacting sweep will go ahead.
  bool do_compact_ = false;
  size_t gc_count_since_last_compaction_ = 0;

  // Last reported freelist size, across all compactable arenas.
  size_t free_list_size_ = 0;

  // If compacting, i'th heap arena will be compacted if corresponding bit is
  // set. Indexes are in the range of BlinkGC::ArenaIndices.
  unsigned compactable_arenas_ = 0u;

  // Number of slot fixups performed during the last compaction; exposed via
  // LastFixupCountForTesting().
  size_t last_fixup_count_for_testing_ = 0;

  // Set by EnableCompactionForNextGCForTesting() to force compaction on the
  // next GC regardless of heuristics.
  bool force_for_next_gc_ = false;
};

}  // namespace blink

// Logging macros activated by debug switches.

// Always expands to a DLOG(INFO) stream; the per-switch macros below select
// between this and EAT_STREAM_PARAMETERS (which discards the streamed
// arguments without evaluating the log statement) based on the
// DEBUG_HEAP_COMPACTION / DEBUG_HEAP_FREELIST switches defined above.
#define LOG_HEAP_COMPACTION_INTERNAL() DLOG(INFO)

// Compaction progress logging; no-op unless DEBUG_HEAP_COMPACTION is set.
#if DEBUG_HEAP_COMPACTION
#define LOG_HEAP_COMPACTION() LOG_HEAP_COMPACTION_INTERNAL()
#else
#define LOG_HEAP_COMPACTION() EAT_STREAM_PARAMETERS
#endif

// Freelist occupancy stats; no-op unless DEBUG_HEAP_FREELIST is non-zero.
#if DEBUG_HEAP_FREELIST
#define LOG_HEAP_FREELIST() LOG_HEAP_COMPACTION_INTERNAL()
#else
#define LOG_HEAP_FREELIST() EAT_STREAM_PARAMETERS
#endif

// Verbose freelist stats; active only at level 2 of DEBUG_HEAP_FREELIST.
#if DEBUG_HEAP_FREELIST == 2
#define LOG_HEAP_FREELIST_VERBOSE() LOG_HEAP_COMPACTION_INTERNAL()
#else
#define LOG_HEAP_FREELIST_VERBOSE() EAT_STREAM_PARAMETERS
#endif

#endif  // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_COMPACT_H_