// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/compactor.h"

#include <map>
#include <numeric>
#include <unordered_map>
#include <unordered_set>

#include "include/cppgc/macros.h"
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"

namespace cppgc {
namespace internal {

namespace {
// Freelist size threshold that must be exceeded before compaction
// should be considered.
static constexpr size_t kFreeListSizeThreshold = 512 * kKB;

// The real worker behind heap compaction, recording references to movable
// objects ("slots"). When the objects end up being compacted and moved,
// Relocate() will adjust the slots to point to the new location of the
// object along with handling references for interior pointers.
//
// The MovableReferences object is created and maintained for the lifetime
// of one heap compaction-enhanced GC.
class MovableReferences final {
  using MovableReference = CompactionWorklists::MovableReference;

 public:
  explicit MovableReferences(HeapBase& heap) : heap_(heap) {}

  // Adds a slot for compaction. Filters slots in dead objects.
  void AddOrFilter(MovableReference*);

  // Relocates a backing store |from| -> |to|.
  void Relocate(Address from, Address to);

  // Relocates interior slots in a backing store that is moved |from| -> |to|.
  void RelocateInteriorReferences(Address from, Address to, size_t size);

  // Updates the collection of callbacks from the items pushed to the worklist
  // by marking visitors.
  void UpdateCallbacks();

 private:
  HeapBase& heap_;

  // Map from movable reference (value) to its slot. Upon moving an object, the
  // slot pointing to it requires updating. Each movable reference should
  // currently have only a single slot registered for it.
  std::unordered_map<MovableReference, MovableReference*> movable_references_;

  // Map of interior slots to their final location. Needs to be an ordered map
  // as it is used to walk through slots starting at a given memory address.
  // Requires log(n) lookup to make the early bailout reasonably fast.
  //
  // - The initial value for a given key is nullptr.
  // - Upon moving an object this value is adjusted accordingly.
  std::map<MovableReference*, Address> interior_movable_references_;

#if DEBUG
  // The following two collections are used to allow referring back from a
  // slot to an already moved object.
  std::unordered_set<const void*> moved_objects_;
  std::unordered_map<MovableReference*, MovableReference>
      interior_slot_to_object_;
#endif  // DEBUG
};

void MovableReferences::AddOrFilter(MovableReference* slot) {
  const BasePage* slot_page = BasePage::FromInnerAddress(&heap_, slot);
  CHECK_NOT_NULL(slot_page);

  const void* value = *slot;
  if (!value) return;

  // All slots and values are part of Oilpan's heap.
  // - Slots may be contained within dead objects if e.g. the write barrier
  //   registered the slot while the backing store itself was not marked live
  //   in time. Slots in dead objects are filtered below.
  // - Values may only be contained in or point to live objects.

  const HeapObjectHeader& slot_header =
      slot_page->ObjectHeaderFromInnerAddress(slot);
  // Filter the slot since the object that contains the slot is dead.
  if (!slot_header.IsMarked()) return;

  const BasePage* value_page = BasePage::FromInnerAddress(&heap_, value);
  CHECK_NOT_NULL(value_page);

  // The following cases are not compacted and do not require recording:
  // - Compactable object on large pages.
  // - Compactable object on non-compactable spaces.
  if (value_page->is_large() || !value_page->space().is_compactable()) return;

  // Slots must reside in and values must point to live objects at this
  // point. |value| usually points to a separate object but can also point
  // to an interior address within the same object's storage, which is why
  // the dynamic header lookup is required.
  const HeapObjectHeader& value_header =
      value_page->ObjectHeaderFromInnerAddress(value);
  CHECK(value_header.IsMarked());

  // The value may have been recorded already, but then it must have been
  // registered with the same slot.
  auto reference_it = movable_references_.find(value);
  if (V8_UNLIKELY(reference_it != movable_references_.end())) {
    CHECK_EQ(slot, reference_it->second);
    return;
  }

  // Add regular movable reference.
  movable_references_.emplace(value, slot);

  // Check whether the slot itself resides on a page that is compacted.
  if (V8_LIKELY(!slot_page->space().is_compactable())) return;

  CHECK_EQ(interior_movable_references_.end(),
           interior_movable_references_.find(slot));
  interior_movable_references_.emplace(slot, nullptr);
#if DEBUG
  interior_slot_to_object_.emplace(slot, slot_header.ObjectStart());
#endif  // DEBUG
}

void MovableReferences::Relocate(Address from, Address to) {
#if DEBUG
  moved_objects_.insert(from);
#endif  // DEBUG

  // Interior slots always need to be processed for moved objects.
  // Consider an object A with slot A.x pointing to value B where A is
  // allocated on a movable page itself. When B is finally moved, it needs to
  // find the corresponding slot A.x. Object A may already have been moved and
  // its memory freed, so looking up A.x at its old location would crash.
  if (!interior_movable_references_.empty()) {
    const HeapObjectHeader& header = HeapObjectHeader::FromObject(to);
    const size_t size = header.ObjectSize();
    RelocateInteriorReferences(from, to, size);
  }

  auto it = movable_references_.find(from);
  // If no entry is found, there is no corresponding slot for this live object.
  // This may happen because the mutator changed the slot to point to a
  // different object, e.g. when incremental marking marked an object as live
  // that was later on replaced.
  if (it == movable_references_.end()) {
    return;
  }

  // If the object is referenced by a slot that is contained in a compacted
  // area itself, check whether it can be updated already.
  MovableReference* slot = it->second;
  auto interior_it = interior_movable_references_.find(slot);
  if (interior_it != interior_movable_references_.end()) {
    MovableReference* slot_location =
        reinterpret_cast<MovableReference*>(interior_it->second);
    if (!slot_location) {
      interior_it->second = to;
#if DEBUG
      // Check that the containing object has not been moved yet.
      auto reverse_it = interior_slot_to_object_.find(slot);
      DCHECK_NE(interior_slot_to_object_.end(), reverse_it);
      DCHECK_EQ(moved_objects_.end(), moved_objects_.find(reverse_it->second));
#endif  // DEBUG
    } else {
      slot = slot_location;
    }
  }

  // Compaction is atomic, so the slot should not have been updated during
  // compaction.
  DCHECK_EQ(from, *slot);

  // Update the slot with the new value.
  *slot = to;
}

void MovableReferences::RelocateInteriorReferences(Address from, Address to,
                                                   size_t size) {
  // |from| is a valid address for a slot.
  auto interior_it = interior_movable_references_.lower_bound(
      reinterpret_cast<MovableReference*>(from));
  if (interior_it == interior_movable_references_.end()) return;
  DCHECK_GE(reinterpret_cast<Address>(interior_it->first), from);

  size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
  while (offset < size) {
    if (!interior_it->second) {
      // Update the interior reference value, so that when the object the slot
      // is pointing to is moved, it can reuse this value.
      Address reference = to + offset;
      interior_it->second = reference;

      // If the |slot|'s content is pointing into the region [from, from +
      // size) we are dealing with an interior pointer that does not point to
      // a valid HeapObjectHeader. Such references need to be fixed up
      // immediately.
      Address& reference_contents = *reinterpret_cast<Address*>(reference);
      if (reference_contents > from && reference_contents < (from + size)) {
        reference_contents = reference_contents - from + to;
      }
    }

    interior_it++;
    if (interior_it == interior_movable_references_.end()) return;
    offset = reinterpret_cast<Address>(interior_it->first) - from;
  }
}
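// Maintains the compaction state for a single space: the current target page
// and offset within it (the compaction frontier) that live objects are slid
// down to, plus the set of already-compacted pages that become available as
// further compaction targets.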
class CompactionState final {
  CPPGC_STACK_ALLOCATED();
  using Pages = std::vector<NormalPage*>;

 public:
  CompactionState(NormalPageSpace* space, MovableReferences& movable_references)
      : space_(space), movable_references_(movable_references) {}

  void AddPage(NormalPage* page) {
    DCHECK_EQ(space_, &page->space());
    // If not the first page, add |page| onto the available pages chain.
    if (!current_page_)
      current_page_ = page;
    else
      available_pages_.push_back(page);
  }

  void RelocateObject(const NormalPage* page, const Address header,
                      size_t size) {
    // Allocate and copy over the live object.
    Address compact_frontier =
        current_page_->PayloadStart() + used_bytes_in_current_page_;
    if (compact_frontier + size > current_page_->PayloadEnd()) {
      // Can't fit on the current page. Add the remainder onto the freelist
      // and advance to the next available page.
      ReturnCurrentPageToSpace();

      current_page_ = available_pages_.back();
      available_pages_.pop_back();
      used_bytes_in_current_page_ = 0;
      compact_frontier = current_page_->PayloadStart();
    }
    if (V8_LIKELY(compact_frontier != header)) {
      // When compacting within the same page the source and destination may
      // overlap, requiring memmove; otherwise the non-overlapping memcpy can
      // be used.
      if (current_page_ == page)
        memmove(compact_frontier, header, size);
      else
        memcpy(compact_frontier, header, size);
      movable_references_.Relocate(header + sizeof(HeapObjectHeader),
                                   compact_frontier + sizeof(HeapObjectHeader));
    }
    current_page_->object_start_bitmap().SetBit(compact_frontier);
    used_bytes_in_current_page_ += size;
    DCHECK_LE(used_bytes_in_current_page_, current_page_->PayloadSize());
  }

  void FinishCompactingSpace() {
    // If the current page hasn't been allocated into, add it to the available
    // list, for subsequent release below.
    if (used_bytes_in_current_page_ == 0) {
      available_pages_.push_back(current_page_);
    } else {
      ReturnCurrentPageToSpace();
    }

    // The remaining available pages were not needed as compaction targets;
    // make their payload inaccessible and release them.
    for (NormalPage* page : available_pages_) {
      SetMemoryInaccessible(page->PayloadStart(), page->PayloadSize());
      NormalPage::Destroy(page);
    }
  }

  void FinishCompactingPage(NormalPage* page) {
#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
    defined(V8_USE_ADDRESS_SANITIZER)
    // Zap the unused portion, until it is either compacted into or freed.
    if (current_page_ != page) {
      ZapMemory(page->PayloadStart(), page->PayloadSize());
    } else {
      ZapMemory(page->PayloadStart() + used_bytes_in_current_page_,
                page->PayloadSize() - used_bytes_in_current_page_);
    }
#endif
  }

 private:
  void ReturnCurrentPageToSpace() {
    DCHECK_EQ(space_, &current_page_->space());
    space_->AddPage(current_page_);
    if (used_bytes_in_current_page_ != current_page_->PayloadSize()) {
      // Put the remainder of the page onto the free list.
      size_t freed_size =
          current_page_->PayloadSize() - used_bytes_in_current_page_;
      Address payload = current_page_->PayloadStart();
      Address free_start = payload + used_bytes_in_current_page_;
      SetMemoryInaccessible(free_start, freed_size);
      space_->free_list().Add({free_start, freed_size});
      current_page_->object_start_bitmap().SetBit(free_start);
    }
  }

  NormalPageSpace* space_;
  MovableReferences& movable_references_;
  // Page into which compacted objects will be written.
  NormalPage* current_page_ = nullptr;
  // Offset into |current_page_| to the next free address.
  size_t used_bytes_in_current_page_ = 0;
  // Additional pages in the current space that can be used as compaction
  // targets. Pages that remain available once compaction finishes can be
  // released.
  Pages available_pages_;
};

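// Compacts a single page: dead objects are finalized and skipped, while live
// objects are slid down to the compaction frontier tracked by
// |compaction_state|.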
void CompactPage(NormalPage* page, CompactionState& compaction_state) {
  compaction_state.AddPage(page);

  page->object_start_bitmap().Clear();

  for (Address header_address = page->PayloadStart();
       header_address < page->PayloadEnd();) {
    HeapObjectHeader* header =
        reinterpret_cast<HeapObjectHeader*>(header_address);
    size_t size = header->AllocatedSize();
    DCHECK_GT(size, 0u);
    DCHECK_LT(size, kPageSize);

    if (header->IsFree()) {
      // Unpoison the freelist entry so that we can compact into it as wanted.
      ASAN_UNPOISON_MEMORY_REGION(header_address, size);
      header_address += size;
      continue;
    }

    if (!header->IsMarked()) {
      // Compaction is currently launched only from AtomicPhaseEpilogue, so
      // it's guaranteed to be on the mutator thread - no need to postpone
      // finalization.
      header->Finalize();

      // As compaction is under way, leave the freed memory accessible
      // while compacting the rest of the page. We just zap the payload
      // to catch out other finalizers trying to access it.
#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
    defined(V8_USE_ADDRESS_SANITIZER)
      ZapMemory(header, size);
#endif
      header_address += size;
      continue;
    }

    // Object is marked.
#if !defined(CPPGC_YOUNG_GENERATION)
    header->Unmark();
#endif
    // Potentially unpoison the live object as well, since it is the source of
    // the copy.
    ASAN_UNPOISON_MEMORY_REGION(header->ObjectStart(), header->ObjectSize());
    compaction_state.RelocateObject(page, header_address, size);
    header_address += size;
  }

  compaction_state.FinishCompactingPage(page);
}

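// Compacts all pages of |space| in place and rebuilds its free list; see the
// algorithm description below.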
void CompactSpace(NormalPageSpace* space,
                  MovableReferences& movable_references) {
  using Pages = NormalPageSpace::Pages;

#ifdef V8_USE_ADDRESS_SANITIZER
  UnmarkedObjectsPoisoner().Traverse(*space);
#endif  // V8_USE_ADDRESS_SANITIZER

  DCHECK(space->is_compactable());

  space->free_list().Clear();

  // Compaction generally follows Jonker's algorithm for fast garbage
  // compaction. Compaction is performed in-place, sliding objects down over
  // unused holes for a smaller heap page footprint and improved locality. A
  // "compaction pointer" is consequently kept, pointing to the next available
  // address to move objects down to. It will belong to one of the already
  // compacted pages for this space, but as compaction proceeds, it will not
  // belong to the same page as the one being currently compacted.
  //
  // The compaction pointer is represented by the
  // |(current_page_, used_bytes_in_current_page_)| pair, with
  // |used_bytes_in_current_page_| being the offset into |current_page_|,
  // making up the next available location. When the compaction of an arena
  // page causes the compaction pointer to exhaust the current page it is
  // compacting into, page compaction will advance the current page of the
  // compaction pointer, as well as the allocation point.
  //
  // By construction, the page compaction can be performed without having
  // to allocate any new pages. So to arrange for the page compaction's
  // supply of freed, available pages, we chain them together after each
  // has been "compacted from". The page compaction will then reuse those
  // as needed, and once finished, the chained, available pages can be
  // released back to the OS.
  //
  // To ease the passing of the compaction state when iterating over an
  // arena's pages, package it up into a |CompactionState|.

  Pages pages = space->RemoveAllPages();
  if (pages.empty()) return;

  CompactionState compaction_state(space, movable_references);
  for (BasePage* page : pages) {
    // Large objects do not belong to this arena.
    CompactPage(NormalPage::From(page), compaction_state);
  }

  compaction_state.FinishCompactingSpace();
  // Sweeping will verify object start bitmap of compacted space.
}

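// Returns the total free-list size across the given compactable spaces. Used
// as the fragmentation heuristic in Compactor::ShouldCompact().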
size_t UpdateHeapResidency(const std::vector<NormalPageSpace*>& spaces) {
  return std::accumulate(spaces.cbegin(), spaces.cend(), size_t{0},
                         [](size_t acc, const NormalPageSpace* space) {
                           DCHECK(space->is_compactable());
                           if (!space->size()) return acc;
                           return acc + space->free_list().Size();
                         });
}

}  // namespace

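// Collects the compactable normal page spaces up front; the set of spaces
// does not change over the lifetime of the heap.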
Compactor::Compactor(RawHeap& heap) : heap_(heap) {
  for (auto& space : heap_) {
    if (!space->is_compactable()) continue;
    DCHECK_EQ(&heap, space->raw_heap());
    compactable_spaces_.push_back(static_cast<NormalPageSpace*>(space.get()));
  }
}

bool Compactor::ShouldCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) const {
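  // Compaction requires all movable slots to be recorded during marking.
  // References from a conservatively scanned stack are not recorded, so an
  // atomic GC whose stack may contain heap pointers cannot compact.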
  if (compactable_spaces_.empty() ||
      (marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
       stack_state ==
           GarbageCollector::Config::StackState::kMayContainHeapPointers)) {
    // The following check ensures that tests that want to test compaction are
    // not interrupted by garbage collections that cannot use compaction.
    DCHECK(!enable_for_next_gc_for_testing_);
    return false;
  }

  if (enable_for_next_gc_for_testing_) {
    return true;
  }

  size_t free_list_size = UpdateHeapResidency(compactable_spaces_);

  return free_list_size > kFreeListSizeThreshold;
}

void Compactor::InitializeIfShouldCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) {
  DCHECK(!is_enabled_);

  if (!ShouldCompact(marking_type, stack_state)) return;

  compaction_worklists_ = std::make_unique<CompactionWorklists>();

  is_enabled_ = true;
}

bool Compactor::CancelIfShouldNotCompact(
    GarbageCollector::Config::MarkingType marking_type,
    GarbageCollector::Config::StackState stack_state) {
  if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return false;

  DCHECK_NOT_NULL(compaction_worklists_);
  compaction_worklists_->movable_slots_worklist()->Clear();
  compaction_worklists_.reset();

  is_enabled_ = false;
  return true;
}

Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
  if (!is_enabled_) return CompactableSpaceHandling::kSweep;

  StatsCollector::EnabledScope stats_scope(heap_.heap()->stats_collector(),
                                           StatsCollector::kAtomicCompact);

  MovableReferences movable_references(*heap_.heap());

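  // Drain the slots recorded by marking visitors. AddOrFilter() discards
  // slots that reside in dead objects.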
  CompactionWorklists::MovableReferencesWorklist::Local local(
      compaction_worklists_->movable_slots_worklist());
  CompactionWorklists::MovableReference* slot;
  while (local.Pop(&slot)) {
    movable_references.AddOrFilter(slot);
  }
  compaction_worklists_.reset();

  for (NormalPageSpace* space : compactable_spaces_) {
    CompactSpace(space, movable_references);
  }

  enable_for_next_gc_for_testing_ = false;
  is_enabled_ = false;
  return CompactableSpaceHandling::kIgnore;
}

void Compactor::EnableForNextGCForTesting() {
  DCHECK_NULL(heap_.heap()->marker());
  enable_for_next_gc_for_testing_ = true;
}

}  // namespace internal
}  // namespace cppgc