// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/sweeper.h"

#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"

namespace v8 {
namespace internal {

Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
    : heap_(heap),
      marking_state_(marking_state),
      num_tasks_(0),
      pending_sweeper_tasks_semaphore_(0),
      incremental_sweeper_pending_(false),
      sweeping_in_progress_(false),
      num_sweeping_tasks_(0),
      stop_sweeper_tasks_(false),
      iterability_task_semaphore_(0),
      iterability_in_progress_(false),
      iterability_task_started_(false),
      should_reduce_memory_(false) {}

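// Pauses concurrent sweeper tasks for the lifetime of the scope. If there is
// nothing left to sweep, sweeping is completed eagerly instead; otherwise the
// sweeper stays enabled but cannot use background tasks until the scope ends.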
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
    : sweeper_(sweeper) {
  sweeper_->stop_sweeper_tasks_ = true;
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->AbortAndWaitForTasks();

  // Complete sweeping if there's nothing more to do.
  if (sweeper_->IsDoneSweeping()) {
    sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
    DCHECK(!sweeper_->sweeping_in_progress());
  } else {
    // Unless sweeping is complete the flag still indicates that the sweeper
    // is enabled. It just cannot use tasks anymore.
    DCHECK(sweeper_->sweeping_in_progress());
  }
}

Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
  sweeper_->stop_sweeper_tasks_ = false;
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->StartSweeperTasks();
}

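// Temporarily takes the OLD_SPACE sweeping list away from the sweeper so the
// caller can filter its pages while the sweeper is paused; the (possibly
// modified) list is handed back in the destructor. Requires an active
// PauseOrCompleteScope.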
Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
    Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
    : sweeper_(sweeper),
      pause_or_complete_scope_(pause_or_complete_scope),
      sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
  USE(pause_or_complete_scope_);
  if (!sweeping_in_progress_) return;

  int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
  old_space_sweeping_list_ =
      std::move(sweeper_->sweeping_list_[old_space_index]);
  sweeper_->sweeping_list_[old_space_index].clear();
}

Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
  DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
  if (!sweeping_in_progress_) return;

  sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
      std::move(old_space_sweeping_list_);
  // old_space_sweeping_list_ does not need to be cleared as we don't use it.
}

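// Background task that sweeps the growable paged spaces. Each task starts with
// |space_to_start_| and then round-robins through the remaining sweepable
// spaces; CODE_SPACE is skipped because code pages must not be swept
// concurrently to the mutator.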
class Sweeper::SweeperTask final : public CancelableTask {
 public:
  SweeperTask(Isolate* isolate, Sweeper* sweeper,
              base::Semaphore* pending_sweeper_tasks,
              std::atomic<intptr_t>* num_sweeping_tasks,
              AllocationSpace space_to_start)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        num_sweeping_tasks_(num_sweeping_tasks),
        space_to_start_(space_to_start),
        tracer_(isolate->heap()->tracer()) {}

  ~SweeperTask() override = default;

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    DCHECK(IsValidSweepingSpace(space_to_start_));
    const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
    for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
      const AllocationSpace space_id = static_cast<AllocationSpace>(
          FIRST_GROWABLE_PAGED_SPACE +
          ((i + offset) % kNumberOfSweepingSpaces));
      // Do not sweep code space concurrently.
      if (space_id == CODE_SPACE) continue;
      DCHECK(IsValidSweepingSpace(space_id));
      sweeper_->SweepSpaceFromTask(space_id);
    }
    (*num_sweeping_tasks_)--;
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_sweeper_tasks_;
  std::atomic<intptr_t>* const num_sweeping_tasks_;
  AllocationSpace space_to_start_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};

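// Foreground task that sweeps CODE_SPACE one page at a time and reschedules
// itself until that space's sweeping list is empty.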
class Sweeper::IncrementalSweeperTask final : public CancelableTask {
 public:
  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
      : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}

  ~IncrementalSweeperTask() override = default;

 private:
  void RunInternal() final {
    VMState<GC> state(isolate_);
    TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");

    sweeper_->incremental_sweeper_pending_ = false;

    if (sweeper_->sweeping_in_progress()) {
      if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
        sweeper_->ScheduleIncrementalSweepingTask();
      }
    }
  }

  Isolate* const isolate_;
  Sweeper* const sweeper_;
  DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};

void Sweeper::StartSweeping() {
  CHECK(!stop_sweeper_tasks_);
  sweeping_in_progress_ = true;
  iterability_in_progress_ = true;
  should_reduce_memory_ = heap_->ShouldReduceMemory();
  MajorNonAtomicMarkingState* marking_state =
      heap_->mark_compact_collector()->non_atomic_marking_state();
  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
    // Sorting is done in order to make compaction more efficient: by sweeping
    // pages with the most free bytes first, we make it more likely that when
    // evacuating a page, already swept pages will have enough free bytes to
    // hold the objects to move (and therefore, we won't need to wait for more
    // pages to be swept in order to move those objects).
    // Since maps don't move, there is no need to sort the pages from MAP_SPACE
    // before sweeping them.
    if (space != MAP_SPACE) {
      int space_index = GetSweepSpaceIndex(space);
      std::sort(
          sweeping_list_[space_index].begin(),
          sweeping_list_[space_index].end(), [marking_state](Page* a, Page* b) {
            return marking_state->live_bytes(a) > marking_state->live_bytes(b);
          });
    }
  });
}

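// Spawns one concurrent SweeperTask per sweepable space and schedules the
// incremental code-space sweeper. Does nothing when concurrent sweeping is
// disabled or sweeper tasks are delayed for testing.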
void Sweeper::StartSweeperTasks() {
  DCHECK_EQ(0, num_tasks_);
  DCHECK_EQ(0, num_sweeping_tasks_);
  if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
      !heap_->delay_sweeper_tasks_for_testing_) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      DCHECK(IsValidSweepingSpace(space));
      num_sweeping_tasks_++;
      auto task = std::make_unique<SweeperTask>(
          heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
          &num_sweeping_tasks_, space);
      DCHECK_LT(num_tasks_, kMaxSweeperTasks);
      task_ids_[num_tasks_++] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    });
    ScheduleIncrementalSweepingTask();
  }
}

Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
  base::MutexGuard guard(&mutex_);
  SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
  if (!list.empty()) {
    auto last_page = list.back();
    list.pop_back();
    return last_page;
  }
  return nullptr;
}

void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
  base::MutexGuard guard(&mutex_);

  ForAllSweepingSpaces([this](AllocationSpace space) {
    SweptList& swept_list = swept_list_[GetSweepSpaceIndex(space)];
    for (Page* p : swept_list) p->MergeOldToNewRememberedSets();
  });
}

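// Cancels all pending sweeper tasks and waits for tasks that are already
// running to finish.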
void Sweeper::AbortAndWaitForTasks() {
  if (!FLAG_concurrent_sweeping) return;

  for (int i = 0; i < num_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        TryAbortResult::kTaskAborted) {
      pending_sweeper_tasks_semaphore_.Wait();
    } else {
      // Aborted case.
      num_sweeping_tasks_--;
    }
  }
  num_tasks_ = 0;
  DCHECK_EQ(0, num_sweeping_tasks_);
}

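// Finishes sweeping of all spaces on the calling thread and stops all sweeper
// tasks.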
void Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  EnsureIterabilityCompleted();

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  ForAllSweepingSpaces(
      [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });

  AbortAndWaitForTasks();

  ForAllSweepingSpaces([this](AllocationSpace space) {
    CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
  });
  sweeping_in_progress_ = false;
}

bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }

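// Sweeps a single page: iterates the live (black) objects, frees the gaps
// between them (either back to the owning space's free list or as filler
// objects), optionally zaps or discards the freed memory, prunes old-to-new
// and old-to-old remembered sets and typed slots covering freed ranges, and
// finally clears the page's mark bits. Returns the size of the largest freed
// block that is guaranteed to be allocatable, or 0 when the free list is
// ignored.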
int Sweeper::RawSweep(
    Page* p, FreeListRebuildingMode free_list_mode,
    FreeSpaceTreatmentMode free_space_mode,
    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
    const base::MutexGuard& page_guard) {
  Space* space = p->owner();
  DCHECK_NOT_NULL(space);
  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

  CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();

  // TODO(ulan): we don't have to clear typed old-to-old slots in code space
  // because the concurrent marker doesn't mark code objects. This requires
  // the write barrier for code objects to check the color of the code object.
  bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
                               p->typed_slot_set<OLD_TO_OLD>() != nullptr;

  // The free ranges map is used for filtering typed slots.
  std::map<uint32_t, uint32_t> free_ranges;

  // Before we sweep objects on the page, we free dead array buffers which
  // requires valid mark bits.
  ArrayBufferTracker::FreeDead(p, marking_state_);

  Address free_start = p->area_start();
  InvalidatedSlotsCleanup old_to_new_cleanup =
      InvalidatedSlotsCleanup::NoCleanup(p);

  // Clean invalidated slots during the final atomic pause. After resuming
  // execution this isn't necessary because invalid old-to-new refs were
  // already removed by mark compact's update pointers phase.
  if (invalidated_slots_in_free_space ==
      FreeSpaceMayContainInvalidatedSlots::kYes)
    old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);

  intptr_t live_bytes = 0;
  intptr_t freed_bytes = 0;
  intptr_t max_freed_bytes = 0;

  // Set the allocated_bytes_ counter to area_size and clear the wasted_memory_
  // counter. The free operations below will decrease allocated_bytes_ to actual
  // live bytes and keep track of wasted_memory_.
  p->ResetAllocationStatistics();

  if (code_object_registry) code_object_registry->Clear();

  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
    HeapObject const object = object_and_size.first;
    if (code_object_registry)
      code_object_registry->RegisterAlreadyExistingCodeObject(object.address());
    DCHECK(marking_state_->IsBlack(object));
    Address free_end = object.address();
    if (free_end != free_start) {
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      if (free_space_mode == ZAP_FREE_SPACE) {
        ZapCode(free_start, size);
      }
      if (free_list_mode == REBUILD_FREE_LIST) {
        freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
            free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
      } else {
        p->heap()->CreateFillerObjectAt(
            free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
            ClearFreedMemoryMode::kClearFreedMemory);
      }
      if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
      RememberedSetSweeping::RemoveRange(p, free_start, free_end,
                                         SlotSet::KEEP_EMPTY_BUCKETS);
      RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      if (non_empty_typed_slots) {
        free_ranges.insert(std::pair<uint32_t, uint32_t>(
            static_cast<uint32_t>(free_start - p->address()),
            static_cast<uint32_t>(free_end - p->address())));
      }

      old_to_new_cleanup.Free(free_start, free_end);
    }
    Map map = object.synchronized_map();
    int size = object.SizeFromMap(map);
    live_bytes += size;
    free_start = free_end + size;
  }

  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    if (free_space_mode == ZAP_FREE_SPACE) {
      ZapCode(free_start, size);
    }
    if (free_list_mode == REBUILD_FREE_LIST) {
      freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
          free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    } else {
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo,
                                      ClearFreedMemoryMode::kClearFreedMemory);
    }
    if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
    RememberedSetSweeping::RemoveRange(p, free_start, p->area_end(),
                                       SlotSet::KEEP_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    if (non_empty_typed_slots) {
      free_ranges.insert(std::pair<uint32_t, uint32_t>(
          static_cast<uint32_t>(free_start - p->address()),
          static_cast<uint32_t>(p->area_end() - p->address())));
    }

    old_to_new_cleanup.Free(free_start, p->area_end());
  }

  // Clear invalid typed slots after collecting all free ranges.
  if (!free_ranges.empty()) {
    TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
    if (old_to_new != nullptr) {
      old_to_new->ClearInvalidSlots(free_ranges);
    }
    TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
    if (old_to_old != nullptr) {
      old_to_old->ClearInvalidSlots(free_ranges);
    }
  }

  marking_state_->bitmap(p)->Clear();
  if (free_list_mode == IGNORE_FREE_LIST) {
    marking_state_->SetLiveBytes(p, 0);
    // We did not free memory, so have to adjust allocated bytes here.
    intptr_t freed_bytes = p->area_size() - live_bytes;
    p->DecreaseAllocatedBytes(freed_bytes);
  } else {
    // Keep the old live bytes counter of the page until RefillFreeList, where
    // the space size is refined.
    // The allocated_bytes() counter is precisely the total size of objects.
    DCHECK_EQ(live_bytes, p->allocated_bytes());
  }
  p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
  if (code_object_registry) code_object_registry->Finalize();
  if (free_list_mode == IGNORE_FREE_LIST) return 0;

  return static_cast<int>(
      p->free_list()->GuaranteedAllocatable(max_freed_bytes));
}

void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
  Page* page = nullptr;
  while (!stop_sweeper_tasks_ &&
         ((page = GetSweepingPageSafe(identity)) != nullptr)) {
    // Typed slot sets are only recorded on code pages. Code pages
    // are not swept concurrently to the application to ensure W^X.
    DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
           !page->typed_slot_set<OLD_TO_OLD>());
    ParallelSweepPage(page, identity);
  }
}

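// Sweeps at most one page of |identity| and returns true if that space's
// sweeping list is empty afterwards.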
bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
  if (Page* page = GetSweepingPageSafe(identity)) {
    ParallelSweepPage(page, identity);
  }
  return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}

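// Sweeps pages of |identity| until either |required_freed_bytes| can be
// satisfied by a single freed block, |max_pages| pages have been swept, or
// the space's sweeping list is exhausted. Returns the largest freed block
// found so far, in guaranteed allocatable bytes.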
int Sweeper::ParallelSweepSpace(
    AllocationSpace identity, int required_freed_bytes, int max_pages,
    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
  int max_freed = 0;
  int pages_freed = 0;
  Page* page = nullptr;
  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    int freed =
        ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
    ++pages_freed;
    if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // Free list of a never-allocate page will be dropped later on.
      continue;
    }
    DCHECK_GE(freed, 0);
    max_freed = Max(max_freed, freed);
    if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
      return max_freed;
    if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
  }
  return max_freed;
}

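// Sweeps a single page under the page's mutex and moves it to the swept list.
// Returns the largest freed block on the page, in guaranteed allocatable
// bytes.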
int Sweeper::ParallelSweepPage(
    Page* page, AllocationSpace identity,
    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
  DCHECK(IsValidSweepingSpace(identity));

  // The Scavenger may add already swept pages back.
  if (page->SweepingDone()) return 0;

  int max_freed = 0;
  {
    base::MutexGuard guard(page->mutex());
    DCHECK(!page->SweepingDone());
    // If the page is a code page, the CodePageMemoryModificationScope changes
    // the page protection mode from rx -> rw while sweeping.
    CodePageMemoryModificationScope code_page_scope(page);

    DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
              page->concurrent_sweeping_state());
    page->set_concurrent_sweeping_state(
        Page::ConcurrentSweepingState::kInProgress);
    const FreeSpaceTreatmentMode free_space_mode =
        Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
                         invalidated_slots_in_free_space, guard);
    DCHECK(page->SweepingDone());
  }

  {
    base::MutexGuard guard(&mutex_);
    swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
  }
  return max_freed;
}

void Sweeper::ScheduleIncrementalSweepingTask() {
  if (!incremental_sweeper_pending_) {
    incremental_sweeper_pending_ = true;
    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
    auto taskrunner =
        V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
    taskrunner->PostTask(
        std::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
  }
}

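// Adds a page to the sweeping list of |space|. For regular additions the page
// is prepared for sweeping (live bytes accounted, old-to-new remembered set
// moved aside); pages that were temporarily removed are re-added without
// repeating that accounting.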
void Sweeper::AddPage(AllocationSpace space, Page* page,
                      Sweeper::AddPageMode mode) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
  if (mode == Sweeper::REGULAR) {
    PrepareToBeSweptPage(space, page);
  } else {
    // Page has been temporarily removed from the sweeper. Accounting already
    // happened when the page was initially added, so it is skipped here.
    DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
  }
  DCHECK_EQ(Page::ConcurrentSweepingState::kPending,
            page->concurrent_sweeping_state());
  sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}

void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
#ifdef DEBUG
  DCHECK_GE(page->area_size(),
            static_cast<size_t>(marking_state_->live_bytes(page)));
  DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
            page->concurrent_sweeping_state());
  page->ForAllFreeListCategories([page](FreeListCategory* category) {
    DCHECK(!category->is_linked(page->owner()->free_list()));
  });
#endif  // DEBUG
  page->MoveOldToNewRememberedSetForSweeping();
  page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
  heap_->paged_space(space)->IncreaseAllocatedBytes(
      marking_state_->live_bytes(page), page);
}

Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
  base::MutexGuard guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  int space_index = GetSweepSpaceIndex(space);
  Page* page = nullptr;
  if (!sweeping_list_[space_index].empty()) {
    page = sweeping_list_[space_index].back();
    sweeping_list_[space_index].pop_back();
  }
  return page;
}

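// Makes all pages queued for the iterability task iterable on the calling
// thread, cancelling (or waiting for) a concurrently running iterability task
// first.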
void Sweeper::EnsureIterabilityCompleted() {
  if (!iterability_in_progress_) return;

  if (FLAG_concurrent_sweeping && iterability_task_started_) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(
            iterability_task_id_) != TryAbortResult::kTaskAborted) {
      iterability_task_semaphore_.Wait();
    }
    iterability_task_started_ = false;
  }

  for (Page* page : iterability_list_) {
    MakeIterable(page);
  }
  iterability_list_.clear();
  iterability_in_progress_ = false;
}

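// Background task that makes all pages in the iterability list iterable and
// signals completion through |pending_iterability_task_|.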
class Sweeper::IterabilityTask final : public CancelableTask {
 public:
  IterabilityTask(Isolate* isolate, Sweeper* sweeper,
                  base::Semaphore* pending_iterability_task)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_iterability_task_(pending_iterability_task),
        tracer_(isolate->heap()->tracer()) {}

  ~IterabilityTask() override = default;

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    for (Page* page : sweeper_->iterability_list_) {
      sweeper_->MakeIterable(page);
    }
    sweeper_->iterability_list_.clear();
    pending_iterability_task_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_iterability_task_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
};

void Sweeper::StartIterabilityTasks() {
  if (!iterability_in_progress_) return;

  DCHECK(!iterability_task_started_);
  if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
    auto task = std::make_unique<IterabilityTask>(heap_->isolate(), this,
                                                  &iterability_task_semaphore_);
    iterability_task_id_ = task->id();
    iterability_task_started_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  }
}

void Sweeper::AddPageForIterability(Page* page) {
  DCHECK(sweeping_in_progress_);
  DCHECK(iterability_in_progress_);
  DCHECK(!iterability_task_started_);
  DCHECK(IsValidIterabilitySpace(page->owner_identity()));
  DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
            page->concurrent_sweeping_state());

  iterability_list_.push_back(page);
  page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
}

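// Makes a page iterable by sweeping it without rebuilding the free list: dead
// regions are turned into filler objects so the page can be walked linearly.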
void Sweeper::MakeIterable(Page* page) {
  base::MutexGuard guard(page->mutex());
  DCHECK(IsValidIterabilitySpace(page->owner_identity()));
  const FreeSpaceTreatmentMode free_space_mode =
      Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
  RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
           FreeSpaceMayContainInvalidatedSlots::kNo, guard);
}

}  // namespace internal
}  // namespace v8