// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/sweeper.h"

#include "src/base/template-utils.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects-inl.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {

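// Pauses the concurrent sweeper tasks or, if there is nothing left to sweep,
// completes sweeping altogether. While the scope is active, sweeping stays
// logically enabled but cannot use background tasks.
//
// Typical use (a sketch; the actual call sites live in the mark-compact
// collector):
//   Sweeper::PauseOrCompleteScope pause_scope(sweeper);
//   Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);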
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
    : sweeper_(sweeper) {
  sweeper_->stop_sweeper_tasks_.SetValue(true);
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->AbortAndWaitForTasks();

  // Complete sweeping if there's nothing more to do.
  if (sweeper_->IsDoneSweeping()) {
    sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
    DCHECK(!sweeper_->sweeping_in_progress());
  } else {
    // Unless sweeping is complete, the flag still indicates that the sweeper
    // is enabled. It just cannot use tasks anymore.
    DCHECK(sweeper_->sweeping_in_progress());
  }
}

Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
  sweeper_->stop_sweeper_tasks_.SetValue(false);
  if (!sweeper_->sweeping_in_progress()) return;

  sweeper_->StartSweeperTasks();
}

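// Temporarily moves the old-space sweeping list out of the sweeper so that
// the caller can filter the queued pages; the destructor moves the (possibly
// filtered) list back. Only valid while sweeper tasks are paused via a
// PauseOrCompleteScope.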
Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
    Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
    : sweeper_(sweeper),
      pause_or_complete_scope_(pause_or_complete_scope),
      sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
  USE(pause_or_complete_scope_);
  if (!sweeping_in_progress_) return;

  int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
  old_space_sweeping_list_ =
      std::move(sweeper_->sweeping_list_[old_space_index]);
  sweeper_->sweeping_list_[old_space_index].clear();
}

Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
  DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
  if (!sweeping_in_progress_) return;

  sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
      std::move(old_space_sweeping_list_);
  // old_space_sweeping_list_ does not need to be cleared as we don't use it.
}

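// Background task that sweeps the growable paged spaces. Each task starts at
// a different space (space_to_start_) and cycles through the remaining ones,
// so concurrently running tasks tend to drain different sweeping lists.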
class Sweeper::SweeperTask final : public CancelableTask {
 public:
  SweeperTask(Isolate* isolate, Sweeper* sweeper,
              base::Semaphore* pending_sweeper_tasks,
              std::atomic<intptr_t>* num_sweeping_tasks,
              AllocationSpace space_to_start)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        num_sweeping_tasks_(num_sweeping_tasks),
        space_to_start_(space_to_start),
        tracer_(isolate->heap()->tracer()) {}

  virtual ~SweeperTask() {}

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    DCHECK(IsValidSweepingSpace(space_to_start_));
    const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
    for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
      const AllocationSpace space_id = static_cast<AllocationSpace>(
          FIRST_GROWABLE_PAGED_SPACE +
          ((i + offset) % kNumberOfSweepingSpaces));
      // Do not sweep code space concurrently.
      if (space_id == CODE_SPACE) continue;
      DCHECK(IsValidSweepingSpace(space_id));
      sweeper_->SweepSpaceFromTask(space_id);
    }
    (*num_sweeping_tasks_)--;
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_sweeper_tasks_;
  std::atomic<intptr_t>* const num_sweeping_tasks_;
  AllocationSpace space_to_start_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};

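// Foreground task for code space, which is excluded from concurrent sweeping
// (see SweeperTask::RunInternal). It sweeps one code-space page per run and
// reschedules itself until the code-space sweeping list is empty.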
class Sweeper::IncrementalSweeperTask final : public CancelableTask {
 public:
  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
      : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}

  virtual ~IncrementalSweeperTask() {}

 private:
  void RunInternal() final {
    VMState<GC> state(isolate_);
    TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");

    sweeper_->incremental_sweeper_pending_ = false;

    if (sweeper_->sweeping_in_progress()) {
      if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
        sweeper_->ScheduleIncrementalSweepingTask();
      }
    }
  }

  Isolate* const isolate_;
  Sweeper* const sweeper_;
  DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
};

void Sweeper::StartSweeping() {
  CHECK(!stop_sweeper_tasks_.Value());
  sweeping_in_progress_ = true;
  iterability_in_progress_ = true;
  MajorNonAtomicMarkingState* marking_state =
      heap_->mark_compact_collector()->non_atomic_marking_state();
  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
    int space_index = GetSweepSpaceIndex(space);
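    // Sort by ascending live bytes: pages with the least live memory (and
    // thus the most reclaimable space) are swept first.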
    std::sort(sweeping_list_[space_index].begin(),
              sweeping_list_[space_index].end(),
              [marking_state](Page* a, Page* b) {
                return marking_state->live_bytes(a) <
                       marking_state->live_bytes(b);
              });
  });
}

void Sweeper::StartSweeperTasks() {
  DCHECK_EQ(0, num_tasks_);
  DCHECK_EQ(0, num_sweeping_tasks_);
  if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
      !heap_->delay_sweeper_tasks_for_testing_) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      DCHECK(IsValidSweepingSpace(space));
      num_sweeping_tasks_++;
      auto task = base::make_unique<SweeperTask>(
          heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
          &num_sweeping_tasks_, space);
      DCHECK_LT(num_tasks_, kMaxSweeperTasks);
      task_ids_[num_tasks_++] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    });
    ScheduleIncrementalSweepingTask();
  }
}

void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
  if (!page->SweepingDone()) {
    ParallelSweepPage(page, page->owner()->identity());
    if (!page->SweepingDone()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}

Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
  if (!list.empty()) {
    auto last_page = list.back();
    list.pop_back();
    return last_page;
  }
  return nullptr;
}

void Sweeper::AbortAndWaitForTasks() {
  if (!FLAG_concurrent_sweeping) return;

  for (int i = 0; i < num_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        CancelableTaskManager::kTaskAborted) {
      pending_sweeper_tasks_semaphore_.Wait();
    } else {
      // Aborted case.
      num_sweeping_tasks_--;
    }
  }
  num_tasks_ = 0;
  DCHECK_EQ(0, num_sweeping_tasks_);
}

void Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  EnsureIterabilityCompleted();

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  ForAllSweepingSpaces(
      [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });

  AbortAndWaitForTasks();

  ForAllSweepingSpaces([this](AllocationSpace space) {
    CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
  });
  sweeping_in_progress_ = false;
}

bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }

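// The core sweeping routine: walks the marked (black) objects on the page in
// address order and reclaims the gaps between them, either by rebuilding the
// free list or by installing filler objects. Recorded slots in freed ranges
// are removed along the way, and code pages get their skip list rebuilt.
// Returns the largest freed block, rounded down to the guaranteed-allocatable
// size, or 0 if the free list is ignored.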
int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
                      FreeSpaceTreatmentMode free_space_mode) {
  Space* space = p->owner();
  DCHECK_NOT_NULL(space);
  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
         space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

  // TODO(ulan): we don't have to clear typed old-to-old slots in code space
  // because the concurrent marker doesn't mark code objects. This requires
  // the write barrier for code objects to check the color of the code object.
  bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
                               p->typed_slot_set<OLD_TO_OLD>() != nullptr;

  // The free ranges map is used for filtering typed slots.
  std::map<uint32_t, uint32_t> free_ranges;

  // Before we sweep objects on the page, we free dead array buffers, which
  // requires valid mark bits.
  ArrayBufferTracker::FreeDead(p, marking_state_);

  Address free_start = p->area_start();
  DCHECK_EQ(0, free_start % (32 * kPointerSize));

  // If we use the skip list for code space pages, we have to lock the skip
  // list because it could be accessed concurrently by the runtime or the
  // deoptimizer.
  const bool rebuild_skip_list =
      space->identity() == CODE_SPACE && p->skip_list() != nullptr;
  SkipList* skip_list = p->skip_list();
  if (rebuild_skip_list) {
    skip_list->Clear();
  }

  intptr_t live_bytes = 0;
  intptr_t freed_bytes = 0;
  intptr_t max_freed_bytes = 0;
  int curr_region = -1;

  // Set the allocated_bytes counter to area_size. The free operations below
  // will decrease the counter to the actual live bytes.
  p->ResetAllocatedBytes();
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state_->IsBlack(object));
    Address free_end = object->address();
    if (free_end != free_start) {
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      if (free_space_mode == ZAP_FREE_SPACE) {
        memset(reinterpret_cast<void*>(free_start), 0xCC, size);
      }
      if (free_list_mode == REBUILD_FREE_LIST) {
        freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
            free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
      } else {
        p->heap()->CreateFillerObjectAt(
            free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
            ClearFreedMemoryMode::kClearFreedMemory);
      }
      RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
                                             SlotSet::KEEP_EMPTY_BUCKETS);
      if (non_empty_typed_slots) {
        free_ranges.insert(std::pair<uint32_t, uint32_t>(
            static_cast<uint32_t>(free_start - p->address()),
            static_cast<uint32_t>(free_end - p->address())));
      }
    }
    Map* map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    live_bytes += size;
    if (rebuild_skip_list) {
      int new_region_start = SkipList::RegionNumber(free_end);
      int new_region_end =
          SkipList::RegionNumber(free_end + size - kPointerSize);
      if (new_region_start != curr_region || new_region_end != curr_region) {
        skip_list->AddObject(free_end, size);
        curr_region = new_region_end;
      }
    }
    free_start = free_end + size;
  }

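  // Reclaim the free space between the last live object and the page end.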
  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    if (free_space_mode == ZAP_FREE_SPACE) {
      memset(reinterpret_cast<void*>(free_start), 0xCC, size);
    }
    if (free_list_mode == REBUILD_FREE_LIST) {
      freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
          free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    } else {
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo,
                                      ClearFreedMemoryMode::kClearFreedMemory);
    }

    RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
                                           SlotSet::KEEP_EMPTY_BUCKETS);
    if (non_empty_typed_slots) {
      free_ranges.insert(std::pair<uint32_t, uint32_t>(
          static_cast<uint32_t>(free_start - p->address()),
          static_cast<uint32_t>(p->area_end() - p->address())));
    }
  }

  // Clear invalid typed slots after collecting all free ranges.
  if (!free_ranges.empty()) {
    TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
    if (old_to_new != nullptr) {
      old_to_new->RemoveInvaldSlots(free_ranges);
    }
    TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
    if (old_to_old != nullptr) {
      old_to_old->RemoveInvaldSlots(free_ranges);
    }
  }

  marking_state_->bitmap(p)->Clear();
  if (free_list_mode == IGNORE_FREE_LIST) {
    marking_state_->SetLiveBytes(p, 0);
    // We did not free memory, so we have to adjust allocated bytes here.
    intptr_t freed_bytes = p->area_size() - live_bytes;
    p->DecreaseAllocatedBytes(freed_bytes);
  } else {
    // Keep the old live bytes counter of the page until RefillFreeList, where
    // the space size is refined.
    // The allocated_bytes() counter is precisely the total size of objects.
    DCHECK_EQ(live_bytes, p->allocated_bytes());
  }
  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
  if (free_list_mode == IGNORE_FREE_LIST) return 0;
  return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}

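// Drains the sweeping list of the given space from a background task, bailing
// out as soon as a PauseOrCompleteScope asks the tasks to stop.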
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
  Page* page = nullptr;
  while (!stop_sweeper_tasks_.Value() &&
         ((page = GetSweepingPageSafe(identity)) != nullptr)) {
    ParallelSweepPage(page, identity);
  }
}

bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
  if (Page* page = GetSweepingPageSafe(identity)) {
    ParallelSweepPage(page, identity);
  }
  return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}

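// Sweeps pages of |identity| until a freed block of at least
// |required_freed_bytes| has been found, or until |max_pages| pages have been
// swept; a value of 0 disables the respective limit. Returns the largest
// freed block encountered.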
int Sweeper::ParallelSweepSpace(AllocationSpace identity,
                                int required_freed_bytes, int max_pages) {
  int max_freed = 0;
  int pages_freed = 0;
  Page* page = nullptr;
  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    int freed = ParallelSweepPage(page, identity);
    pages_freed += 1;
    DCHECK_GE(freed, 0);
    max_freed = Max(max_freed, freed);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
      return max_freed;
    if (max_pages > 0 && pages_freed >= max_pages) return max_freed;
  }
  return max_freed;
}

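// Sweeps a single page. The page mutex serializes background sweeper tasks
// and main-thread callers: whoever acquires the page first sweeps it, and
// everyone else observes SweepingDone() and returns early.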
int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
  // Early bailout for pages that are swept outside of the regular sweeping
  // path. Checking before taking the lock also avoids deadlocks.
  if (page->SweepingDone()) return 0;

  DCHECK(IsValidSweepingSpace(identity));
  int max_freed = 0;
  {
    base::LockGuard<base::Mutex> guard(page->mutex());
    // If this page was already swept in the meantime, we can return here.
    if (page->SweepingDone()) return 0;

    // If the page is a code page, the CodePageMemoryModificationScope changes
    // the page protection mode from rx -> rw while sweeping.
    CodePageMemoryModificationScope code_page_scope(page);

    DCHECK_EQ(Page::kSweepingPending,
              page->concurrent_sweeping_state().Value());
    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    const FreeSpaceTreatmentMode free_space_mode =
        Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
    max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
    DCHECK(page->SweepingDone());

    // After finishing sweeping of a page we clean up its remembered set.
    TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
    if (typed_slot_set) {
      typed_slot_set->FreeToBeFreedChunks();
    }
    SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
    if (slot_set) {
      slot_set->FreeToBeFreedBuckets();
    }
  }

  {
    base::LockGuard<base::Mutex> guard(&mutex_);
    swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
  }
  return max_freed;
}

void Sweeper::ScheduleIncrementalSweepingTask() {
  if (!incremental_sweeper_pending_) {
    incremental_sweeper_pending_ = true;
    IncrementalSweeperTask* task =
        new IncrementalSweeperTask(heap_->isolate(), this);
    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
    V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
  }
}

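// Queues a page for sweeping. REGULAR pages are accounted for here;
// READD_TEMPORARY_REMOVED_PAGE re-adds a page that was temporarily removed
// from the sweeper (e.g. by FilterSweepingPagesScope), so accounting is
// skipped.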
void Sweeper::AddPage(AllocationSpace space, Page* page,
                      Sweeper::AddPageMode mode) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
  if (mode == Sweeper::REGULAR) {
    PrepareToBeSweptPage(space, page);
  } else {
    // Page has been temporarily removed from the sweeper. Accounting already
    // happened when the page was initially added, so it is skipped here.
    DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
  }
  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
  sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}

void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
  DCHECK_GE(page->area_size(),
            static_cast<size_t>(marking_state_->live_bytes(page)));
  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());
  page->ForAllFreeListCategories(
      [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
  heap_->paged_space(space)->IncreaseAllocatedBytes(
      marking_state_->live_bytes(page), page);
}

Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  DCHECK(IsValidSweepingSpace(space));
  int space_index = GetSweepSpaceIndex(space);
  Page* page = nullptr;
  if (!sweeping_list_[space_index].empty()) {
    page = sweeping_list_[space_index].front();
    sweeping_list_[space_index].pop_front();
  }
  return page;
}

void Sweeper::EnsurePageIsIterable(Page* page) {
  AllocationSpace space = page->owner()->identity();
  if (IsValidSweepingSpace(space)) {
    SweepOrWaitUntilSweepingCompleted(page);
  } else {
    DCHECK(IsValidIterabilitySpace(space));
    EnsureIterabilityCompleted();
  }
}

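// Makes every page queued for iterability iterable on the main thread, first
// aborting (or waiting for) a still-running background iterability task.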
void Sweeper::EnsureIterabilityCompleted() {
  if (!iterability_in_progress_) return;

  if (FLAG_concurrent_sweeping && iterability_task_started_) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(
            iterability_task_id_) != CancelableTaskManager::kTaskAborted) {
      iterability_task_semaphore_.Wait();
    }
    iterability_task_started_ = false;
  }

  for (Page* page : iterability_list_) {
    MakeIterable(page);
  }
  iterability_list_.clear();
  iterability_in_progress_ = false;
}

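// Background task that makes the queued pages iterable, mirroring what
// EnsureIterabilityCompleted does on the main thread.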
class Sweeper::IterabilityTask final : public CancelableTask {
 public:
  IterabilityTask(Isolate* isolate, Sweeper* sweeper,
                  base::Semaphore* pending_iterability_task)
      : CancelableTask(isolate),
        sweeper_(sweeper),
        pending_iterability_task_(pending_iterability_task),
        tracer_(isolate->heap()->tracer()) {}

  virtual ~IterabilityTask() {}

 private:
  void RunInternal() final {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
    for (Page* page : sweeper_->iterability_list_) {
      sweeper_->MakeIterable(page);
    }
    sweeper_->iterability_list_.clear();
    pending_iterability_task_->Signal();
  }

  Sweeper* const sweeper_;
  base::Semaphore* const pending_iterability_task_;
  GCTracer* const tracer_;

  DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
};

void Sweeper::StartIterabilityTasks() {
  if (!iterability_in_progress_) return;

  DCHECK(!iterability_task_started_);
  if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
    auto task = base::make_unique<IterabilityTask>(
        heap_->isolate(), this, &iterability_task_semaphore_);
    iterability_task_id_ = task->id();
    iterability_task_started_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  }
}

void Sweeper::AddPageForIterability(Page* page) {
  DCHECK(sweeping_in_progress_);
  DCHECK(iterability_in_progress_);
  DCHECK(!iterability_task_started_);
  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state().Value());

  iterability_list_.push_back(page);
  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
}

void Sweeper::MakeIterable(Page* page) {
  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
  const FreeSpaceTreatmentMode free_space_mode =
      Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
  RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
}

}  // namespace internal
}  // namespace v8