// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/cctest/heap/heap-utils.h"

#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/safepoint.h"
#include "test/cctest/cctest.h"

namespace v8 {
namespace internal {
namespace heap {

void InvokeScavenge(Isolate* isolate) {
  CcTest::CollectGarbage(i::NEW_SPACE, isolate);
}

void InvokeMarkSweep(Isolate* isolate) { CcTest::CollectAllGarbage(isolate); }

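// Runs two full GCs, finishes any pending sweeping, and marks all existing
// old-space pages as never-allocate, so that subsequent old-space allocations
// land on fresh pages.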
void SealCurrentObjects(Heap* heap) {
  // If you see this check failing, disable the flag at the start of your test:
  // FLAG_stress_concurrent_allocation = false;
  // Background threads allocating concurrently interfere with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  CcTest::CollectAllGarbage();
  CcTest::CollectAllGarbage();
  heap->mark_compact_collector()->EnsureSweepingCompleted();
  heap->old_space()->FreeLinearAllocationArea();
  for (Page* page : *heap->old_space()) {
    page->MarkNeverAllocateForTesting();
  }
}

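// Returns the largest FixedArray length that fits in `size` bytes, capped at
// FixedArray::kMaxRegularLength.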
int FixedArrayLenFromSize(int size) {
  return std::min({(size - FixedArray::kHeaderSize) / kTaggedSize,
                   FixedArray::kMaxRegularLength});
}

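// Fills one old-space page with 128-byte FixedArrays, leaving `remainder`
// bytes unused at the end of the page. Expects allocation to start on a
// fresh page and returns handles that keep the arrays alive.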
std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
                                                                int remainder) {
  PauseAllocationObserversScope pause_observers(heap);
  std::vector<Handle<FixedArray>> handles;
  Isolate* isolate = heap->isolate();
  const int kArraySize = 128;
  const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
  Handle<FixedArray> array;
  int allocated = 0;
  do {
    if (allocated + kArraySize * 2 >
        static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) {
      int size =
          kArraySize * 2 -
          ((allocated + kArraySize * 2) -
           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) -
          remainder;
      int last_array_len = heap::FixedArrayLenFromSize(size);
      array = isolate->factory()->NewFixedArray(last_array_len,
                                                AllocationType::kOld);
      CHECK_EQ(size, array->Size());
      allocated += array->Size() + remainder;
    } else {
      array =
          isolate->factory()->NewFixedArray(kArrayLen, AllocationType::kOld);
      allocated += array->Size();
      CHECK_EQ(kArraySize, array->Size());
    }
    if (handles.empty()) {
      // Check that allocations started on a new page.
      CHECK_EQ(array->address(), Page::FromHeapObject(*array)->area_start());
    }
    handles.push_back(array);
  } while (allocated <
           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()));
  return handles;
}

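// Allocates FixedArrays of at most `object_size` bytes each until
// `padding_size` bytes of the given space are used. If the remaining gap is
// too small for a FixedArray, it is closed with a filler object.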
std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
                                              AllocationType allocation,
                                              int object_size) {
  std::vector<Handle<FixedArray>> handles;
  Isolate* isolate = heap->isolate();
  int allocate_memory;
  int length;
  int free_memory = padding_size;
  if (allocation == i::AllocationType::kOld) {
    heap->old_space()->FreeLinearAllocationArea();
    int overall_free_memory = static_cast<int>(heap->old_space()->Available());
    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
  } else {
    int overall_free_memory = static_cast<int>(heap->new_space()->Available());
    CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
  }
  while (free_memory > 0) {
    if (free_memory > object_size) {
      allocate_memory = object_size;
      length = FixedArrayLenFromSize(allocate_memory);
    } else {
      allocate_memory = free_memory;
      length = FixedArrayLenFromSize(allocate_memory);
      if (length <= 0) {
        // Not enough room to create another FixedArray, so create a filler.
        if (allocation == i::AllocationType::kOld) {
          heap->CreateFillerObjectAt(
              *heap->old_space()->allocation_top_address(), free_memory,
              ClearRecordedSlots::kNo);
        } else {
          heap->CreateFillerObjectAt(
              *heap->new_space()->allocation_top_address(), free_memory,
              ClearRecordedSlots::kNo);
        }
        break;
      }
    }
    handles.push_back(isolate->factory()->NewFixedArray(length, allocation));
    CHECK((allocation == AllocationType::kYoung &&
           heap->new_space()->Contains(*handles.back())) ||
          (allocation == AllocationType::kOld &&
           heap->InOldSpace(*handles.back())) ||
          FLAG_single_generation);
    free_memory -= handles.back()->Size();
  }
  return handles;
}

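// Fills the rest of the current new-space page. Returns false if the page
// was already full.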
bool FillCurrentPage(v8::internal::NewSpace* space,
                     std::vector<Handle<FixedArray>>* out_handles) {
  return heap::FillCurrentPageButNBytes(space, 0, out_handles);
}

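// Fills the current new-space page, leaving `extra_bytes` of it unused.
// Returns false if there was nothing to allocate.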
bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes,
                              std::vector<Handle<FixedArray>>* out_handles) {
  PauseAllocationObserversScope pause_observers(space->heap());
  // We cannot rely on `space->limit()` to point to the end of the current page
  // when inline allocations are disabled; in that case it actually points to
  // the current allocation pointer.
  DCHECK_IMPLIES(space->heap()->inline_allocation_disabled(),
                 space->limit() == space->top());
  int space_remaining =
      static_cast<int>(space->to_space().page_high() - space->top());
  CHECK(space_remaining >= extra_bytes);
  int new_linear_size = space_remaining - extra_bytes;
  if (new_linear_size == 0) return false;
  std::vector<Handle<FixedArray>> handles = heap::CreatePadding(
      space->heap(), new_linear_size, i::AllocationType::kYoung);
  if (out_handles != nullptr) {
    out_handles->insert(out_handles->end(), handles.begin(), handles.end());
  }
  return true;
}

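// Fills up the whole new space: fills the current page, then keeps adding
// and filling fresh pages until no more pages can be added.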
void SimulateFullSpace(v8::internal::NewSpace* space,
                       std::vector<Handle<FixedArray>>* out_handles) {
  // If you see this check failing, disable the flag at the start of your test:
  // FLAG_stress_concurrent_allocation = false;
  // Background threads allocating concurrently interfere with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  while (heap::FillCurrentPage(space, out_handles) || space->AddFreshPage()) {
  }
}

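// Starts incremental marking if it is not running already and, if
// `force_completion` is true, steps it until marking is complete.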
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
  const double kStepSizeInMs = 100;
  CHECK(FLAG_incremental_marking);
  i::IncrementalMarking* marking = heap->incremental_marking();
  i::MarkCompactCollector* collector = heap->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    SafepointScope scope(heap);
    collector->EnsureSweepingCompleted();
  }
  CHECK(marking->IsMarking() || marking->IsStopped() || marking->IsComplete());
  if (marking->IsStopped()) {
    heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
                                  i::GarbageCollectionReason::kTesting);
  }
  CHECK(marking->IsMarking() || marking->IsComplete());
  if (!force_completion) return;

  while (!marking->IsComplete()) {
    marking->Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                  i::StepOrigin::kV8);
    if (marking->IsReadyToOverApproximateWeakClosure()) {
      SafepointScope scope(heap);
      marking->FinalizeIncrementally();
    }
  }
  CHECK(marking->IsComplete());
}

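// Simulates a full paged space by freeing its linear allocation area and
// resetting its free list, so allocations cannot reuse existing free memory.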
void SimulateFullSpace(v8::internal::PagedSpace* space) {
  // If you see this check failing, disable the flag at the start of your test:
  // FLAG_stress_concurrent_allocation = false;
  // Background threads allocating concurrently interfere with this function.
  CHECK(!FLAG_stress_concurrent_allocation);
  CodeSpaceMemoryModificationScope modification_scope(space->heap());
  i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->EnsureSweepingCompleted();
  }
  space->FreeLinearAllocationArea();
  space->ResetFreeList();
}

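// Makes all currently free memory in the space unavailable by marking its
// pages as never-allocate.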
void AbandonCurrentlyFreeMemory(PagedSpace* space) {
  space->FreeLinearAllocationArea();
  for (Page* page : *space) {
    page->MarkNeverAllocateForTesting();
  }
}

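// Collects garbage in the given space and waits for sweeping to finish.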
void GcAndSweep(Heap* heap, AllocationSpace space) {
  heap->CollectGarbage(space, GarbageCollectionReason::kTesting);
  if (heap->mark_compact_collector()->sweeping_in_progress()) {
    SafepointScope scope(heap);
    heap->mark_compact_collector()->EnsureSweepingCompleted();
  }
}

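// Forces the page to become an evacuation candidate (requires
// FLAG_manual_evacuation_candidates_selection). If the page contains the
// current linear allocation area, the area is closed with a filler so the
// page stays iterable.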
void ForceEvacuationCandidate(Page* page) {
  CHECK(FLAG_manual_evacuation_candidates_selection);
  page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
  PagedSpace* space = static_cast<PagedSpace*>(page->owner());
  DCHECK_NOT_NULL(space);
  Address top = space->top();
  Address limit = space->limit();
  if (top < limit && Page::FromAllocationAreaAddress(top) == page) {
    // Create filler object to keep page iterable if it was iterable.
    int remaining = static_cast<int>(limit - top);
    space->heap()->CreateFillerObjectAt(top, remaining,
                                        ClearRecordedSlots::kNo);
    base::MutexGuard guard(space->mutex());
    space->FreeLinearAllocationArea();
  }
}

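// Returns true if the object is in the generation that freshly allocated
// objects are expected to be in: the young generation normally, the old
// generation when FLAG_single_generation is set.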
bool InCorrectGeneration(HeapObject object) {
  return FLAG_single_generation ? !i::Heap::InYoungGeneration(object)
                                : i::Heap::InYoungGeneration(object);
}

void GrowNewSpace(Heap* heap) {
  SafepointScope scope(heap);
  if (!heap->new_space()->IsAtMaximumCapacity()) {
    heap->new_space()->Grow();
  }
}

void GrowNewSpaceToMaximumCapacity(Heap* heap) {
  SafepointScope scope(heap);
  while (!heap->new_space()->IsAtMaximumCapacity()) {
    heap->new_space()->Grow();
  }
}

}  // namespace heap
}  // namespace internal
}  // namespace v8