1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/spaces.h"
6
7 #include <utility>
8
9 #include "src/base/bits.h"
10 #include "src/base/macros.h"
11 #include "src/base/platform/semaphore.h"
12 #include "src/base/template-utils.h"
13 #include "src/counters.h"
14 #include "src/heap/array-buffer-tracker.h"
15 #include "src/heap/concurrent-marking.h"
16 #include "src/heap/gc-tracer.h"
17 #include "src/heap/incremental-marking.h"
18 #include "src/heap/mark-compact.h"
19 #include "src/heap/slot-set.h"
20 #include "src/heap/sweeper.h"
21 #include "src/msan.h"
22 #include "src/objects-inl.h"
23 #include "src/snapshot/snapshot.h"
24 #include "src/v8.h"
25 #include "src/vm-state-inl.h"
26
27 namespace v8 {
28 namespace internal {
29
30 // ----------------------------------------------------------------------------
31 // HeapObjectIterator
32
33 HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
34 : cur_addr_(kNullAddress),
35 cur_end_(kNullAddress),
36 space_(space),
37 page_range_(space->anchor()->next_page(), space->anchor()),
38 current_page_(page_range_.begin()) {}
39
40 HeapObjectIterator::HeapObjectIterator(Page* page)
41 : cur_addr_(kNullAddress),
42 cur_end_(kNullAddress),
43 space_(reinterpret_cast<PagedSpace*>(page->owner())),
44 page_range_(page),
45 current_page_(page_range_.begin()) {
46 #ifdef DEBUG
47 Space* owner = page->owner();
48 DCHECK(owner == page->heap()->old_space() ||
49 owner == page->heap()->map_space() ||
50 owner == page->heap()->code_space() ||
51 owner == page->heap()->read_only_space());
52 #endif // DEBUG
53 }
54
55 // We have hit the end of the current page's object area and should advance
56 // to the next page of the space.
57 bool HeapObjectIterator::AdvanceToNextPage() {
58 DCHECK_EQ(cur_addr_, cur_end_);
59 if (current_page_ == page_range_.end()) return false;
60 Page* cur_page = *(current_page_++);
61 Heap* heap = space_->heap();
62
63 heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
64 #ifdef ENABLE_MINOR_MC
65 if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
66 heap->minor_mark_compact_collector()->MakeIterable(
67 cur_page, MarkingTreatmentMode::CLEAR,
68 FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
69 #else
70 DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
71 #endif // ENABLE_MINOR_MC
72 cur_addr_ = cur_page->area_start();
73 cur_end_ = cur_page->area_end();
74 DCHECK(cur_page->SweepingDone());
75 return true;
76 }
77
78 PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
79 : heap_(heap) {
80 DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
81
82 for (SpaceIterator it(heap_); it.has_next();) {
83 it.next()->PauseAllocationObservers();
84 }
85 }
86
87 PauseAllocationObserversScope::~PauseAllocationObserversScope() {
88 for (SpaceIterator it(heap_); it.has_next();) {
89 it.next()->ResumeAllocationObservers();
90 }
91 }
92
93 // -----------------------------------------------------------------------------
94 // CodeRange
95
96 CodeRange::CodeRange(Isolate* isolate)
97 : isolate_(isolate),
98 free_list_(0),
99 allocation_list_(0),
100 current_allocation_block_index_(0) {}
101
102 bool CodeRange::SetUp(size_t requested) {
103 DCHECK(!virtual_memory_.IsReserved());
104
105 if (requested == 0) {
106 // When a target requires the code range feature, we put all code objects
107 // in a kMaximalCodeRangeSize range of virtual address space, so that
108 // they can call each other with near calls.
109 if (kRequiresCodeRange) {
110 requested = kMaximalCodeRangeSize;
111 } else {
112 return true;
113 }
114 }
115
116 if (requested <= kMinimumCodeRangeSize) {
117 requested = kMinimumCodeRangeSize;
118 }
119
120 const size_t reserved_area =
121 kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
122 if (requested < (kMaximalCodeRangeSize - reserved_area))
123 requested += reserved_area;
124
125 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
126
127 VirtualMemory reservation;
128 if (!AlignedAllocVirtualMemory(
129 requested, Max(kCodeRangeAreaAlignment, AllocatePageSize()),
130 GetRandomMmapAddr(), &reservation)) {
131 return false;
132 }
133
134 // We are sure that we have mapped a block of requested addresses.
135 DCHECK_GE(reservation.size(), requested);
136 Address base = reservation.address();
137
138 // On some platforms, specifically Win64, we need to reserve some pages at
139 // the beginning of an executable space.
140 if (reserved_area > 0) {
141 if (!reservation.SetPermissions(base, reserved_area,
142 PageAllocator::kReadWrite))
143 return false;
144
145 base += reserved_area;
146 }
147 Address aligned_base = ::RoundUp(base, MemoryChunk::kAlignment);
148 size_t size = reservation.size() - (aligned_base - base) - reserved_area;
149 allocation_list_.emplace_back(aligned_base, size);
150 current_allocation_block_index_ = 0;
151
152 LOG(isolate_,
153 NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
154 requested));
155 virtual_memory_.TakeControl(&reservation);
156 return true;
157 }
158
159 bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
160 const FreeBlock& right) {
161 return left.start < right.start;
162 }
163
164
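// Advances current_allocation_block_index_ to a block of at least |requested|
// bytes. If no such block remains, the free list and the allocation list are
// merged, adjacent blocks are coalesced, and the search is retried; returns
// false only when the code range is exhausted or too fragmented.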
165 bool CodeRange::GetNextAllocationBlock(size_t requested) {
166 for (current_allocation_block_index_++;
167 current_allocation_block_index_ < allocation_list_.size();
168 current_allocation_block_index_++) {
169 if (requested <= allocation_list_[current_allocation_block_index_].size) {
170 return true; // Found a large enough allocation block.
171 }
172 }
173
174 // Sort and merge the free blocks on the free list and the allocation list.
175 free_list_.insert(free_list_.end(), allocation_list_.begin(),
176 allocation_list_.end());
177 allocation_list_.clear();
178 std::sort(free_list_.begin(), free_list_.end(), &CompareFreeBlockAddress);
179 for (size_t i = 0; i < free_list_.size();) {
180 FreeBlock merged = free_list_[i];
181 i++;
182 // Add adjacent free blocks to the current merged block.
183 while (i < free_list_.size() &&
184 free_list_[i].start == merged.start + merged.size) {
185 merged.size += free_list_[i].size;
186 i++;
187 }
188 if (merged.size > 0) {
189 allocation_list_.push_back(merged);
190 }
191 }
192 free_list_.clear();
193
194 for (current_allocation_block_index_ = 0;
195 current_allocation_block_index_ < allocation_list_.size();
196 current_allocation_block_index_++) {
197 if (requested <= allocation_list_[current_allocation_block_index_].size) {
198 return true; // Found a large enough allocation block.
199 }
200 }
201 current_allocation_block_index_ = 0;
202 // Code range is full or too fragmented.
203 return false;
204 }
205
206
207 Address CodeRange::AllocateRawMemory(const size_t requested_size,
208 const size_t commit_size,
209 size_t* allocated) {
210 // requested_size includes the header and both guard regions, while
211 // commit_size covers only the header and the area to be committed.
212 DCHECK_LE(commit_size,
213 requested_size - 2 * MemoryAllocator::CodePageGuardSize());
214 FreeBlock current;
215 if (!ReserveBlock(requested_size, &current)) {
216 *allocated = 0;
217 return kNullAddress;
218 }
219 *allocated = current.size;
220 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
221 if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
222 &virtual_memory_, current.start, commit_size, *allocated)) {
223 *allocated = 0;
224 ReleaseBlock(&current);
225 return kNullAddress;
226 }
227 return current.start;
228 }
229
230 void CodeRange::FreeRawMemory(Address address, size_t length) {
231 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
232 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
233 free_list_.emplace_back(address, length);
234 virtual_memory_.SetPermissions(address, length, PageAllocator::kNoAccess);
235 }
236
237 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
238 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
239 DCHECK(allocation_list_.empty() ||
240 current_allocation_block_index_ < allocation_list_.size());
241 if (allocation_list_.empty() ||
242 requested_size > allocation_list_[current_allocation_block_index_].size) {
243 // Find an allocation block large enough.
244 if (!GetNextAllocationBlock(requested_size)) return false;
245 }
246 // Commit the requested memory at the start of the current allocation block.
247 size_t aligned_requested = ::RoundUp(requested_size, MemoryChunk::kAlignment);
248 *block = allocation_list_[current_allocation_block_index_];
249 // Don't leave a small free block, useless for a large object or chunk.
250 if (aligned_requested < (block->size - Page::kPageSize)) {
251 block->size = aligned_requested;
252 }
253 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
254 allocation_list_[current_allocation_block_index_].start += block->size;
255 allocation_list_[current_allocation_block_index_].size -= block->size;
256 return true;
257 }
258
259
260 void CodeRange::ReleaseBlock(const FreeBlock* block) {
261 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
262 free_list_.push_back(*block);
263 }
264
265
266 // -----------------------------------------------------------------------------
267 // MemoryAllocator
268 //
269
270 MemoryAllocator::MemoryAllocator(Isolate* isolate)
271 : isolate_(isolate),
272 code_range_(nullptr),
273 capacity_(0),
274 size_(0),
275 size_executable_(0),
276 lowest_ever_allocated_(static_cast<Address>(-1ll)),
277 highest_ever_allocated_(kNullAddress),
278 unmapper_(isolate->heap(), this) {}
279
280 bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
281 capacity_ = ::RoundUp(capacity, Page::kPageSize);
282
283 size_ = 0;
284 size_executable_ = 0;
285
286 code_range_ = new CodeRange(isolate_);
287 if (!code_range_->SetUp(code_range_size)) return false;
288
289 return true;
290 }
291
292
293 void MemoryAllocator::TearDown() {
294 unmapper()->TearDown();
295
296 // Check that spaces were torn down before MemoryAllocator.
297 DCHECK_EQ(size_.Value(), 0u);
298 // TODO(gc) this will be true again when we fix FreeMemory.
299 // DCHECK_EQ(0, size_executable_);
300 capacity_ = 0;
301
302 if (last_chunk_.IsReserved()) {
303 last_chunk_.Free();
304 }
305
306 delete code_range_;
307 code_range_ = nullptr;
308 }
309
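// Background task that drains the Unmapper's chunk queues off the main thread.
// When it finishes it decrements the active-task counter and signals the
// semaphore that CancelAndWaitForPendingTasks() waits on.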
310 class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
311 public:
312 explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
313 : CancelableTask(isolate),
314 unmapper_(unmapper),
315 tracer_(isolate->heap()->tracer()) {}
316
317 private:
318 void RunInternal() override {
319 TRACE_BACKGROUND_GC(tracer_,
320 GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
321 unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
322 unmapper_->active_unmapping_tasks_.Decrement(1);
323 unmapper_->pending_unmapping_tasks_semaphore_.Signal();
324 if (FLAG_trace_unmapper) {
325 PrintIsolate(unmapper_->heap_->isolate(),
326 "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
327 }
328 }
329
330 Unmapper* const unmapper_;
331 GCTracer* const tracer_;
332 DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
333 };
334
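// Either spawns a background UnmapFreeMemoryTask (when concurrent sweeping is
// enabled, the heap is not tearing down, and the task limit has not been
// reached) or frees the queued chunks synchronously on the current thread.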
335 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
336 if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
337 if (!MakeRoomForNewTasks()) {
338 // kMaxUnmapperTasks are already running. Avoid creating any more.
339 if (FLAG_trace_unmapper) {
340 PrintIsolate(heap_->isolate(),
341 "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
342 kMaxUnmapperTasks);
343 }
344 return;
345 }
346 auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
347 if (FLAG_trace_unmapper) {
348 PrintIsolate(heap_->isolate(),
349 "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
350 task->id());
351 }
352 DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
353 DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
354 DCHECK_GE(active_unmapping_tasks_.Value(), 0);
355 active_unmapping_tasks_.Increment(1);
356 task_ids_[pending_unmapping_tasks_++] = task->id();
357 V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
358 } else {
359 PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
360 }
361 }
362
363 void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
364 for (int i = 0; i < pending_unmapping_tasks_; i++) {
365 if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
366 CancelableTaskManager::kTaskAborted) {
367 pending_unmapping_tasks_semaphore_.Wait();
368 }
369 }
370 pending_unmapping_tasks_ = 0;
371 active_unmapping_tasks_.SetValue(0);
372
373 if (FLAG_trace_unmapper) {
374 PrintIsolate(
375 heap_->isolate(),
376 "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
377 }
378 }
379
380 void MemoryAllocator::Unmapper::PrepareForMarkCompact() {
381 CancelAndWaitForPendingTasks();
382 // Free non-regular chunks because they cannot be reused.
383 PerformFreeMemoryOnQueuedNonRegularChunks();
384 }
385
386 void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
387 CancelAndWaitForPendingTasks();
388 PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
389 }
390
391 bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
392 DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
393
394 if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
395 // All previous unmapping tasks have been run to completion.
396 // Finalize those tasks to make room for new ones.
397 CancelAndWaitForPendingTasks();
398 }
399 return pending_unmapping_tasks_ != kMaxUnmapperTasks;
400 }
401
402 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
403 MemoryChunk* chunk = nullptr;
404 while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
405 allocator_->PerformFreeMemory(chunk);
406 }
407 }
408
409 template <MemoryAllocator::Unmapper::FreeMode mode>
410 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
411 MemoryChunk* chunk = nullptr;
412 if (FLAG_trace_unmapper) {
413 PrintIsolate(
414 heap_->isolate(),
415 "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
416 NumberOfChunks());
417 }
418 // Regular chunks.
419 while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
420 bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
421 allocator_->PerformFreeMemory(chunk);
422 if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
423 }
424 if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
425 // The previous loop uncommitted any pages marked as pooled and added them
426 // to the pooled list. For kReleasePooled those pages must additionally be
427 // released back to the OS.
428 while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
429 allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
430 }
431 }
432 PerformFreeMemoryOnQueuedNonRegularChunks();
433 }
434
435 void MemoryAllocator::Unmapper::TearDown() {
436 CHECK_EQ(0, pending_unmapping_tasks_);
437 PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
438 for (int i = 0; i < kNumberOfChunkQueues; i++) {
439 DCHECK(chunks_[i].empty());
440 }
441 }
442
443 int MemoryAllocator::Unmapper::NumberOfChunks() {
444 base::LockGuard<base::Mutex> guard(&mutex_);
445 size_t result = 0;
446 for (int i = 0; i < kNumberOfChunkQueues; i++) {
447 result += chunks_[i].size();
448 }
449 return static_cast<int>(result);
450 }
451
452 bool MemoryAllocator::CommitMemory(Address base, size_t size) {
453 if (!SetPermissions(base, size, PageAllocator::kReadWrite)) {
454 return false;
455 }
456 UpdateAllocatedSpaceLimits(base, base + size);
457 return true;
458 }
459
460 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
461 Executability executable) {
462 // TODO(gc) make code_range part of memory allocator?
463 // Code which is part of the code-range does not have its own VirtualMemory.
464 DCHECK(code_range() == nullptr ||
465 !code_range()->contains(reservation->address()));
466 DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
467 reservation->size() <= Page::kPageSize);
468
469 reservation->Free();
470 }
471
472
473 void MemoryAllocator::FreeMemory(Address base, size_t size,
474 Executability executable) {
475 // TODO(gc) make code_range part of memory allocator?
476 if (code_range() != nullptr && code_range()->contains(base)) {
477 DCHECK(executable == EXECUTABLE);
478 code_range()->FreeRawMemory(base, size);
479 } else {
480 DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
481 CHECK(FreePages(reinterpret_cast<void*>(base), size));
482 }
483 }
484
485 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
486 void* hint,
487 VirtualMemory* controller) {
488 VirtualMemory reservation;
489 if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation)) {
490 return kNullAddress;
491 }
492
493 Address result = reservation.address();
494 size_.Increment(reservation.size());
495 controller->TakeControl(&reservation);
496 return result;
497 }
498
499 Address MemoryAllocator::AllocateAlignedMemory(
500 size_t reserve_size, size_t commit_size, size_t alignment,
501 Executability executable, void* hint, VirtualMemory* controller) {
502 DCHECK(commit_size <= reserve_size);
503 VirtualMemory reservation;
504 Address base =
505 ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
506 if (base == kNullAddress) return kNullAddress;
507
508 if (executable == EXECUTABLE) {
509 if (!CommitExecutableMemory(&reservation, base, commit_size,
510 reserve_size)) {
511 base = kNullAddress;
512 }
513 } else {
514 if (reservation.SetPermissions(base, commit_size,
515 PageAllocator::kReadWrite)) {
516 UpdateAllocatedSpaceLimits(base, base + commit_size);
517 } else {
518 base = kNullAddress;
519 }
520 }
521
522 if (base == kNullAddress) {
523 // Failed to commit the body. Free the mapping and any partially committed
524 // regions inside it.
525 reservation.Free();
526 size_.Decrement(reserve_size);
527 return kNullAddress;
528 }
529
530 controller->TakeControl(&reservation);
531 return base;
532 }
533
534 void Page::InitializeAsAnchor(Space* space) {
535 set_owner(space);
536 set_next_chunk(this);
537 set_prev_chunk(this);
538 SetFlags(0, static_cast<uintptr_t>(~0));
539 SetFlag(ANCHOR);
540 }
541
542 Heap* MemoryChunk::synchronized_heap() {
543 return reinterpret_cast<Heap*>(
544 base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
545 }
546
547 void MemoryChunk::InitializationMemoryFence() {
548 base::SeqCst_MemoryFence();
549 #ifdef THREAD_SANITIZER
550 // Since TSAN does not process memory fences, we use the following annotation
551 // to tell TSAN that there is no data race when emitting an
552 // InitializationMemoryFence. Note that the other thread still needs to
553 // perform MemoryChunk::synchronized_heap().
554 base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
555 reinterpret_cast<base::AtomicWord>(heap_));
556 #endif
557 }
558
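// write_unprotect_counter_ acts like a recursive lock on the writability of a
// code page: SetReadAndWritable() increments it and flips the page to
// read+write on the 0 -> 1 transition, while SetReadAndExecutable() decrements
// it and flips the page back to read+execute on the 1 -> 0 transition.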
559 void MemoryChunk::SetReadAndExecutable() {
560 DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
561 DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
562 // Decrementing the write_unprotect_counter_ and changing the page
563 // protection mode has to be atomic.
564 base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
565 if (write_unprotect_counter_ == 0) {
566 // This is a corner case that may happen when we have a
567 // CodeSpaceMemoryModificationScope open and this page was newly
568 // added.
569 return;
570 }
571 write_unprotect_counter_--;
572 DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
573 if (write_unprotect_counter_ == 0) {
574 Address protect_start =
575 address() + MemoryAllocator::CodePageAreaStartOffset();
576 size_t page_size = MemoryAllocator::GetCommitPageSize();
577 DCHECK(IsAddressAligned(protect_start, page_size));
578 size_t protect_size = RoundUp(area_size(), page_size);
579 CHECK(SetPermissions(protect_start, protect_size,
580 PageAllocator::kReadExecute));
581 }
582 }
583
584 void MemoryChunk::SetReadAndWritable() {
585 DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
586 DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
587 // Incrementing the write_unprotect_counter_ and changing the page
588 // protection mode has to be atomic.
589 base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
590 write_unprotect_counter_++;
591 DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
592 if (write_unprotect_counter_ == 1) {
593 Address unprotect_start =
594 address() + MemoryAllocator::CodePageAreaStartOffset();
595 size_t page_size = MemoryAllocator::GetCommitPageSize();
596 DCHECK(IsAddressAligned(unprotect_start, page_size));
597 size_t unprotect_size = RoundUp(area_size(), page_size);
598 CHECK(SetPermissions(unprotect_start, unprotect_size,
599 PageAllocator::kReadWrite));
600 }
601 }
602
603 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
604 Address area_start, Address area_end,
605 Executability executable, Space* owner,
606 VirtualMemory* reservation) {
607 MemoryChunk* chunk = FromAddress(base);
608
609 DCHECK(base == chunk->address());
610
611 chunk->heap_ = heap;
612 chunk->size_ = size;
613 chunk->area_start_ = area_start;
614 chunk->area_end_ = area_end;
615 chunk->flags_ = Flags(NO_FLAGS);
616 chunk->set_owner(owner);
617 chunk->InitializeReservedMemory();
618 base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
619 base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
620 base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
621 nullptr);
622 base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
623 nullptr);
624 chunk->invalidated_slots_ = nullptr;
625 chunk->skip_list_ = nullptr;
626 chunk->progress_bar_ = 0;
627 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
628 chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
629 chunk->page_protection_change_mutex_ = new base::Mutex();
630 chunk->write_unprotect_counter_ = 0;
631 chunk->mutex_ = new base::Mutex();
632 chunk->allocated_bytes_ = chunk->area_size();
633 chunk->wasted_memory_ = 0;
634 chunk->young_generation_bitmap_ = nullptr;
635 chunk->set_next_chunk(nullptr);
636 chunk->set_prev_chunk(nullptr);
637 chunk->local_tracker_ = nullptr;
638
639 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
640 chunk->categories_[i] = nullptr;
641 }
642
643 if (owner->identity() == RO_SPACE) {
644 heap->incremental_marking()
645 ->non_atomic_marking_state()
646 ->bitmap(chunk)
647 ->MarkAllBits();
648 } else {
649 heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(
650 chunk);
651 }
652
653 DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
654
655 if (executable == EXECUTABLE) {
656 chunk->SetFlag(IS_EXECUTABLE);
657 if (heap->write_protect_code_memory()) {
658 chunk->write_unprotect_counter_ =
659 heap->code_space_memory_modification_scope_depth();
660 } else {
661 size_t page_size = MemoryAllocator::GetCommitPageSize();
662 DCHECK(IsAddressAligned(area_start, page_size));
663 size_t area_size = RoundUp(area_end - area_start, page_size);
664 CHECK(SetPermissions(area_start, area_size,
665 PageAllocator::kReadWriteExecute));
666 }
667 }
668
669 if (reservation != nullptr) {
670 chunk->reservation_.TakeControl(reservation);
671 }
672
673 return chunk;
674 }
675
676 Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
677 Page* page = static_cast<Page*>(chunk);
678 DCHECK_GE(Page::kAllocatableMemory, page->area_size());
679 // Make sure that categories are initialized before freeing the area.
680 page->ResetAllocatedBytes();
681 heap()->incremental_marking()->SetOldSpacePageFlags(page);
682 page->AllocateFreeListCategories();
683 page->InitializeFreeListCategories();
684 page->InitializationMemoryFence();
685 return page;
686 }
687
688 Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
689 DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
690 bool in_to_space = (id() != kFromSpace);
691 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
692 : MemoryChunk::IN_FROM_SPACE);
693 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
694 : MemoryChunk::IN_TO_SPACE));
695 Page* page = static_cast<Page*>(chunk);
696 heap()->incremental_marking()->SetNewSpacePageFlags(page);
697 page->AllocateLocalTracker();
698 #ifdef ENABLE_MINOR_MC
699 if (FLAG_minor_mc) {
700 page->AllocateYoungGenerationBitmap();
701 heap()
702 ->minor_mark_compact_collector()
703 ->non_atomic_marking_state()
704 ->ClearLiveness(page);
705 }
706 #endif // ENABLE_MINOR_MC
707 page->InitializationMemoryFence();
708 return page;
709 }
710
711 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
712 Executability executable) {
713 if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
714 STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
715 FATAL("Code page is too large.");
716 }
717 heap->incremental_marking()->SetOldSpacePageFlags(chunk);
718
719 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
720
721 // Initialize the owner field for each contained page (except the first, which
722 // is initialized by MemoryChunk::Initialize).
723 for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
724 addr < chunk->area_end(); addr += Page::kPageSize) {
725 // Clear out kPageHeaderTag.
726 Memory::Address_at(addr) = 0;
727 }
728 LargePage* page = static_cast<LargePage*>(chunk);
729 page->InitializationMemoryFence();
730 return page;
731 }
732
733 void Page::AllocateFreeListCategories() {
734 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
735 categories_[i] = new FreeListCategory(
736 reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
737 }
738 }
739
740 void Page::InitializeFreeListCategories() {
741 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
742 categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
743 }
744 }
745
746 void Page::ReleaseFreeListCategories() {
747 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
748 if (categories_[i] != nullptr) {
749 delete categories_[i];
750 categories_[i] = nullptr;
751 }
752 }
753 }
754
755 Page* Page::ConvertNewToOld(Page* old_page) {
756 DCHECK(!old_page->is_anchor());
757 DCHECK(old_page->InNewSpace());
758 OldSpace* old_space = old_page->heap()->old_space();
759 old_page->set_owner(old_space);
760 old_page->SetFlags(0, static_cast<uintptr_t>(~0));
761 Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
762 old_space->AddPage(new_page);
763 return new_page;
764 }
765
766 size_t MemoryChunk::CommittedPhysicalMemory() {
767 if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
768 return size();
769 return high_water_mark_.Value();
770 }
771
772 bool MemoryChunk::IsPagedSpace() const {
773 return owner()->identity() != LO_SPACE;
774 }
775
776 void MemoryChunk::InsertAfter(MemoryChunk* other) {
777 MemoryChunk* other_next = other->next_chunk();
778
779 set_next_chunk(other_next);
780 set_prev_chunk(other);
781 other_next->set_prev_chunk(this);
782 other->set_next_chunk(this);
783 }
784
785
786 void MemoryChunk::Unlink() {
787 MemoryChunk* next_element = next_chunk();
788 MemoryChunk* prev_element = prev_chunk();
789 next_element->set_prev_chunk(prev_element);
790 prev_element->set_next_chunk(next_element);
791 set_prev_chunk(nullptr);
792 set_next_chunk(nullptr);
793 }
794
795 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
796 size_t commit_area_size,
797 Executability executable,
798 Space* owner) {
799 DCHECK_LE(commit_area_size, reserve_area_size);
800
801 size_t chunk_size;
802 Heap* heap = isolate_->heap();
803 Address base = kNullAddress;
804 VirtualMemory reservation;
805 Address area_start = kNullAddress;
806 Address area_end = kNullAddress;
807 void* address_hint =
808 AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
809
810 //
811 // MemoryChunk layout:
812 //
813 // Executable
814 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
815 // | Header |
816 // +----------------------------+<- base + CodePageGuardStartOffset
817 // | Guard |
818 // +----------------------------+<- area_start_
819 // | Area |
820 // +----------------------------+<- area_end_ (area_start + commit_area_size)
821 // | Committed but not used |
822 // +----------------------------+<- aligned at OS page boundary
823 // | Reserved but not committed |
824 // +----------------------------+<- aligned at OS page boundary
825 // | Guard |
826 // +----------------------------+<- base + chunk_size
827 //
828 // Non-executable
829 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
830 // | Header |
831 // +----------------------------+<- area_start_ (base + kObjectStartOffset)
832 // | Area |
833 // +----------------------------+<- area_end_ (area_start + commit_area_size)
834 // | Committed but not used |
835 // +----------------------------+<- aligned at OS page boundary
836 // | Reserved but not committed |
837 // +----------------------------+<- base + chunk_size
838 //
839
840 if (executable == EXECUTABLE) {
841 chunk_size = ::RoundUp(
842 CodePageAreaStartOffset() + reserve_area_size + CodePageGuardSize(),
843 GetCommitPageSize());
844
845 // Size of header (not executable) plus area (executable).
846 size_t commit_size = ::RoundUp(
847 CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
848 // Allocate executable memory either from code range or from the OS.
849 #ifdef V8_TARGET_ARCH_MIPS64
850 // Use code range only for large object space on mips64 to keep address
851 // range within 256-MB memory region.
852 if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
853 #else
854 if (code_range()->valid()) {
855 #endif
856 base =
857 code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
858 DCHECK(IsAligned(base, MemoryChunk::kAlignment));
859 if (base == kNullAddress) return nullptr;
860 size_.Increment(chunk_size);
861 // Update executable memory size.
862 size_executable_.Increment(chunk_size);
863 } else {
864 base = AllocateAlignedMemory(chunk_size, commit_size,
865 MemoryChunk::kAlignment, executable,
866 address_hint, &reservation);
867 if (base == kNullAddress) return nullptr;
868 // Update executable memory size.
869 size_executable_.Increment(reservation.size());
870 }
871
872 if (Heap::ShouldZapGarbage()) {
873 ZapBlock(base, CodePageGuardStartOffset());
874 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
875 }
876
877 area_start = base + CodePageAreaStartOffset();
878 area_end = area_start + commit_area_size;
879 } else {
880 chunk_size = ::RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
881 GetCommitPageSize());
882 size_t commit_size =
883 ::RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
884 GetCommitPageSize());
885 base =
886 AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
887 executable, address_hint, &reservation);
888
889 if (base == kNullAddress) return nullptr;
890
891 if (Heap::ShouldZapGarbage()) {
892 ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
893 }
894
895 area_start = base + Page::kObjectStartOffset;
896 area_end = area_start + commit_area_size;
897 }
898
899 // Use chunk_size for statistics and callbacks because we assume that they
900 // treat reserved but not-yet committed memory regions of chunks as allocated.
901 isolate_->counters()->memory_allocated()->Increment(
902 static_cast<int>(chunk_size));
903
904 LOG(isolate_,
905 NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
906
907 // We cannot use the last chunk in the address space because we would
908 // overflow when comparing top and limit if this chunk is used for a
909 // linear allocation area.
910 if ((base + chunk_size) == 0u) {
911 CHECK(!last_chunk_.IsReserved());
912 last_chunk_.TakeControl(&reservation);
913 UncommitBlock(last_chunk_.address(), last_chunk_.size());
914 size_.Decrement(chunk_size);
915 if (executable == EXECUTABLE) {
916 size_executable_.Decrement(chunk_size);
917 }
918 CHECK(last_chunk_.IsReserved());
919 return AllocateChunk(reserve_area_size, commit_area_size, executable,
920 owner);
921 }
922
923 MemoryChunk* chunk =
924 MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
925 executable, owner, &reservation);
926
927 if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
928 return chunk;
929 }
930
931 void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
932
933 void Page::ResetFreeListStatistics() {
934 wasted_memory_ = 0;
935 }
936
937 size_t Page::AvailableInFreeList() {
938 size_t sum = 0;
939 ForAllFreeListCategories([&sum](FreeListCategory* category) {
940 sum += category->available();
941 });
942 return sum;
943 }
944
945 #ifdef DEBUG
946 namespace {
947 // Skips filler starting from the given filler until the end address.
948 // Returns the first address after the skipped fillers.
949 Address SkipFillers(HeapObject* filler, Address end) {
950 Address addr = filler->address();
951 while (addr < end) {
952 filler = HeapObject::FromAddress(addr);
953 CHECK(filler->IsFiller());
954 addr = filler->address() + filler->Size();
955 }
956 return addr;
957 }
958 } // anonymous namespace
959 #endif // DEBUG
960
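// Releases the committed-but-unused tail of the page, i.e. the whole commit
// pages between the high water mark filler and area_end(). The remaining gap
// is re-covered by a filler object and the memory is returned to the OS via
// PartialFreeMemory(). Returns the number of bytes released.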
961 size_t Page::ShrinkToHighWaterMark() {
962 // Shrinking only makes sense outside of the CodeRange, where we don't care
963 // about address space fragmentation.
964 VirtualMemory* reservation = reserved_memory();
965 if (!reservation->IsReserved()) return 0;
966
967 // Shrink pages to high water mark. The water mark points either to a filler
968 // or the area_end.
969 HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
970 if (filler->address() == area_end()) return 0;
971 CHECK(filler->IsFiller());
972 // Ensure that no objects were allocated in [filler, area_end) region.
973 DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
974 // Ensure that no objects will be allocated on this page.
975 DCHECK_EQ(0u, AvailableInFreeList());
976
977 size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
978 MemoryAllocator::GetCommitPageSize());
979 if (unused > 0) {
980 DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
981 if (FLAG_trace_gc_verbose) {
982 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
983 reinterpret_cast<void*>(this),
984 reinterpret_cast<void*>(area_end()),
985 reinterpret_cast<void*>(area_end() - unused));
986 }
987 heap()->CreateFillerObjectAt(
988 filler->address(),
989 static_cast<int>(area_end() - filler->address() - unused),
990 ClearRecordedSlots::kNo);
991 heap()->memory_allocator()->PartialFreeMemory(
992 this, address() + size() - unused, unused, area_end() - unused);
993 if (filler->address() != area_end()) {
994 CHECK(filler->IsFiller());
995 CHECK_EQ(filler->address() + filler->Size(), area_end());
996 }
997 }
998 return unused;
999 }
1000
1001 void Page::CreateBlackArea(Address start, Address end) {
1002 DCHECK(heap()->incremental_marking()->black_allocation());
1003 DCHECK_EQ(Page::FromAddress(start), this);
1004 DCHECK_NE(start, end);
1005 DCHECK_EQ(Page::FromAddress(end - 1), this);
1006 IncrementalMarking::MarkingState* marking_state =
1007 heap()->incremental_marking()->marking_state();
1008 marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
1009 AddressToMarkbitIndex(end));
1010 marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
1011 }
1012
1013 void Page::DestroyBlackArea(Address start, Address end) {
1014 DCHECK(heap()->incremental_marking()->black_allocation());
1015 DCHECK_EQ(Page::FromAddress(start), this);
1016 DCHECK_NE(start, end);
1017 DCHECK_EQ(Page::FromAddress(end - 1), this);
1018 IncrementalMarking::MarkingState* marking_state =
1019 heap()->incremental_marking()->marking_state();
1020 marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
1021 AddressToMarkbitIndex(end));
1022 marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
1023 }
1024
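// Frees |bytes_to_free| bytes from the end of |chunk| and shrinks its area to
// |new_area_end|. For executable chunks a new inaccessible guard page is
// re-established at the shrunken end before the tail of the reservation is
// released.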
1025 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
1026 size_t bytes_to_free,
1027 Address new_area_end) {
1028 VirtualMemory* reservation = chunk->reserved_memory();
1029 DCHECK(reservation->IsReserved());
1030 chunk->size_ -= bytes_to_free;
1031 chunk->area_end_ = new_area_end;
1032 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
1033 // Add guard page at the end.
1034 size_t page_size = GetCommitPageSize();
1035 DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
1036 DCHECK_EQ(chunk->address() + chunk->size(),
1037 chunk->area_end() + CodePageGuardSize());
1038 reservation->SetPermissions(chunk->area_end_, page_size,
1039 PageAllocator::kNoAccess);
1040 }
1041 // On e.g. Windows, a reservation may be larger than a page and releasing
1042 // partially starting at |start_free| will also release the potentially
1043 // unused part behind the current page.
1044 const size_t released_bytes = reservation->Release(start_free);
1045 DCHECK_GE(size_.Value(), released_bytes);
1046 size_.Decrement(released_bytes);
1047 isolate_->counters()->memory_allocated()->Decrement(
1048 static_cast<int>(released_bytes));
1049 }
1050
1051 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
1052 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
1053 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
1054
1055 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
1056 chunk->IsEvacuationCandidate());
1057
1058 VirtualMemory* reservation = chunk->reserved_memory();
1059 const size_t size =
1060 reservation->IsReserved() ? reservation->size() : chunk->size();
1061 DCHECK_GE(size_.Value(), static_cast<size_t>(size));
1062 size_.Decrement(size);
1063 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
1064 if (chunk->executable() == EXECUTABLE) {
1065 DCHECK_GE(size_executable_.Value(), size);
1066 size_executable_.Decrement(size);
1067 }
1068
1069 chunk->SetFlag(MemoryChunk::PRE_FREED);
1070
1071 if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
1072 }
1073
1074
1075 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
1076 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
1077 chunk->ReleaseAllocatedMemory();
1078
1079 VirtualMemory* reservation = chunk->reserved_memory();
1080 if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
1081 UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
1082 } else {
1083 if (reservation->IsReserved()) {
1084 FreeMemory(reservation, chunk->executable());
1085 } else {
1086 FreeMemory(chunk->address(), chunk->size(), chunk->executable());
1087 }
1088 }
1089 }
1090
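// Frees |chunk| according to |mode|: kFull releases the memory immediately,
// kAlreadyPooled unmaps an already-uncommitted pooled page, while
// kPreFreeAndQueue and kPooledAndQueue only pre-free the chunk and hand it to
// the Unmapper, which frees it from a background task.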
1091 template <MemoryAllocator::FreeMode mode>
1092 void MemoryAllocator::Free(MemoryChunk* chunk) {
1093 switch (mode) {
1094 case kFull:
1095 PreFreeMemory(chunk);
1096 PerformFreeMemory(chunk);
1097 break;
1098 case kAlreadyPooled:
1099 // Pooled pages cannot be touched anymore as their memory is uncommitted.
1100 FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
1101 Executability::NOT_EXECUTABLE);
1102 break;
1103 case kPooledAndQueue:
1104 DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
1105 DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
1106 chunk->SetFlag(MemoryChunk::POOLED);
1107 V8_FALLTHROUGH;
1108 case kPreFreeAndQueue:
1109 PreFreeMemory(chunk);
1110 // The chunks added to this queue will be freed by a concurrent thread.
1111 unmapper()->AddMemoryChunkSafe(chunk);
1112 break;
1113 }
1114 }
1115
1116 template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
1117
1118 template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
1119 MemoryChunk* chunk);
1120
1121 template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
1122 MemoryChunk* chunk);
1123
1124 template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
1125 MemoryChunk* chunk);
1126
1127 template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
1128 Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
1129 Executability executable) {
1130 MemoryChunk* chunk = nullptr;
1131 if (alloc_mode == kPooled) {
1132 DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
1133 DCHECK_EQ(executable, NOT_EXECUTABLE);
1134 chunk = AllocatePagePooled(owner);
1135 }
1136 if (chunk == nullptr) {
1137 chunk = AllocateChunk(size, size, executable, owner);
1138 }
1139 if (chunk == nullptr) return nullptr;
1140 return owner->InitializePage(chunk, executable);
1141 }
1142
1143 template Page*
1144 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1145 size_t size, PagedSpace* owner, Executability executable);
1146 template Page*
1147 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1148 size_t size, SemiSpace* owner, Executability executable);
1149 template Page*
1150 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1151 size_t size, SemiSpace* owner, Executability executable);
1152
1153 LargePage* MemoryAllocator::AllocateLargePage(size_t size,
1154 LargeObjectSpace* owner,
1155 Executability executable) {
1156 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
1157 if (chunk == nullptr) return nullptr;
1158 return LargePage::Initialize(isolate_->heap(), chunk, executable);
1159 }
1160
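// Tries to reuse a pooled chunk from the Unmapper. On success the chunk's
// memory is recommitted and the chunk is reinitialized as a non-executable,
// page-sized MemoryChunk owned by |owner|; returns nullptr if the pool is
// empty or committing the block fails.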
1161 template <typename SpaceType>
1162 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
1163 MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
1164 if (chunk == nullptr) return nullptr;
1165 const int size = MemoryChunk::kPageSize;
1166 const Address start = reinterpret_cast<Address>(chunk);
1167 const Address area_start = start + MemoryChunk::kObjectStartOffset;
1168 const Address area_end = start + size;
1169 if (!CommitBlock(start, size)) {
1170 return nullptr;
1171 }
1172 VirtualMemory reservation(start, size);
1173 MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
1174 NOT_EXECUTABLE, owner, &reservation);
1175 size_.Increment(size);
1176 return chunk;
1177 }
1178
1179 bool MemoryAllocator::CommitBlock(Address start, size_t size) {
1180 if (!CommitMemory(start, size)) return false;
1181
1182 if (Heap::ShouldZapGarbage()) {
1183 ZapBlock(start, size);
1184 }
1185
1186 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
1187 return true;
1188 }
1189
1190
1191 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
1192 if (!SetPermissions(start, size, PageAllocator::kNoAccess)) return false;
1193 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
1194 return true;
1195 }
1196
1197
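// Fills the block with kZapValue one pointer-sized word at a time so that
// stale references into freed or uninitialized memory are easy to recognize;
// a trailing fragment smaller than a pointer is left untouched.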
1198 void MemoryAllocator::ZapBlock(Address start, size_t size) {
1199 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
1200 Memory::Address_at(start + s) = static_cast<Address>(kZapValue);
1201 }
1202 }
1203
1204 size_t MemoryAllocator::CodePageGuardStartOffset() {
1205 // We are guarding code pages: the first OS page after the header
1206 // will be protected as non-writable.
1207 return ::RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
1208 }
1209
1210 size_t MemoryAllocator::CodePageGuardSize() { return GetCommitPageSize(); }
1211
1212 size_t MemoryAllocator::CodePageAreaStartOffset() {
1213 // We are guarding code pages: the first OS page after the header
1214 // will be protected as non-writable.
1215 return CodePageGuardStartOffset() + CodePageGuardSize();
1216 }
1217
1218 size_t MemoryAllocator::CodePageAreaEndOffset() {
1219 // We are guarding code pages: the last OS page will be protected as
1220 // non-writable.
1221 return Page::kPageSize - static_cast<int>(GetCommitPageSize());
1222 }
1223
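// The commit page size can be overridden for testing via --v8-os-page-size
// (a power-of-two value given in KB); otherwise the page size reported by the
// platform is used.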
1224 intptr_t MemoryAllocator::GetCommitPageSize() {
1225 if (FLAG_v8_os_page_size != 0) {
1226 DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
1227 return FLAG_v8_os_page_size * KB;
1228 } else {
1229 return CommitPageSize();
1230 }
1231 }
1232
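// Commits an executable chunk in stages that mirror the MemoryChunk layout
// documented in AllocateChunk(): read+write header, inaccessible pre-code
// guard page, read+write code area, inaccessible post-code guard page. If any
// step fails, the permissions set so far are reverted to kNoAccess and false
// is returned.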
1233 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
1234 size_t commit_size,
1235 size_t reserved_size) {
1236 const size_t page_size = GetCommitPageSize();
1237 // All addresses and sizes must be aligned to the commit page size.
1238 DCHECK(IsAddressAligned(start, page_size));
1239 DCHECK_EQ(0, commit_size % page_size);
1240 DCHECK_EQ(0, reserved_size % page_size);
1241 const size_t guard_size = CodePageGuardSize();
1242 const size_t pre_guard_offset = CodePageGuardStartOffset();
1243 const size_t code_area_offset = CodePageAreaStartOffset();
1244 // reserved_size includes two guard regions, commit_size does not.
1245 DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
1246 const Address pre_guard_page = start + pre_guard_offset;
1247 const Address code_area = start + code_area_offset;
1248 const Address post_guard_page = start + reserved_size - guard_size;
1249 // Commit the non-executable header, from start to pre-code guard page.
1250 if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
1251 // Create the pre-code guard page, following the header.
1252 if (vm->SetPermissions(pre_guard_page, page_size,
1253 PageAllocator::kNoAccess)) {
1254 // Commit the executable code body.
1255 if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
1256 PageAllocator::kReadWrite)) {
1257 // Create the post-code guard page.
1258 if (vm->SetPermissions(post_guard_page, page_size,
1259 PageAllocator::kNoAccess)) {
1260 UpdateAllocatedSpaceLimits(start, code_area + commit_size);
1261 return true;
1262 }
1263 vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
1264 }
1265 }
1266 vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
1267 }
1268 return false;
1269 }
1270
1271
1272 // -----------------------------------------------------------------------------
1273 // MemoryChunk implementation
1274
1275 bool MemoryChunk::contains_array_buffers() {
1276 return local_tracker() != nullptr && !local_tracker()->IsEmpty();
1277 }
1278
1279 void MemoryChunk::ReleaseAllocatedMemory() {
1280 if (skip_list_ != nullptr) {
1281 delete skip_list_;
1282 skip_list_ = nullptr;
1283 }
1284 if (mutex_ != nullptr) {
1285 delete mutex_;
1286 mutex_ = nullptr;
1287 }
1288 if (page_protection_change_mutex_ != nullptr) {
1289 delete page_protection_change_mutex_;
1290 page_protection_change_mutex_ = nullptr;
1291 }
1292 ReleaseSlotSet<OLD_TO_NEW>();
1293 ReleaseSlotSet<OLD_TO_OLD>();
1294 ReleaseTypedSlotSet<OLD_TO_NEW>();
1295 ReleaseTypedSlotSet<OLD_TO_OLD>();
1296 ReleaseInvalidatedSlots();
1297 if (local_tracker_ != nullptr) ReleaseLocalTracker();
1298 if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
1299
1300 if (IsPagedSpace()) {
1301 Page* page = static_cast<Page*>(this);
1302 page->ReleaseFreeListCategories();
1303 }
1304 }
1305
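// Allocates one SlotSet per page covered by |size| and points each entry at
// the start of its page; used by AllocateSlotSet() below.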
1306 static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
1307 size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
1308 DCHECK_LT(0, pages);
1309 SlotSet* slot_set = new SlotSet[pages];
1310 for (size_t i = 0; i < pages; i++) {
1311 slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
1312 }
1313 return slot_set;
1314 }
1315
1316 template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
1317 template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
1318
1319 template <RememberedSetType type>
1320 SlotSet* MemoryChunk::AllocateSlotSet() {
1321 SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
1322 SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
1323 &slot_set_[type], nullptr, slot_set);
1324 if (old_slot_set != nullptr) {
1325 delete[] slot_set;
1326 slot_set = old_slot_set;
1327 }
1328 DCHECK(slot_set);
1329 return slot_set;
1330 }
1331
1332 template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
1333 template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
1334
1335 template <RememberedSetType type>
1336 void MemoryChunk::ReleaseSlotSet() {
1337 SlotSet* slot_set = slot_set_[type];
1338 if (slot_set) {
1339 slot_set_[type] = nullptr;
1340 delete[] slot_set;
1341 }
1342 }
1343
1344 template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
1345 template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
1346
1347 template <RememberedSetType type>
1348 TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
1349 TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
1350 TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
1351 &typed_slot_set_[type], nullptr, typed_slot_set);
1352 if (old_value != nullptr) {
1353 delete typed_slot_set;
1354 typed_slot_set = old_value;
1355 }
1356 DCHECK(typed_slot_set);
1357 return typed_slot_set;
1358 }
1359
1360 template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
1361 template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
1362
1363 template <RememberedSetType type>
1364 void MemoryChunk::ReleaseTypedSlotSet() {
1365 TypedSlotSet* typed_slot_set = typed_slot_set_[type];
1366 if (typed_slot_set) {
1367 typed_slot_set_[type] = nullptr;
1368 delete typed_slot_set;
1369 }
1370 }
1371
1372 InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
1373 DCHECK_NULL(invalidated_slots_);
1374 invalidated_slots_ = new InvalidatedSlots();
1375 return invalidated_slots_;
1376 }
1377
1378 void MemoryChunk::ReleaseInvalidatedSlots() {
1379 if (invalidated_slots_) {
1380 delete invalidated_slots_;
1381 invalidated_slots_ = nullptr;
1382 }
1383 }
1384
1385 void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
1386 int size) {
1387 if (!ShouldSkipEvacuationSlotRecording()) {
1388 if (invalidated_slots() == nullptr) {
1389 AllocateInvalidatedSlots();
1390 }
1391 int old_size = (*invalidated_slots())[object];
1392 (*invalidated_slots())[object] = std::max(old_size, size);
1393 }
1394 }
1395
1396 void MemoryChunk::AllocateLocalTracker() {
1397 DCHECK_NULL(local_tracker_);
1398 local_tracker_ = new LocalArrayBufferTracker(owner());
1399 }
1400
1401 void MemoryChunk::ReleaseLocalTracker() {
1402 DCHECK_NOT_NULL(local_tracker_);
1403 delete local_tracker_;
1404 local_tracker_ = nullptr;
1405 }
1406
1407 void MemoryChunk::AllocateYoungGenerationBitmap() {
1408 DCHECK_NULL(young_generation_bitmap_);
1409 young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
1410 }
1411
1412 void MemoryChunk::ReleaseYoungGenerationBitmap() {
1413 DCHECK_NOT_NULL(young_generation_bitmap_);
1414 free(young_generation_bitmap_);
1415 young_generation_bitmap_ = nullptr;
1416 }
1417
1418 // -----------------------------------------------------------------------------
1419 // PagedSpace implementation
1420
1421 void Space::AddAllocationObserver(AllocationObserver* observer) {
1422 allocation_observers_.push_back(observer);
1423 StartNextInlineAllocationStep();
1424 }
1425
1426 void Space::RemoveAllocationObserver(AllocationObserver* observer) {
1427 auto it = std::find(allocation_observers_.begin(),
1428 allocation_observers_.end(), observer);
1429 DCHECK(allocation_observers_.end() != it);
1430 allocation_observers_.erase(it);
1431 StartNextInlineAllocationStep();
1432 }
1433
1434 void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
1435
1436 void Space::ResumeAllocationObservers() {
1437 allocation_observers_paused_ = false;
1438 }
1439
1440 void Space::AllocationStep(int bytes_since_last, Address soon_object,
1441 int size) {
1442 if (!AllocationObserversActive()) {
1443 return;
1444 }
1445
1446 DCHECK(!heap()->allocation_step_in_progress());
1447 heap()->set_allocation_step_in_progress(true);
1448 heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
1449 for (AllocationObserver* observer : allocation_observers_) {
1450 observer->AllocationStep(bytes_since_last, soon_object, size);
1451 }
1452 heap()->set_allocation_step_in_progress(false);
1453 }
1454
1455 intptr_t Space::GetNextInlineAllocationStepSize() {
1456 intptr_t next_step = 0;
1457 for (AllocationObserver* observer : allocation_observers_) {
1458 next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
1459 : observer->bytes_to_next_step();
1460 }
1461 DCHECK(allocation_observers_.size() == 0 || next_step > 0);
1462 return next_step;
1463 }
1464
1465 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
1466 Executability executable)
1467 : SpaceWithLinearArea(heap, space), executable_(executable), anchor_(this) {
1468 area_size_ = MemoryAllocator::PageAreaSize(space);
1469 accounting_stats_.Clear();
1470 }
1471
1472
1473 bool PagedSpace::SetUp() { return true; }
1474
1475
1476 bool PagedSpace::HasBeenSetUp() { return true; }
1477
1478
1479 void PagedSpace::TearDown() {
1480 for (auto it = begin(); it != end();) {
1481 Page* page = *(it++); // Will be erased.
1482 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
1483 }
1484 anchor_.set_next_page(&anchor_);
1485 anchor_.set_prev_page(&anchor_);
1486 accounting_stats_.Clear();
1487 }
1488
1489 void PagedSpace::RefillFreeList() {
1490 // Any PagedSpace might invoke RefillFreeList. We filter out all but our
1491 // old generation spaces.
1492 if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
1493 identity() != MAP_SPACE && identity() != RO_SPACE) {
1494 return;
1495 }
1496 MarkCompactCollector* collector = heap()->mark_compact_collector();
1497 size_t added = 0;
1498 {
1499 Page* p = nullptr;
1500 while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
1501 // Pages can change ownership only during compaction. This is safe because
1502 // there exists no other competing action on the page links during
1503 // compaction.
1504 if (is_local()) {
1505 DCHECK_NE(this, p->owner());
1506 PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
1507 base::LockGuard<base::Mutex> guard(owner->mutex());
1508 owner->RefineAllocatedBytesAfterSweeping(p);
1509 owner->RemovePage(p);
1510 added += AddPage(p);
1511 } else {
1512 base::LockGuard<base::Mutex> guard(mutex());
1513 DCHECK_EQ(this, p->owner());
1514 RefineAllocatedBytesAfterSweeping(p);
1515 added += RelinkFreeListCategories(p);
1516 }
1517 added += p->wasted_memory();
1518 if (is_local() && (added > kCompactionMemoryWanted)) break;
1519 }
1520 }
1521 }
1522
1523 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
1524 base::LockGuard<base::Mutex> guard(mutex());
1525
1526 DCHECK(identity() == other->identity());
1527 // Unmerged fields:
1528 // area_size_
1529 // anchor_
1530
1531 other->FreeLinearAllocationArea();
1532
1533 // The linear allocation area of {other} should be destroyed now.
1534 DCHECK_EQ(kNullAddress, other->top());
1535 DCHECK_EQ(kNullAddress, other->limit());
1536
1537 // Move over pages.
1538 for (auto it = other->begin(); it != other->end();) {
1539 Page* p = *(it++);
1540 // Relinking requires the category to be unlinked.
1541 other->RemovePage(p);
1542 AddPage(p);
1543 DCHECK_EQ(p->AvailableInFreeList(),
1544 p->AvailableInFreeListFromAllocatedBytes());
1545 }
1546 DCHECK_EQ(0u, other->Size());
1547 DCHECK_EQ(0u, other->Capacity());
1548 }
1549
1550
1551 size_t PagedSpace::CommittedPhysicalMemory() {
1552 if (!base::OS::HasLazyCommits()) return CommittedMemory();
1553 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1554 size_t size = 0;
1555 for (Page* page : *this) {
1556 size += page->CommittedPhysicalMemory();
1557 }
1558 return size;
1559 }
1560
1561 bool PagedSpace::ContainsSlow(Address addr) {
1562 Page* p = Page::FromAddress(addr);
1563 for (Page* page : *this) {
1564 if (page == p) return true;
1565 }
1566 return false;
1567 }
1568
1569 void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
1570 CHECK(page->SweepingDone());
1571 auto marking_state =
1572 heap()->incremental_marking()->non_atomic_marking_state();
1573 // The live byte count on the page was accounted for in the space's
1574 // allocated bytes counter. After sweeping, allocated_bytes() contains the
1575 // accurate live byte count on the page.
1576 size_t old_counter = marking_state->live_bytes(page);
1577 size_t new_counter = page->allocated_bytes();
1578 DCHECK_GE(old_counter, new_counter);
1579 if (old_counter > new_counter) {
1580 DecreaseAllocatedBytes(old_counter - new_counter, page);
1581 // Give the heap a chance to adjust counters in response to the
1582 // more precise and smaller old generation size.
1583 heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
1584 }
1585 marking_state->SetLiveBytes(page, 0);
1586 }
1587
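// Removes and returns a page that still has a free-list entry in a category
// large enough for |size_in_bytes|, preferring the larger categories, or
// nullptr if no such page exists. Takes the space mutex, so it can be used
// by concurrent compaction spaces.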
1588 Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
1589 base::LockGuard<base::Mutex> guard(mutex());
1590 // Check for pages that still contain free list entries, starting with the
1591 // largest category and skipping categories too small for |size_in_bytes|.
1592 const int minimum_category =
1593 static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
1594 Page* page = free_list()->GetPageForCategoryType(kHuge);
1595 if (!page && static_cast<int>(kLarge) >= minimum_category)
1596 page = free_list()->GetPageForCategoryType(kLarge);
1597 if (!page && static_cast<int>(kMedium) >= minimum_category)
1598 page = free_list()->GetPageForCategoryType(kMedium);
1599 if (!page && static_cast<int>(kSmall) >= minimum_category)
1600 page = free_list()->GetPageForCategoryType(kSmall);
1601 if (!page && static_cast<int>(kTiny) >= minimum_category)
1602 page = free_list()->GetPageForCategoryType(kTiny);
1603 if (!page && static_cast<int>(kTiniest) >= minimum_category)
1604 page = free_list()->GetPageForCategoryType(kTiniest);
1605 if (!page) return nullptr;
1606 RemovePage(page);
1607 return page;
1608 }
1609
1610 size_t PagedSpace::AddPage(Page* page) {
1611 CHECK(page->SweepingDone());
1612 page->set_owner(this);
1613 page->InsertAfter(anchor()->prev_page());
1614 AccountCommitted(page->size());
1615 IncreaseCapacity(page->area_size());
1616 IncreaseAllocatedBytes(page->allocated_bytes(), page);
1617 return RelinkFreeListCategories(page);
1618 }
1619
1620 void PagedSpace::RemovePage(Page* page) {
1621 CHECK(page->SweepingDone());
1622 page->Unlink();
1623 UnlinkFreeListCategories(page);
1624 DecreaseAllocatedBytes(page->allocated_bytes(), page);
1625 DecreaseCapacity(page->area_size());
1626 AccountUncommitted(page->size());
1627 }
1628
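// Shrinks |page| to its high water mark and adjusts the capacity and
// committed-memory accounting; returns the number of bytes released.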
1629 size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
1630 size_t unused = page->ShrinkToHighWaterMark();
1631 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
1632 AccountUncommitted(unused);
1633 return unused;
1634 }
1635
1636 void PagedSpace::ResetFreeList() {
1637 for (Page* page : *this) {
1638 free_list_.EvictFreeListItems(page);
1639 }
1640 DCHECK(free_list_.IsEmpty());
1641 }
1642
1643 void PagedSpace::ShrinkImmortalImmovablePages() {
1644 DCHECK(!heap()->deserialization_complete());
1645 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1646 FreeLinearAllocationArea();
1647 ResetFreeList();
1648 for (Page* page : *this) {
1649 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
1650 ShrinkPageToHighWaterMark(page);
1651 }
1652 }
1653
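// Adds one fresh page to the space and registers its entire area as free.
// Fails if the old generation is not allowed to grow further or if the page
// allocation itself fails.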
1654 bool PagedSpace::Expand() {
1655 // Always lock against the main space as capacity and pages of the main
1656 // paged space may be adjusted concurrently.
1657 base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());
1658
1659 const int size = AreaSize();
1660
1661 if (!heap()->CanExpandOldGeneration(size)) return false;
1662
1663 Page* page =
1664 heap()->memory_allocator()->AllocatePage(size, this, executable());
1665 if (page == nullptr) return false;
1666 // Pages created during bootstrapping may contain immortal immovable objects.
1667 if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
1668 AddPage(page);
1669 Free(page->area_start(), page->area_size(),
1670 SpaceAccountingMode::kSpaceAccounted);
1671 DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1672 return true;
1673 }
1674
1675
1676 int PagedSpace::CountTotalPages() {
1677 int count = 0;
1678 for (Page* page : *this) {
1679 count++;
1680 USE(page);
1681 }
1682 return count;
1683 }
1684
1685
1686 void PagedSpace::ResetFreeListStatistics() {
1687 for (Page* page : *this) {
1688 page->ResetFreeListStatistics();
1689 }
1690 }
1691
1692 void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
1693 SetTopAndLimit(top, limit);
1694 if (top != kNullAddress && top != limit &&
1695 heap()->incremental_marking()->black_allocation()) {
1696 Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
1697 }
1698 }
1699
1700 void PagedSpace::DecreaseLimit(Address new_limit) {
1701 Address old_limit = limit();
1702 DCHECK_LE(top(), new_limit);
1703 DCHECK_GE(old_limit, new_limit);
1704 if (new_limit != old_limit) {
1705 SetTopAndLimit(top(), new_limit);
1706 Free(new_limit, old_limit - new_limit,
1707 SpaceAccountingMode::kSpaceAccounted);
1708 if (heap()->incremental_marking()->black_allocation()) {
1709 Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
1710 old_limit);
1711 }
1712 }
1713 }
1714
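// Computes the limit of a linear allocation area that starts at |start|:
// exactly |min_size| bytes when inline allocation is disabled, a limit
// bounded by the next allocation observer step when observers are active,
// and |end| otherwise.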
1715 Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
1716 size_t min_size) {
1717 DCHECK_GE(end - start, min_size);
1718
1719 if (heap()->inline_allocation_disabled()) {
1720 // Fit the requested area exactly.
1721 return start + min_size;
1722 } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
1723 // Generated code may allocate inline from the linear allocation area.
1724 // To make sure we can observe these allocations, we use a lower limit.
1725 size_t step = GetNextInlineAllocationStepSize();
1726
1727 // TODO(ofrobots): there is a subtle difference between old space and new
1728 // space here. Any way to avoid it? `step - 1` makes more sense as we would
1729 // like to sample the object that straddles the `start + step` boundary.
1730 // Rounding down further would introduce a small statistical error in
1731 // sampling. However, presently PagedSpace requires limit to be aligned.
1732 size_t rounded_step;
1733 if (identity() == NEW_SPACE) {
1734 DCHECK_GE(step, 1);
1735 rounded_step = step - 1;
1736 } else {
1737 rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
1738 }
1739 return Min(static_cast<Address>(start + min_size + rounded_step), end);
1740 } else {
1741 // The entire node can be used as the linear allocation area.
1742 return end;
1743 }
1744 }
1745
1746 void PagedSpace::MarkLinearAllocationAreaBlack() {
1747 DCHECK(heap()->incremental_marking()->black_allocation());
1748 Address current_top = top();
1749 Address current_limit = limit();
1750 if (current_top != kNullAddress && current_top != current_limit) {
1751 Page::FromAllocationAreaAddress(current_top)
1752 ->CreateBlackArea(current_top, current_limit);
1753 }
1754 }
1755
1756 void PagedSpace::UnmarkLinearAllocationArea() {
1757 Address current_top = top();
1758 Address current_limit = limit();
1759 if (current_top != kNullAddress && current_top != current_limit) {
1760 Page::FromAllocationAreaAddress(current_top)
1761 ->DestroyBlackArea(current_top, current_limit);
1762 }
1763 }
1764
1765 void PagedSpace::FreeLinearAllocationArea() {
1766 // Mark the old linear allocation area with a free space map so it can be
1767 // skipped when scanning the heap.
1768 Address current_top = top();
1769 Address current_limit = limit();
1770 if (current_top == kNullAddress) {
1771 DCHECK_EQ(kNullAddress, current_limit);
1772 return;
1773 }
1774
1775 if (heap()->incremental_marking()->black_allocation()) {
1776 Page* page = Page::FromAllocationAreaAddress(current_top);
1777
1778 // Clear the bits in the unused black area.
1779 if (current_top != current_limit) {
1780 IncrementalMarking::MarkingState* marking_state =
1781 heap()->incremental_marking()->marking_state();
1782 marking_state->bitmap(page)->ClearRange(
1783 page->AddressToMarkbitIndex(current_top),
1784 page->AddressToMarkbitIndex(current_limit));
1785 marking_state->IncrementLiveBytes(
1786 page, -static_cast<int>(current_limit - current_top));
1787 }
1788 }
1789
1790 InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
1791 SetTopAndLimit(kNullAddress, kNullAddress);
1792 DCHECK_GE(current_limit, current_top);
1793
1794 // The code page of the linear allocation area needs to be unprotected
1795 // because we are going to write a filler into that memory area below.
1796 if (identity() == CODE_SPACE) {
1797 heap()->UnprotectAndRegisterMemoryChunk(
1798 MemoryChunk::FromAddress(current_top));
1799 }
1800 Free(current_top, current_limit - current_top,
1801 SpaceAccountingMode::kSpaceAccounted);
1802 }
1803
1804 void PagedSpace::ReleasePage(Page* page) {
1805 DCHECK_EQ(
1806 0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
1807 page));
1808 DCHECK_EQ(page->owner(), this);
1809
1810 free_list_.EvictFreeListItems(page);
1811 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1812
1813 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
1814 DCHECK(!top_on_previous_step_);
1815 allocation_info_.Reset(kNullAddress, kNullAddress);
1816 }
1817
1818 // If page is still in a list, unlink it from that list.
1819 if (page->next_chunk() != nullptr) {
1820 DCHECK_NOT_NULL(page->prev_chunk());
1821 page->Unlink();
1822 }
1823 AccountUncommitted(page->size());
1824 accounting_stats_.DecreaseCapacity(page->area_size());
1825 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
1826 }
1827
1828 void PagedSpace::SetReadAndExecutable() {
1829 DCHECK(identity() == CODE_SPACE);
1830 for (Page* page : *this) {
1831 CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
1832 page->SetReadAndExecutable();
1833 }
1834 }
1835
1836 void PagedSpace::SetReadAndWritable() {
1837 DCHECK(identity() == CODE_SPACE);
1838 for (Page* page : *this) {
1839 CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
1840 page->SetReadAndWritable();
1841 }
1842 }
1843
1844 std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
1845 return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
1846 }
1847
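// Tries to set up a new linear allocation area of at least |size_in_bytes|
// backed by a free-list node. Returns false if the free list cannot provide
// a large enough node.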
1848 bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
1849 DCHECK(IsAligned(size_in_bytes, kPointerSize));
1850 DCHECK_LE(top(), limit());
1851 #ifdef DEBUG
1852 if (top() != limit()) {
1853 DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
1854 }
1855 #endif
1856 // Don't free list allocate if there is linear space available.
1857 DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
1858
1859 // Mark the old linear allocation area with a free space map so it can be
1860 // skipped when scanning the heap. This also puts it back in the free list
1861 // if it is big enough.
1862 FreeLinearAllocationArea();
1863
1864 if (!is_local()) {
1865 heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
1866 heap()->GCFlagsForIncrementalMarking(),
1867 kGCCallbackScheduleIdleGarbageCollection);
1868 }
1869
1870 size_t new_node_size = 0;
1871 FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
1872 if (new_node == nullptr) return false;
1873
1874 DCHECK_GE(new_node_size, size_in_bytes);
1875
1876 // The old-space-step might have finished sweeping and restarted marking.
1877 // Verify that it did not turn the page of the new node into an evacuation
1878 // candidate.
1879 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1880
1881 // Memory in the linear allocation area is counted as allocated. We may free
1882 // a little of this again immediately - see below.
1883 Page* page = Page::FromAddress(new_node->address());
1884 IncreaseAllocatedBytes(new_node_size, page);
1885
1886 Address start = new_node->address();
1887 Address end = new_node->address() + new_node_size;
1888 Address limit = ComputeLimit(start, end, size_in_bytes);
1889 DCHECK_LE(limit, end);
1890 DCHECK_LE(size_in_bytes, limit - start);
1891 if (limit != end) {
1892 if (identity() == CODE_SPACE) {
1893 heap()->UnprotectAndRegisterMemoryChunk(page);
1894 }
1895 Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
1896 }
1897 SetLinearAllocationArea(start, limit);
1898
1899 return true;
1900 }
1901
1902 #ifdef DEBUG
1903 void PagedSpace::Print() {}
1904 #endif
1905
1906 #ifdef VERIFY_HEAP
1907 void PagedSpace::Verify(ObjectVisitor* visitor) {
1908 bool allocation_pointer_found_in_space =
1909 (allocation_info_.top() == allocation_info_.limit());
1910 for (Page* page : *this) {
1911 CHECK(page->owner() == this);
1912 if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
1913 allocation_pointer_found_in_space = true;
1914 }
1915 CHECK(page->SweepingDone());
1916 HeapObjectIterator it(page);
1917 Address end_of_previous_object = page->area_start();
1918 Address top = page->area_end();
1919 for (HeapObject* object = it.Next(); object != nullptr;
1920 object = it.Next()) {
1921 CHECK(end_of_previous_object <= object->address());
1922
1923 // The first word should be a map, and we expect all map pointers to
1924 // be in map space or read-only space.
1925 Map* map = object->map();
1926 CHECK(map->IsMap());
1927 CHECK(heap()->map_space()->Contains(map) ||
1928 heap()->read_only_space()->Contains(map));
1929
1930 // Perform space-specific object verification.
1931 VerifyObject(object);
1932
1933 // The object itself should look OK.
1934 object->ObjectVerify();
1935
1936 if (!FLAG_verify_heap_skip_remembered_set) {
1937 heap()->VerifyRememberedSetFor(object);
1938 }
1939
1940 // All the interior pointers should be contained in the heap.
1941 int size = object->Size();
1942 object->IterateBody(map, size, visitor);
1943 CHECK(object->address() + size <= top);
1944 end_of_previous_object = object->address() + size;
1945 }
1946 }
1947 CHECK(allocation_pointer_found_in_space);
1948 #ifdef DEBUG
1949 VerifyCountersAfterSweeping();
1950 #endif
1951 }
1952
1953 void PagedSpace::VerifyLiveBytes() {
1954 IncrementalMarking::MarkingState* marking_state =
1955 heap()->incremental_marking()->marking_state();
1956 for (Page* page : *this) {
1957 CHECK(page->SweepingDone());
1958 HeapObjectIterator it(page);
1959 int black_size = 0;
1960 for (HeapObject* object = it.Next(); object != nullptr;
1961 object = it.Next()) {
1962 // Sum up the size of all objects marked black on this page.
1963 if (marking_state->IsBlack(object)) {
1964 black_size += object->Size();
1965 }
1966 }
1967 CHECK_LE(black_size, marking_state->live_bytes(page));
1968 }
1969 }
1970 #endif // VERIFY_HEAP
1971
1972 #ifdef DEBUG
1973 void PagedSpace::VerifyCountersAfterSweeping() {
1974 size_t total_capacity = 0;
1975 size_t total_allocated = 0;
1976 for (Page* page : *this) {
1977 DCHECK(page->SweepingDone());
1978 total_capacity += page->area_size();
1979 HeapObjectIterator it(page);
1980 size_t real_allocated = 0;
1981 for (HeapObject* object = it.Next(); object != nullptr;
1982 object = it.Next()) {
1983 if (!object->IsFiller()) {
1984 real_allocated += object->Size();
1985 }
1986 }
1987 total_allocated += page->allocated_bytes();
1988 // The real size can be smaller than the accounted size if array trimming or
1989 // object slack tracking happened after sweeping.
1990 DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
1991 DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
1992 }
1993 DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
1994 DCHECK_EQ(total_allocated, accounting_stats_.Size());
1995 }
1996
1997 void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
1998 // We need to refine the counters on pages that are already swept and have
1999 // not been moved over to the actual space. Otherwise, the AccountingStats
2000 // are just an over-approximation.
2001 RefillFreeList();
2002
2003 size_t total_capacity = 0;
2004 size_t total_allocated = 0;
2005 auto marking_state =
2006 heap()->incremental_marking()->non_atomic_marking_state();
2007 for (Page* page : *this) {
2008 size_t page_allocated =
2009 page->SweepingDone()
2010 ? page->allocated_bytes()
2011 : static_cast<size_t>(marking_state->live_bytes(page));
2012 total_capacity += page->area_size();
2013 total_allocated += page_allocated;
2014 DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
2015 }
2016 DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
2017 DCHECK_EQ(total_allocated, accounting_stats_.Size());
2018 }
2019 #endif
2020
2021 // -----------------------------------------------------------------------------
2022 // NewSpace implementation
2023
2024 bool NewSpace::SetUp(size_t initial_semispace_capacity,
2025 size_t maximum_semispace_capacity) {
2026 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
2027 DCHECK(base::bits::IsPowerOfTwo(
2028 static_cast<uint32_t>(maximum_semispace_capacity)));
2029
2030 to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
2031 from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
2032 if (!to_space_.Commit()) {
2033 return false;
2034 }
2035 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
2036 ResetLinearAllocationArea();
2037
2038 return true;
2039 }
2040
2041
2042 void NewSpace::TearDown() {
2043 allocation_info_.Reset(kNullAddress, kNullAddress);
2044
2045 to_space_.TearDown();
2046 from_space_.TearDown();
2047 }
2048
2049 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
2050
2051
2052 void NewSpace::Grow() {
2053 // Double the semispace size but only up to maximum capacity.
2054 DCHECK(TotalCapacity() < MaximumCapacity());
2055 size_t new_capacity =
2056 Min(MaximumCapacity(),
2057 static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
2058 if (to_space_.GrowTo(new_capacity)) {
2059 // Only grow from-space if we managed to grow to-space.
2060 if (!from_space_.GrowTo(new_capacity)) {
2061 // If we managed to grow to-space but couldn't grow from-space,
2062 // attempt to shrink to-space.
2063 if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
2064 // We are in an inconsistent state because we could not
2065 // commit/uncommit memory from new space.
2066 FATAL("inconsistent state");
2067 }
2068 }
2069 }
2070 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2071 }
2072
2073
2074 void NewSpace::Shrink() {
2075 size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
2076 size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
2077 if (rounded_new_capacity < TotalCapacity() &&
2078 to_space_.ShrinkTo(rounded_new_capacity)) {
2079 // Only shrink from-space if we managed to shrink to-space.
2080 from_space_.Reset();
2081 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
2082 // If we managed to shrink to-space but couldn't shrink from-space,
2083 // attempt to grow to-space again.
2084 if (!to_space_.GrowTo(from_space_.current_capacity())) {
2085 // We are in an inconsistent state because we could not
2086 // commit/uncommit memory from new space.
2087 FATAL("inconsistent state");
2088 }
2089 }
2090 }
2091 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2092 }
2093
2094 bool NewSpace::Rebalance() {
2095 // Order here is important to make use of the page pool.
2096 return to_space_.EnsureCurrentCapacity() &&
2097 from_space_.EnsureCurrentCapacity();
2098 }
2099
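// Brings the number of committed pages in line with current_capacity_:
// surplus pages are released back to the pool and missing pages are
// allocated from it.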
2100 bool SemiSpace::EnsureCurrentCapacity() {
2101 if (is_committed()) {
2102 const int expected_pages =
2103 static_cast<int>(current_capacity_ / Page::kPageSize);
2104 int actual_pages = 0;
2105 Page* current_page = anchor()->next_page();
2106 while (current_page != anchor()) {
2107 actual_pages++;
2108 current_page = current_page->next_page();
2109 if (actual_pages > expected_pages) {
2110 Page* to_remove = current_page->prev_page();
2111 // Make sure we don't overtake the actual top pointer.
2112 CHECK_NE(to_remove, current_page_);
2113 to_remove->Unlink();
2114 // Clear new space flags to avoid this page being treated as a new
2115 // space page that is potentially being swept.
2116 to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
2117 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
2118 to_remove);
2119 }
2120 }
2121 IncrementalMarking::NonAtomicMarkingState* marking_state =
2122 heap()->incremental_marking()->non_atomic_marking_state();
2123 while (actual_pages < expected_pages) {
2124 actual_pages++;
2125 current_page =
2126 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2127 Page::kAllocatableMemory, this, NOT_EXECUTABLE);
2128 if (current_page == nullptr) return false;
2129 DCHECK_NOT_NULL(current_page);
2130 current_page->InsertAfter(anchor());
2131 marking_state->ClearLiveness(current_page);
2132 current_page->SetFlags(anchor()->prev_page()->GetFlags(),
2133 static_cast<uintptr_t>(Page::kCopyAllFlags));
2134 heap()->CreateFillerObjectAt(current_page->area_start(),
2135 static_cast<int>(current_page->area_size()),
2136 ClearRecordedSlots::kNo);
2137 }
2138 }
2139 return true;
2140 }
2141
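// Closes the buffer by filling its unused tail with a filler object and
// returns the old allocation area; the buffer is invalid afterwards.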
2142 LinearAllocationArea LocalAllocationBuffer::Close() {
2143 if (IsValid()) {
2144 heap_->CreateFillerObjectAt(
2145 allocation_info_.top(),
2146 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
2147 ClearRecordedSlots::kNo);
2148 const LinearAllocationArea old_info = allocation_info_;
2149 allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
2150 return old_info;
2151 }
2152 return LinearAllocationArea(kNullAddress, kNullAddress);
2153 }
2154
2155 LocalAllocationBuffer::LocalAllocationBuffer(
2156 Heap* heap, LinearAllocationArea allocation_info)
2157 : heap_(heap), allocation_info_(allocation_info) {
2158 if (IsValid()) {
2159 heap_->CreateFillerObjectAt(
2160 allocation_info_.top(),
2161 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
2162 ClearRecordedSlots::kNo);
2163 }
2164 }
2165
2166
2167 LocalAllocationBuffer::LocalAllocationBuffer(
2168 const LocalAllocationBuffer& other) {
2169 *this = other;
2170 }
2171
2172
2173 LocalAllocationBuffer& LocalAllocationBuffer::operator=(
2174 const LocalAllocationBuffer& other) {
2175 Close();
2176 heap_ = other.heap_;
2177 allocation_info_ = other.allocation_info_;
2178
2179 // This is needed since we (a) cannot yet use move-semantics, (b) want to
2180 // make the class easy to use by value, and (c) implicitly call {Close}
2181 // upon copy.
2182 const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
2183 kNullAddress, kNullAddress);
2184 return *this;
2185 }
2186
2187 void NewSpace::UpdateLinearAllocationArea() {
2188 // Make sure there are no unaccounted allocations.
2189 DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
2190
2191 Address new_top = to_space_.page_low();
2192 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2193 allocation_info_.Reset(new_top, to_space_.page_high());
2194 original_top_.SetValue(top());
2195 original_limit_.SetValue(limit());
2196 StartNextInlineAllocationStep();
2197 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2198 }
2199
2200 void NewSpace::ResetLinearAllocationArea() {
2201 // Do a step to account for memory allocated so far before resetting.
2202 InlineAllocationStep(top(), top(), kNullAddress, 0);
2203 to_space_.Reset();
2204 UpdateLinearAllocationArea();
2205 // Clear all mark-bits in the to-space.
2206 IncrementalMarking::NonAtomicMarkingState* marking_state =
2207 heap()->incremental_marking()->non_atomic_marking_state();
2208 for (Page* p : to_space_) {
2209 marking_state->ClearLiveness(p);
2210 // Concurrent marking may have local live bytes for this page.
2211 heap()->concurrent_marking()->ClearLiveness(p);
2212 }
2213 }
2214
2215 void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
2216 Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
2217 allocation_info_.set_limit(new_limit);
2218 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2219 }
2220
2221 void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
2222 Address new_limit = ComputeLimit(top(), limit(), min_size);
2223 DCHECK_LE(new_limit, limit());
2224 DecreaseLimit(new_limit);
2225 }
2226
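// Advances allocation to the next to-space page, filling the remainder of
// the current page with a filler object. Returns false when no further page
// is available.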
2227 bool NewSpace::AddFreshPage() {
2228 Address top = allocation_info_.top();
2229 DCHECK(!Page::IsAtObjectStart(top));
2230
2231 // Do a step to account for memory allocated on previous page.
2232 InlineAllocationStep(top, top, kNullAddress, 0);
2233
2234 if (!to_space_.AdvancePage()) {
2235 // No more pages left to advance.
2236 return false;
2237 }
2238
2239 // Clear remainder of current page.
2240 Address limit = Page::FromAllocationAreaAddress(top)->area_end();
2241 int remaining_in_page = static_cast<int>(limit - top);
2242 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
2243 UpdateLinearAllocationArea();
2244
2245 return true;
2246 }
2247
2248
2249 bool NewSpace::AddFreshPageSynchronized() {
2250 base::LockGuard<base::Mutex> guard(&mutex_);
2251 return AddFreshPage();
2252 }
2253
2254
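// Makes sure the current to-space page has room for |size_in_bytes| plus any
// alignment filler, advancing to a fresh page if necessary. If the limit had
// been lowered for allocation observers, a step is performed and the limit
// is recomputed.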
2255 bool NewSpace::EnsureAllocation(int size_in_bytes,
2256 AllocationAlignment alignment) {
2257 Address old_top = allocation_info_.top();
2258 Address high = to_space_.page_high();
2259 int filler_size = Heap::GetFillToAlign(old_top, alignment);
2260 int aligned_size_in_bytes = size_in_bytes + filler_size;
2261
2262 if (old_top + aligned_size_in_bytes > high) {
2263 // Not enough room in the page, try to allocate a new one.
2264 if (!AddFreshPage()) {
2265 return false;
2266 }
2267
2268 old_top = allocation_info_.top();
2269 high = to_space_.page_high();
2270 filler_size = Heap::GetFillToAlign(old_top, alignment);
2271 }
2272
2273 DCHECK(old_top + aligned_size_in_bytes <= high);
2274
2275 if (allocation_info_.limit() < high) {
2276 // Either the limit has been lowered because linear allocation was disabled
2277 // or because incremental marking wants to get a chance to do a step,
2278 // or because the idle scavenge job wants to get a chance to post a task.
2279 // Set the new limit accordingly.
2280 Address new_top = old_top + aligned_size_in_bytes;
2281 Address soon_object = old_top + filler_size;
2282 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
2283 UpdateInlineAllocationLimit(aligned_size_in_bytes);
2284 }
2285 return true;
2286 }
2287
2288 void SpaceWithLinearArea::StartNextInlineAllocationStep() {
2289 if (heap()->allocation_step_in_progress()) {
2290 // If we are mid-way through an existing step, don't start a new one.
2291 return;
2292 }
2293
2294 if (AllocationObserversActive()) {
2295 top_on_previous_step_ = top();
2296 UpdateInlineAllocationLimit(0);
2297 } else {
2298 DCHECK_EQ(kNullAddress, top_on_previous_step_);
2299 }
2300 }
2301
2302 void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
2303 InlineAllocationStep(top(), top(), kNullAddress, 0);
2304 Space::AddAllocationObserver(observer);
2305 DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
2306 }
2307
2308 void SpaceWithLinearArea::RemoveAllocationObserver(
2309 AllocationObserver* observer) {
2310 Address top_for_next_step =
2311 allocation_observers_.size() == 1 ? kNullAddress : top();
2312 InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
2313 Space::RemoveAllocationObserver(observer);
2314 DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
2315 }
2316
2317 void SpaceWithLinearArea::PauseAllocationObservers() {
2318 // Do a step to account for memory allocated so far.
2319 InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
2320 Space::PauseAllocationObservers();
2321 DCHECK_EQ(kNullAddress, top_on_previous_step_);
2322 UpdateInlineAllocationLimit(0);
2323 }
2324
2325 void SpaceWithLinearArea::ResumeAllocationObservers() {
2326 DCHECK_EQ(kNullAddress, top_on_previous_step_);
2327 Space::ResumeAllocationObservers();
2328 StartNextInlineAllocationStep();
2329 }
2330
2331 void SpaceWithLinearArea::InlineAllocationStep(Address top,
2332 Address top_for_next_step,
2333 Address soon_object,
2334 size_t size) {
2335 if (heap()->allocation_step_in_progress()) {
2336 // Avoid starting a new step if we are mid-way through an existing one.
2337 return;
2338 }
2339
2340 if (top_on_previous_step_) {
2341 if (top < top_on_previous_step_) {
2342 // Generated code decreased the top pointer to do folded allocations.
2343 DCHECK_NE(top, kNullAddress);
2344 DCHECK_EQ(Page::FromAllocationAreaAddress(top),
2345 Page::FromAllocationAreaAddress(top_on_previous_step_));
2346 top_on_previous_step_ = top;
2347 }
2348 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
2349 AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
2350 top_on_previous_step_ = top_for_next_step;
2351 }
2352 }
2353
2354 std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
2355 return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
2356 }
2357
2358 #ifdef VERIFY_HEAP
2359 // We do not use the SemiSpaceIterator because verification doesn't assume
2360 // that it works (it depends on the invariants we are checking).
2361 void NewSpace::Verify() {
2362 // The allocation pointer should be in the space or at the very end.
2363 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2364
2365 // There should be objects packed in from the low address up to the
2366 // allocation pointer.
2367 Address current = to_space_.first_page()->area_start();
2368 CHECK_EQ(current, to_space_.space_start());
2369
2370 while (current != top()) {
2371 if (!Page::IsAlignedToPageSize(current)) {
2372 // The allocation pointer should not be in the middle of an object.
2373 CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
2374 current < top());
2375
2376 HeapObject* object = HeapObject::FromAddress(current);
2377
2378 // The first word should be a map, and we expect all map pointers to
2379 // be in map space or read-only space.
2380 Map* map = object->map();
2381 CHECK(map->IsMap());
2382 CHECK(heap()->map_space()->Contains(map) ||
2383 heap()->read_only_space()->Contains(map));
2384
2385 // The object should not be code or a map.
2386 CHECK(!object->IsMap());
2387 CHECK(!object->IsAbstractCode());
2388
2389 // The object itself should look OK.
2390 object->ObjectVerify();
2391
2392 // All the interior pointers should be contained in the heap.
2393 VerifyPointersVisitor visitor;
2394 int size = object->Size();
2395 object->IterateBody(map, size, &visitor);
2396
2397 current += size;
2398 } else {
2399 // At end of page, switch to next page.
2400 Page* page = Page::FromAllocationAreaAddress(current)->next_page();
2401 // Next page should be valid.
2402 CHECK(!page->is_anchor());
2403 current = page->area_start();
2404 }
2405 }
2406
2407 // Check semi-spaces.
2408 CHECK_EQ(from_space_.id(), kFromSpace);
2409 CHECK_EQ(to_space_.id(), kToSpace);
2410 from_space_.Verify();
2411 to_space_.Verify();
2412 }
2413 #endif
2414
2415 // -----------------------------------------------------------------------------
2416 // SemiSpace implementation
2417
2418 void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
2419 DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
2420 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
2421 current_capacity_ = minimum_capacity_;
2422 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
2423 committed_ = false;
2424 }
2425
2426
2427 void SemiSpace::TearDown() {
2428 // Properly uncommit memory to keep the allocator counters in sync.
2429 if (is_committed()) {
2430 Uncommit();
2431 }
2432 current_capacity_ = maximum_capacity_ = 0;
2433 }
2434
2435
2436 bool SemiSpace::Commit() {
2437 DCHECK(!is_committed());
2438 Page* current = anchor();
2439 const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
2440 for (int pages_added = 0; pages_added < num_pages; pages_added++) {
2441 Page* new_page =
2442 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2443 Page::kAllocatableMemory, this, NOT_EXECUTABLE);
2444 if (new_page == nullptr) {
2445 RewindPages(current, pages_added);
2446 return false;
2447 }
2448 new_page->InsertAfter(current);
2449 current = new_page;
2450 }
2451 Reset();
2452 AccountCommitted(current_capacity_);
2453 if (age_mark_ == kNullAddress) {
2454 age_mark_ = first_page()->area_start();
2455 }
2456 committed_ = true;
2457 return true;
2458 }
2459
2460
2461 bool SemiSpace::Uncommit() {
2462 DCHECK(is_committed());
2463 for (auto it = begin(); it != end();) {
2464 Page* p = *(it++);
2465 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
2466 }
2467 anchor()->set_next_page(anchor());
2468 anchor()->set_prev_page(anchor());
2469 AccountUncommitted(current_capacity_);
2470 committed_ = false;
2471 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2472 return true;
2473 }
2474
2475
2476 size_t SemiSpace::CommittedPhysicalMemory() {
2477 if (!is_committed()) return 0;
2478 size_t size = 0;
2479 for (Page* p : *this) {
2480 size += p->CommittedPhysicalMemory();
2481 }
2482 return size;
2483 }
2484
2485 bool SemiSpace::GrowTo(size_t new_capacity) {
2486 if (!is_committed()) {
2487 if (!Commit()) return false;
2488 }
2489 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
2490 DCHECK_LE(new_capacity, maximum_capacity_);
2491 DCHECK_GT(new_capacity, current_capacity_);
2492 const size_t delta = new_capacity - current_capacity_;
2493 DCHECK(IsAligned(delta, AllocatePageSize()));
2494 const int delta_pages = static_cast<int>(delta / Page::kPageSize);
2495 Page* last_page = anchor()->prev_page();
2496 DCHECK_NE(last_page, anchor());
2497 IncrementalMarking::NonAtomicMarkingState* marking_state =
2498 heap()->incremental_marking()->non_atomic_marking_state();
2499 for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
2500 Page* new_page =
2501 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2502 Page::kAllocatableMemory, this, NOT_EXECUTABLE);
2503 if (new_page == nullptr) {
2504 RewindPages(last_page, pages_added);
2505 return false;
2506 }
2507 new_page->InsertAfter(last_page);
2508 marking_state->ClearLiveness(new_page);
2509 // Duplicate the flags that were set on the old page.
2510 new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
2511 last_page = new_page;
2512 }
2513 AccountCommitted(delta);
2514 current_capacity_ = new_capacity;
2515 return true;
2516 }
2517
2518 void SemiSpace::RewindPages(Page* start, int num_pages) {
2519 Page* new_last_page = nullptr;
2520 Page* last_page = start;
2521 while (num_pages > 0) {
2522 DCHECK_NE(last_page, anchor());
2523 new_last_page = last_page->prev_page();
2524 last_page->prev_page()->set_next_page(last_page->next_page());
2525 last_page->next_page()->set_prev_page(last_page->prev_page());
2526 last_page = new_last_page;
2527 num_pages--;
2528 }
2529 }
2530
2531 bool SemiSpace::ShrinkTo(size_t new_capacity) {
2532 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
2533 DCHECK_GE(new_capacity, minimum_capacity_);
2534 DCHECK_LT(new_capacity, current_capacity_);
2535 if (is_committed()) {
2536 const size_t delta = current_capacity_ - new_capacity;
2537 DCHECK(IsAligned(delta, AllocatePageSize()));
2538 int delta_pages = static_cast<int>(delta / Page::kPageSize);
2539 Page* new_last_page;
2540 Page* last_page;
2541 while (delta_pages > 0) {
2542 last_page = anchor()->prev_page();
2543 new_last_page = last_page->prev_page();
2544 new_last_page->set_next_page(anchor());
2545 anchor()->set_prev_page(new_last_page);
2546 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
2547 last_page);
2548 delta_pages--;
2549 }
2550 AccountUncommitted(delta);
2551 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2552 }
2553 current_capacity_ = new_capacity;
2554 return true;
2555 }
2556
2557 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
2558 anchor_.set_owner(this);
2559 anchor_.prev_page()->set_next_page(&anchor_);
2560 anchor_.next_page()->set_prev_page(&anchor_);
2561
2562 for (Page* page : *this) {
2563 page->set_owner(this);
2564 page->SetFlags(flags, mask);
2565 if (id_ == kToSpace) {
2566 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
2567 page->SetFlag(MemoryChunk::IN_TO_SPACE);
2568 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2569 heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
2570 page, 0);
2571 } else {
2572 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
2573 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
2574 }
2575 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
2576 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
2577 }
2578 }
2579
2580
2581 void SemiSpace::Reset() {
2582 DCHECK_NE(anchor_.next_page(), &anchor_);
2583 current_page_ = anchor_.next_page();
2584 pages_used_ = 0;
2585 }
2586
2587 void SemiSpace::RemovePage(Page* page) {
2588 if (current_page_ == page) {
2589 current_page_ = page->prev_page();
2590 }
2591 page->Unlink();
2592 }
2593
2594 void SemiSpace::PrependPage(Page* page) {
2595 page->SetFlags(current_page()->GetFlags(),
2596 static_cast<uintptr_t>(Page::kCopyAllFlags));
2597 page->set_owner(this);
2598 page->InsertAfter(anchor());
2599 pages_used_++;
2600 }
2601
2602 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
2603 // We won't be swapping semispaces without data in them.
2604 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
2605 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
2606
2607 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
2608
2609 // We swap all properties but id_.
2610 std::swap(from->current_capacity_, to->current_capacity_);
2611 std::swap(from->maximum_capacity_, to->maximum_capacity_);
2612 std::swap(from->minimum_capacity_, to->minimum_capacity_);
2613 std::swap(from->age_mark_, to->age_mark_);
2614 std::swap(from->committed_, to->committed_);
2615 std::swap(from->anchor_, to->anchor_);
2616 std::swap(from->current_page_, to->current_page_);
2617
2618 to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
2619 from->FixPagesFlags(0, 0);
2620 }
2621
2622 void SemiSpace::set_age_mark(Address mark) {
2623 DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
2624 age_mark_ = mark;
2625 // Mark all pages up to the one containing mark.
2626 for (Page* p : PageRange(space_start(), mark)) {
2627 p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2628 }
2629 }
2630
2631 std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
2632 // Iteration over a SemiSpace happens via NewSpace::GetObjectIterator().
2633 UNREACHABLE();
2634 }
2635
2636 #ifdef DEBUG
2637 void SemiSpace::Print() {}
2638 #endif
2639
2640 #ifdef VERIFY_HEAP
2641 void SemiSpace::Verify() {
2642 bool is_from_space = (id_ == kFromSpace);
2643 Page* page = anchor_.next_page();
2644 CHECK(anchor_.owner() == this);
2645 while (page != &anchor_) {
2646 CHECK_EQ(page->owner(), this);
2647 CHECK(page->InNewSpace());
2648 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
2649 : MemoryChunk::IN_TO_SPACE));
2650 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
2651 : MemoryChunk::IN_FROM_SPACE));
2652 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
2653 if (!is_from_space) {
2654 // The pointers-from-here-are-interesting flag isn't updated dynamically
2655 // on from-space pages, so it might be out of sync with the marking state.
2656 if (page->heap()->incremental_marking()->IsMarking()) {
2657 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2658 } else {
2659 CHECK(
2660 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2661 }
2662 }
2663 CHECK_EQ(page->prev_page()->next_page(), page);
2664 page = page->next_page();
2665 }
2666 }
2667 #endif
2668
2669 #ifdef DEBUG
2670 void SemiSpace::AssertValidRange(Address start, Address end) {
2671 // Both addresses must belong to the same semi-space.
2672 Page* page = Page::FromAllocationAreaAddress(start);
2673 Page* end_page = Page::FromAllocationAreaAddress(end);
2674 SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
2675 DCHECK_EQ(space, end_page->owner());
2676 // Start address is before end address, either on same page,
2677 // or end address is on a later page in the linked list of
2678 // semi-space pages.
2679 if (page == end_page) {
2680 DCHECK_LE(start, end);
2681 } else {
2682 while (page != end_page) {
2683 page = page->next_page();
2684 DCHECK_NE(page, space->anchor());
2685 }
2686 }
2687 }
2688 #endif
2689
2690
2691 // -----------------------------------------------------------------------------
2692 // SemiSpaceIterator implementation.
2693
2694 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
2695 Initialize(space->bottom(), space->top());
2696 }
2697
2698
2699 void SemiSpaceIterator::Initialize(Address start, Address end) {
2700 SemiSpace::AssertValidRange(start, end);
2701 current_ = start;
2702 limit_ = end;
2703 }
2704
2705 size_t NewSpace::CommittedPhysicalMemory() {
2706 if (!base::OS::HasLazyCommits()) return CommittedMemory();
2707 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2708 size_t size = to_space_.CommittedPhysicalMemory();
2709 if (from_space_.is_committed()) {
2710 size += from_space_.CommittedPhysicalMemory();
2711 }
2712 return size;
2713 }
2714
2715
2716 // -----------------------------------------------------------------------------
2717 // Free lists for old object spaces implementation
2718
2719
2720 void FreeListCategory::Reset() {
2721 set_top(nullptr);
2722 set_prev(nullptr);
2723 set_next(nullptr);
2724 available_ = 0;
2725 }
2726
2727 FreeSpace* FreeListCategory::PickNodeFromList(size_t minimum_size,
2728 size_t* node_size) {
2729 DCHECK(page()->CanAllocate());
2730 FreeSpace* node = top();
2731 if (node == nullptr || static_cast<size_t>(node->Size()) < minimum_size) {
2732 *node_size = 0;
2733 return nullptr;
2734 }
2735 set_top(node->next());
2736 *node_size = node->Size();
2737 available_ -= *node_size;
2738 return node;
2739 }
2740
2741 FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
2742 size_t* node_size) {
2743 DCHECK(page()->CanAllocate());
2744 FreeSpace* prev_non_evac_node = nullptr;
2745 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2746 cur_node = cur_node->next()) {
2747 size_t size = cur_node->size();
2748 if (size >= minimum_size) {
2749 DCHECK_GE(available_, size);
2750 available_ -= size;
2751 if (cur_node == top()) {
2752 set_top(cur_node->next());
2753 }
2754 if (prev_non_evac_node != nullptr) {
2755 MemoryChunk* chunk =
2756 MemoryChunk::FromAddress(prev_non_evac_node->address());
2757 if (chunk->owner()->identity() == CODE_SPACE) {
2758 chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
2759 }
2760 prev_non_evac_node->set_next(cur_node->next());
2761 }
2762 *node_size = size;
2763 return cur_node;
2764 }
2765
2766 prev_non_evac_node = cur_node;
2767 }
2768 return nullptr;
2769 }
2770
2771 void FreeListCategory::Free(Address start, size_t size_in_bytes,
2772 FreeMode mode) {
2773 DCHECK(page()->CanAllocate());
2774 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2775 free_space->set_next(top());
2776 set_top(free_space);
2777 available_ += size_in_bytes;
2778 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2779 owner()->AddCategory(this);
2780 }
2781 }
2782
2783
2784 void FreeListCategory::RepairFreeList(Heap* heap) {
2785 FreeSpace* n = top();
2786 while (n != nullptr) {
2787 Map** map_location = reinterpret_cast<Map**>(n->address());
2788 if (*map_location == nullptr) {
2789 *map_location = heap->free_space_map();
2790 } else {
2791 DCHECK(*map_location == heap->free_space_map());
2792 }
2793 n = n->next();
2794 }
2795 }
2796
2797 void FreeListCategory::Relink() {
2798 DCHECK(!is_linked());
2799 owner()->AddCategory(this);
2800 }
2801
2802 FreeList::FreeList() : wasted_bytes_(0) {
2803 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2804 categories_[i] = nullptr;
2805 }
2806 Reset();
2807 }
2808
2809
2810 void FreeList::Reset() {
2811 ForAllFreeListCategories(
2812 [](FreeListCategory* category) { category->Reset(); });
2813 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2814 categories_[i] = nullptr;
2815 }
2816 ResetStats();
2817 }
2818
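// Adds the block [start, start + size_in_bytes) to the owning page's free
// list. Blocks smaller than kMinBlockSize are only accounted as wasted
// memory; the return value is the number of bytes wasted this way.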
2819 size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
2820 Page* page = Page::FromAddress(start);
2821 page->DecreaseAllocatedBytes(size_in_bytes);
2822
2823 // Blocks have to be a minimum size to hold free list items.
2824 if (size_in_bytes < kMinBlockSize) {
2825 page->add_wasted_memory(size_in_bytes);
2826 wasted_bytes_.Increment(size_in_bytes);
2827 return size_in_bytes;
2828 }
2829
2830 // Insert other blocks at the head of a free list of the appropriate
2831 // magnitude.
2832 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
2833 page->free_list_category(type)->Free(start, size_in_bytes, mode);
2834 DCHECK_EQ(page->AvailableInFreeList(),
2835 page->AvailableInFreeListFromAllocatedBytes());
2836 return 0;
2837 }
2838
2839 FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
2840 size_t* node_size) {
2841 FreeListCategoryIterator it(this, type);
2842 FreeSpace* node = nullptr;
2843 while (it.HasNext()) {
2844 FreeListCategory* current = it.Next();
2845 node = current->PickNodeFromList(minimum_size, node_size);
2846 if (node != nullptr) {
2847 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2848 return node;
2849 }
2850 RemoveCategory(current);
2851 }
2852 return node;
2853 }
2854
2855 FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type,
2856 size_t minimum_size, size_t* node_size) {
2857 if (categories_[type] == nullptr) return nullptr;
2858 FreeSpace* node =
2859 categories_[type]->PickNodeFromList(minimum_size, node_size);
2860 if (node != nullptr) {
2861 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2862 }
2863 return node;
2864 }
2865
2866 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2867 size_t* node_size,
2868 size_t minimum_size) {
2869 FreeListCategoryIterator it(this, type);
2870 FreeSpace* node = nullptr;
2871 while (it.HasNext()) {
2872 FreeListCategory* current = it.Next();
2873 node = current->SearchForNodeInList(minimum_size, node_size);
2874 if (node != nullptr) {
2875 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2876 return node;
2877 }
2878 if (current->is_empty()) {
2879 RemoveCategory(current);
2880 }
2881 }
2882 return node;
2883 }
2884
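// Finds a node of at least |size_in_bytes|: first the exact-fit categories
// (constant time), then the huge list (linear in its length), and finally
// the category that best fits the requested size. On success the owning
// page's allocated-bytes counter is increased.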
2885 FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
2886 DCHECK_GE(kMaxBlockSize, size_in_bytes);
2887 FreeSpace* node = nullptr;
2888 // First try the allocation fast path: try to allocate the minimum element
2889 // size of a free list category. This operation is constant time.
2890 FreeListCategoryType type =
2891 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2892 for (int i = type; i < kHuge && node == nullptr; i++) {
2893 node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
2894 node_size);
2895 }
2896
2897 if (node == nullptr) {
2898 // Next search the huge list for free list nodes. This takes linear time in
2899 // the number of huge elements.
2900 node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
2901 }
2902
2903 if (node == nullptr && type != kHuge) {
2904 // We didn't find anything in the huge list. Now search the best fitting
2905 // free list for a node that has at least the requested size.
2906 type = SelectFreeListCategoryType(size_in_bytes);
2907 node = TryFindNodeIn(type, size_in_bytes, node_size);
2908 }
2909
2910 if (node != nullptr) {
2911 Page::FromAddress(node->address())->IncreaseAllocatedBytes(*node_size);
2912 }
2913
2914 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2915 return node;
2916 }
2917
2918 size_t FreeList::EvictFreeListItems(Page* page) {
2919 size_t sum = 0;
2920 page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
2921 DCHECK_EQ(this, category->owner());
2922 sum += category->available();
2923 RemoveCategory(category);
2924 category->Reset();
2925 });
2926 return sum;
2927 }
2928
2929 bool FreeList::ContainsPageFreeListItems(Page* page) {
2930 bool contained = false;
2931 page->ForAllFreeListCategories(
2932 [this, &contained](FreeListCategory* category) {
2933 if (category->owner() == this && category->is_linked()) {
2934 contained = true;
2935 }
2936 });
2937 return contained;
2938 }
2939
2940 void FreeList::RepairLists(Heap* heap) {
2941 ForAllFreeListCategories(
2942 [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
2943 }
2944
2945 bool FreeList::AddCategory(FreeListCategory* category) {
2946 FreeListCategoryType type = category->type_;
2947 DCHECK_LT(type, kNumberOfCategories);
2948 FreeListCategory* top = categories_[type];
2949
2950 if (category->is_empty()) return false;
2951 if (top == category) return false;
2952
2953 // Common doubly-linked list insertion.
2954 if (top != nullptr) {
2955 top->set_prev(category);
2956 }
2957 category->set_next(top);
2958 categories_[type] = category;
2959 return true;
2960 }
2961
2962 void FreeList::RemoveCategory(FreeListCategory* category) {
2963 FreeListCategoryType type = category->type_;
2964 DCHECK_LT(type, kNumberOfCategories);
2965 FreeListCategory* top = categories_[type];
2966
2967 // Common doubly-linked list removal.
2968 if (top == category) {
2969 categories_[type] = category->next();
2970 }
2971 if (category->prev() != nullptr) {
2972 category->prev()->set_next(category->next());
2973 }
2974 if (category->next() != nullptr) {
2975 category->next()->set_prev(category->prev());
2976 }
2977 category->set_next(nullptr);
2978 category->set_prev(nullptr);
2979 }
2980
2981 void FreeList::PrintCategories(FreeListCategoryType type) {
2982 FreeListCategoryIterator it(this, type);
2983 PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
2984 static_cast<void*>(categories_[type]), type);
2985 while (it.HasNext()) {
2986 FreeListCategory* current = it.Next();
2987 PrintF("%p -> ", static_cast<void*>(current));
2988 }
2989 PrintF("null\n");
2990 }
2991
2992
2993 #ifdef DEBUG
2994 size_t FreeListCategory::SumFreeList() {
2995 size_t sum = 0;
2996 FreeSpace* cur = top();
2997 while (cur != nullptr) {
2998 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2999 sum += cur->relaxed_read_size();
3000 cur = cur->next();
3001 }
3002 return sum;
3003 }
3004
3005 int FreeListCategory::FreeListLength() {
3006 int length = 0;
3007 FreeSpace* cur = top();
3008 while (cur != nullptr) {
3009 length++;
3010 cur = cur->next();
3011 if (length == kVeryLongFreeList) return length;
3012 }
3013 return length;
3014 }
3015
3016 bool FreeList::IsVeryLong() {
3017 int len = 0;
3018 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
3019 FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
3020 while (it.HasNext()) {
3021 len += it.Next()->FreeListLength();
3022 if (len >= FreeListCategory::kVeryLongFreeList) return true;
3023 }
3024 }
3025 return false;
3026 }
3027
3028
3029 // This can take a very long time because it is linear in the number of entries
3030 // on the free list, so it should not be called if FreeListLength returns
3031 // kVeryLongFreeList.
3032 size_t FreeList::SumFreeLists() {
3033 size_t sum = 0;
3034 ForAllFreeListCategories(
3035 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
3036 return sum;
3037 }
3038 #endif
3039
3040
3041 // -----------------------------------------------------------------------------
3042 // OldSpace implementation
3043
3044 void PagedSpace::PrepareForMarkCompact() {
3045 // We don't have a linear allocation area while sweeping. It will be restored
3046 // on the first allocation after the sweep.
3047 FreeLinearAllocationArea();
3048
3049 // Clear the free list before a full GC---it will be rebuilt afterward.
3050 free_list_.Reset();
3051 }
3052
3053 size_t PagedSpace::SizeOfObjects() {
3054 CHECK_GE(limit(), top());
3055 DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
3056 return Size() - (limit() - top());
3057 }
3058
3059 // After we have booted, we have created a map which represents free space
3060 // on the heap. If there was already a free list then the elements on it
3061 // were created with the wrong FreeSpaceMap (normally nullptr), so we need to
3062 // fix them.
3063 void PagedSpace::RepairFreeListsAfterDeserialization() {
3064 free_list_.RepairLists(heap());
3065 // Each page may have a small free space that is not tracked by a free list.
3066 // Those free spaces still contain null as their map pointer.
3067 // Overwrite them with new fillers.
3068 for (Page* page : *this) {
3069 int size = static_cast<int>(page->wasted_memory());
3070 if (size == 0) {
3071 // If there is no wasted memory then all free space is in the free list.
3072 continue;
3073 }
3074 Address start = page->HighWaterMark();
3075 Address end = page->area_end();
3076 if (start < end - size) {
3077 // A region at the high watermark is already in the free list.
3078 HeapObject* filler = HeapObject::FromAddress(start);
3079 CHECK(filler->IsFiller());
3080 start += filler->Size();
3081 }
3082 CHECK_EQ(size, static_cast<int>(end - start));
3083 heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
3084 }
3085 }
3086
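// Completes sweeping if it is still in progress and then retries the
// free-list allocation; returns false when sweeping was not in progress.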
3087 bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
3088 MarkCompactCollector* collector = heap()->mark_compact_collector();
3089 if (collector->sweeping_in_progress()) {
3090 // Wait for the sweeper threads here and complete the sweeping phase.
3091 collector->EnsureSweepingCompleted();
3092
3093 // After waiting for the sweeper threads, there may be new free-list
3094 // entries.
3095 return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
3096 }
3097 return false;
3098 }
3099
3100 bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
3101 MarkCompactCollector* collector = heap()->mark_compact_collector();
3102 if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
3103 collector->sweeper()->ParallelSweepSpace(identity(), 0);
3104 RefillFreeList();
3105 return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
3106 }
3107 return false;
3108 }
3109
3110 bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
3111 VMState<GC> state(heap()->isolate());
3112 RuntimeCallTimerScope runtime_timer(
3113 heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
3114 return RawSlowRefillLinearAllocationArea(size_in_bytes);
3115 }
3116
3117 bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
3118 return RawSlowRefillLinearAllocationArea(size_in_bytes);
3119 }
3120
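// Slow path for refilling the linear allocation area: retries the free list,
// helps with or waits for sweeping, steals a page from the main space when
// evacuating, and expands the space as a last resort.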
3121 bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
3122 // Allocation in this space has failed.
3123 DCHECK_GE(size_in_bytes, 0);
3124 const int kMaxPagesToSweep = 1;
3125
3126 if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
3127
3128 MarkCompactCollector* collector = heap()->mark_compact_collector();
3129 // Sweeping is still in progress.
3130 if (collector->sweeping_in_progress()) {
3131 if (FLAG_concurrent_sweeping && !is_local() &&
3132 !collector->sweeper()->AreSweeperTasksRunning()) {
3133 collector->EnsureSweepingCompleted();
3134 }
3135
3136 // First try to refill the free list; concurrent sweeper threads
3137 // may have freed some objects in the meantime.
3138 RefillFreeList();
3139
3140 // Retry the free list allocation.
3141 if (RefillLinearAllocationAreaFromFreeList(
3142 static_cast<size_t>(size_in_bytes)))
3143 return true;
3144
3145 // If sweeping is still in progress, try to sweep pages.
3146 int max_freed = collector->sweeper()->ParallelSweepSpace(
3147 identity(), size_in_bytes, kMaxPagesToSweep);
3148 RefillFreeList();
3149 if (max_freed >= size_in_bytes) {
3150 if (RefillLinearAllocationAreaFromFreeList(
3151 static_cast<size_t>(size_in_bytes)))
3152 return true;
3153 }
3154 } else if (is_local()) {
3155 // Sweeping is not in progress and we are on a {CompactionSpace}. This can
3156 // only happen when we are evacuating for the young generation.
3157 PagedSpace* main_space = heap()->paged_space(identity());
3158 Page* page = main_space->RemovePageSafe(size_in_bytes);
3159 if (page != nullptr) {
3160 AddPage(page);
3161 if (RefillLinearAllocationAreaFromFreeList(
3162 static_cast<size_t>(size_in_bytes)))
3163 return true;
3164 }
3165 }
3166
3167 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
3168 DCHECK((CountTotalPages() > 1) ||
3169 (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
3170 return RefillLinearAllocationAreaFromFreeList(
3171 static_cast<size_t>(size_in_bytes));
3172 }
3173
3174 // If sweeper threads are active, wait for them at that point and steal
3175 // elements from their free lists. Allocation may still fail here, which
3176 // would indicate that there is not enough memory for the given allocation.
3177 return SweepAndRetryAllocation(size_in_bytes);
3178 }
3179
3180 // -----------------------------------------------------------------------------
3181 // MapSpace implementation
3182
3183 #ifdef VERIFY_HEAP
3184 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
3185 #endif
3186
3187 ReadOnlySpace::ReadOnlySpace(Heap* heap, AllocationSpace id,
3188 Executability executable)
3189 : PagedSpace(heap, id, executable),
3190 is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
3191 }
3192
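// Clears the header fields that point at per-heap state (the mutex is
// deleted, the local tracker pointer is dropped, and the reservation is
// reset) so the header can be treated as relocatable. heap_ and owner_ still
// need to be cleared (see the TODO below).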
3193 void ReadOnlyPage::MakeHeaderRelocatable() {
3194 if (mutex_ != nullptr) {
3195 // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
3196 delete mutex_;
3197 mutex_ = nullptr;
3198 local_tracker_ = nullptr;
3199 reservation_.Reset();
3200 }
3201 }
3202
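// Applies |access| to the payload of every page in the space. The first
// committed OS page, which holds the page header, is not touched and keeps
// its current permissions; when switching to read-only, headers are made
// relocatable first.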
3203 void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
3204 const size_t page_size = MemoryAllocator::GetCommitPageSize();
3205 const size_t area_start_offset = RoundUp(Page::kObjectStartOffset, page_size);
3206 for (Page* p : *this) {
3207 ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
3208 if (access == PageAllocator::kRead) {
3209 page->MakeHeaderRelocatable();
3210 }
3211 CHECK(SetPermissions(page->address() + area_start_offset,
3212 page->size() - area_start_offset, access));
3213 }
3214 }
3215
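// Clears the padding bytes of all sequential one- and two-byte strings in the
// space so that the unused tail bytes have a well-defined value. Runs at most
// once; when the isolate was initialized from a snapshot the padding is
// assumed to be cleared already.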
3216 void ReadOnlySpace::ClearStringPaddingIfNeeded() {
3217 if (is_string_padding_cleared_) return;
3218
3219 WritableScope writable_scope(this);
3220 for (Page* page : *this) {
3221 HeapObjectIterator iterator(page);
3222 for (HeapObject* o = iterator.Next(); o != nullptr; o = iterator.Next()) {
3223 if (o->IsSeqOneByteString()) {
3224 SeqOneByteString::cast(o)->clear_padding();
3225 } else if (o->IsSeqTwoByteString()) {
3226 SeqTwoByteString::cast(o)->clear_padding();
3227 }
3228 }
3229 }
3230 is_string_padding_cleared_ = true;
3231 }
3232
3233 void ReadOnlySpace::MarkAsReadOnly() {
3234 DCHECK(!is_marked_read_only_);
3235 FreeLinearAllocationArea();
3236 is_marked_read_only_ = true;
3237 SetPermissionsForPages(PageAllocator::kRead);
3238 }
3239
3240 void ReadOnlySpace::MarkAsReadWrite() {
3241 DCHECK(is_marked_read_only_);
3242 SetPermissionsForPages(PageAllocator::kReadWrite);
3243 is_marked_read_only_ = false;
3244 }
3245
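// Returns the address from which this large page can be shrunk, given that
// |object_address| + |object_size| is the end of its only object: the object
// end rounded up to the commit page size. Returns 0 when the page is
// executable or when no committed memory can be released.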
3246 Address LargePage::GetAddressToShrink(Address object_address,
3247 size_t object_size) {
3248 if (executable() == EXECUTABLE) {
3249 return 0;
3250 }
3251 size_t used_size = ::RoundUp((object_address - address()) + object_size,
3252 MemoryAllocator::GetCommitPageSize());
3253 if (used_size < CommittedPhysicalMemory()) {
3254 return address() + used_size;
3255 }
3256 return 0;
3257 }
3258
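// Removes all remembered-set entries, both regular and typed, for OLD_TO_NEW
// and OLD_TO_OLD, in the range of the page that is about to be released.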
3259 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
3260 RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
3261 SlotSet::FREE_EMPTY_BUCKETS);
3262 RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
3263 SlotSet::FREE_EMPTY_BUCKETS);
3264 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
3265 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
3266 }
3267
3268 // -----------------------------------------------------------------------------
3269 // LargeObjectIterator
3270
3271 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
3272 current_ = space->first_page_;
3273 }
3274
3275
3276 HeapObject* LargeObjectIterator::Next() {
3277 if (current_ == nullptr) return nullptr;
3278
3279 HeapObject* object = current_->GetObject();
3280 current_ = current_->next_page();
3281 return object;
3282 }
3283
3284
3285 // -----------------------------------------------------------------------------
3286 // LargeObjectSpace
3287
3288 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
3289 : Space(heap, id), // Managed on a per-allocation basis
3290 first_page_(nullptr),
3291 size_(0),
3292 page_count_(0),
3293 objects_size_(0),
3294 chunk_map_(1024) {}
3295
3296 LargeObjectSpace::~LargeObjectSpace() {}
3297
3298 bool LargeObjectSpace::SetUp() {
3299 return true;
3300 }
3301
3302 void LargeObjectSpace::TearDown() {
3303 while (first_page_ != nullptr) {
3304 LargePage* page = first_page_;
3305 first_page_ = first_page_->next_page();
3306 LOG(heap()->isolate(),
3307 DeleteEvent("LargeObjectChunk",
3308 reinterpret_cast<void*>(page->address())));
3309 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
3310 }
3311 SetUp();
3312 }
3313
3314
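// Allocates a dedicated large page holding a single object. A caller might
// use it roughly as follows (a sketch only; |lo_space| and |size| are
// assumptions, not taken from this file):
//
//   AllocationResult result = lo_space->AllocateRaw(size, NOT_EXECUTABLE);
//   HeapObject* object = nullptr;
//   if (!result.To(&object)) {
//     // The allocation failed and a retry was signalled for this space;
//     // the caller is expected to perform a GC and try again.
//   }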
3315 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
3316 Executability executable) {
3317 // Check if we want to force a GC before growing the old space further.
3318 // If so, fail the allocation.
3319 if (!heap()->CanExpandOldGeneration(object_size) ||
3320 !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
3321 return AllocationResult::Retry(identity());
3322 }
3323
3324 LargePage* page = heap()->memory_allocator()->AllocateLargePage(
3325 object_size, this, executable);
3326 if (page == nullptr) return AllocationResult::Retry(identity());
3327 DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
3328
3329 size_ += static_cast<int>(page->size());
3330 AccountCommitted(page->size());
3331 objects_size_ += object_size;
3332 page_count_++;
3333 page->set_next_page(first_page_);
3334 first_page_ = page;
3335
3336 InsertChunkMapEntries(page);
3337
3338 HeapObject* object = page->GetObject();
3339
3340 if (Heap::ShouldZapGarbage()) {
3341 // Make the object consistent so the heap can be verified in OldSpaceStep.
3342 // We only need to do this in debug builds or if verify_heap is on.
3343 reinterpret_cast<Object**>(object->address())[0] =
3344 heap()->fixed_array_map();
3345 reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
3346 }
3347
3348 heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
3349 heap()->GCFlagsForIncrementalMarking(),
3350 kGCCallbackScheduleIdleGarbageCollection);
3351 heap()->CreateFillerObjectAt(object->address(), object_size,
3352 ClearRecordedSlots::kNo);
3353 if (heap()->incremental_marking()->black_allocation()) {
3354 heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
3355 }
3356 AllocationStep(object_size, object->address(), object_size);
3357 DCHECK_IMPLIES(
3358 heap()->incremental_marking()->black_allocation(),
3359 heap()->incremental_marking()->marking_state()->IsBlack(object));
3360 return object;
3361 }
3362
3363
3364 size_t LargeObjectSpace::CommittedPhysicalMemory() {
3365 // On a platform that provides lazy committing of memory, we over-account
3366 // the actually committed memory. There is no easy way right now to support
3367 // precise accounting of committed memory in large object space.
3368 return CommittedMemory();
3369 }
3370
3371
3372 // GC support
3373 Object* LargeObjectSpace::FindObject(Address a) {
3374 LargePage* page = FindPage(a);
3375 if (page != nullptr) {
3376 return page->GetObject();
3377 }
3378 return Smi::kZero; // Signaling not found.
3379 }
3380
3381 LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
3382 base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
3383 return FindPage(a);
3384 }
3385
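// Finds the large page containing |a| by rounding the address down to its
// MemoryChunk base and looking that up in the chunk map; returns nullptr if
// no page in this space covers the address.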
3386 LargePage* LargeObjectSpace::FindPage(Address a) {
3387 const Address key = MemoryChunk::FromAddress(a)->address();
3388 auto it = chunk_map_.find(key);
3389 if (it != chunk_map_.end()) {
3390 LargePage* page = it->second;
3391 if (page->Contains(a)) {
3392 return page;
3393 }
3394 }
3395 return nullptr;
3396 }
3397
3398
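// Resets the marking state of all live large objects after a GC cycle: mark
// bits go back to white, empty OLD_TO_NEW buckets are released, progress bars
// are reset, and live byte counts are set to zero.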
3399 void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
3400 IncrementalMarking::NonAtomicMarkingState* marking_state =
3401 heap()->incremental_marking()->non_atomic_marking_state();
3402 LargeObjectIterator it(this);
3403 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
3404 if (marking_state->IsBlackOrGrey(obj)) {
3405 Marking::MarkWhite(marking_state->MarkBitFrom(obj));
3406 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
3407 RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
3408 chunk->ResetProgressBar();
3409 marking_state->SetLiveBytes(chunk, 0);
3410 }
3411 DCHECK(marking_state->IsWhite(obj));
3412 }
3413 }
3414
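// Registers every MemoryChunk::kPageSize-aligned address covered by the page
// in the chunk map, so that FindPage can map any interior address back to its
// LargePage.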
3415 void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
3416 // There may be concurrent access to the chunk map. We have to take the
3417 // lock here.
3418 base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
3419 for (Address current = reinterpret_cast<Address>(page);
3420 current < reinterpret_cast<Address>(page) + page->size();
3421 current += MemoryChunk::kPageSize) {
3422 chunk_map_[current] = page;
3423 }
3424 }
3425
3426 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
3427 RemoveChunkMapEntries(page, page->address());
3428 }
3429
3430 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
3431 Address free_start) {
3432 for (Address current = ::RoundUp(free_start, MemoryChunk::kPageSize);
3433 current < reinterpret_cast<Address>(page) + page->size();
3434 current += MemoryChunk::kPageSize) {
3435 chunk_map_.erase(current);
3436 }
3437 }
3438
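// Sweeps the large object space: marked (black) objects survive and their
// pages may be shrunk to the committed size actually needed; unmarked objects
// are unlinked from the page list and their pages are freed.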
3439 void LargeObjectSpace::FreeUnmarkedObjects() {
3440 LargePage* previous = nullptr;
3441 LargePage* current = first_page_;
3442 IncrementalMarking::NonAtomicMarkingState* marking_state =
3443 heap()->incremental_marking()->non_atomic_marking_state();
3444 objects_size_ = 0;
3445 while (current != nullptr) {
3446 HeapObject* object = current->GetObject();
3447 DCHECK(!marking_state->IsGrey(object));
3448 if (marking_state->IsBlack(object)) {
3449 Address free_start;
3450 size_t size = static_cast<size_t>(object->Size());
3451 objects_size_ += size;
3452 if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
3453 0) {
3454 DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
3455 current->ClearOutOfLiveRangeSlots(free_start);
3456 RemoveChunkMapEntries(current, free_start);
3457 const size_t bytes_to_free =
3458 current->size() - (free_start - current->address());
3459 heap()->memory_allocator()->PartialFreeMemory(
3460 current, free_start, bytes_to_free,
3461 current->area_start() + object->Size());
3462 size_ -= bytes_to_free;
3463 AccountUncommitted(bytes_to_free);
3464 }
3465 previous = current;
3466 current = current->next_page();
3467 } else {
3468 LargePage* page = current;
3469 // Cut the chunk out from the chunk list.
3470 current = current->next_page();
3471 if (previous == nullptr) {
3472 first_page_ = current;
3473 } else {
3474 previous->set_next_page(current);
3475 }
3476
3477 // Free the chunk.
3478 size_ -= static_cast<int>(page->size());
3479 AccountUncommitted(page->size());
3480 page_count_--;
3481
3482 RemoveChunkMapEntries(page);
3483 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
3484 }
3485 }
3486 }
3487
3488
3489 bool LargeObjectSpace::Contains(HeapObject* object) {
3490 Address address = object->address();
3491 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3492
3493 bool owned = (chunk->owner() == this);
3494
3495 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3496
3497 return owned;
3498 }
3499
3500 std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
3501 return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
3502 }
3503
3504 #ifdef VERIFY_HEAP
3505 // We do not assume that the large object iterator works, because it depends
3506 // on the invariants we are checking during verification.
3507 void LargeObjectSpace::Verify() {
3508 for (LargePage* chunk = first_page_; chunk != nullptr;
3509 chunk = chunk->next_page()) {
3510 // Each chunk contains an object that starts at the large object page's
3511 // object area start.
3512 HeapObject* object = chunk->GetObject();
3513 Page* page = Page::FromAddress(object->address());
3514 CHECK(object->address() == page->area_start());
3515
3516 // The first word should be a map, and we expect all map pointers to be
3517 // in map space or read-only space.
3518 Map* map = object->map();
3519 CHECK(map->IsMap());
3520 CHECK(heap()->map_space()->Contains(map) ||
3521 heap()->read_only_space()->Contains(map));
3522
3523 // We have only the following types in the large object space:
3524 CHECK(object->IsAbstractCode() || object->IsSeqString() ||
3525 object->IsExternalString() || object->IsThinString() ||
3526 object->IsFixedArray() || object->IsFixedDoubleArray() ||
3527 object->IsWeakFixedArray() || object->IsWeakArrayList() ||
3528 object->IsPropertyArray() || object->IsByteArray() ||
3529 object->IsFeedbackVector() || object->IsBigInt() ||
3530 object->IsFreeSpace());
3531
3532 // The object itself should look OK.
3533 object->ObjectVerify();
3534
3535 if (!FLAG_verify_heap_skip_remembered_set) {
3536 heap()->VerifyRememberedSetFor(object);
3537 }
3538
3539 // Byte arrays and strings don't have interior pointers.
3540 if (object->IsAbstractCode()) {
3541 VerifyPointersVisitor code_visitor;
3542 object->IterateBody(map, object->Size(), &code_visitor);
3543 } else if (object->IsFixedArray()) {
3544 FixedArray* array = FixedArray::cast(object);
3545 for (int j = 0; j < array->length(); j++) {
3546 Object* element = array->get(j);
3547 if (element->IsHeapObject()) {
3548 HeapObject* element_object = HeapObject::cast(element);
3549 CHECK(heap()->Contains(element_object));
3550 CHECK(element_object->map()->IsMap());
3551 }
3552 }
3553 } else if (object->IsPropertyArray()) {
3554 PropertyArray* array = PropertyArray::cast(object);
3555 for (int j = 0; j < array->length(); j++) {
3556 Object* property = array->get(j);
3557 if (property->IsHeapObject()) {
3558 HeapObject* property_object = HeapObject::cast(property);
3559 CHECK(heap()->Contains(property_object));
3560 CHECK(property_object->map()->IsMap());
3561 }
3562 }
3563 }
3564 }
3565 }
3566 #endif
3567
3568 #ifdef DEBUG
3569 void LargeObjectSpace::Print() {
3570 OFStream os(stdout);
3571 LargeObjectIterator it(this);
3572 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
3573 obj->Print(os);
3574 }
3575 }
3576
3577 void Page::Print() {
3578 // Make a best effort to print the objects in the page.
3579 PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
3580 AllocationSpaceName(this->owner()->identity()));
3581 printf(" --------------------------------------\n");
3582 HeapObjectIterator objects(this);
3583 unsigned mark_size = 0;
3584 for (HeapObject* object = objects.Next(); object != nullptr;
3585 object = objects.Next()) {
3586 bool is_marked =
3587 heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
3588 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3589 if (is_marked) {
3590 mark_size += object->Size();
3591 }
3592 object->ShortPrint();
3593 PrintF("\n");
3594 }
3595 printf(" --------------------------------------\n");
3596 printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
3597 heap()->incremental_marking()->marking_state()->live_bytes(this));
3598 }
3599
3600 #endif // DEBUG
3601 } // namespace internal
3602 } // namespace v8
3603