// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/read-only-spaces.h"

#include <memory>

#include "include/v8-internal.h"
#include "include/v8-platform.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/execution/isolate.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/string.h"
#include "src/snapshot/read-only-deserializer.h"

namespace v8 {
namespace internal {

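// Copies the read-only roots from |src| to |dst|, rebasing each pointer so
// that its offset from the Isolate root is preserved:
//   dst[i] = new_base + (src[i] - src_base)
// Passing a new_base of 0 stores the roots as bare offsets, which
// InitializeRootsIn later rebases onto a concrete Isolate root.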
void CopyAndRebaseRoots(Address* src, Address* dst, Address new_base) {
  Address src_base = GetIsolateRootAddress(src[0]);
  for (size_t i = 0; i < ReadOnlyHeap::kEntriesCount; ++i) {
    dst[i] = src[i] - src_base + new_base;
  }
}

void ReadOnlyArtifacts::set_read_only_heap(
    std::unique_ptr<ReadOnlyHeap> read_only_heap) {
  read_only_heap_ = std::move(read_only_heap);
}

void ReadOnlyArtifacts::InitializeChecksum(
    SnapshotData* read_only_snapshot_data) {
#ifdef DEBUG
  read_only_blob_checksum_ = Checksum(read_only_snapshot_data->Payload());
#endif  // DEBUG
}

void ReadOnlyArtifacts::VerifyChecksum(SnapshotData* read_only_snapshot_data,
                                       bool read_only_heap_created) {
#ifdef DEBUG
  if (read_only_blob_checksum_) {
    // The read-only heap was set up from a snapshot. Make sure it's always
    // the same snapshot.
    uint32_t snapshot_checksum = Checksum(read_only_snapshot_data->Payload());
    CHECK_WITH_MSG(snapshot_checksum,
                   "Attempt to create the read-only heap after already "
                   "creating from a snapshot.");
    if (!FLAG_stress_snapshot) {
      // --stress-snapshot is only intended to check how well the
      // serializer/deserializer copes with unexpected objects, and is not
      // intended to test whether the newly deserialized Isolate would actually
      // work since it serializes a currently running Isolate, which is not
      // supported. As a result, it's possible that it will create a new
      // read-only snapshot that is not compatible with the original one (for
      // instance due to the string table being re-ordered). Since we won't
      // actually use that new Isolate, we're ok with any potential corruption.
      // See crbug.com/1043058.
      CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
    }
  } else {
    // If there's no checksum, then that means the read-only heap objects are
    // being created.
    CHECK(read_only_heap_created);
  }
#endif  // DEBUG
}

SingleCopyReadOnlyArtifacts::~SingleCopyReadOnlyArtifacts() {
  // This particular SharedReadOnlySpace should not destroy its own pages as
  // TearDown requires a MemoryAllocator, which itself is tied to an Isolate.
  shared_read_only_space_->pages_.resize(0);

  for (ReadOnlyPage* chunk : pages_) {
    void* chunk_address = reinterpret_cast<void*>(chunk->address());
    size_t size = RoundUp(chunk->size(), page_allocator_->AllocatePageSize());
    CHECK(page_allocator_->FreePages(chunk_address, size));
  }
}

ReadOnlyHeap* SingleCopyReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
    Isolate* isolate) {
  return read_only_heap();
}

void SingleCopyReadOnlyArtifacts::Initialize(Isolate* isolate,
                                             std::vector<ReadOnlyPage*>&& pages,
                                             const AllocationStats& stats) {
  // Do not use the platform page allocator when sharing a pointer compression
  // cage, as the Isolate's page allocator is a BoundedPageAllocator tied to
  // the shared cage.
  page_allocator_ = COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL
                        ? isolate->page_allocator()
                        : GetPlatformPageAllocator();
  pages_ = std::move(pages);
  set_accounting_stats(stats);
  set_shared_read_only_space(
      std::make_unique<SharedReadOnlySpace>(isolate->heap(), this));
}

void SingleCopyReadOnlyArtifacts::ReinstallReadOnlySpace(Isolate* isolate) {
  isolate->heap()->ReplaceReadOnlySpace(shared_read_only_space());
}

void SingleCopyReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(
    Isolate* isolate) {
  DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());

  // Confirm the Isolate is using the shared ReadOnlyHeap and ReadOnlySpace.
  DCHECK_EQ(read_only_heap(), isolate->read_only_heap());
  DCHECK_EQ(shared_read_only_space(), isolate->heap()->read_only_space());
}

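// Records the Isolate's read-only roots in read_only_roots_ as offsets from
// the Isolate root (new_base == 0), so InitializeRootsIn can later rebase
// them into any other pointer compression cage.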
void PointerCompressedReadOnlyArtifacts::InitializeRootsFrom(Isolate* isolate) {
  auto isolate_ro_roots =
      isolate->roots_table().read_only_roots_begin().location();
  CopyAndRebaseRoots(isolate_ro_roots, read_only_roots_, 0);
}

void PointerCompressedReadOnlyArtifacts::InitializeRootsIn(Isolate* isolate) {
  auto isolate_ro_roots =
      isolate->roots_table().read_only_roots_begin().location();
  CopyAndRebaseRoots(read_only_roots_, isolate_ro_roots,
                     isolate->isolate_root());
}

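// Builds a SharedReadOnlySpace for |isolate| by reserving, inside the
// Isolate's pointer compression cage, the same offsets the original pages
// occupied and then remapping the shared memory copies to those addresses.
// Keeping the per-cage offsets identical means compressed pointers inside the
// read-only pages stay valid without any fix-ups.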
SharedReadOnlySpace* PointerCompressedReadOnlyArtifacts::CreateReadOnlySpace(
    Isolate* isolate) {
  AllocationStats new_stats;
  new_stats.IncreaseCapacity(accounting_stats().Capacity());

  std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>> mappings;
  std::vector<ReadOnlyPage*> pages;
  Address isolate_root = isolate->isolate_root();
  for (size_t i = 0; i < pages_.size(); ++i) {
    const ReadOnlyPage* page = pages_[i];
    const Tagged_t offset = OffsetForPage(i);
    Address new_address = isolate_root + offset;
    ReadOnlyPage* new_page = nullptr;
    bool success = isolate->heap()
                       ->memory_allocator()
                       ->data_page_allocator()
                       ->ReserveForSharedMemoryMapping(
                           reinterpret_cast<void*>(new_address), page->size());
    CHECK(success);
    auto shared_memory = RemapPageTo(i, new_address, new_page);
    // On Linux the remap cannot currently fail. On Windows, however, it is not
    // possible to reserve memory and then map into the middle of it; there we
    // would have to reserve the memory, free it and then attempt to remap to
    // it, which could fail. At that point these CHECKs will need to change.
    CHECK(shared_memory);
    CHECK_NOT_NULL(new_page);

    new_stats.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
    mappings.push_back(std::move(shared_memory));
    pages.push_back(new_page);
  }

  auto* shared_read_only_space =
      new SharedReadOnlySpace(isolate->heap(), std::move(pages),
                              std::move(mappings), std::move(new_stats));
  return shared_read_only_space;
}

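// Creates a fresh ReadOnlyHeap for |isolate| on top of a newly mapped
// SharedReadOnlySpace. The read-only object cache is copied entry by entry,
// rebasing each address onto the Isolate's cage via its compressed offset.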
ReadOnlyHeap* PointerCompressedReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
    Isolate* isolate) {
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
  InitializeRootsIn(isolate);

  SharedReadOnlySpace* shared_read_only_space = CreateReadOnlySpace(isolate);
  ReadOnlyHeap* read_only_heap = new ReadOnlyHeap(shared_read_only_space);

  // TODO(v8:10699): The cache should just live uncompressed in
  // ReadOnlyArtifacts and be decompressed on the fly.
  auto original_cache = read_only_heap_->read_only_object_cache_;
  auto& cache = read_only_heap->read_only_object_cache_;
  Address isolate_root = isolate->isolate_root();
  for (Object original_object : original_cache) {
    Address original_address = original_object.ptr();
    Address new_address = isolate_root + CompressTagged(original_address);
    Object new_object = Object(new_address);
    cache.push_back(new_object);
  }

  return read_only_heap;
}

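// Remaps the shared memory backing page |i| to |new_address| inside the
// current Isolate's cage. On success |new_page| points at the remapped page
// header and the mapping that owns the memory is returned; on failure an
// empty unique_ptr is returned and |new_page| is left untouched.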
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
PointerCompressedReadOnlyArtifacts::RemapPageTo(size_t i, Address new_address,
                                                ReadOnlyPage*& new_page) {
  std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> mapping =
      shared_memory_[i]->RemapTo(reinterpret_cast<void*>(new_address));
  if (mapping) {
    new_page = static_cast<ReadOnlyPage*>(reinterpret_cast<void*>(new_address));
    return mapping;
  } else {
    return {};
  }
}

void PointerCompressedReadOnlyArtifacts::Initialize(
    Isolate* isolate, std::vector<ReadOnlyPage*>&& pages,
    const AllocationStats& stats) {
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
  DCHECK(pages_.empty());
  DCHECK(!pages.empty());

  // It's not possible to copy the AllocationStats directly as the new pages
  // will be mapped to different addresses.
  stats_.IncreaseCapacity(stats.Capacity());

  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  DCHECK(page_allocator->CanAllocateSharedPages());

  for (const ReadOnlyPage* page : pages) {
    size_t size = RoundUp(page->size(), page_allocator->AllocatePageSize());
    // 1. Allocate some new memory for a shared copy of the page and copy the
    // original contents into it. Doesn't need to be V8 page aligned, since
    // we'll never use it directly.
    auto shared_memory = page_allocator->AllocateSharedPages(size, page);
    void* ptr = shared_memory->GetMemory();
    CHECK_NOT_NULL(ptr);

    // 2. Reinterpret the shared copy as a ReadOnlyPage; its contents were
    // already copied from the original page in step 1.
    ReadOnlyPage* new_page = reinterpret_cast<ReadOnlyPage*>(ptr);

    pages_.push_back(new_page);
    shared_memory_.push_back(std::move(shared_memory));
    // Record the page's offset within the pointer compression cage so the
    // same offset can be reserved and remapped in other Isolates.
    Tagged_t compressed_address = CompressTagged(page->address());
    page_offsets_.push_back(compressed_address);

    // 3. Update the accounting stats so the allocated bytes are for the new
    // shared page rather than the original.
    stats_.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
  }

  InitializeRootsFrom(isolate);
  set_shared_read_only_space(
      std::make_unique<SharedReadOnlySpace>(isolate->heap(), this));
}

void PointerCompressedReadOnlyArtifacts::ReinstallReadOnlySpace(
    Isolate* isolate) {
  // We need to build a new SharedReadOnlySpace that occupies the same memory
  // as the original one, so first the original space's pages must be freed.
  Heap* heap = isolate->heap();
  heap->read_only_space()->TearDown(heap->memory_allocator());

  heap->ReplaceReadOnlySpace(CreateReadOnlySpace(heap->isolate()));

  DCHECK_NE(heap->read_only_space(), shared_read_only_space());

  // Also recreate the ReadOnlyHeap using this space.
  auto* ro_heap = new ReadOnlyHeap(isolate->read_only_heap(),
                                   isolate->heap()->read_only_space());
  isolate->set_read_only_heap(ro_heap);

  DCHECK_NE(*isolate->roots_table().read_only_roots_begin().location(), 0);
}

void PointerCompressedReadOnlyArtifacts::VerifyHeapAndSpaceRelationships(
    Isolate* isolate) {
  // Confirm the canonical versions of the ReadOnlySpace/ReadOnlyHeap from the
  // ReadOnlyArtifacts are not accidentally present in a real Isolate (which
  // might destroy them) and that the ReadOnlyHeaps and Spaces are correctly
  // associated with each other.
  DCHECK_NE(shared_read_only_space(), isolate->heap()->read_only_space());
  DCHECK_NE(read_only_heap(), isolate->read_only_heap());
  DCHECK_EQ(read_only_heap()->read_only_space(), shared_read_only_space());
  DCHECK_EQ(isolate->read_only_heap()->read_only_space(),
            isolate->heap()->read_only_space());
}

// -----------------------------------------------------------------------------
// ReadOnlySpace implementation

ReadOnlySpace::ReadOnlySpace(Heap* heap)
    : BaseSpace(heap, RO_SPACE),
      top_(kNullAddress),
      limit_(kNullAddress),
      is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()),
      capacity_(0),
      area_size_(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE)) {}

// Needs to be defined in the cc file to force the vtable to be emitted in
// component builds.
ReadOnlySpace::~ReadOnlySpace() = default;

void SharedReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
  // SharedReadOnlySpaces do not tear down their own pages since they are
  // either freed by the ReadOnlyArtifacts that contains them or, in the case
  // of pointer compression, freed when the SharedMemoryMappings are freed.
  pages_.resize(0);
  accounting_stats_.Clear();
}

void ReadOnlySpace::TearDown(MemoryAllocator* memory_allocator) {
  for (ReadOnlyPage* chunk : pages_) {
    memory_allocator->FreeReadOnlyPage(chunk);
  }
  pages_.resize(0);
  accounting_stats_.Clear();
}

void ReadOnlySpace::DetachPagesAndAddToArtifacts(
    std::shared_ptr<ReadOnlyArtifacts> artifacts) {
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());

  Heap* heap = ReadOnlySpace::heap();
  // Without pointer compression in a per-Isolate cage, ReadOnlySpace pages are
  // directly shared between all heaps and so must be unregistered from their
  // originating allocator.
  Seal(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
           ? SealMode::kDetachFromHeap
           : SealMode::kDetachFromHeapAndUnregisterMemory);
  artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}

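// Clears the Isolate-specific fields of the page header (heap, owner and the
// VirtualMemory reservation) so the page can be shared between Isolates or
// remapped at a different address.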
void ReadOnlyPage::MakeHeaderRelocatable() {
  heap_ = nullptr;
  owner_ = nullptr;
  reservation_.Reset();
}

void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
                                           PageAllocator::Permission access) {
  for (BasicMemoryChunk* chunk : pages_) {
    // Read-only pages don't have a valid reservation object, so we get the
    // proper page allocator manually.
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(NOT_EXECUTABLE);
    CHECK(SetPermissions(page_allocator, chunk->address(), chunk->size(),
                         access));
  }
}

// After we have booted, we have created a map which represents free space
// on the heap.  If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void ReadOnlySpace::RepairFreeSpacesAfterDeserialization() {
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  // Each page may have a small free space that is not tracked by a free list.
  // Those free spaces still contain null as their map pointer.
  // Overwrite them with new fillers.
  for (BasicMemoryChunk* chunk : pages_) {
    Address start = chunk->HighWaterMark();
    Address end = chunk->area_end();
    // Put a filler object in the gap between the end of the allocated objects
    // and the end of the allocatable area.
    if (start < end) {
      heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
                                   ClearRecordedSlots::kNo);
    }
  }
}

void ReadOnlySpace::ClearStringPaddingIfNeeded() {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    // TODO(v8:11641): Revisit this once third-party heap supports iteration.
    return;
  }
  if (is_string_padding_cleared_) return;

  ReadOnlyHeapObjectIterator iterator(this);
  for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
    if (o.IsSeqOneByteString()) {
      SeqOneByteString::cast(o).clear_padding();
    } else if (o.IsSeqTwoByteString()) {
      SeqTwoByteString::cast(o).clear_padding();
    }
  }
  is_string_padding_cleared_ = true;
}

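// Seals the space against further allocation: the linear allocation area is
// released, the pages are (optionally) detached from the owning heap and
// unregistered from the memory allocator, their headers are made relocatable
// when the read-only space is shared, and finally all pages are mapped
// read-only.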
void ReadOnlySpace::Seal(SealMode ro_mode) {
  DCHECK(!is_marked_read_only_);

  FreeLinearAllocationArea();
  is_marked_read_only_ = true;
  auto* memory_allocator = heap()->memory_allocator();

  if (ro_mode != SealMode::kDoNotDetachFromHeap) {
    DetachFromHeap();
    for (ReadOnlyPage* p : pages_) {
      if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
        memory_allocator->UnregisterMemory(p);
      }
      if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
        p->MakeHeaderRelocatable();
      }
    }
  }

  SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
}

void ReadOnlySpace::Unseal() {
  DCHECK(is_marked_read_only_);
  if (!pages_.empty()) {
    SetPermissionsForPages(heap()->memory_allocator(),
                           PageAllocator::kReadWrite);
  }
  is_marked_read_only_ = false;
}

bool ReadOnlySpace::ContainsSlow(Address addr) {
  BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
  for (BasicMemoryChunk* chunk : pages_) {
    if (chunk == c) return true;
  }
  return false;
}

namespace {
// Only iterates over a single chunk as the chunk iteration is done externally.
class ReadOnlySpaceObjectIterator : public ObjectIterator {
 public:
  ReadOnlySpaceObjectIterator(Heap* heap, ReadOnlySpace* space,
                              BasicMemoryChunk* chunk)
      : cur_addr_(chunk->area_start()),
        cur_end_(chunk->area_end()),
        space_(space) {}

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns a null HeapObject when the iteration has ended.
  HeapObject Next() override {
    HeapObject next_obj = FromCurrentPage();
    if (!next_obj.is_null()) return next_obj;
    return HeapObject();
  }

 private:
  HeapObject FromCurrentPage() {
    while (cur_addr_ != cur_end_) {
      if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
        cur_addr_ = space_->limit();
        continue;
      }
      HeapObject obj = HeapObject::FromAddress(cur_addr_);
      const int obj_size = obj.Size();
      cur_addr_ += obj_size;
      DCHECK_LE(cur_addr_, cur_end_);
      if (!obj.IsFreeSpaceOrFiller()) {
        if (obj.IsCode()) {
          DCHECK(Code::cast(obj).is_builtin());
          DCHECK_CODEOBJECT_SIZE(obj_size, space_);
        } else {
          DCHECK_OBJECT_SIZE(obj_size);
        }
        return obj;
      }
    }
    return HeapObject();
  }

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  ReadOnlySpace* space_;
};
}  // namespace

#ifdef VERIFY_HEAP
namespace {
class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
 public:
  explicit VerifyReadOnlyPointersVisitor(Heap* heap)
      : VerifyPointersVisitor(heap) {}

 protected:
  void VerifyPointers(HeapObject host, MaybeObjectSlot start,
                      MaybeObjectSlot end) override {
    if (!host.is_null()) {
      CHECK(ReadOnlyHeap::Contains(host.map()));
    }
    VerifyPointersVisitor::VerifyPointers(host, start, end);

    for (MaybeObjectSlot current = start; current < end; ++current) {
      HeapObject heap_object;
      if ((*current)->GetHeapObject(&heap_object)) {
        CHECK(ReadOnlyHeap::Contains(heap_object));
      }
    }
  }
};
}  // namespace

void ReadOnlySpace::Verify(Isolate* isolate) {
  bool allocation_pointer_found_in_space = top_ == limit_;
  VerifyReadOnlyPointersVisitor visitor(isolate->heap());

  for (BasicMemoryChunk* page : pages_) {
    if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
      CHECK_NULL(page->owner());
    } else {
      CHECK_EQ(page->owner(), this);
    }

    if (page == Page::FromAllocationAreaAddress(top_)) {
      allocation_pointer_found_in_space = true;
    }
    ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();

    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      CHECK(end_of_previous_object <= object.address());

      Map map = object.map();
      CHECK(map.IsMap());

      // The object itself should look OK.
      object.ObjectVerify(isolate);

      // All the interior pointers should be contained in the heap.
      int size = object.Size();
      object.IterateBody(map, size, &visitor);
      CHECK(object.address() + size <= top);
      end_of_previous_object = object.address() + size;

      CHECK(!object.IsExternalString());
      CHECK(!object.IsJSArrayBuffer());
    }
  }
  CHECK(allocation_pointer_found_in_space);

#ifdef DEBUG
  VerifyCounters(isolate->heap());
#endif
}

#ifdef DEBUG
void ReadOnlySpace::VerifyCounters(Heap* heap) {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  for (BasicMemoryChunk* page : pages_) {
    total_capacity += page->area_size();
    ReadOnlySpaceObjectIterator it(heap, this, page);
    size_t real_allocated = 0;
    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      if (!object.IsFreeSpaceOrFiller()) {
        real_allocated += object.Size();
      }
    }
    total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming
    // or object slack tracking happened after sweeping.
    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif  // DEBUG
#endif  // VERIFY_HEAP

size_t ReadOnlySpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  size_t size = 0;
  for (auto* chunk : pages_) {
    size += chunk->size();
  }

  return size;
}

void ReadOnlySpace::FreeLinearAllocationArea() {
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  if (top_ == kNullAddress) {
    DCHECK_EQ(kNullAddress, limit_);
    return;
  }

  // Clear the bits in the unused black area.
  ReadOnlyPage* page = pages_.back();
  heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
      page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));

  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
                               ClearRecordedSlots::kNo);

  BasicMemoryChunk::UpdateHighWaterMark(top_);

  top_ = kNullAddress;
  limit_ = kNullAddress;
}

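// Makes sure the linear allocation area has at least |size_in_bytes| of room,
// allocating a fresh read-only page (pre-filled with a filler object) if the
// current page cannot satisfy the request.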
void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
  if (top_ + size_in_bytes <= limit_) {
    return;
  }

  DCHECK_GE(size_in_bytes, 0);

  FreeLinearAllocationArea();

  BasicMemoryChunk* chunk =
      heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);
  CHECK_NOT_NULL(chunk);
  capacity_ += AreaSize();

  accounting_stats_.IncreaseCapacity(chunk->area_size());
  AccountCommitted(chunk->size());
  pages_.push_back(static_cast<ReadOnlyPage*>(chunk));

  heap()->CreateFillerObjectAt(chunk->area_start(),
                               static_cast<int>(chunk->area_size()),
                               ClearRecordedSlots::kNo);

  top_ = chunk->area_start();
  limit_ = chunk->area_end();
}

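// Bump-pointer allocation within the current linear allocation area. If an
// alignment filler is needed it is placed in front of the object; returns a
// null HeapObject when the area cannot hold the aligned allocation.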
HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = top_;
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > limit_) return HeapObject();

  // Allocation always occurs in the last chunk for RO_SPACE.
  BasicMemoryChunk* chunk = pages_.back();
  int allocated_size = filler_size + size_in_bytes;
  accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
  chunk->IncreaseAllocatedBytes(allocated_size);

  top_ = new_top;
  if (filler_size > 0) {
    return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
                                   HeapObject::FromAddress(current_top),
                                   filler_size);
  }

  return HeapObject::FromAddress(current_top);
}

AllocationResult ReadOnlySpace::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  DCHECK(!FLAG_enable_third_party_heap);
  DCHECK(!IsDetached());
  int allocation_size = size_in_bytes;

  HeapObject object = TryAllocateLinearlyAligned(allocation_size, alignment);
  if (object.is_null()) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    EnsureSpaceForAllocation(allocation_size +
                             Heap::GetMaximumFillToAlign(alignment));
    allocation_size = size_in_bytes;
    object = TryAllocateLinearlyAligned(size_in_bytes, alignment);
    CHECK(!object.is_null());
  }
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);

  return object;
}

AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
  DCHECK(!IsDetached());
  EnsureSpaceForAllocation(size_in_bytes);
  Address current_top = top_;
  Address new_top = current_top + size_in_bytes;
  DCHECK_LE(new_top, limit_);
  top_ = new_top;
  HeapObject object = HeapObject::FromAddress(current_top);

  DCHECK(!object.is_null());
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);

  // Allocation always occurs in the last chunk for RO_SPACE.
  BasicMemoryChunk* chunk = pages_.back();
  accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
  chunk->IncreaseAllocatedBytes(size_in_bytes);

  return object;
}

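// Alignment requests beyond word alignment only matter on 32-bit hosts, so the
// aligned allocation path is only taken there; 64-bit builds always use the
// unaligned fast path.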
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                                            AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result = alignment != kWordAligned
                                ? AllocateRawAligned(size_in_bytes, alignment)
                                : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject heap_obj;
  if (!result.IsRetry() && result.To(&heap_obj)) {
    DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
  }
  return result;
}

size_t ReadOnlyPage::ShrinkToHighWaterMark() {
  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject filler = HeapObject::FromAddress(HighWaterMark());
  if (filler.address() == area_end()) return 0;
  CHECK(filler.IsFreeSpaceOrFiller());
  DCHECK_EQ(filler.address() + filler.Size(), area_end());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler.address(),
        static_cast<int>(area_end() - filler.address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    if (filler.address() != area_end()) {
      CHECK(filler.IsFreeSpaceOrFiller());
      CHECK_EQ(filler.address() + filler.Size(), area_end());
    }
  }
  return unused;
}

void ReadOnlySpace::ShrinkPages() {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
  BasicMemoryChunk::UpdateHighWaterMark(top_);
  heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
                               ClearRecordedSlots::kNo);

  for (ReadOnlyPage* chunk : pages_) {
    DCHECK(chunk->IsFlagSet(Page::NEVER_EVACUATE));
    size_t unused = chunk->ShrinkToHighWaterMark();
    capacity_ -= unused;
    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
    AccountUncommitted(unused);
  }
  limit_ = pages_.back()->area_end();
}

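// Turns a freshly allocated BasicMemoryChunk into a ReadOnlyPage: resets the
// allocation counter, marks the page as never-evacuate, and sets every bit in
// the marking bitmap so all objects on the page are treated as live (black).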
ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
  ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
  page->allocated_bytes_ = 0;
  page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
  heap()
      ->incremental_marking()
      ->non_atomic_marking_state()
      ->bitmap(chunk)
      ->MarkAllBits();
  chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);

  return page;
}

SharedReadOnlySpace::SharedReadOnlySpace(
    Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
    : SharedReadOnlySpace(heap) {
  // This constructor should only be used when RO_SPACE is shared with pointer
  // compression in a per-Isolate cage.
  DCHECK(V8_SHARED_RO_HEAP_BOOL);
  DCHECK(COMPRESS_POINTERS_BOOL);
  DCHECK(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
  DCHECK(!artifacts->pages().empty());

  accounting_stats_.IncreaseCapacity(artifacts->accounting_stats().Capacity());
  for (ReadOnlyPage* page : artifacts->pages()) {
    pages_.push_back(page);
    accounting_stats_.IncreaseAllocatedBytes(page->allocated_bytes(), page);
  }
}

SharedReadOnlySpace::SharedReadOnlySpace(
    Heap* heap, std::vector<ReadOnlyPage*>&& new_pages,
    std::vector<std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>>&&
        mappings,
    AllocationStats&& new_stats)
    : SharedReadOnlySpace(heap) {
  DCHECK(V8_SHARED_RO_HEAP_BOOL);
  DCHECK(COMPRESS_POINTERS_BOOL);
  DCHECK(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
  DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());

  accounting_stats_ = std::move(new_stats);
  pages_ = std::move(new_pages);
  shared_memory_mappings_ = std::move(mappings);
}

SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
                                         SingleCopyReadOnlyArtifacts* artifacts)
    : SharedReadOnlySpace(heap) {
  // This constructor should only be used when RO_SPACE is shared without
  // pointer compression in a per-Isolate cage.
  DCHECK(V8_SHARED_RO_HEAP_BOOL);
  DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
  accounting_stats_ = artifacts->accounting_stats();
  pages_ = artifacts->pages();
}

}  // namespace internal
}  // namespace v8