// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/large-spaces.h"

#include "src/base/platform/mutex.h"
#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

// This check is here to ensure that the lower 32 bits of any real heap object
// can't overlap with the lower 32 bits of the cleared weak reference value,
// and therefore it's enough to compare only the lower 32 bits of a MaybeObject
// in order to figure out whether it's a cleared weak reference.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);

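// Turns a freshly allocated MemoryChunk into a LargePage holding exactly one
// object. Code pages must stay within TypedSlotSet::kMaxOffset so that typed
// slots in them can still be recorded; a larger executable allocation is a
// fatal error.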
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());

  LargePage* page = static_cast<LargePage*>(chunk);
  page->SetFlag(MemoryChunk::LARGE_PAGE);
  page->list_node().Initialize();
  return page;
}

size_t LargeObjectSpace::Available() {
  // We return zero here since we cannot take advantage of already allocated
  // large object memory.
  return 0;
}

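// Returns the address from which the unused tail of this page can be freed
// after right-trimming, rounded up to the OS commit page size, or 0 if there
// is nothing to reclaim. Executable pages are never shrunk.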
Address LargePage::GetAddressToShrink(Address object_address,
                                      size_t object_size) {
  if (executable() == EXECUTABLE) {
    return 0;
  }
  size_t used_size = ::RoundUp((object_address - address()) + object_size,
                               MemoryAllocator::GetCommitPageSize());
  if (used_size < CommittedPhysicalMemory()) {
    return address() + used_size;
  }
  return 0;
}

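// Drops all remembered-set entries (regular and typed, OLD_TO_NEW and
// OLD_TO_OLD) that point into the part of the page that is about to be freed.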
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
  DCHECK_NULL(this->sweeping_slot_set());
  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
  RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}

// -----------------------------------------------------------------------------
// LargeObjectSpaceObjectIterator

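// Visits every object in a LargeObjectSpace, one per page. Usage sketch (this
// is the pattern used by Print() below):
//
//   LargeObjectSpaceObjectIterator it(space);
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... process obj ...
//   }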
LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
    LargeObjectSpace* space) {
  current_ = space->first_page();
}

HeapObject LargeObjectSpaceObjectIterator::Next() {
  if (current_ == nullptr) return HeapObject();

  HeapObject object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}

// -----------------------------------------------------------------------------
// OldLargeObjectSpace

LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, new NoFreeList()),
      size_(0),
      page_count_(0),
      objects_size_(0),
      pending_object_(0) {}

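// Releases all remaining pages of this space back to the memory allocator;
// used during heap teardown.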
void LargeObjectSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    LargePage* page = first_page();
    LOG(heap()->isolate(),
        DeleteEvent("LargeObjectChunk",
                    reinterpret_cast<void*>(page->address())));
    memory_chunk_list_.Remove(page);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
  }
}

void LargeObjectSpace::AdvanceAndInvokeAllocationObservers(Address soon_object,
                                                           size_t object_size) {
  if (!allocation_counter_.IsActive()) return;

  if (object_size >= allocation_counter_.NextBytes()) {
    allocation_counter_.InvokeAllocationObservers(soon_object, object_size,
                                                  object_size);
  }

  // Large objects can be accounted immediately since no LAB is involved.
  allocation_counter_.AdvanceAllocationObservers(object_size);
}

AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
  return AllocateRaw(object_size, NOT_EXECUTABLE);
}

AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
                                                  Executability executable) {
  DCHECK(!FLAG_enable_third_party_heap);
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, executable);
  if (page == nullptr) return AllocationResult::Retry(identity());
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject object = page->GetObject();
  UpdatePendingObject(object);
  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
      heap()->GCFlagsForIncrementalMarking(),
      kGCCallbackScheduleIdleGarbageCollection);
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  page->InitializationMemoryFence();
  heap()->NotifyOldGenerationExpansion(identity(), page);
  AdvanceAndInvokeAllocationObservers(object.address(),
                                      static_cast<size_t>(object_size));
  return object;
}

AllocationResult OldLargeObjectSpace::AllocateRawBackground(
    LocalHeap* local_heap, int object_size) {
  DCHECK(!FLAG_enable_third_party_heap);
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Retry(identity());
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject object = page->GetObject();
  heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  page->InitializationMemoryFence();
  return object;
}

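// Allocates a fresh page for a single object of the given size. The page is
// registered with the space under allocation_mutex_, since background threads
// may allocate concurrently, and the object area is pre-filled with a filler
// so the heap stays iterable until the caller installs the real map.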
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
                                               Executability executable) {
  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));

  {
    base::MutexGuard guard(&allocation_mutex_);
    AddPage(page, object_size);
  }

  HeapObject object = page->GetObject();

  heap()->CreateFillerObjectAt(object.address(), object_size,
                               ClearRecordedSlots::kNo);
  return page;
}

size_t LargeObjectSpace::CommittedPhysicalMemory() {
  // On a platform that provides lazy committing of memory, we over-account
  // the actually committed memory. There is no easy way right now to support
  // precise accounting of committed memory in large object space.
  return CommittedMemory();
}

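// Maps an arbitrary address inside a code large page back to its LargePage in
// O(1) via chunk_map_, which is keyed by MemoryChunk-aligned base addresses.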
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
  const Address key = BasicMemoryChunk::FromAddress(a)->address();
  auto it = chunk_map_.find(key);
  if (it != chunk_map_.end()) {
    LargePage* page = it->second;
    CHECK(page->Contains(a));
    return page;
  }
  return nullptr;
}

void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  LargeObjectSpaceObjectIterator it(this);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    if (marking_state->IsBlackOrGrey(obj)) {
      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
      MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
      chunk->ProgressBar().ResetIfEnabled();
      marking_state->SetLiveBytes(chunk, 0);
    }
    DCHECK(marking_state->IsWhite(obj));
  }
}

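// A large page spans several MemoryChunk::kPageSize-aligned address slots.
// One chunk_map_ entry is added per slot so that FindPage can resolve any
// address within the page.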
void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
  for (Address current = reinterpret_cast<Address>(page);
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_[current] = page;
  }
}

void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
  for (Address current = page->address();
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_.erase(current);
  }
}

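// Moves a page that survived a young-generation GC from NEW_LO_SPACE into
// this (old) space, clearing its from-space marker.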
void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
  DCHECK(page->IsLargePage());
  DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
  DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
  size_t object_size = static_cast<size_t>(page->GetObject().Size());
  static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
  page->ClearFlag(MemoryChunk::FROM_PAGE);
  AddPage(page, object_size);
}

void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
  size_ += static_cast<int>(page->size());
  AccountCommitted(page->size());
  objects_size_ += object_size;
  page_count_++;
  memory_chunk_list_.PushBack(page);
  page->set_owner(this);
  page->SetOldGenerationPageFlags(!is_off_thread() &&
                                  heap()->incremental_marking()->IsMarking());
}

void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
  size_ -= static_cast<int>(page->size());
  AccountUncommitted(page->size());
  objects_size_ -= object_size;
  page_count_--;
  memory_chunk_list_.Remove(page);
  page->set_owner(nullptr);
}

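// Sweeps the space after marking: black (live) objects survive, and their
// pages may be shrunk to release an unused committed tail; unmarked objects
// are freed together with their page. objects_size_ is recomputed from the
// survivors, since right-trimming does not keep the counter up to date.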
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* current = first_page();
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  size_t surviving_object_size = 0;
  while (current) {
    LargePage* next_current = current->next_page();
    HeapObject object = current->GetObject();
    DCHECK(!marking_state->IsGrey(object));
    size_t size = static_cast<size_t>(object.Size());
    if (marking_state->IsBlack(object)) {
      Address free_start;
      surviving_object_size += size;
      if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
          0) {
        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
        current->ClearOutOfLiveRangeSlots(free_start);
        const size_t bytes_to_free =
            current->size() - (free_start - current->address());
        heap()->memory_allocator()->PartialFreeMemory(
            current, free_start, bytes_to_free,
            current->area_start() + object.Size());
        size_ -= bytes_to_free;
        AccountUncommitted(bytes_to_free);
      }
    } else {
      RemovePage(current, size);
      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
          current);
    }
    current = next_current;
  }
  objects_size_ = surviving_object_size;
}

bool LargeObjectSpace::Contains(HeapObject object) {
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || ContainsSlow(object.address()));

  return owned;
}

bool LargeObjectSpace::ContainsSlow(Address addr) {
  for (LargePage* page : *this) {
    if (page->Contains(addr)) return true;
  }
  return false;
}

std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
    Heap* heap) {
  return std::unique_ptr<ObjectIterator>(
      new LargeObjectSpaceObjectIterator(this));
}

#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify(Isolate* isolate) {
  size_t external_backing_store_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  PtrComprCageBase cage_base(isolate);
  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject object = chunk->GetObject();
    Page* page = Page::FromHeapObject(object);
    CHECK(object.address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space or read-only space.
    Map map = object.map(cage_base);
    CHECK(map.IsMap(cage_base));
    CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));

    // We have only the following types in the large object space:
    if (!(object.IsAbstractCode(cage_base) || object.IsSeqString(cage_base) ||
          object.IsExternalString(cage_base) ||
          object.IsThinString(cage_base) || object.IsFixedArray(cage_base) ||
          object.IsFixedDoubleArray(cage_base) ||
          object.IsWeakFixedArray(cage_base) ||
          object.IsWeakArrayList(cage_base) ||
          object.IsPropertyArray(cage_base) || object.IsByteArray(cage_base) ||
          object.IsFeedbackVector(cage_base) || object.IsBigInt(cage_base) ||
          object.IsFreeSpace(cage_base) ||
          object.IsFeedbackMetadata(cage_base) ||
          object.IsContext(cage_base) ||
          object.IsUncompiledDataWithoutPreparseData(cage_base) ||
          object.IsPreparseData(cage_base)) &&
        !FLAG_young_generation_large_objects) {
      FATAL("Found invalid Object (instance_type=%i) in large object space.",
            object.map(cage_base).instance_type());
    }

    // The object itself should look OK.
    object.ObjectVerify(isolate);

    if (!FLAG_verify_heap_skip_remembered_set) {
      heap()->VerifyRememberedSetFor(object);
    }

    // Byte arrays and strings don't have interior pointers.
    if (object.IsAbstractCode(cage_base)) {
      VerifyPointersVisitor code_visitor(heap());
      object.IterateBody(map, object.Size(), &code_visitor);
    } else if (object.IsFixedArray(cage_base)) {
      FixedArray array = FixedArray::cast(object);
      for (int j = 0; j < array.length(); j++) {
        Object element = array.get(j);
        if (element.IsHeapObject()) {
          HeapObject element_object = HeapObject::cast(element);
          CHECK(IsValidHeapObject(heap(), element_object));
          CHECK(element_object.map(cage_base).IsMap(cage_base));
        }
      }
    } else if (object.IsPropertyArray(cage_base)) {
      PropertyArray array = PropertyArray::cast(object);
      for (int j = 0; j < array.length(); j++) {
        Object property = array.get(j);
        if (property.IsHeapObject()) {
          HeapObject property_object = HeapObject::cast(property);
          CHECK(heap()->Contains(property_object));
          CHECK(property_object.map(cage_base).IsMap(cage_base));
        }
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    }
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
  }
}
#endif  // VERIFY_HEAP

#ifdef DEBUG
void LargeObjectSpace::Print() {
  StdoutStream os;
  LargeObjectSpaceObjectIterator it(this);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    obj.Print(os);
  }
}
#endif  // DEBUG

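// Publishes the address of the object that is currently being set up so that
// concurrent readers can tell that its contents may still be uninitialized.
// The exclusive lock and release store make the update visible to other
// threads.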
void LargeObjectSpace::UpdatePendingObject(HeapObject object) {
  base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
  pending_object_.store(object.address(), std::memory_order_release);
}

OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, LO_SPACE) {}

OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
    : LargeObjectSpace(heap, id) {}

NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
    : LargeObjectSpace(heap, NEW_LO_SPACE),
      capacity_(capacity) {}

AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
  DCHECK(!FLAG_enable_third_party_heap);
  // Do not allocate more objects if promoting the existing object would exceed
  // the old generation capacity.
  if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
    return AllocationResult::Retry(identity());
  }

  // Allocation for the first object must succeed independent of the capacity.
  if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Retry(identity());

  // The size of the first object may exceed the capacity.
  capacity_ = std::max(capacity_, SizeOfObjects());

  HeapObject result = page->GetObject();
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->SetFlag(MemoryChunk::TO_PAGE);
  UpdatePendingObject(result);
#ifdef ENABLE_MINOR_MC
  if (FLAG_minor_mc) {
    page->AllocateYoungGenerationBitmap();
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
  }
#endif  // ENABLE_MINOR_MC
  page->InitializationMemoryFence();
  DCHECK(page->IsLargePage());
  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
  AdvanceAndInvokeAllocationObservers(result.address(),
                                      static_cast<size_t>(object_size));
  return result;
}

size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }

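// Flips every page from the to-space state to the from-space state when the
// young generation is collected; pages that survive are promoted via
// OldLargeObjectSpace::PromoteNewLargeObject.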
void NewLargeObjectSpace::Flip() {
  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    chunk->SetFlag(MemoryChunk::FROM_PAGE);
    chunk->ClearFlag(MemoryChunk::TO_PAGE);
  }
}

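// Frees every page whose single object is dead according to is_dead, clearing
// any concurrent-marking data for the page, and finally releases the queued
// chunks in one batch.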
void NewLargeObjectSpace::FreeDeadObjects(
    const std::function<bool(HeapObject)>& is_dead) {
  bool is_marking = heap()->incremental_marking()->IsMarking();
  size_t surviving_object_size = 0;
  bool freed_pages = false;
  for (auto it = begin(); it != end();) {
    LargePage* page = *it;
    it++;
    HeapObject object = page->GetObject();
    size_t size = static_cast<size_t>(object.Size());
    if (is_dead(object)) {
      freed_pages = true;
      RemovePage(page, size);
      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
      if (FLAG_concurrent_marking && is_marking) {
        heap()->concurrent_marking()->ClearMemoryChunkData(page);
      }
    } else {
      surviving_object_size += size;
    }
  }
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  objects_size_ = surviving_object_size;
  if (freed_pages) {
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  }
}

void NewLargeObjectSpace::SetCapacity(size_t capacity) {
  capacity_ = std::max(capacity, SizeOfObjects());
}

CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    : OldLargeObjectSpace(heap, CODE_LO_SPACE),
      chunk_map_(kInitialChunkMapCapacity) {}

AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
  DCHECK(!FLAG_enable_third_party_heap);
  return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}

void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
  OldLargeObjectSpace::AddPage(page, object_size);
  InsertChunkMapEntries(page);
}

void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
  RemoveChunkMapEntries(page);
  heap()->isolate()->RemoveCodeMemoryChunk(page);
  OldLargeObjectSpace::RemovePage(page, object_size);
}

}  // namespace internal
}  // namespace v8