1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/heap.h"
6
7 #include <atomic>
8 #include <cinttypes>
9 #include <iomanip>
10 #include <memory>
11 #include <unordered_map>
12 #include <unordered_set>
13
14 #include "src/api/api-inl.h"
15 #include "src/base/bits.h"
16 #include "src/base/flags.h"
17 #include "src/base/logging.h"
18 #include "src/base/once.h"
19 #include "src/base/platform/mutex.h"
20 #include "src/base/utils/random-number-generator.h"
21 #include "src/builtins/accessors.h"
22 #include "src/codegen/assembler-inl.h"
23 #include "src/codegen/compilation-cache.h"
24 #include "src/common/assert-scope.h"
25 #include "src/common/globals.h"
26 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
27 #include "src/debug/debug.h"
28 #include "src/deoptimizer/deoptimizer.h"
29 #include "src/execution/isolate-utils-inl.h"
30 #include "src/execution/microtask-queue.h"
31 #include "src/execution/runtime-profiler.h"
32 #include "src/execution/v8threads.h"
33 #include "src/execution/vm-state-inl.h"
34 #include "src/handles/global-handles-inl.h"
35 #include "src/heap/array-buffer-sweeper.h"
36 #include "src/heap/barrier.h"
37 #include "src/heap/base/stack.h"
38 #include "src/heap/basic-memory-chunk.h"
39 #include "src/heap/code-object-registry.h"
40 #include "src/heap/code-range.h"
41 #include "src/heap/code-stats.h"
42 #include "src/heap/collection-barrier.h"
43 #include "src/heap/combined-heap.h"
44 #include "src/heap/concurrent-allocator.h"
45 #include "src/heap/concurrent-marking.h"
46 #include "src/heap/cppgc-js/cpp-heap.h"
47 #include "src/heap/embedder-tracing.h"
48 #include "src/heap/finalization-registry-cleanup-task.h"
49 #include "src/heap/gc-idle-time-handler.h"
50 #include "src/heap/gc-tracer.h"
51 #include "src/heap/heap-controller.h"
52 #include "src/heap/heap-write-barrier-inl.h"
53 #include "src/heap/incremental-marking-inl.h"
54 #include "src/heap/incremental-marking.h"
55 #include "src/heap/large-spaces.h"
56 #include "src/heap/local-heap.h"
57 #include "src/heap/mark-compact-inl.h"
58 #include "src/heap/mark-compact.h"
59 #include "src/heap/marking-barrier-inl.h"
60 #include "src/heap/marking-barrier.h"
61 #include "src/heap/memory-chunk-inl.h"
62 #include "src/heap/memory-chunk-layout.h"
63 #include "src/heap/memory-measurement.h"
64 #include "src/heap/memory-reducer.h"
65 #include "src/heap/object-stats.h"
66 #include "src/heap/objects-visiting-inl.h"
67 #include "src/heap/objects-visiting.h"
68 #include "src/heap/paged-spaces-inl.h"
69 #include "src/heap/read-only-heap.h"
70 #include "src/heap/remembered-set.h"
71 #include "src/heap/safepoint.h"
72 #include "src/heap/scavenge-job.h"
73 #include "src/heap/scavenger-inl.h"
74 #include "src/heap/stress-marking-observer.h"
75 #include "src/heap/stress-scavenge-observer.h"
76 #include "src/heap/sweeper.h"
77 #include "src/init/bootstrapper.h"
78 #include "src/init/v8.h"
79 #include "src/interpreter/interpreter.h"
80 #include "src/logging/log.h"
81 #include "src/logging/runtime-call-stats-scope.h"
82 #include "src/numbers/conversions.h"
83 #include "src/objects/data-handler.h"
84 #include "src/objects/feedback-vector.h"
85 #include "src/objects/free-space-inl.h"
86 #include "src/objects/hash-table-inl.h"
87 #include "src/objects/maybe-object.h"
88 #include "src/objects/shared-function-info.h"
89 #include "src/objects/slots-atomic-inl.h"
90 #include "src/objects/slots-inl.h"
91 #include "src/regexp/regexp.h"
92 #include "src/snapshot/embedded/embedded-data.h"
93 #include "src/snapshot/serializer-deserializer.h"
94 #include "src/snapshot/snapshot.h"
95 #include "src/strings/string-stream.h"
96 #include "src/strings/unicode-decoder.h"
97 #include "src/strings/unicode-inl.h"
98 #include "src/tracing/trace-event.h"
99 #include "src/utils/utils-inl.h"
100 #include "src/utils/utils.h"
101
102 #ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
103 #include "src/heap/conservative-stack-visitor.h"
104 #endif
105
106 #include "src/base/platform/wrappers.h"
107 // Has to be the last include (doesn't have include guards):
108 #include "src/objects/object-macros.h"
109
110 namespace v8 {
111 namespace internal {
112
113 namespace {
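// Process-wide counter handing out a monotonically increasing epoch for each
// GC cycle; next_epoch() increments it atomically and returns the new value.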
114 std::atomic<CollectionEpoch> global_epoch{0};
115
116 CollectionEpoch next_epoch() {
117 return global_epoch.fetch_add(1, std::memory_order_relaxed) + 1;
118 }
119 } // namespace
120
121 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
122 Isolate* Heap::GetIsolateFromWritableObject(HeapObject object) {
123 return reinterpret_cast<Isolate*>(
124 third_party_heap::Heap::GetIsolate(object.address()));
125 }
126 #endif
127
128 // These are outside the Heap class so they can be forward-declared
129 // in heap-write-barrier-inl.h.
130 bool Heap_PageFlagsAreConsistent(HeapObject object) {
131 return Heap::PageFlagsAreConsistent(object);
132 }
133
134 bool Heap_ValueMightRequireGenerationalWriteBarrier(HeapObject value) {
135 if (!value.IsCode()) return true;
136 // Code objects are never in new space and thus don't require a generational
137 // write barrier.
138 DCHECK(!ObjectInYoungGeneration(value));
139 return false;
140 }
141
142 void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
143 HeapObject value) {
144 Heap::GenerationalBarrierSlow(object, slot, value);
145 }
146
147 void Heap_WriteBarrierForCodeSlow(Code host) {
148 Heap::WriteBarrierForCodeSlow(host);
149 }
150
151 void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
152 HeapObject object) {
153 Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
154 }
155
156 void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap,
157 EphemeronHashTable table,
158 Address slot) {
159 heap->RecordEphemeronKeyWrite(table, slot);
160 }
161
162 void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
163 DCHECK_EQ(Smi::zero(), construct_stub_create_deopt_pc_offset());
164 set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
165 }
166
167 void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
168 DCHECK_EQ(Smi::zero(), construct_stub_invoke_deopt_pc_offset());
169 set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
170 }
171
172 void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
173 DCHECK_EQ(Smi::zero(), interpreter_entry_return_pc_offset());
174 set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
175 }
176
177 void Heap::SetSerializedObjects(FixedArray objects) {
178 DCHECK(isolate()->serializer_enabled());
179 set_serialized_objects(objects);
180 }
181
182 void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
183 DCHECK(isolate()->serializer_enabled());
184 set_serialized_global_proxy_sizes(sizes);
185 }
186
187 void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
188 set_basic_block_profiling_data(*list);
189 }
190
191 bool Heap::GCCallbackTuple::operator==(
192 const Heap::GCCallbackTuple& other) const {
193 return other.callback == callback && other.data == data;
194 }
195
196 Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
197 const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
198
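// Allocation observer that schedules a scavenge (young generation GC) task
// once the configured number of bytes has been allocated in new space.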
199 class ScavengeTaskObserver : public AllocationObserver {
200 public:
201 ScavengeTaskObserver(Heap* heap, intptr_t step_size)
202 : AllocationObserver(step_size), heap_(heap) {}
203
204 void Step(int bytes_allocated, Address, size_t) override {
205 heap_->ScheduleScavengeTaskIfNeeded();
206 }
207
208 private:
209 Heap* heap_;
210 };
211
212 Heap::Heap()
213 : isolate_(isolate()),
214 memory_pressure_level_(MemoryPressureLevel::kNone),
215 global_pretenuring_feedback_(kInitialFeedbackCapacity),
216 safepoint_(new GlobalSafepoint(this)),
217 external_string_table_(this),
218 collection_barrier_(new CollectionBarrier(this)) {
219 // Ensure old_generation_size_ is a multiple of kPageSize.
220 DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));
221
222 max_regular_code_object_size_ = MemoryChunkLayout::MaxRegularCodeObjectSize();
223
224 set_native_contexts_list(Smi::zero());
225 set_allocation_sites_list(Smi::zero());
226 set_dirty_js_finalization_registries_list(Smi::zero());
227 set_dirty_js_finalization_registries_list_tail(Smi::zero());
228 // Put a dummy entry in the remembered pages so we can find the list in the
229 // minidump even if there are no real unmapped pages.
230 RememberUnmappedPage(kNullAddress, false);
231 }
232
233 Heap::~Heap() = default;
234
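// Upper bound on the memory the heap may reserve: two semi-spaces, a new
// large object space of at most one semi-space size, and the old generation.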
235 size_t Heap::MaxReserved() {
236 const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
237 return static_cast<size_t>(2 * max_semi_space_size_ +
238 kMaxNewLargeObjectSpaceSize +
239 max_old_generation_size());
240 }
241
242 size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
243 // Compute the semi space size and cap it.
244 size_t ratio = old_generation <= kOldGenerationLowMemory
245 ? kOldGenerationToSemiSpaceRatioLowMemory
246 : kOldGenerationToSemiSpaceRatio;
247 size_t semi_space = old_generation / ratio;
248 semi_space = std::min({semi_space, kMaxSemiSpaceSize});
249 semi_space = std::max({semi_space, kMinSemiSpaceSize});
250 semi_space = RoundUp(semi_space, Page::kPageSize);
251 return YoungGenerationSizeFromSemiSpaceSize(semi_space);
252 }
253
254 size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) {
255 // Compute the old generation size and cap it.
256 uint64_t old_generation = physical_memory /
257 kPhysicalMemoryToOldGenerationRatio *
258 kHeapLimitMultiplier;
259 old_generation =
260 std::min(old_generation,
261 static_cast<uint64_t>(MaxOldGenerationSize(physical_memory)));
262 old_generation =
263 std::max({old_generation, static_cast<uint64_t>(V8HeapTrait::kMinSize)});
264 old_generation = RoundUp(old_generation, Page::kPageSize);
265
266 size_t young_generation = YoungGenerationSizeFromOldGenerationSize(
267 static_cast<size_t>(old_generation));
268 return static_cast<size_t>(old_generation) + young_generation;
269 }
270
271 void Heap::GenerationSizesFromHeapSize(size_t heap_size,
272 size_t* young_generation_size,
273 size_t* old_generation_size) {
274 // Initialize values for the case when the given heap size is too small.
275 *young_generation_size = 0;
276 *old_generation_size = 0;
277 // Binary search for the largest old generation size that fits into the given
278 // heap limit considering the correspondingly sized young generation.
279 size_t lower = 0, upper = heap_size;
280 while (lower + 1 < upper) {
281 size_t old_generation = lower + (upper - lower) / 2;
282 size_t young_generation =
283 YoungGenerationSizeFromOldGenerationSize(old_generation);
284 if (old_generation + young_generation <= heap_size) {
285 // This size configuration fits into the given heap limit.
286 *young_generation_size = young_generation;
287 *old_generation_size = old_generation;
288 lower = old_generation;
289 } else {
290 upper = old_generation;
291 }
292 }
293 }
294
295 size_t Heap::MinYoungGenerationSize() {
296 return YoungGenerationSizeFromSemiSpaceSize(kMinSemiSpaceSize);
297 }
298
299 size_t Heap::MinOldGenerationSize() {
300 size_t paged_space_count =
301 LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
302 return paged_space_count * Page::kPageSize;
303 }
304
305 size_t Heap::AllocatorLimitOnMaxOldGenerationSize() {
306 #ifdef V8_COMPRESS_POINTERS
307 // The Isolate and the young generation are also allocated inside the
308 // pointer compression cage reservation.
308 return kPtrComprCageReservationSize -
309 YoungGenerationSizeFromSemiSpaceSize(kMaxSemiSpaceSize) -
310 RoundUp(sizeof(Isolate), size_t{1} << kPageSizeBits);
311 #else
312 return std::numeric_limits<size_t>::max();
313 #endif
314 }
315
316 size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
317 size_t max_size = V8HeapTrait::kMaxSize;
318 // Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
319 // systems with physical memory bigger than 16GB. The physical memory
320 // is rounded up to GB.
321 constexpr bool x64_bit = Heap::kHeapLimitMultiplier >= 2;
322 if (FLAG_huge_max_old_generation_size && x64_bit &&
323 (physical_memory + 512 * MB) / GB >= 16) {
324 DCHECK_EQ(max_size / GB, 2);
325 max_size *= 2;
326 }
327 return std::min(max_size, AllocatorLimitOnMaxOldGenerationSize());
328 }
329
330 size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) {
331 return semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
332 }
333
334 size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
335 size_t young_generation_size) {
336 return young_generation_size / (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
337 }
338
339 size_t Heap::Capacity() {
340 if (!HasBeenSetUp()) return 0;
341
342 if (FLAG_enable_third_party_heap) return tp_heap_->Capacity();
343
344 return NewSpaceCapacity() + OldGenerationCapacity();
345 }
346
347 size_t Heap::OldGenerationCapacity() {
348 if (!HasBeenSetUp()) return 0;
349 PagedSpaceIterator spaces(this);
350 size_t total = 0;
351 for (PagedSpace* space = spaces.Next(); space != nullptr;
352 space = spaces.Next()) {
353 total += space->Capacity();
354 }
355 return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
356 }
357
358 size_t Heap::CommittedOldGenerationMemory() {
359 if (!HasBeenSetUp()) return 0;
360
361 PagedSpaceIterator spaces(this);
362 size_t total = 0;
363 for (PagedSpace* space = spaces.Next(); space != nullptr;
364 space = spaces.Next()) {
365 total += space->CommittedMemory();
366 }
367 return total + lo_space_->Size() + code_lo_space_->Size();
368 }
369
370 size_t Heap::CommittedMemoryOfUnmapper() {
371 if (!HasBeenSetUp()) return 0;
372
373 return memory_allocator()->unmapper()->CommittedBufferedMemory();
374 }
375
376 size_t Heap::CommittedMemory() {
377 if (!HasBeenSetUp()) return 0;
378
379 size_t new_space_committed = new_space_ ? new_space_->CommittedMemory() : 0;
380 size_t new_lo_space_committed = new_lo_space_ ? new_lo_space_->Size() : 0;
381
382 return new_space_committed + new_lo_space_committed +
383 CommittedOldGenerationMemory();
384 }
385
386 size_t Heap::CommittedPhysicalMemory() {
387 if (!HasBeenSetUp()) return 0;
388
389 size_t total = 0;
390 for (SpaceIterator it(this); it.HasNext();) {
391 total += it.Next()->CommittedPhysicalMemory();
392 }
393
394 return total;
395 }
396
397 size_t Heap::CommittedMemoryExecutable() {
398 if (!HasBeenSetUp()) return 0;
399
400 return static_cast<size_t>(memory_allocator()->SizeExecutable());
401 }
402
403 void Heap::UpdateMaximumCommitted() {
404 if (!HasBeenSetUp()) return;
405
406 const size_t current_committed_memory = CommittedMemory();
407 if (current_committed_memory > maximum_committed_) {
408 maximum_committed_ = current_committed_memory;
409 }
410 }
411
412 size_t Heap::Available() {
413 if (!HasBeenSetUp()) return 0;
414
415 size_t total = 0;
416
417 for (SpaceIterator it(this); it.HasNext();) {
418 total += it.Next()->Available();
419 }
420
421 total += memory_allocator()->Available();
422 return total;
423 }
424
425 bool Heap::CanExpandOldGeneration(size_t size) {
426 if (force_oom_ || force_gc_on_next_allocation_) return false;
427 if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
428 // The OldGenerationCapacity does not account for compaction spaces used
429 // during evacuation. Ensure that expanding the old generation does not push
430 // the total allocated memory size over the maximum heap size.
431 return memory_allocator()->Size() + size <= MaxReserved();
432 }
433
434 bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
435 size_t size) {
436 if (force_oom_) return false;
437
438 // When the heap is tearing down, GC requests from background threads are
439 // not served and the threads are allowed to expand the heap to avoid OOM.
440 return gc_state() == TEAR_DOWN || IsMainThreadParked(local_heap) ||
441 memory_allocator()->Size() + size <= MaxReserved();
442 }
443
444 bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) {
445 size_t new_space_capacity = NewSpaceCapacity();
446 size_t new_lo_space_capacity = new_lo_space_ ? new_lo_space_->Size() : 0;
447
448 // Over-estimate the new space size using capacity to allow some slack.
449 return CanExpandOldGeneration(size + new_space_capacity +
450 new_lo_space_capacity);
451 }
452
453 bool Heap::HasBeenSetUp() const {
454 // We will always have an old space when the heap is set up.
455 return old_space_ != nullptr;
456 }
457
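// Chooses between a young generation collection and a full mark-compact
// collection for a GC requested for |space|.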
458 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
459 const char** reason) {
460 // Is global GC requested?
461 if (space != NEW_SPACE && space != NEW_LO_SPACE) {
462 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
463 *reason = "GC in old space requested";
464 return GarbageCollector::MARK_COMPACTOR;
465 }
466
467 if (FLAG_gc_global || ShouldStressCompaction() || !new_space()) {
468 *reason = "GC in old space forced by flags";
469 return GarbageCollector::MARK_COMPACTOR;
470 }
471
472 if (incremental_marking()->NeedsFinalization() &&
473 AllocationLimitOvershotByLargeMargin()) {
474 *reason = "Incremental marking needs finalization";
475 return GarbageCollector::MARK_COMPACTOR;
476 }
477
478 if (!CanPromoteYoungAndExpandOldGeneration(0)) {
479 isolate_->counters()
480 ->gc_compactor_caused_by_oldspace_exhaustion()
481 ->Increment();
482 *reason = "scavenge might not succeed";
483 return GarbageCollector::MARK_COMPACTOR;
484 }
485
486 // Default
487 *reason = nullptr;
488 return YoungGenerationCollector();
489 }
490
491 void Heap::SetGCState(HeapState state) {
492 gc_state_.store(state, std::memory_order_relaxed);
493 }
494
495 void Heap::PrintShortHeapStatistics() {
496 if (!FLAG_trace_gc_verbose) return;
497 PrintIsolate(isolate_,
498 "Memory allocator, used: %6zu KB,"
499 " available: %6zu KB\n",
500 memory_allocator()->Size() / KB,
501 memory_allocator()->Available() / KB);
502 PrintIsolate(isolate_,
503 "Read-only space, used: %6zu KB"
504 ", available: %6zu KB"
505 ", committed: %6zu KB\n",
506 read_only_space_->Size() / KB, size_t{0},
507 read_only_space_->CommittedMemory() / KB);
508 PrintIsolate(isolate_,
509 "New space, used: %6zu KB"
510 ", available: %6zu KB"
511 ", committed: %6zu KB\n",
512 NewSpaceSize() / KB, new_space_->Available() / KB,
513 new_space_->CommittedMemory() / KB);
514 PrintIsolate(isolate_,
515 "New large object space, used: %6zu KB"
516 ", available: %6zu KB"
517 ", committed: %6zu KB\n",
518 new_lo_space_->SizeOfObjects() / KB,
519 new_lo_space_->Available() / KB,
520 new_lo_space_->CommittedMemory() / KB);
521 PrintIsolate(isolate_,
522 "Old space, used: %6zu KB"
523 ", available: %6zu KB"
524 ", committed: %6zu KB\n",
525 old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
526 old_space_->CommittedMemory() / KB);
527 PrintIsolate(isolate_,
528 "Code space, used: %6zu KB"
529 ", available: %6zu KB"
530 ", committed: %6zu KB\n",
531 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
532 code_space_->CommittedMemory() / KB);
533 PrintIsolate(isolate_,
534 "Map space, used: %6zu KB"
535 ", available: %6zu KB"
536 ", committed: %6zu KB\n",
537 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
538 map_space_->CommittedMemory() / KB);
539 PrintIsolate(isolate_,
540 "Large object space, used: %6zu KB"
541 ", available: %6zu KB"
542 ", committed: %6zu KB\n",
543 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
544 lo_space_->CommittedMemory() / KB);
545 PrintIsolate(isolate_,
546 "Code large object space, used: %6zu KB"
547 ", available: %6zu KB"
548 ", committed: %6zu KB\n",
549 code_lo_space_->SizeOfObjects() / KB,
550 code_lo_space_->Available() / KB,
551 code_lo_space_->CommittedMemory() / KB);
552 ReadOnlySpace* const ro_space = read_only_space_;
553 PrintIsolate(isolate_,
554 "All spaces, used: %6zu KB"
555 ", available: %6zu KB"
556 ", committed: %6zu KB\n",
557 (this->SizeOfObjects() + ro_space->Size()) / KB,
558 (this->Available()) / KB,
559 (this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
560 PrintIsolate(isolate_,
561 "Unmapper buffering %zu chunks of committed: %6zu KB\n",
562 memory_allocator()->unmapper()->NumberOfCommittedChunks(),
563 CommittedMemoryOfUnmapper() / KB);
564 PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
565 external_memory_.total() / KB);
566 PrintIsolate(isolate_, "Backing store memory: %6" PRIu64 " KB\n",
567 backing_store_bytes() / KB);
568 PrintIsolate(isolate_, "External memory global %zu KB\n",
569 external_memory_callback_() / KB);
570 PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
571 total_gc_time_ms_);
572 }
573
574 void Heap::PrintFreeListsStats() {
575 DCHECK(FLAG_trace_gc_freelists);
576
577 if (FLAG_trace_gc_freelists_verbose) {
578 PrintIsolate(isolate_,
579 "Freelists statistics per Page: "
580 "[category: length || total free bytes]\n");
581 }
582
583 std::vector<int> categories_lengths(
584 old_space()->free_list()->number_of_categories(), 0);
585 std::vector<size_t> categories_sums(
586 old_space()->free_list()->number_of_categories(), 0);
587 unsigned int pageCnt = 0;
588
589 // This loop computes freelist lengths and sums.
590 // If FLAG_trace_gc_freelists_verbose is enabled, it also prints
591 // the stats of each FreeListCategory of each Page.
592 for (Page* page : *old_space()) {
593 std::ostringstream out_str;
594
595 if (FLAG_trace_gc_freelists_verbose) {
596 out_str << "Page " << std::setw(4) << pageCnt;
597 }
598
599 for (int cat = kFirstCategory;
600 cat <= old_space()->free_list()->last_category(); cat++) {
601 FreeListCategory* free_list =
602 page->free_list_category(static_cast<FreeListCategoryType>(cat));
603 int length = free_list->FreeListLength();
604 size_t sum = free_list->SumFreeList();
605
606 if (FLAG_trace_gc_freelists_verbose) {
607 out_str << "[" << cat << ": " << std::setw(4) << length << " || "
608 << std::setw(6) << sum << " ]"
609 << (cat == old_space()->free_list()->last_category() ? "\n"
610 : ", ");
611 }
612 categories_lengths[cat] += length;
613 categories_sums[cat] += sum;
614 }
615
616 if (FLAG_trace_gc_freelists_verbose) {
617 PrintIsolate(isolate_, "%s", out_str.str().c_str());
618 }
619
620 pageCnt++;
621 }
622
623 // Print statistics about old_space (pages, free/wasted/used memory...).
624 PrintIsolate(
625 isolate_,
626 "%d pages. Free space: %.1f MB (waste: %.2f). "
627 "Usage: %.1f/%.1f (MB) -> %.2f%%.\n",
628 pageCnt, static_cast<double>(old_space_->Available()) / MB,
629 static_cast<double>(old_space_->Waste()) / MB,
630 static_cast<double>(old_space_->Size()) / MB,
631 static_cast<double>(old_space_->Capacity()) / MB,
632 static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100);
633
634 // Print global statistics of each FreeListCategory (length & sum).
635 PrintIsolate(isolate_,
636 "FreeLists global statistics: "
637 "[category: length || total free KB]\n");
638 std::ostringstream out_str;
639 for (int cat = kFirstCategory;
640 cat <= old_space()->free_list()->last_category(); cat++) {
641 out_str << "[" << cat << ": " << categories_lengths[cat] << " || "
642 << std::fixed << std::setprecision(2)
643 << static_cast<double>(categories_sums[cat]) / KB << " KB]"
644 << (cat == old_space()->free_list()->last_category() ? "\n" : ", ");
645 }
646 PrintIsolate(isolate_, "%s", out_str.str().c_str());
647 }
648
649 void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
650 HeapStatistics stats;
651 reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);
652
653 // clang-format off
654 #define DICT(s) "{" << s << "}"
655 #define LIST(s) "[" << s << "]"
656 #define QUOTE(s) "\"" << s << "\""
657 #define MEMBER(s) QUOTE(s) << ":"
658
659 auto SpaceStatistics = [this](int space_index) {
660 HeapSpaceStatistics space_stats;
661 reinterpret_cast<v8::Isolate*>(isolate())->GetHeapSpaceStatistics(
662 &space_stats, space_index);
663 std::stringstream stream;
664 stream << DICT(
665 MEMBER("name")
666 << QUOTE(BaseSpace::GetSpaceName(
667 static_cast<AllocationSpace>(space_index)))
668 << ","
669 MEMBER("size") << space_stats.space_size() << ","
670 MEMBER("used_size") << space_stats.space_used_size() << ","
671 MEMBER("available_size") << space_stats.space_available_size() << ","
672 MEMBER("physical_size") << space_stats.physical_space_size());
673 return stream.str();
674 };
675
676 stream << DICT(
677 MEMBER("isolate") << QUOTE(reinterpret_cast<void*>(isolate())) << ","
678 MEMBER("id") << gc_count() << ","
679 MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
680 MEMBER("total_heap_size") << stats.total_heap_size() << ","
681 MEMBER("total_heap_size_executable")
682 << stats.total_heap_size_executable() << ","
683 MEMBER("total_physical_size") << stats.total_physical_size() << ","
684 MEMBER("total_available_size") << stats.total_available_size() << ","
685 MEMBER("used_heap_size") << stats.used_heap_size() << ","
686 MEMBER("heap_size_limit") << stats.heap_size_limit() << ","
687 MEMBER("malloced_memory") << stats.malloced_memory() << ","
688 MEMBER("external_memory") << stats.external_memory() << ","
689 MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
690 MEMBER("spaces") << LIST(
691 SpaceStatistics(RO_SPACE) << "," <<
692 SpaceStatistics(NEW_SPACE) << "," <<
693 SpaceStatistics(OLD_SPACE) << "," <<
694 SpaceStatistics(CODE_SPACE) << "," <<
695 SpaceStatistics(MAP_SPACE) << "," <<
696 SpaceStatistics(LO_SPACE) << "," <<
697 SpaceStatistics(CODE_LO_SPACE) << "," <<
698 SpaceStatistics(NEW_LO_SPACE)));
699
700 #undef DICT
701 #undef LIST
702 #undef QUOTE
703 #undef MEMBER
704 // clang-format on
705 }
706
707 void Heap::ReportStatisticsAfterGC() {
708 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
709 ++i) {
710 int count = deferred_counters_[i];
711 deferred_counters_[i] = 0;
712 while (count > 0) {
713 count--;
714 isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
715 }
716 }
717 }
718
719 void Heap::AddHeapObjectAllocationTracker(
720 HeapObjectAllocationTracker* tracker) {
721 if (allocation_trackers_.empty() && FLAG_inline_new) {
722 DisableInlineAllocation();
723 }
724 allocation_trackers_.push_back(tracker);
725 }
726
727 void Heap::RemoveHeapObjectAllocationTracker(
728 HeapObjectAllocationTracker* tracker) {
729 allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
730 allocation_trackers_.end(), tracker),
731 allocation_trackers_.end());
732 if (allocation_trackers_.empty() && FLAG_inline_new) {
733 EnableInlineAllocation();
734 }
735 }
736
737 void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
738 RetainingPathOption option) {
739 if (!FLAG_track_retaining_path) {
740 PrintF("Retaining path tracking requires --track-retaining-path\n");
741 } else {
742 Handle<WeakArrayList> array(retaining_path_targets(), isolate());
743 int index = array->length();
744 array = WeakArrayList::AddToEnd(isolate(), array,
745 MaybeObjectHandle::Weak(object));
746 set_retaining_path_targets(*array);
747 DCHECK_EQ(array->length(), index + 1);
748 retaining_path_target_option_[index] = option;
749 }
750 }
751
752 bool Heap::IsRetainingPathTarget(HeapObject object,
753 RetainingPathOption* option) {
754 WeakArrayList targets = retaining_path_targets();
755 int length = targets.length();
756 MaybeObject object_to_check = HeapObjectReference::Weak(object);
757 for (int i = 0; i < length; i++) {
758 MaybeObject target = targets.Get(i);
759 DCHECK(target->IsWeakOrCleared());
760 if (target == object_to_check) {
761 DCHECK(retaining_path_target_option_.count(i));
762 *option = retaining_path_target_option_[i];
763 return true;
764 }
765 }
766 return false;
767 }
768
769 void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
770 PrintF("\n\n\n");
771 PrintF("#################################################\n");
772 PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target.ptr()));
773 HeapObject object = target;
774 std::vector<std::pair<HeapObject, bool>> retaining_path;
775 Root root = Root::kUnknown;
776 bool ephemeron = false;
777 while (true) {
778 retaining_path.push_back(std::make_pair(object, ephemeron));
779 if (option == RetainingPathOption::kTrackEphemeronPath &&
780 ephemeron_retainer_.count(object)) {
781 object = ephemeron_retainer_[object];
782 ephemeron = true;
783 } else if (retainer_.count(object)) {
784 object = retainer_[object];
785 ephemeron = false;
786 } else {
787 if (retaining_root_.count(object)) {
788 root = retaining_root_[object];
789 }
790 break;
791 }
792 }
793 int distance = static_cast<int>(retaining_path.size());
794 for (auto node : retaining_path) {
795 HeapObject node_object = node.first;
796 bool node_ephemeron = node.second;
797 PrintF("\n");
798 PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
799 PrintF("Distance from root %d%s: ", distance,
800 node_ephemeron ? " (ephemeron)" : "");
801 node_object.ShortPrint();
802 PrintF("\n");
803 #ifdef OBJECT_PRINT
804 node_object.Print();
805 PrintF("\n");
806 #endif
807 --distance;
808 }
809 PrintF("\n");
810 PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
811 PrintF("Root: %s\n", RootVisitor::RootName(root));
812 PrintF("-------------------------------------------------\n");
813 }
814
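// Rewrites the keys and values of a retainers map to their post-scavenge
// locations, dropping entries for objects that did not survive.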
815 void UpdateRetainersMapAfterScavenge(
816 std::unordered_map<HeapObject, HeapObject, Object::Hasher>* map) {
817 std::unordered_map<HeapObject, HeapObject, Object::Hasher> updated_map;
818
819 for (auto pair : *map) {
820 HeapObject object = pair.first;
821 HeapObject retainer = pair.second;
822
823 if (Heap::InFromPage(object)) {
824 MapWord map_word = object.map_word(kRelaxedLoad);
825 if (!map_word.IsForwardingAddress()) continue;
826 object = map_word.ToForwardingAddress();
827 }
828
829 if (Heap::InFromPage(retainer)) {
830 MapWord map_word = retainer.map_word(kRelaxedLoad);
831 if (!map_word.IsForwardingAddress()) continue;
832 retainer = map_word.ToForwardingAddress();
833 }
834
835 updated_map[object] = retainer;
836 }
837
838 *map = std::move(updated_map);
839 }
840
841 void Heap::UpdateRetainersAfterScavenge() {
842 if (!incremental_marking()->IsMarking()) return;
843
844 // This isn't supported for Minor MC.
845 DCHECK(!FLAG_minor_mc);
846
847 UpdateRetainersMapAfterScavenge(&retainer_);
848 UpdateRetainersMapAfterScavenge(&ephemeron_retainer_);
849
850 std::unordered_map<HeapObject, Root, Object::Hasher> updated_retaining_root;
851
852 for (auto pair : retaining_root_) {
853 HeapObject object = pair.first;
854
855 if (Heap::InFromPage(object)) {
856 MapWord map_word = object.map_word(kRelaxedLoad);
857 if (!map_word.IsForwardingAddress()) continue;
858 object = map_word.ToForwardingAddress();
859 }
860
861 updated_retaining_root[object] = pair.second;
862 }
863
864 retaining_root_ = std::move(updated_retaining_root);
865 }
866
867 void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
868 if (retainer_.count(object)) return;
869 retainer_[object] = retainer;
870 RetainingPathOption option = RetainingPathOption::kDefault;
871 if (IsRetainingPathTarget(object, &option)) {
872 // Check if the retaining path was already printed in
873 // AddEphemeronRetainer().
874 if (ephemeron_retainer_.count(object) == 0 ||
875 option == RetainingPathOption::kDefault) {
876 PrintRetainingPath(object, option);
877 }
878 }
879 }
880
881 void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
882 if (ephemeron_retainer_.count(object)) return;
883 ephemeron_retainer_[object] = retainer;
884 RetainingPathOption option = RetainingPathOption::kDefault;
885 if (IsRetainingPathTarget(object, &option) &&
886 option == RetainingPathOption::kTrackEphemeronPath) {
887 // Check if the retaining path was already printed in AddRetainer().
888 if (retainer_.count(object) == 0) {
889 PrintRetainingPath(object, option);
890 }
891 }
892 }
893
894 void Heap::AddRetainingRoot(Root root, HeapObject object) {
895 if (retaining_root_.count(object)) return;
896 retaining_root_[object] = root;
897 RetainingPathOption option = RetainingPathOption::kDefault;
898 if (IsRetainingPathTarget(object, &option)) {
899 PrintRetainingPath(object, option);
900 }
901 }
902
903 void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
904 deferred_counters_[feature]++;
905 }
906
907 bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
908
909 void Heap::GarbageCollectionPrologue() {
910 TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
911
912 // Reset GC statistics.
913 promoted_objects_size_ = 0;
914 previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
915 semi_space_copied_object_size_ = 0;
916 nodes_died_in_new_space_ = 0;
917 nodes_copied_in_new_space_ = 0;
918 nodes_promoted_ = 0;
919
920 UpdateMaximumCommitted();
921
922 #ifdef DEBUG
923 DCHECK(!AllowGarbageCollection::IsAllowed());
924 DCHECK_EQ(gc_state(), NOT_IN_GC);
925
926 if (FLAG_gc_verbose) Print();
927 #endif // DEBUG
928
929 if (new_space_ && new_space_->IsAtMaximumCapacity()) {
930 maximum_size_scavenges_++;
931 } else {
932 maximum_size_scavenges_ = 0;
933 }
934 if (FLAG_track_retaining_path) {
935 retainer_.clear();
936 ephemeron_retainer_.clear();
937 retaining_root_.clear();
938 }
939 memory_allocator()->unmapper()->PrepareForGC();
940 }
941
942 void Heap::GarbageCollectionPrologueInSafepoint() {
943 TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE_SAFEPOINT);
944 gc_count_++;
945
946 if (new_space_) {
947 UpdateNewSpaceAllocationCounter();
948 CheckNewSpaceExpansionCriteria();
949 new_space_->ResetParkedAllocationBuffers();
950 }
951 }
952
953 void Heap::UpdateNewSpaceAllocationCounter() {
954 new_space_allocation_counter_ = NewSpaceAllocationCounter();
955 }
956
957 size_t Heap::NewSpaceAllocationCounter() {
958 return new_space_allocation_counter_ +
959 (new_space_ ? new_space()->AllocatedSinceLastGC() : 0);
960 }
961
962 size_t Heap::SizeOfObjects() {
963 size_t total = 0;
964
965 for (SpaceIterator it(this); it.HasNext();) {
966 total += it.Next()->SizeOfObjects();
967 }
968 return total;
969 }
970
971 size_t Heap::TotalGlobalHandlesSize() {
972 return isolate_->global_handles()->TotalSize();
973 }
974
975 size_t Heap::UsedGlobalHandlesSize() {
976 return isolate_->global_handles()->UsedSize();
977 }
978
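// Merges thread-local pretenuring feedback into the global feedback map,
// following forwarding pointers for allocation sites moved by the GC and
// skipping sites that are no longer valid.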
979 void Heap::MergeAllocationSitePretenuringFeedback(
980 const PretenuringFeedbackMap& local_pretenuring_feedback) {
981 PtrComprCageBase cage_base(isolate());
982 AllocationSite site;
983 for (auto& site_and_count : local_pretenuring_feedback) {
984 site = site_and_count.first;
985 MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
986 if (map_word.IsForwardingAddress()) {
987 site = AllocationSite::cast(map_word.ToForwardingAddress());
988 }
989
990 // We have not validated the allocation site yet, since we have not
991 // dereferenced the site while collecting the information.
992 // This is an inlined check of AllocationMemento::IsValid.
993 if (!site.IsAllocationSite() || site.IsZombie()) continue;
994
995 const int value = static_cast<int>(site_and_count.second);
996 DCHECK_LT(0, value);
997 if (site.IncrementMementoFoundCount(value)) {
998 // For sites in the global map the count is accessed through the site.
999 global_pretenuring_feedback_.insert(std::make_pair(site, 0));
1000 }
1001 }
1002 }
1003
1004 void Heap::AddAllocationObserversToAllSpaces(
1005 AllocationObserver* observer, AllocationObserver* new_space_observer) {
1006 DCHECK(observer && new_space_observer);
1007
1008 for (SpaceIterator it(this); it.HasNext();) {
1009 Space* space = it.Next();
1010 if (space == new_space()) {
1011 space->AddAllocationObserver(new_space_observer);
1012 } else {
1013 space->AddAllocationObserver(observer);
1014 }
1015 }
1016 }
1017
1018 void Heap::RemoveAllocationObserversFromAllSpaces(
1019 AllocationObserver* observer, AllocationObserver* new_space_observer) {
1020 DCHECK(observer && new_space_observer);
1021
1022 for (SpaceIterator it(this); it.HasNext();) {
1023 Space* space = it.Next();
1024 if (space == new_space()) {
1025 space->RemoveAllocationObserver(new_space_observer);
1026 } else {
1027 space->RemoveAllocationObserver(observer);
1028 }
1029 }
1030 }
1031
1032 void Heap::PublishPendingAllocations() {
1033 if (FLAG_enable_third_party_heap) return;
1034 if (new_space_) new_space_->MarkLabStartInitialized();
1035 PagedSpaceIterator spaces(this);
1036 for (PagedSpace* space = spaces.Next(); space != nullptr;
1037 space = spaces.Next()) {
1038 space->MoveOriginalTopForward();
1039 }
1040 lo_space_->ResetPendingObject();
1041 if (new_lo_space_) new_lo_space_->ResetPendingObject();
1042 code_lo_space_->ResetPendingObject();
1043 }
1044
1045 namespace {
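// Updates the pretenuring decision of |site| based on the memento hit ratio.
// Returns true when the site transitions into the tenured state, in which
// case code depending on the site needs to be deoptimized.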
1046 inline bool MakePretenureDecision(
1047 AllocationSite site, AllocationSite::PretenureDecision current_decision,
1048 double ratio, bool maximum_size_scavenge) {
1049 // Here we just allow state transitions from undecided or maybe tenure
1050 // to don't tenure, maybe tenure, or tenure.
1051 if ((current_decision == AllocationSite::kUndecided ||
1052 current_decision == AllocationSite::kMaybeTenure)) {
1053 if (ratio >= AllocationSite::kPretenureRatio) {
1054 // We just transition into tenure state when the semi-space was at
1055 // maximum capacity.
1056 if (maximum_size_scavenge) {
1057 site.set_deopt_dependent_code(true);
1058 site.set_pretenure_decision(AllocationSite::kTenure);
1059 // Currently we just need to deopt when we make a state transition to
1060 // tenure.
1061 return true;
1062 }
1063 site.set_pretenure_decision(AllocationSite::kMaybeTenure);
1064 } else {
1065 site.set_pretenure_decision(AllocationSite::kDontTenure);
1066 }
1067 }
1068 return false;
1069 }
1070
1071 // Clear feedback calculation fields until the next gc.
1072 inline void ResetPretenuringFeedback(AllocationSite site) {
1073 site.set_memento_found_count(0);
1074 site.set_memento_create_count(0);
1075 }
1076
1077 inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
1078 bool maximum_size_scavenge) {
1079 bool deopt = false;
1080 int create_count = site.memento_create_count();
1081 int found_count = site.memento_found_count();
1082 bool minimum_mementos_created =
1083 create_count >= AllocationSite::kPretenureMinimumCreated;
1084 double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
1085 ? static_cast<double>(found_count) / create_count
1086 : 0.0;
1087 AllocationSite::PretenureDecision current_decision =
1088 site.pretenure_decision();
1089
1090 if (minimum_mementos_created) {
1091 deopt = MakePretenureDecision(site, current_decision, ratio,
1092 maximum_size_scavenge);
1093 }
1094
1095 if (FLAG_trace_pretenuring_statistics) {
1096 PrintIsolate(isolate,
1097 "pretenuring: AllocationSite(%p): (created, found, ratio) "
1098 "(%d, %d, %f) %s => %s\n",
1099 reinterpret_cast<void*>(site.ptr()), create_count, found_count,
1100 ratio, site.PretenureDecisionName(current_decision),
1101 site.PretenureDecisionName(site.pretenure_decision()));
1102 }
1103
1104 ResetPretenuringFeedback(site);
1105 return deopt;
1106 }
1107
1108 bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
1109 AllocationSite::PretenureDecision current_decision =
1110 site.pretenure_decision();
1111 bool deopt = true;
1112 if (current_decision == AllocationSite::kUndecided ||
1113 current_decision == AllocationSite::kMaybeTenure) {
1114 site.set_deopt_dependent_code(true);
1115 site.set_pretenure_decision(AllocationSite::kTenure);
1116 } else {
1117 deopt = false;
1118 }
1119 if (FLAG_trace_pretenuring_statistics) {
1120 PrintIsolate(isolate,
1121 "pretenuring manually requested: AllocationSite(%p): "
1122 "%s => %s\n",
1123 reinterpret_cast<void*>(site.ptr()),
1124 site.PretenureDecisionName(current_decision),
1125 site.PretenureDecisionName(site.pretenure_decision()));
1126 }
1127
1128 ResetPretenuringFeedback(site);
1129 return deopt;
1130 }
1131
1132 } // namespace
1133
1134 void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
1135 global_pretenuring_feedback_.erase(site);
1136 }
1137
1138 bool Heap::DeoptMaybeTenuredAllocationSites() {
1139 return new_space_ && new_space_->IsAtMaximumCapacity() &&
1140 maximum_size_scavenges_ == 0;
1141 }
1142
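// Digests the pretenuring feedback collected since the last GC, applies
// manually requested pretenuring, and requests deoptimization of code that
// depends on allocation sites whose decision changed.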
1143 void Heap::ProcessPretenuringFeedback() {
1144 bool trigger_deoptimization = false;
1145 if (FLAG_allocation_site_pretenuring) {
1146 int tenure_decisions = 0;
1147 int dont_tenure_decisions = 0;
1148 int allocation_mementos_found = 0;
1149 int allocation_sites = 0;
1150 int active_allocation_sites = 0;
1151
1152 AllocationSite site;
1153
1154 // Step 1: Digest feedback for recorded allocation sites.
1155 bool maximum_size_scavenge = MaximumSizeScavenge();
1156 for (auto& site_and_count : global_pretenuring_feedback_) {
1157 allocation_sites++;
1158 site = site_and_count.first;
1159 // The count is always accessed through the site.
1160 DCHECK_EQ(0, site_and_count.second);
1161 int found_count = site.memento_found_count();
1162 // An entry in the storage does not imply that the count is > 0 because
1163 // allocation sites might have been reset due to too many objects dying
1164 // in old space.
1165 if (found_count > 0) {
1166 DCHECK(site.IsAllocationSite());
1167 active_allocation_sites++;
1168 allocation_mementos_found += found_count;
1169 if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
1170 trigger_deoptimization = true;
1171 }
1172 if (site.GetAllocationType() == AllocationType::kOld) {
1173 tenure_decisions++;
1174 } else {
1175 dont_tenure_decisions++;
1176 }
1177 }
1178 }
1179
1180 // Step 2: Pretenure allocation sites for manual requests.
1181 if (allocation_sites_to_pretenure_) {
1182 while (!allocation_sites_to_pretenure_->empty()) {
1183 auto pretenure_site = allocation_sites_to_pretenure_->Pop();
1184 if (PretenureAllocationSiteManually(isolate_, pretenure_site)) {
1185 trigger_deoptimization = true;
1186 }
1187 }
1188 allocation_sites_to_pretenure_.reset();
1189 }
1190
1191 // Step 3: Deopt maybe tenured allocation sites if necessary.
1192 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
1193 if (deopt_maybe_tenured) {
1194 ForeachAllocationSite(
1195 allocation_sites_list(),
1196 [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
1197 DCHECK(site.IsAllocationSite());
1198 allocation_sites++;
1199 if (site.IsMaybeTenure()) {
1200 site.set_deopt_dependent_code(true);
1201 trigger_deoptimization = true;
1202 }
1203 });
1204 }
1205
1206 if (trigger_deoptimization) {
1207 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1208 }
1209
1210 if (FLAG_trace_pretenuring_statistics &&
1211 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
1212 dont_tenure_decisions > 0)) {
1213 PrintIsolate(isolate(),
1214 "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
1215 "active_sites=%d "
1216 "mementos=%d tenured=%d not_tenured=%d\n",
1217 deopt_maybe_tenured ? 1 : 0, allocation_sites,
1218 active_allocation_sites, allocation_mementos_found,
1219 tenure_decisions, dont_tenure_decisions);
1220 }
1221
1222 global_pretenuring_feedback_.clear();
1223 global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
1224 }
1225 }
1226
1227 void Heap::PretenureAllocationSiteOnNextCollection(AllocationSite site) {
1228 if (!allocation_sites_to_pretenure_) {
1229 allocation_sites_to_pretenure_.reset(
1230 new GlobalHandleVector<AllocationSite>(this));
1231 }
1232 allocation_sites_to_pretenure_->Push(site);
1233 }
1234
1235 void Heap::InvalidateCodeDeoptimizationData(Code code) {
1236 CodePageMemoryModificationScope modification_scope(code);
1237 code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
1238 }
1239
1240 void Heap::DeoptMarkedAllocationSites() {
1241 // TODO(hpayer): If iterating over the allocation sites list becomes a
1242 // performance issue, use a cache data structure in heap instead.
1243
1244 ForeachAllocationSite(allocation_sites_list(), [](AllocationSite site) {
1245 if (site.deopt_dependent_code()) {
1246 site.dependent_code().MarkCodeForDeoptimization(
1247 DependentCode::kAllocationSiteTenuringChangedGroup);
1248 site.set_deopt_dependent_code(false);
1249 }
1250 });
1251
1252 Deoptimizer::DeoptimizeMarkedCode(isolate_);
1253 }
1254
1255 void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
1256 if (collector == GarbageCollector::MARK_COMPACTOR) {
1257 memory_pressure_level_.store(MemoryPressureLevel::kNone,
1258 std::memory_order_relaxed);
1259 }
1260
1261 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
1262
1263 safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
1264 local_heap->InvokeGCEpilogueCallbacksInSafepoint();
1265 });
1266
1267 #define UPDATE_COUNTERS_FOR_SPACE(space) \
1268 isolate_->counters()->space##_bytes_available()->Set( \
1269 static_cast<int>(space()->Available())); \
1270 isolate_->counters()->space##_bytes_committed()->Set( \
1271 static_cast<int>(space()->CommittedMemory())); \
1272 isolate_->counters()->space##_bytes_used()->Set( \
1273 static_cast<int>(space()->SizeOfObjects()));
1274 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
1275 if (space()->CommittedMemory() > 0) { \
1276 isolate_->counters()->external_fragmentation_##space()->AddSample( \
1277 static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) / \
1278 space()->CommittedMemory())); \
1279 }
1280 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
1281 UPDATE_COUNTERS_FOR_SPACE(space) \
1282 UPDATE_FRAGMENTATION_FOR_SPACE(space)
1283
1284 if (new_space()) {
1285 UPDATE_COUNTERS_FOR_SPACE(new_space)
1286 }
1287
1288 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
1289 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
1290 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
1291 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
1292 #undef UPDATE_COUNTERS_FOR_SPACE
1293 #undef UPDATE_FRAGMENTATION_FOR_SPACE
1294 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
1295
1296 #ifdef DEBUG
1297 // Old-to-new slot sets must be empty after each collection.
1298 for (SpaceIterator it(this); it.HasNext();) {
1299 Space* space = it.Next();
1300
1301 for (MemoryChunk* chunk = space->first_page(); chunk != space->last_page();
1302 chunk = chunk->list_node().next())
1303 DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
1304 }
1305
1306 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
1307 if (FLAG_print_handles) PrintHandles();
1308 if (FLAG_code_stats) ReportCodeStatistics("After GC");
1309 if (FLAG_check_handle_count) CheckHandleCount();
1310 #endif
1311
1312 if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
1313 ZapFromSpace();
1314 }
1315
1316 if (new_space()) {
1317 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
1318 ReduceNewSpaceSize();
1319 }
1320
1321 // Set main thread state back to Running from CollectionRequested.
1322 LocalHeap::ThreadState old_state =
1323 main_thread_local_heap()->state_.exchange(LocalHeap::kRunning);
1324
1325 CHECK(old_state == LocalHeap::kRunning ||
1326 old_state == LocalHeap::kSafepointRequested);
1327
1328 // Resume all threads waiting for the GC.
1329 collection_barrier_->ResumeThreadsAwaitingCollection();
1330 }
1331
1332 void Heap::GarbageCollectionEpilogue() {
1333 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
1334 AllowGarbageCollection for_the_rest_of_the_epilogue;
1335
1336 UpdateMaximumCommitted();
1337
1338 isolate_->counters()->alive_after_last_gc()->Set(
1339 static_cast<int>(SizeOfObjects()));
1340
1341 isolate_->counters()->string_table_capacity()->Set(
1342 isolate()->string_table()->Capacity());
1343 isolate_->counters()->number_of_symbols()->Set(
1344 isolate()->string_table()->NumberOfElements());
1345
1346 if (CommittedMemory() > 0) {
1347 isolate_->counters()->external_fragmentation_total()->AddSample(
1348 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
1349
1350 isolate_->counters()->heap_sample_total_committed()->AddSample(
1351 static_cast<int>(CommittedMemory() / KB));
1352 isolate_->counters()->heap_sample_total_used()->AddSample(
1353 static_cast<int>(SizeOfObjects() / KB));
1354 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
1355 static_cast<int>(map_space()->CommittedMemory() / KB));
1356 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
1357 static_cast<int>(code_space()->CommittedMemory() / KB));
1358
1359 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
1360 static_cast<int>(MaximumCommittedMemory() / KB));
1361 }
1362
1363 #ifdef DEBUG
1364 ReportStatisticsAfterGC();
1365 #endif // DEBUG
1366
1367 last_gc_time_ = MonotonicallyIncreasingTimeInMs();
1368 }
1369
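// Tracks the nesting depth of GC callback invocations; CheckReenter() returns
// true only for the outermost scope.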
1370 class V8_NODISCARD GCCallbacksScope {
1371 public:
1372 explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
1373 heap_->gc_callbacks_depth_++;
1374 }
1375 ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
1376
1377 bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
1378
1379 private:
1380 Heap* heap_;
1381 };
1382
1383 void Heap::HandleGCRequest() {
1384 if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
1385 CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
1386 stress_scavenge_observer_->RequestedGCDone();
1387 } else if (HighMemoryPressure()) {
1388 incremental_marking()->reset_request_type();
1389 CheckMemoryPressure();
1390 } else if (CollectionRequested()) {
1391 CheckCollectionRequested();
1392 } else if (incremental_marking()->request_type() ==
1393 IncrementalMarking::COMPLETE_MARKING) {
1394 incremental_marking()->reset_request_type();
1395 CollectAllGarbage(current_gc_flags_,
1396 GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
1397 current_gc_callback_flags_);
1398 } else if (incremental_marking()->request_type() ==
1399 IncrementalMarking::FINALIZATION &&
1400 incremental_marking()->IsMarking() &&
1401 !incremental_marking()->finalize_marking_completed()) {
1402 incremental_marking()->reset_request_type();
1403 FinalizeIncrementalMarkingIncrementally(
1404 GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
1405 }
1406 }
1407
1408 void Heap::ScheduleScavengeTaskIfNeeded() {
1409 DCHECK_NOT_NULL(scavenge_job_);
1410 scavenge_job_->ScheduleTaskIfNeeded(this);
1411 }
1412
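// Selects the timed histogram for this GC based on the collector type,
// whether the isolate runs in the background, and whether memory reduction
// was requested.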
1413 TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
1414 if (IsYoungGenerationCollector(collector)) {
1415 if (isolate_->IsIsolateInBackground()) {
1416 return isolate_->counters()->gc_scavenger_background();
1417 }
1418 return isolate_->counters()->gc_scavenger_foreground();
1419 } else {
1420 if (!incremental_marking()->IsStopped()) {
1421 if (ShouldReduceMemory()) {
1422 if (isolate_->IsIsolateInBackground()) {
1423 return isolate_->counters()->gc_finalize_reduce_memory_background();
1424 }
1425 return isolate_->counters()->gc_finalize_reduce_memory_foreground();
1426 } else {
1427 if (isolate_->IsIsolateInBackground()) {
1428 return isolate_->counters()->gc_finalize_background();
1429 }
1430 return isolate_->counters()->gc_finalize_foreground();
1431 }
1432 } else {
1433 if (isolate_->IsIsolateInBackground()) {
1434 return isolate_->counters()->gc_compactor_background();
1435 }
1436 return isolate_->counters()->gc_compactor_foreground();
1437 }
1438 }
1439 }
1440
1441 TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
1442 if (IsYoungGenerationCollector(collector)) {
1443 return isolate_->counters()->gc_scavenger();
1444 }
1445 if (incremental_marking()->IsStopped()) {
1446 return isolate_->counters()->gc_compactor();
1447 }
1448 if (ShouldReduceMemory()) {
1449 return isolate_->counters()->gc_finalize_reduce_memory();
1450 }
1451 if (incremental_marking()->IsMarking() &&
1452 incremental_marking()->local_marking_worklists()->IsPerContextMode()) {
1453 return isolate_->counters()->gc_finalize_measure_memory();
1454 }
1455 return isolate_->counters()->gc_finalize();
1456 }
1457
1458 void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
1459 const v8::GCCallbackFlags gc_callback_flags) {
1460 // Since we are ignoring the return value, the exact choice of space does
1461 // not matter, so long as we do not specify NEW_SPACE, which would not
1462 // cause a full GC.
1463 set_current_gc_flags(flags);
1464 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
1465 set_current_gc_flags(kNoGCFlags);
1466 }
1467
1468 namespace {
1469
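// Compares two equally sized heap objects word by word and returns the
// difference of the first pair of tagged words that differ (0 if identical).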
1470 intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
1471 int slots = size / kTaggedSize;
1472 DCHECK_EQ(a.Size(), size);
1473 DCHECK_EQ(b.Size(), size);
1474 Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a.address());
1475 Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b.address());
1476 for (int i = 0; i < slots; i++) {
1477 if (*slot_a != *slot_b) {
1478 return *slot_a - *slot_b;
1479 }
1480 slot_a++;
1481 slot_b++;
1482 }
1483 return 0;
1484 }
1485
1486 void ReportDuplicates(int size, std::vector<HeapObject>* objects) {
1487 if (objects->size() == 0) return;
1488
1489 sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) {
1490 intptr_t c = CompareWords(size, a, b);
1491 if (c != 0) return c < 0;
1492 return a < b;
1493 });
1494
1495 std::vector<std::pair<int, HeapObject>> duplicates;
1496 HeapObject current = (*objects)[0];
1497 int count = 1;
1498 for (size_t i = 1; i < objects->size(); i++) {
1499 if (CompareWords(size, current, (*objects)[i]) == 0) {
1500 count++;
1501 } else {
1502 if (count > 1) {
1503 duplicates.push_back(std::make_pair(count - 1, current));
1504 }
1505 count = 1;
1506 current = (*objects)[i];
1507 }
1508 }
1509 if (count > 1) {
1510 duplicates.push_back(std::make_pair(count - 1, current));
1511 }
1512
1513 int threshold = FLAG_trace_duplicate_threshold_kb * KB;
1514
1515 sort(duplicates.begin(), duplicates.end());
1516 for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
1517 int duplicate_bytes = it->first * size;
1518 if (duplicate_bytes < threshold) break;
1519 PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
1520 duplicate_bytes / KB);
1521 PrintF("Sample object: ");
1522 it->second.Print();
1523 PrintF("============================\n");
1524 }
1525 }
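// Illustrative arithmetic for the report above (numbers are made up): with
// --trace-duplicate-threshold-kb=1 and size = 64 bytes, a run of 17
// byte-identical objects is recorded as (17 - 1) = 16 duplicates, i.e.
// 16 * 64 = 1024 bytes. That meets the 1KB threshold, so it is printed as
// "16 duplicates of size 64 each (1KB)" followed by one sample object.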
1526 } // anonymous namespace
1527
1528 void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
1529 // Since we are ignoring the return value, the exact choice of space does
1530 // not matter, so long as we do not specify NEW_SPACE, which would not
1531 // cause a full GC.
1532 // A major GC would invoke weak handle callbacks on weakly reachable
1533 // handles, but won't collect weakly reachable objects until the next
1534 // major GC. Therefore, if we collect aggressively and a weak handle
1535 // callback has been invoked, we rerun the major GC to release objects
1536 // which have become garbage.
1537 // Note: as weak callbacks can execute arbitrary code, we cannot hope
1538 // that eventually there will be no weak callback invocations.
1539 // Therefore stop recollecting after several attempts.
1540 if (gc_reason == GarbageCollectionReason::kLastResort) {
1541 InvokeNearHeapLimitCallback();
1542 }
1543 RCS_SCOPE(isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
1544
1545 // The optimizing compiler may be unnecessarily holding on to memory.
1546 isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1547 isolate()->ClearSerializerData();
1548 set_current_gc_flags(
1549 kReduceMemoryFootprintMask |
1550 (gc_reason == GarbageCollectionReason::kLowMemoryNotification ? kForcedGC
1551 : 0));
1552 isolate_->compilation_cache()->Clear();
1553 const int kMaxNumberOfAttempts = 7;
1554 const int kMinNumberOfAttempts = 2;
1555 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
1556 if (!CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags) &&
1557 attempt + 1 >= kMinNumberOfAttempts) {
1558 break;
1559 }
1560 }
1561
1562 set_current_gc_flags(kNoGCFlags);
1563 EagerlyFreeExternalMemory();
1564
1565 if (FLAG_trace_duplicate_threshold_kb) {
1566 std::map<int, std::vector<HeapObject>> objects_by_size;
1567 PagedSpaceIterator spaces(this);
1568 for (PagedSpace* space = spaces.Next(); space != nullptr;
1569 space = spaces.Next()) {
1570 PagedSpaceObjectIterator it(this, space);
1571 for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
1572 objects_by_size[obj.Size()].push_back(obj);
1573 }
1574 }
1575 {
1576 LargeObjectSpaceObjectIterator it(lo_space());
1577 for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
1578 objects_by_size[obj.Size()].push_back(obj);
1579 }
1580 }
1581 for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
1582 ++it) {
1583 ReportDuplicates(it->first, &it->second);
1584 }
1585 }
1586 }
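// Sketch of the retry policy above: CollectGarbage() reports whether weak
// handle callbacks freed anything, so the loop always runs at least
// kMinNumberOfAttempts (2) full collections and then stops at the first
// attempt that frees no global handles, capped at kMaxNumberOfAttempts (7)
// because weak callbacks may keep producing new garbage indefinitely.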
1587
1588 void Heap::PreciseCollectAllGarbage(int flags,
1589 GarbageCollectionReason gc_reason,
1590 const GCCallbackFlags gc_callback_flags) {
1591 if (!incremental_marking()->IsStopped()) {
1592 FinalizeIncrementalMarkingAtomically(gc_reason);
1593 }
1594 CollectAllGarbage(flags, gc_reason, gc_callback_flags);
1595 }
1596
1597 void Heap::ReportExternalMemoryPressure() {
1598 const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
1599 static_cast<GCCallbackFlags>(
1600 kGCCallbackFlagSynchronousPhantomCallbackProcessing |
1601 kGCCallbackFlagCollectAllExternalMemory);
1602 int64_t current = external_memory_.total();
1603 int64_t baseline = external_memory_.low_since_mark_compact();
1604 int64_t limit = external_memory_.limit();
1605 TRACE_EVENT2(
1606 "devtools.timeline,v8", "V8.ExternalMemoryPressure", "external_memory_mb",
1607 static_cast<int>((current - baseline) / MB), "external_memory_limit_mb",
1608 static_cast<int>((limit - baseline) / MB));
1609 if (current > baseline + external_memory_hard_limit()) {
1610 CollectAllGarbage(
1611 kReduceMemoryFootprintMask,
1612 GarbageCollectionReason::kExternalMemoryPressure,
1613 static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
1614 kGCCallbackFlagsForExternalMemory));
1615 return;
1616 }
1617 if (incremental_marking()->IsStopped()) {
1618 if (incremental_marking()->CanBeActivated()) {
1619 StartIncrementalMarking(GCFlagsForIncrementalMarking(),
1620 GarbageCollectionReason::kExternalMemoryPressure,
1621 kGCCallbackFlagsForExternalMemory);
1622 } else {
1623 CollectAllGarbage(i::Heap::kNoGCFlags,
1624 GarbageCollectionReason::kExternalMemoryPressure,
1625 kGCCallbackFlagsForExternalMemory);
1626 }
1627 } else {
1628 // Incremental marking is turned on and has already been started.
1629 const double kMinStepSize = 5;
1630 const double kMaxStepSize = 10;
1631 const double ms_step = std::min(
1632 kMaxStepSize, std::max(kMinStepSize, static_cast<double>(current) /
1633 limit * kMinStepSize));
1634 const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
1635 // Extend the gc callback flags with external memory flags.
1636 current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
1637 current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
1638 incremental_marking()->AdvanceWithDeadline(
1639 deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
1640 }
1641 }
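// Illustrative step-size math for the incremental-marking branch above
// (ratios are made up): ms_step is
//   min(kMaxStepSize, max(kMinStepSize, current / limit * kMinStepSize)),
// so a fill ratio current/limit of 0.6 gives min(10, max(5, 3)) = 5 ms, a
// ratio of 1.4 gives 7 ms, and ratios of 2.0 or more saturate at the 10 ms
// cap: the marking deadline grows with external pressure but stays bounded.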
1642
1643 int64_t Heap::external_memory_limit() { return external_memory_.limit(); }
1644
1645 void Heap::EnsureFillerObjectAtTop() {
1646 // There may be an allocation memento behind objects in new space. Upon
1647 // evacuation of a non-full new space (or if we are on the last page) there
1648 // may be uninitialized memory behind top. We fill the remainder of the page
1649 // with a filler.
1650 if (!new_space_) return;
1651 Address to_top = new_space_->top();
1652 Page* page = Page::FromAddress(to_top - kTaggedSize);
1653 if (page->Contains(to_top)) {
1654 int remaining_in_page = static_cast<int>(page->area_end() - to_top);
1655 CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
1656 }
1657 }
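// Illustrative example (sizes are made up): if top sits 4 KB before the
// page's area_end(), remaining_in_page is 4096 and a 4 KB filler is placed
// behind top so heap iterators never walk into uninitialized memory. If top
// is exactly at the end of its page, the page looked up via top - kTaggedSize
// no longer contains top and no filler is needed.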
1658
1659 Heap::DevToolsTraceEventScope::DevToolsTraceEventScope(Heap* heap,
1660 const char* event_name,
1661 const char* event_type)
1662 : heap_(heap), event_name_(event_name) {
1663 TRACE_EVENT_BEGIN2("devtools.timeline,v8", event_name_, "usedHeapSizeBefore",
1664 heap_->SizeOfObjects(), "type", event_type);
1665 }
1666
1667 Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
1668 TRACE_EVENT_END1("devtools.timeline,v8", event_name_, "usedHeapSizeAfter",
1669 heap_->SizeOfObjects());
1670 }
1671
1672 bool Heap::CollectGarbage(AllocationSpace space,
1673 GarbageCollectionReason gc_reason,
1674 const v8::GCCallbackFlags gc_callback_flags) {
1675 if (V8_UNLIKELY(!deserialization_complete_)) {
1676 // During isolate initialization the heap always grows. GC is only requested
1677 // if a new page allocation fails. In such a case we should crash with
1678 // an out-of-memory instead of performing GC because the prologue/epilogue
1679 // callbacks may see objects that are not yet deserialized.
1680 CHECK(always_allocate());
1681 FatalProcessOutOfMemory("GC during deserialization");
1682 }
1683 const char* collector_reason = nullptr;
1684 GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
1685 is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
1686 current_gc_flags_ & kForcedGC ||
1687 force_gc_on_next_allocation_;
1688 is_current_gc_for_heap_profiler_ =
1689 gc_reason == GarbageCollectionReason::kHeapProfiler;
1690 if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
1691
1692 DevToolsTraceEventScope devtools_trace_event_scope(
1693 this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
1694 GarbageCollectionReasonToString(gc_reason));
1695
1696 // Filter on-stack references below this method.
1697 isolate()
1698 ->global_handles()
1699 ->CleanupOnStackReferencesBelowCurrentStackPosition();
1700
1701 // Ensure that all pending phantom callbacks are invoked.
1702 isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
1703
1704 // The VM is in the GC state until exiting this function.
1705 VMState<GC> state(isolate());
1706
1707 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1708 // Reset the allocation timeout, but make sure to allow at least a few
1709 // allocations after a collection. The reason for this is that we have a lot
1710 // of allocation sequences and we assume that a garbage collection will allow
1711 // the subsequent allocation attempts to go through.
1712 if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
1713 allocation_timeout_ =
1714 std::max(6, NextAllocationTimeout(allocation_timeout_));
1715 }
1716 #endif
1717
1718 EnsureFillerObjectAtTop();
1719
1720 if (IsYoungGenerationCollector(collector) &&
1721 !incremental_marking()->IsStopped()) {
1722 if (FLAG_trace_incremental_marking) {
1723 isolate()->PrintWithTimestamp(
1724 "[IncrementalMarking] Scavenge during marking.\n");
1725 }
1726 }
1727
1728 size_t freed_global_handles = 0;
1729
1730 size_t committed_memory_before = 0;
1731
1732 if (collector == GarbageCollector::MARK_COMPACTOR) {
1733 committed_memory_before = CommittedOldGenerationMemory();
1734 if (cpp_heap()) {
1735 // CppHeap needs a stack marker at the top of all entry points to allow
1736 // deterministic passes over the stack. E.g., a verifier that should only
1737 // find a subset of references of the marker.
1738 //
1739 // TODO(chromium:1056170): Consider adding a component that keeps track
1740 // of relevant GC stack regions where interesting pointers can be found.
1741 static_cast<v8::internal::CppHeap*>(cpp_heap())
1742 ->SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
1743 }
1744 }
1745
1746 {
1747 tracer()->Start(collector, gc_reason, collector_reason);
1748 DCHECK(AllowGarbageCollection::IsAllowed());
1749 DisallowGarbageCollection no_gc_during_gc;
1750 GarbageCollectionPrologue();
1751
1752 {
1753 TimedHistogram* gc_type_timer = GCTypeTimer(collector);
1754 TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
1755 TRACE_EVENT0("v8", gc_type_timer->name());
1756
1757 TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
1758 OptionalTimedHistogramScopeMode mode =
1759 isolate_->IsMemorySavingsModeActive()
1760 ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
1761 : OptionalTimedHistogramScopeMode::TAKE_TIME;
1762 OptionalTimedHistogramScope histogram_timer_priority_scope(
1763 gc_type_priority_timer, isolate_, mode);
1764
1765 if (!IsYoungGenerationCollector(collector)) {
1766 PROFILE(isolate_, CodeMovingGCEvent());
1767 }
1768
1769 GCType gc_type = collector == GarbageCollector::MARK_COMPACTOR
1770 ? kGCTypeMarkSweepCompact
1771 : kGCTypeScavenge;
1772 {
1773 GCCallbacksScope scope(this);
1774 // Temporarily override any embedder stack state as callbacks may create
1775 // their own state on the stack and recursively trigger GC.
1776 EmbedderStackStateScope embedder_scope(
1777 local_embedder_heap_tracer(),
1778 EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
1779 if (scope.CheckReenter()) {
1780 AllowGarbageCollection allow_gc;
1781 AllowJavascriptExecution allow_js(isolate());
1782 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
1783 VMState<EXTERNAL> callback_state(isolate_);
1784 HandleScope handle_scope(isolate_);
1785 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1786 }
1787 }
1788
1789 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
1790 tp_heap_->CollectGarbage();
1791 } else {
1792 freed_global_handles +=
1793 PerformGarbageCollection(collector, gc_callback_flags);
1794 }
1795 // Clear flags describing the current GC now that the current GC is
1796 // complete. Do this before GarbageCollectionEpilogue() since that could
1797 // trigger another unforced GC.
1798 is_current_gc_forced_ = false;
1799 is_current_gc_for_heap_profiler_ = false;
1800
1801 {
1802 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
1803 gc_post_processing_depth_++;
1804 {
1805 AllowGarbageCollection allow_gc;
1806 AllowJavascriptExecution allow_js(isolate());
1807 freed_global_handles +=
1808 isolate_->global_handles()->PostGarbageCollectionProcessing(
1809 collector, gc_callback_flags);
1810 }
1811 gc_post_processing_depth_--;
1812 }
1813
1814 {
1815 GCCallbacksScope scope(this);
1816 if (scope.CheckReenter()) {
1817 AllowGarbageCollection allow_gc;
1818 AllowJavascriptExecution allow_js(isolate());
1819 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
1820 VMState<EXTERNAL> callback_state(isolate_);
1821 HandleScope handle_scope(isolate_);
1822 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1823 }
1824 }
1825 if (collector == GarbageCollector::MARK_COMPACTOR ||
1826 collector == GarbageCollector::SCAVENGER) {
1827 tracer()->RecordGCPhasesHistograms(gc_type_timer);
1828 }
1829 }
1830
1831 GarbageCollectionEpilogue();
1832 if (collector == GarbageCollector::MARK_COMPACTOR &&
1833 FLAG_track_detached_contexts) {
1834 isolate()->CheckDetachedContextsAfterGC();
1835 }
1836
1837 if (collector == GarbageCollector::MARK_COMPACTOR) {
1838 // Calculate used memory first, then committed memory. Following code
1839 // assumes that committed >= used, which might not hold when this is
1840 // calculated in the wrong order and background threads allocate
1841 // in-between.
1842 size_t used_memory_after = OldGenerationSizeOfObjects();
1843 size_t committed_memory_after = CommittedOldGenerationMemory();
1844 MemoryReducer::Event event;
1845 event.type = MemoryReducer::kMarkCompact;
1846 event.time_ms = MonotonicallyIncreasingTimeInMs();
1847 // Trigger one more GC if
1848 // - this GC decreased committed memory,
1849 // - there is high fragmentation,
1850 event.next_gc_likely_to_collect_more =
1851 (committed_memory_before > committed_memory_after + MB) ||
1852 HasHighFragmentation(used_memory_after, committed_memory_after);
1853 event.committed_memory = committed_memory_after;
1854 if (deserialization_complete_) {
1855 memory_reducer_->NotifyMarkCompact(event);
1856 }
1857 if (initial_max_old_generation_size_ < max_old_generation_size() &&
1858 used_memory_after < initial_max_old_generation_size_threshold_) {
1859 set_max_old_generation_size(initial_max_old_generation_size_);
1860 }
1861 }
1862
1863 tracer()->Stop(collector);
1864 }
1865
1866 if (collector == GarbageCollector::MARK_COMPACTOR &&
1867 (gc_callback_flags & (kGCCallbackFlagForced |
1868 kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1869 isolate()->CountUsage(v8::Isolate::kForcedGC);
1870 }
1871
1872 // Start incremental marking for the next cycle. We do this only for the
1873 // scavenger to avoid a loop where mark-compact causes another mark-compact.
1874 if (IsYoungGenerationCollector(collector)) {
1875 StartIncrementalMarkingIfAllocationLimitIsReached(
1876 GCFlagsForIncrementalMarking(),
1877 kGCCallbackScheduleIdleGarbageCollection);
1878 }
1879
1880 if (!CanExpandOldGeneration(0)) {
1881 InvokeNearHeapLimitCallback();
1882 if (!CanExpandOldGeneration(0)) {
1883 FatalProcessOutOfMemory("Reached heap limit");
1884 }
1885 }
1886
1887 return freed_global_handles > 0;
1888 }
1889
1890 int Heap::NotifyContextDisposed(bool dependant_context) {
1891 if (!dependant_context) {
1892 tracer()->ResetSurvivalEvents();
1893 old_generation_size_configured_ = false;
1894 set_old_generation_allocation_limit(initial_old_generation_size_);
1895 MemoryReducer::Event event;
1896 event.type = MemoryReducer::kPossibleGarbage;
1897 event.time_ms = MonotonicallyIncreasingTimeInMs();
1898 memory_reducer_->NotifyPossibleGarbage(event);
1899 }
1900 isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1901 if (!isolate()->context().is_null()) {
1902 RemoveDirtyFinalizationRegistriesOnContext(isolate()->raw_native_context());
1903 isolate()->raw_native_context().set_retained_maps(
1904 ReadOnlyRoots(this).empty_weak_array_list());
1905 }
1906 return ++contexts_disposed_;
1907 }
1908
1909 void Heap::StartIncrementalMarking(int gc_flags,
1910 GarbageCollectionReason gc_reason,
1911 GCCallbackFlags gc_callback_flags) {
1912 DCHECK(incremental_marking()->IsStopped());
1913
1914 // Sweeping needs to be completed such that markbits are all cleared before
1915 // starting marking again.
1916 CompleteSweepingFull();
1917 if (cpp_heap()) {
1918 CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
1919 }
1920
1921 SafepointScope safepoint(this);
1922
1923 #ifdef DEBUG
1924 VerifyCountersAfterSweeping();
1925 #endif
1926
1927 // Now that sweeping is completed, we can update the current epoch for the new
1928 // full collection.
1929 UpdateEpochFull();
1930
1931 set_current_gc_flags(gc_flags);
1932 current_gc_callback_flags_ = gc_callback_flags;
1933 incremental_marking()->Start(gc_reason);
1934 }
1935
1936 void Heap::CompleteSweepingFull() {
1937 array_buffer_sweeper()->EnsureFinished();
1938 mark_compact_collector()->EnsureSweepingCompleted();
1939 DCHECK(!mark_compact_collector()->sweeping_in_progress());
1940 }
1941
1942 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
1943 int gc_flags, const GCCallbackFlags gc_callback_flags) {
1944 if (incremental_marking()->IsStopped()) {
1945 switch (IncrementalMarkingLimitReached()) {
1946 case IncrementalMarkingLimit::kHardLimit:
1947 StartIncrementalMarking(
1948 gc_flags,
1949 OldGenerationSpaceAvailable() <= NewSpaceCapacity()
1950 ? GarbageCollectionReason::kAllocationLimit
1951 : GarbageCollectionReason::kGlobalAllocationLimit,
1952 gc_callback_flags);
1953 break;
1954 case IncrementalMarkingLimit::kSoftLimit:
1955 incremental_marking()->incremental_marking_job()->ScheduleTask(this);
1956 break;
1957 case IncrementalMarkingLimit::kFallbackForEmbedderLimit:
1958 // This is a fallback case where no appropriate limits have been
1959 // configured yet.
1960 MemoryReducer::Event event;
1961 event.type = MemoryReducer::kPossibleGarbage;
1962 event.time_ms = MonotonicallyIncreasingTimeInMs();
1963 memory_reducer()->NotifyPossibleGarbage(event);
1964 break;
1965 case IncrementalMarkingLimit::kNoLimit:
1966 break;
1967 }
1968 }
1969 }
1970
1971 void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
1972 if (!incremental_marking()->IsStopped() ||
1973 !incremental_marking()->CanBeActivated()) {
1974 return;
1975 }
1976
1977 const size_t old_generation_space_available = OldGenerationSpaceAvailable();
1978
1979 if (old_generation_space_available < NewSpaceCapacity()) {
1980 incremental_marking()->incremental_marking_job()->ScheduleTask(this);
1981 }
1982 }
1983
1984 void Heap::StartIdleIncrementalMarking(
1985 GarbageCollectionReason gc_reason,
1986 const GCCallbackFlags gc_callback_flags) {
1987 StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
1988 gc_callback_flags);
1989 }
1990
1991 void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
1992 const ObjectSlot src_slot, int len,
1993 WriteBarrierMode mode) {
1994 DCHECK_NE(len, 0);
1995 DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
1996 const ObjectSlot dst_end(dst_slot + len);
1997 // Ensure no range overflow.
1998 DCHECK(dst_slot < dst_end);
1999 DCHECK(src_slot < src_slot + len);
2000
2001 if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
2002 if (dst_slot < src_slot) {
2003 // Copy tagged values forward using relaxed load/stores that do not
2004 // involve value decompression.
2005 const AtomicSlot atomic_dst_end(dst_end);
2006 AtomicSlot dst(dst_slot);
2007 AtomicSlot src(src_slot);
2008 while (dst < atomic_dst_end) {
2009 *dst = *src;
2010 ++dst;
2011 ++src;
2012 }
2013 } else {
2014 // Copy tagged values backwards using relaxed load/stores that do not
2015 // involve value decompression.
2016 const AtomicSlot atomic_dst_begin(dst_slot);
2017 AtomicSlot dst(dst_slot + len - 1);
2018 AtomicSlot src(src_slot + len - 1);
2019 while (dst >= atomic_dst_begin) {
2020 *dst = *src;
2021 --dst;
2022 --src;
2023 }
2024 }
2025 } else {
2026 MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
2027 }
2028 if (mode == SKIP_WRITE_BARRIER) return;
2029 WriteBarrierForRange(dst_object, dst_slot, dst_end);
2030 }
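// Why the copy direction above matters (illustrative sketch of an overlapping
// move of three tagged slots within one array): shifting slots [1..3] down to
// index 0 is safe front-to-back, while shifting slots [0..2] up to index 1
// must run back-to-front, otherwise slot 1 would be overwritten before it is
// read. When concurrent marking is off, MemMove already provides these
// overlap semantics.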
2031
2032 // Instantiate Heap::CopyRange() for ObjectSlot and MaybeObjectSlot.
2033 template void Heap::CopyRange<ObjectSlot>(HeapObject dst_object,
2034 ObjectSlot dst_slot,
2035 ObjectSlot src_slot, int len,
2036 WriteBarrierMode mode);
2037 template void Heap::CopyRange<MaybeObjectSlot>(HeapObject dst_object,
2038 MaybeObjectSlot dst_slot,
2039 MaybeObjectSlot src_slot,
2040 int len, WriteBarrierMode mode);
2041
2042 template <typename TSlot>
2043 void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
2044 const TSlot src_slot, int len, WriteBarrierMode mode) {
2045 DCHECK_NE(len, 0);
2046
2047 DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
2048 const TSlot dst_end(dst_slot + len);
2049 // Ensure ranges do not overlap.
2050 DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
2051
2052 if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
2053 // Copy tagged values using relaxed load/stores that do not involve value
2054 // decompression.
2055 const AtomicSlot atomic_dst_end(dst_end);
2056 AtomicSlot dst(dst_slot);
2057 AtomicSlot src(src_slot);
2058 while (dst < atomic_dst_end) {
2059 *dst = *src;
2060 ++dst;
2061 ++src;
2062 }
2063 } else {
2064 MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
2065 }
2066 if (mode == SKIP_WRITE_BARRIER) return;
2067 WriteBarrierForRange(dst_object, dst_slot, dst_end);
2068 }
2069
2070 #ifdef VERIFY_HEAP
2071 // Helper class for verifying the string table.
2072 class StringTableVerifier : public RootVisitor {
2073 public:
2074 explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
2075
2076 void VisitRootPointers(Root root, const char* description,
2077 FullObjectSlot start, FullObjectSlot end) override {
2078 UNREACHABLE();
2079 }
2080 void VisitRootPointers(Root root, const char* description,
2081 OffHeapObjectSlot start,
2082 OffHeapObjectSlot end) override {
2083 // Visit all HeapObject pointers in [start, end).
2084 for (OffHeapObjectSlot p = start; p < end; ++p) {
2085 Object o = p.load(isolate_);
2086 DCHECK(!HasWeakHeapObjectTag(o));
2087 if (o.IsHeapObject()) {
2088 HeapObject object = HeapObject::cast(o);
2089 // Check that the string is actually internalized.
2090 CHECK(object.IsInternalizedString());
2091 }
2092 }
2093 }
2094
2095 private:
2096 Isolate* isolate_;
2097 };
2098
2099 static void VerifyStringTable(Isolate* isolate) {
2100 StringTableVerifier verifier(isolate);
2101 isolate->string_table()->IterateElements(&verifier);
2102 }
2103 #endif // VERIFY_HEAP
2104
2105 void Heap::EnsureFromSpaceIsCommitted() {
2106 if (!new_space_) return;
2107 if (new_space_->CommitFromSpaceIfNeeded()) return;
2108
2109 // Committing memory to from space failed.
2110 // Memory is exhausted and we will die.
2111 FatalProcessOutOfMemory("Committing semi space failed.");
2112 }
2113
2114 bool Heap::CollectionRequested() {
2115 return collection_barrier_->WasGCRequested();
2116 }
2117
2118 void Heap::CollectGarbageForBackground(LocalHeap* local_heap) {
2119 CHECK(local_heap->is_main_thread());
2120 CollectAllGarbage(current_gc_flags_,
2121 GarbageCollectionReason::kBackgroundAllocationFailure,
2122 current_gc_callback_flags_);
2123 }
2124
2125 void Heap::CheckCollectionRequested() {
2126 if (!CollectionRequested()) return;
2127
2128 CollectAllGarbage(current_gc_flags_,
2129 GarbageCollectionReason::kBackgroundAllocationFailure,
2130 current_gc_callback_flags_);
2131 }
2132
2133 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
2134 if (start_new_space_size == 0) return;
2135
2136 promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
2137 static_cast<double>(start_new_space_size) * 100);
2138
2139 if (previous_semi_space_copied_object_size_ > 0) {
2140 promotion_rate_ =
2141 (static_cast<double>(promoted_objects_size_) /
2142 static_cast<double>(previous_semi_space_copied_object_size_) * 100);
2143 } else {
2144 promotion_rate_ = 0;
2145 }
2146
2147 semi_space_copied_rate_ =
2148 (static_cast<double>(semi_space_copied_object_size_) /
2149 static_cast<double>(start_new_space_size) * 100);
2150
2151 double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
2152 tracer()->AddSurvivalRatio(survival_rate);
2153 }
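// Worked example for the ratios above (numbers are made up): with an 8 MB
// young generation at GC start, 2 MB promoted and 1 MB copied within the
// semi-spaces, promotion_ratio_ = 2 / 8 * 100 = 25%, semi_space_copied_rate_
// = 1 / 8 * 100 = 12.5%, and the survival ratio added to the tracer is 37.5%.
// promotion_rate_ is instead normalized by the previous cycle's copied size.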
2154
2155 namespace {
2156 GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
2157 switch (collector) {
2158 case GarbageCollector::MARK_COMPACTOR:
2159 return GCTracer::Scope::ScopeId::MARK_COMPACTOR;
2160 case GarbageCollector::MINOR_MARK_COMPACTOR:
2161 return GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR;
2162 case GarbageCollector::SCAVENGER:
2163 return GCTracer::Scope::ScopeId::SCAVENGER;
2164 }
2165 UNREACHABLE();
2166 }
2167 } // namespace
2168
2169 size_t Heap::PerformGarbageCollection(
2170 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
2171 DisallowJavascriptExecution no_js(isolate());
2172
2173 if (IsYoungGenerationCollector(collector)) {
2174 CompleteSweepingYoung(collector);
2175 } else {
2176 DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
2177 CompleteSweepingFull();
2178 if (cpp_heap()) {
2179 CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
2180 }
2181 }
2182
2183 // The last GC cycle is done after completing sweeping. Start the next GC
2184 // cycle.
2185 UpdateCurrentEpoch(collector);
2186
2187 TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
2188
2189 SafepointScope safepoint_scope(this);
2190
2191 collection_barrier_->StopTimeToCollectionTimer();
2192
2193 #ifdef VERIFY_HEAP
2194 if (FLAG_verify_heap) {
2195 Verify();
2196 }
2197 #endif
2198
2199 tracer()->StartInSafepoint();
2200
2201 GarbageCollectionPrologueInSafepoint();
2202
2203 EnsureFromSpaceIsCommitted();
2204
2205 size_t start_young_generation_size =
2206 NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
2207
2208 switch (collector) {
2209 case GarbageCollector::MARK_COMPACTOR:
2210 MarkCompact();
2211 break;
2212 case GarbageCollector::MINOR_MARK_COMPACTOR:
2213 MinorMarkCompact();
2214 break;
2215 case GarbageCollector::SCAVENGER:
2216 Scavenge();
2217 break;
2218 }
2219
2220 ProcessPretenuringFeedback();
2221
2222 UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
2223 ConfigureInitialOldGenerationSize();
2224
2225 if (collector != GarbageCollector::MARK_COMPACTOR) {
2226 // Objects that died in the new space might have been accounted
2227 // as bytes marked ahead of schedule by the incremental marker.
2228 incremental_marking()->UpdateMarkedBytesAfterScavenge(
2229 start_young_generation_size - SurvivedYoungObjectSize());
2230 }
2231
2232 if (!fast_promotion_mode_ || collector == GarbageCollector::MARK_COMPACTOR) {
2233 ComputeFastPromotionMode();
2234 }
2235
2236 isolate_->counters()->objs_since_last_young()->Set(0);
2237
2238 isolate_->eternal_handles()->PostGarbageCollectionProcessing();
2239
2240 // Update relocatables.
2241 Relocatable::PostGarbageCollectionProcessing(isolate_);
2242
2243 size_t freed_global_handles;
2244
2245 {
2246 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
2247 // First round weak callbacks are not supposed to allocate and trigger
2248 // nested GCs.
2249 freed_global_handles =
2250 isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
2251 }
2252
2253 if (collector == GarbageCollector::MARK_COMPACTOR) {
2254 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
2255 // TraceEpilogue may trigger operations that invalidate global handles. It
2256 // has to be called *after* all other operations that potentially touch and
2257 // reset global handles. It is also still part of the main garbage
2258 // collection pause and thus needs to be called *before* any operation that
2259 // can potentially trigger recursive garbage collections.
2260 local_embedder_heap_tracer()->TraceEpilogue();
2261 }
2262
2263 #ifdef VERIFY_HEAP
2264 if (FLAG_verify_heap) {
2265 Verify();
2266 }
2267 #endif
2268
2269 RecomputeLimits(collector);
2270
2271 GarbageCollectionEpilogueInSafepoint(collector);
2272
2273 tracer()->StopInSafepoint();
2274
2275 return freed_global_handles;
2276 }
2277
2278 void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
2279 DCHECK(!IsShared());
2280 DCHECK_NOT_NULL(isolate()->shared_isolate());
2281
2282 isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
2283 isolate(), gc_reason);
2284 }
2285
2286 void Heap::PerformSharedGarbageCollection(Isolate* initiator,
2287 GarbageCollectionReason gc_reason) {
2288 DCHECK(IsShared());
2289 base::MutexGuard guard(isolate()->client_isolate_mutex());
2290
2291 const char* collector_reason = nullptr;
2292 GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
2293
2294 tracer()->Start(collector, gc_reason, collector_reason);
2295
2296 isolate()->IterateClientIsolates([initiator](Isolate* client) {
2297 DCHECK_NOT_NULL(client->shared_isolate());
2298 Heap* client_heap = client->heap();
2299
2300 GlobalSafepoint::StopMainThread stop_main_thread =
2301 initiator == client ? GlobalSafepoint::StopMainThread::kNo
2302 : GlobalSafepoint::StopMainThread::kYes;
2303
2304 client_heap->safepoint()->EnterSafepointScope(stop_main_thread);
2305
2306 client_heap->shared_old_allocator_->FreeLinearAllocationArea();
2307 client_heap->shared_map_allocator_->FreeLinearAllocationArea();
2308 });
2309
2310 PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
2311
2312 isolate()->IterateClientIsolates([initiator](Isolate* client) {
2313 GlobalSafepoint::StopMainThread stop_main_thread =
2314 initiator == client ? GlobalSafepoint::StopMainThread::kNo
2315 : GlobalSafepoint::StopMainThread::kYes;
2316 client->heap()->safepoint()->LeaveSafepointScope(stop_main_thread);
2317 });
2318
2319 tracer()->Stop(collector);
2320 }
2321
2322 void Heap::CompleteSweepingYoung(GarbageCollector collector) {
2323 GCTracer::Scope::ScopeId scope_id;
2324
2325 switch (collector) {
2326 case GarbageCollector::MINOR_MARK_COMPACTOR:
2327 scope_id = GCTracer::Scope::MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS;
2328 break;
2329 case GarbageCollector::SCAVENGER:
2330 scope_id = GCTracer::Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS;
2331 break;
2332 default:
2333 UNREACHABLE();
2334 }
2335
2336 TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
2337 array_buffer_sweeper()->EnsureFinished();
2338 }
2339
2340 void Heap::EnsureSweepingCompleted(HeapObject object) {
2341 if (!mark_compact_collector()->sweeping_in_progress()) return;
2342
2343 BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);
2344 if (basic_chunk->InReadOnlySpace()) return;
2345
2346 MemoryChunk* chunk = MemoryChunk::cast(basic_chunk);
2347 if (chunk->SweepingDone()) return;
2348
2349 // SweepingDone() is always true for large pages.
2350 DCHECK(!chunk->IsLargePage());
2351
2352 Page* page = Page::cast(chunk);
2353 mark_compact_collector()->EnsurePageIsSwept(page);
2354 }
2355
2356 void Heap::UpdateCurrentEpoch(GarbageCollector collector) {
2357 if (IsYoungGenerationCollector(collector)) {
2358 epoch_young_ = next_epoch();
2359 } else if (incremental_marking()->IsStopped()) {
2360 epoch_full_ = next_epoch();
2361 }
2362 }
2363
2364 void Heap::UpdateEpochFull() { epoch_full_ = next_epoch(); }
2365
2366 void Heap::RecomputeLimits(GarbageCollector collector) {
2367 if (!((collector == GarbageCollector::MARK_COMPACTOR) ||
2368 (HasLowYoungGenerationAllocationRate() &&
2369 old_generation_size_configured_))) {
2370 return;
2371 }
2372
2373 double v8_gc_speed =
2374 tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
2375 double v8_mutator_speed =
2376 tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
2377 double v8_growing_factor = MemoryController<V8HeapTrait>::GrowingFactor(
2378 this, max_old_generation_size(), v8_gc_speed, v8_mutator_speed);
2379 double global_growing_factor = 0;
2380 if (UseGlobalMemoryScheduling()) {
2381 DCHECK_NOT_NULL(local_embedder_heap_tracer());
2382 double embedder_gc_speed = tracer()->EmbedderSpeedInBytesPerMillisecond();
2383 double embedder_speed =
2384 tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond();
2385 double embedder_growing_factor =
2386 (embedder_gc_speed > 0 && embedder_speed > 0)
2387 ? MemoryController<GlobalMemoryTrait>::GrowingFactor(
2388 this, max_global_memory_size_, embedder_gc_speed,
2389 embedder_speed)
2390 : 0;
2391 global_growing_factor =
2392 std::max(v8_growing_factor, embedder_growing_factor);
2393 }
2394
2395 size_t old_gen_size = OldGenerationSizeOfObjects();
2396 size_t new_space_capacity = NewSpaceCapacity();
2397 HeapGrowingMode mode = CurrentHeapGrowingMode();
2398
2399 if (collector == GarbageCollector::MARK_COMPACTOR) {
2400 external_memory_.ResetAfterGC();
2401
2402 set_old_generation_allocation_limit(
2403 MemoryController<V8HeapTrait>::CalculateAllocationLimit(
2404 this, old_gen_size, min_old_generation_size_,
2405 max_old_generation_size(), new_space_capacity, v8_growing_factor,
2406 mode));
2407 if (UseGlobalMemoryScheduling()) {
2408 DCHECK_GT(global_growing_factor, 0);
2409 global_allocation_limit_ =
2410 MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
2411 this, GlobalSizeOfObjects(), min_global_memory_size_,
2412 max_global_memory_size_, new_space_capacity,
2413 global_growing_factor, mode);
2414 }
2415 CheckIneffectiveMarkCompact(
2416 old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
2417 } else if (HasLowYoungGenerationAllocationRate() &&
2418 old_generation_size_configured_) {
2419 size_t new_old_generation_limit =
2420 MemoryController<V8HeapTrait>::CalculateAllocationLimit(
2421 this, old_gen_size, min_old_generation_size_,
2422 max_old_generation_size(), new_space_capacity, v8_growing_factor,
2423 mode);
2424 if (new_old_generation_limit < old_generation_allocation_limit()) {
2425 set_old_generation_allocation_limit(new_old_generation_limit);
2426 }
2427 if (UseGlobalMemoryScheduling()) {
2428 DCHECK_GT(global_growing_factor, 0);
2429 size_t new_global_limit =
2430 MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
2431 this, GlobalSizeOfObjects(), min_global_memory_size_,
2432 max_global_memory_size_, new_space_capacity,
2433 global_growing_factor, mode);
2434 if (new_global_limit < global_allocation_limit_) {
2435 global_allocation_limit_ = new_global_limit;
2436 }
2437 }
2438 }
2439 }
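// Rough intuition for the limits above (a sketch, not the exact
// MemoryController formula): the next old-generation allocation limit is on
// the order of the live old-generation size scaled by the growing factor,
// with headroom for the new-space capacity and clamped between
// min_old_generation_size_ and max_old_generation_size(). E.g. 100 MB of
// live objects and a factor of 1.5 put the next limit near 150 MB. On the
// scavenge path the recomputed limit is only applied if it is lower than the
// current one.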
2440
2441 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
2442 RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCPrologueCallback);
2443 for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
2444 if (gc_type & info.gc_type) {
2445 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
2446 info.callback(isolate, gc_type, flags, info.data);
2447 }
2448 }
2449 }
2450
2451 void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
2452 RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
2453 for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
2454 if (gc_type & info.gc_type) {
2455 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
2456 info.callback(isolate, gc_type, flags, info.data);
2457 }
2458 }
2459 }
2460
2461 void Heap::MarkCompact() {
2462 PauseAllocationObserversScope pause_observers(this);
2463
2464 SetGCState(MARK_COMPACT);
2465
2466 LOG(isolate_, ResourceEvent("markcompact", "begin"));
2467
2468 CodeSpaceMemoryModificationScope code_modifcation(this);
2469
2470 UpdateOldGenerationAllocationCounter();
2471 uint64_t size_of_objects_before_gc = SizeOfObjects();
2472
2473 mark_compact_collector()->Prepare();
2474
2475 ms_count_++;
2476 contexts_disposed_ = 0;
2477
2478 MarkCompactPrologue();
2479
2480 mark_compact_collector()->CollectGarbage();
2481
2482 LOG(isolate_, ResourceEvent("markcompact", "end"));
2483
2484 MarkCompactEpilogue();
2485
2486 if (FLAG_allocation_site_pretenuring) {
2487 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
2488 }
2489 old_generation_size_configured_ = true;
2490 // This should be updated before PostGarbageCollectionProcessing, which
2491 // can cause another GC. Take into account the objects promoted during
2492 // GC.
2493 old_generation_allocation_counter_at_last_gc_ +=
2494 static_cast<size_t>(promoted_objects_size_);
2495 old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
2496 global_memory_at_last_gc_ = GlobalSizeOfObjects();
2497 }
2498
2499 void Heap::MinorMarkCompact() {
2500 #ifdef ENABLE_MINOR_MC
2501 DCHECK(FLAG_minor_mc);
2502 DCHECK(new_space());
2503
2504 PauseAllocationObserversScope pause_observers(this);
2505 SetGCState(MINOR_MARK_COMPACT);
2506 LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
2507
2508 TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
2509 AlwaysAllocateScope always_allocate(this);
2510 IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
2511 incremental_marking());
2512 ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
2513
2514 minor_mark_compact_collector()->CollectGarbage();
2515
2516 LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
2517 SetGCState(NOT_IN_GC);
2518 #else
2519 UNREACHABLE();
2520 #endif // ENABLE_MINOR_MC
2521 }
2522
2523 void Heap::MarkCompactEpilogue() {
2524 TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
2525 SetGCState(NOT_IN_GC);
2526
2527 isolate_->counters()->objs_since_last_full()->Set(0);
2528
2529 incremental_marking()->Epilogue();
2530
2531 DCHECK(incremental_marking()->IsStopped());
2532 }
2533
2534 void Heap::MarkCompactPrologue() {
2535 TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
2536 isolate_->descriptor_lookup_cache()->Clear();
2537 RegExpResultsCache::Clear(string_split_cache());
2538 RegExpResultsCache::Clear(regexp_multiple_cache());
2539
2540 isolate_->compilation_cache()->MarkCompactPrologue();
2541
2542 FlushNumberStringCache();
2543 }
2544
2545 void Heap::CheckNewSpaceExpansionCriteria() {
2546 if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
2547 survived_since_last_expansion_ > new_space_->TotalCapacity()) {
2548 // Grow the size of new space if there is room to grow, and enough data
2549 // has survived scavenge since the last expansion.
2550 new_space_->Grow();
2551 survived_since_last_expansion_ = 0;
2552 }
2553 new_lo_space()->SetCapacity(new_space()->Capacity());
2554 }
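// Illustrative example of the growth criterion above (sizes are made up):
// with a TotalCapacity() of 2 MB and 3 MB of objects having survived
// scavenges since the last expansion, new space is grown (up to
// MaximumCapacity()) and the survival counter is reset. The young large
// object space capacity is then kept in sync with the new-space capacity.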
2555
2556 void Heap::EvacuateYoungGeneration() {
2557 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
2558 base::MutexGuard guard(relocation_mutex());
2559 ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
2560 if (!FLAG_concurrent_marking) {
2561 DCHECK(fast_promotion_mode_);
2562 DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
2563 }
2564
2565 mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
2566
2567 SetGCState(SCAVENGE);
2568 LOG(isolate_, ResourceEvent("scavenge", "begin"));
2569
2570 // Move pages from new->old generation.
2571 PageRange range(new_space()->first_allocatable_address(), new_space()->top());
2572 for (auto it = range.begin(); it != range.end();) {
2573 Page* p = (*++it)->prev_page();
2574 new_space()->from_space().RemovePage(p);
2575 Page::ConvertNewToOld(p);
2576 if (incremental_marking()->IsMarking())
2577 mark_compact_collector()->RecordLiveSlotsOnPage(p);
2578 }
2579
2580 // Reset new space.
2581 if (!new_space()->Rebalance()) {
2582 FatalProcessOutOfMemory("NewSpace::Rebalance");
2583 }
2584 new_space()->ResetLinearAllocationArea();
2585 new_space()->set_age_mark(new_space()->top());
2586
2587 for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
2588 LargePage* page = *it;
2589 // Increment has to happen after we save the page, because it is going to
2590 // be removed below.
2591 it++;
2592 lo_space()->PromoteNewLargeObject(page);
2593 }
2594
2595 // Fix up special trackers.
2596 external_string_table_.PromoteYoung();
2597 // GlobalHandles are updated in PostGarbageCollectionProcessing.
2598
2599 size_t promoted = new_space()->Size() + new_lo_space()->Size();
2600 IncrementYoungSurvivorsCounter(promoted);
2601 IncrementPromotedObjectsSize(promoted);
2602 IncrementSemiSpaceCopiedObjectSize(0);
2603
2604 LOG(isolate_, ResourceEvent("scavenge", "end"));
2605 SetGCState(NOT_IN_GC);
2606 }
2607
2608 void Heap::Scavenge() {
2609 DCHECK_NOT_NULL(new_space());
2610
2611 if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
2612 tracer()->NotifyYoungGenerationHandling(
2613 YoungGenerationHandling::kFastPromotionDuringScavenge);
2614 EvacuateYoungGeneration();
2615 return;
2616 }
2617 tracer()->NotifyYoungGenerationHandling(
2618 YoungGenerationHandling::kRegularScavenge);
2619
2620 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
2621 base::MutexGuard guard(relocation_mutex());
2622 ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
2623 // There are soft limits in the allocation code, designed to trigger a mark
2624 // sweep collection by failing allocations. There is no sense in trying to
2625 // trigger one during scavenge: scavenge allocations should always succeed.
2626 AlwaysAllocateScope scope(this);
2627
2628 // Bump-pointer allocations done during scavenge are not real allocations.
2629 // Pause the inline allocation steps.
2630 PauseAllocationObserversScope pause_observers(this);
2631 IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
2632 incremental_marking());
2633
2634 mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
2635
2636 SetGCState(SCAVENGE);
2637
2638 // Flip the semispaces. After flipping, to space is empty, from space has
2639 // live objects.
2640 new_space()->Flip();
2641 new_space()->ResetLinearAllocationArea();
2642
2643 // We also flip the young generation large object space. All large objects
2644 // will be in the from space.
2645 new_lo_space()->Flip();
2646 new_lo_space()->ResetPendingObject();
2647
2648 // Implements Cheney's copying algorithm
2649 LOG(isolate_, ResourceEvent("scavenge", "begin"));
2650
2651 scavenger_collector_->CollectGarbage();
2652
2653 LOG(isolate_, ResourceEvent("scavenge", "end"));
2654
2655 SetGCState(NOT_IN_GC);
2656 }
2657
2658 void Heap::ComputeFastPromotionMode() {
2659 if (!new_space_) return;
2660
2661 const size_t survived_in_new_space =
2662 survived_last_scavenge_ * 100 / NewSpaceCapacity();
2663 fast_promotion_mode_ =
2664 !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
2665 !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
2666 survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
2667
2668 if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
2669 PrintIsolate(isolate(), "Fast promotion mode: %s survival rate: %zu%%\n",
2670 fast_promotion_mode_ ? "true" : "false",
2671 survived_in_new_space);
2672 }
2673 }
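// Worked example (numbers are made up): if roughly 1.8 MB survived the last
// scavenge and NewSpaceCapacity() is 2 MB, survived_in_new_space is about
// 90%. Fast promotion is then enabled only if that percentage reaches
// kMinPromotedPercentForFastPromotionMode and the remaining conditions hold:
// --fast-promotion-new-space, new space at maximum capacity, not optimizing
// for size, and not trying to reduce memory.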
2674
2675 void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
2676 UnprotectMemoryOrigin origin) {
2677 if (unprotected_memory_chunks_registry_enabled_) {
2678 base::Optional<base::MutexGuard> guard;
2679 if (origin != UnprotectMemoryOrigin::kMainThread) {
2680 guard.emplace(&unprotected_memory_chunks_mutex_);
2681 }
2682 if (unprotected_memory_chunks_.insert(chunk).second) {
2683 chunk->SetCodeModificationPermissions();
2684 }
2685 }
2686 }
2687
2688 void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object,
2689 UnprotectMemoryOrigin origin) {
2690 UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object), origin);
2691 }
2692
2693 void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
2694 unprotected_memory_chunks_.erase(chunk);
2695 }
2696
2697 void Heap::ProtectUnprotectedMemoryChunks() {
2698 DCHECK(unprotected_memory_chunks_registry_enabled_);
2699 for (auto chunk = unprotected_memory_chunks_.begin();
2700 chunk != unprotected_memory_chunks_.end(); chunk++) {
2701 CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
2702 (*chunk)->SetDefaultCodePermissions();
2703 }
2704 unprotected_memory_chunks_.clear();
2705 }
2706
2707 bool Heap::ExternalStringTable::Contains(String string) {
2708 for (size_t i = 0; i < young_strings_.size(); ++i) {
2709 if (young_strings_[i] == string) return true;
2710 }
2711 for (size_t i = 0; i < old_strings_.size(); ++i) {
2712 if (old_strings_[i] == string) return true;
2713 }
2714 return false;
2715 }
2716
2717 void Heap::UpdateExternalString(String string, size_t old_payload,
2718 size_t new_payload) {
2719 DCHECK(string.IsExternalString());
2720 if (FLAG_enable_third_party_heap) return;
2721
2722 Page* page = Page::FromHeapObject(string);
2723
2724 if (old_payload > new_payload) {
2725 page->DecrementExternalBackingStoreBytes(
2726 ExternalBackingStoreType::kExternalString, old_payload - new_payload);
2727 } else {
2728 page->IncrementExternalBackingStoreBytes(
2729 ExternalBackingStoreType::kExternalString, new_payload - old_payload);
2730 }
2731 }
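// Example of the accounting above (sizes are made up): shrinking an external
// string's payload from 4096 to 1024 bytes decrements the owning page's
// kExternalString backing-store counter by 3072 bytes, while growing it from
// 1024 to 4096 bytes increments that counter by the same amount.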
2732
2733 String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
2734 FullObjectSlot p) {
2735 PtrComprCageBase cage_base(heap->isolate());
2736 HeapObject obj = HeapObject::cast(*p);
2737 MapWord first_word = obj.map_word(cage_base, kRelaxedLoad);
2738
2739 String new_string;
2740
2741 if (InFromPage(obj)) {
2742 if (!first_word.IsForwardingAddress()) {
2743 // Unreachable external string can be finalized.
2744 String string = String::cast(obj);
2745 if (!string.IsExternalString(cage_base)) {
2746 // Original external string has been internalized.
2747 DCHECK(string.IsThinString(cage_base));
2748 return String();
2749 }
2750 heap->FinalizeExternalString(string);
2751 return String();
2752 }
2753 new_string = String::cast(first_word.ToForwardingAddress());
2754 } else {
2755 new_string = String::cast(obj);
2756 }
2757
2758 // String is still reachable.
2759 if (new_string.IsThinString(cage_base)) {
2760 // Filtering Thin strings out of the external string table.
2761 return String();
2762 } else if (new_string.IsExternalString(cage_base)) {
2763 MemoryChunk::MoveExternalBackingStoreBytes(
2764 ExternalBackingStoreType::kExternalString,
2765 Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
2766 ExternalString::cast(new_string).ExternalPayloadSize());
2767 return new_string;
2768 }
2769
2770 // Internalization can replace external strings with non-external strings.
2771 return new_string.IsExternalString(cage_base) ? new_string : String();
2772 }
2773
2774 void Heap::ExternalStringTable::VerifyYoung() {
2775 #ifdef DEBUG
2776 std::set<String> visited_map;
2777 std::map<MemoryChunk*, size_t> size_map;
2778 ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2779 for (size_t i = 0; i < young_strings_.size(); ++i) {
2780 String obj = String::cast(young_strings_[i]);
2781 MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2782 DCHECK(mc->InYoungGeneration());
2783 DCHECK(heap_->InYoungGeneration(obj));
2784 DCHECK(!obj.IsTheHole(heap_->isolate()));
2785 DCHECK(obj.IsExternalString());
2786 // Note: we can have repeated elements in the table.
2787 DCHECK_EQ(0, visited_map.count(obj));
2788 visited_map.insert(obj);
2789 size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize();
2790 }
2791 for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2792 it != size_map.end(); it++)
2793 DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2794 #endif
2795 }
2796
2797 void Heap::ExternalStringTable::Verify() {
2798 #ifdef DEBUG
2799 std::set<String> visited_map;
2800 std::map<MemoryChunk*, size_t> size_map;
2801 ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2802 VerifyYoung();
2803 for (size_t i = 0; i < old_strings_.size(); ++i) {
2804 String obj = String::cast(old_strings_[i]);
2805 MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2806 DCHECK(!mc->InYoungGeneration());
2807 DCHECK(!heap_->InYoungGeneration(obj));
2808 DCHECK(!obj.IsTheHole(heap_->isolate()));
2809 DCHECK(obj.IsExternalString());
2810 // Note: we can have repeated elements in the table.
2811 DCHECK_EQ(0, visited_map.count(obj));
2812 visited_map.insert(obj);
2813 size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize();
2814 }
2815 for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2816 it != size_map.end(); it++)
2817 DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2818 #endif
2819 }
2820
2821 void Heap::ExternalStringTable::UpdateYoungReferences(
2822 Heap::ExternalStringTableUpdaterCallback updater_func) {
2823 if (young_strings_.empty()) return;
2824
2825 FullObjectSlot start(young_strings_.data());
2826 FullObjectSlot end(young_strings_.data() + young_strings_.size());
2827 FullObjectSlot last = start;
2828
2829 for (FullObjectSlot p = start; p < end; ++p) {
2830 String target = updater_func(heap_, p);
2831
2832 if (target.is_null()) continue;
2833
2834 DCHECK(target.IsExternalString());
2835
2836 if (InYoungGeneration(target)) {
2837 // String is still in new space. Update the table entry.
2838 last.store(target);
2839 ++last;
2840 } else {
2841 // String got promoted. Move it to the old string list.
2842 old_strings_.push_back(target);
2843 }
2844 }
2845
2846 DCHECK(last <= end);
2847 young_strings_.resize(last - start);
2848 #ifdef VERIFY_HEAP
2849 if (FLAG_verify_heap) {
2850 VerifyYoung();
2851 }
2852 #endif
2853 }
2854
2855 void Heap::ExternalStringTable::PromoteYoung() {
2856 old_strings_.reserve(old_strings_.size() + young_strings_.size());
2857 std::move(std::begin(young_strings_), std::end(young_strings_),
2858 std::back_inserter(old_strings_));
2859 young_strings_.clear();
2860 }
2861
2862 void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
2863 if (!young_strings_.empty()) {
2864 v->VisitRootPointers(
2865 Root::kExternalStringsTable, nullptr,
2866 FullObjectSlot(young_strings_.data()),
2867 FullObjectSlot(young_strings_.data() + young_strings_.size()));
2868 }
2869 }
2870
2871 void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
2872 IterateYoung(v);
2873 if (!old_strings_.empty()) {
2874 v->VisitRootPointers(
2875 Root::kExternalStringsTable, nullptr,
2876 FullObjectSlot(old_strings_.data()),
2877 FullObjectSlot(old_strings_.data() + old_strings_.size()));
2878 }
2879 }
2880
2881 void Heap::UpdateYoungReferencesInExternalStringTable(
2882 ExternalStringTableUpdaterCallback updater_func) {
2883 external_string_table_.UpdateYoungReferences(updater_func);
2884 }
2885
2886 void Heap::ExternalStringTable::UpdateReferences(
2887 Heap::ExternalStringTableUpdaterCallback updater_func) {
2888 if (old_strings_.size() > 0) {
2889 FullObjectSlot start(old_strings_.data());
2890 FullObjectSlot end(old_strings_.data() + old_strings_.size());
2891 for (FullObjectSlot p = start; p < end; ++p)
2892 p.store(updater_func(heap_, p));
2893 }
2894
2895 UpdateYoungReferences(updater_func);
2896 }
2897
2898 void Heap::UpdateReferencesInExternalStringTable(
2899 ExternalStringTableUpdaterCallback updater_func) {
2900 external_string_table_.UpdateReferences(updater_func);
2901 }
2902
2903 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
2904 ProcessNativeContexts(retainer);
2905 ProcessAllocationSites(retainer);
2906 ProcessDirtyJSFinalizationRegistries(retainer);
2907 }
2908
2909 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
2910 ProcessNativeContexts(retainer);
2911 }
2912
2913 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
2914 Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
2915 // Update the head of the list of contexts.
2916 set_native_contexts_list(head);
2917 }
2918
2919 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
2920 Object allocation_site_obj =
2921 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
2922 set_allocation_sites_list(allocation_site_obj);
2923 }
2924
2925 void Heap::ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer* retainer) {
2926 Object head = VisitWeakList<JSFinalizationRegistry>(
2927 this, dirty_js_finalization_registries_list(), retainer);
2928 set_dirty_js_finalization_registries_list(head);
2929 // If the list is empty, set the tail to undefined. Otherwise the tail is set
2930 // by WeakListVisitor<JSFinalizationRegistry>::VisitLiveObject.
2931 if (head.IsUndefined(isolate())) {
2932 set_dirty_js_finalization_registries_list_tail(head);
2933 }
2934 }
2935
2936 void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
2937 set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
2938 set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
2939 set_dirty_js_finalization_registries_list(
2940 retainer->RetainAs(dirty_js_finalization_registries_list()));
2941 set_dirty_js_finalization_registries_list_tail(
2942 retainer->RetainAs(dirty_js_finalization_registries_list_tail()));
2943 }
2944
2945 void Heap::ForeachAllocationSite(
2946 Object list, const std::function<void(AllocationSite)>& visitor) {
2947 DisallowGarbageCollection no_gc;
2948 Object current = list;
2949 while (current.IsAllocationSite()) {
2950 AllocationSite site = AllocationSite::cast(current);
2951 visitor(site);
2952 Object current_nested = site.nested_site();
2953 while (current_nested.IsAllocationSite()) {
2954 AllocationSite nested_site = AllocationSite::cast(current_nested);
2955 visitor(nested_site);
2956 current_nested = nested_site.nested_site();
2957 }
2958 current = site.weak_next();
2959 }
2960 }
2961
2962 void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
2963 DisallowGarbageCollection no_gc_scope;
2964 bool marked = false;
2965
2966 ForeachAllocationSite(allocation_sites_list(),
2967 [&marked, allocation, this](AllocationSite site) {
2968 if (site.GetAllocationType() == allocation) {
2969 site.ResetPretenureDecision();
2970 site.set_deopt_dependent_code(true);
2971 marked = true;
2972 RemoveAllocationSitePretenuringFeedback(site);
2973 return;
2974 }
2975 });
2976 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
2977 }
2978
2979 void Heap::EvaluateOldSpaceLocalPretenuring(
2980 uint64_t size_of_objects_before_gc) {
2981 uint64_t size_of_objects_after_gc = SizeOfObjects();
2982 double old_generation_survival_rate =
2983 (static_cast<double>(size_of_objects_after_gc) * 100) /
2984 static_cast<double>(size_of_objects_before_gc);
2985
2986 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2987 // Too many objects died in the old generation; pretenuring of the wrong
2988 // allocation sites may be the cause. We have to deopt all
2989 // dependent code registered in the allocation sites to re-evaluate
2990 // our pretenuring decisions.
2991 ResetAllAllocationSitesDependentCode(AllocationType::kOld);
2992 if (FLAG_trace_pretenuring) {
2993 PrintF(
2994 "Deopt all allocation sites dependent code due to low survival "
2995 "rate in the old generation %f\n",
2996 old_generation_survival_rate);
2997 }
2998 }
2999 }
3000
3001 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
3002 DisallowGarbageCollection no_gc;
3003 // All external strings are listed in the external string table.
3004
3005 class ExternalStringTableVisitorAdapter : public RootVisitor {
3006 public:
3007 explicit ExternalStringTableVisitorAdapter(
3008 Isolate* isolate, v8::ExternalResourceVisitor* visitor)
3009 : isolate_(isolate), visitor_(visitor) {}
3010 void VisitRootPointers(Root root, const char* description,
3011 FullObjectSlot start, FullObjectSlot end) override {
3012 for (FullObjectSlot p = start; p < end; ++p) {
3013 DCHECK((*p).IsExternalString());
3014 visitor_->VisitExternalString(
3015 Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
3016 }
3017 }
3018
3019 private:
3020 Isolate* isolate_;
3021 v8::ExternalResourceVisitor* visitor_;
3022 } external_string_table_visitor(isolate(), visitor);
3023
3024 external_string_table_.IterateAll(&external_string_table_visitor);
3025 }
3026
3027 STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment));
3028
3029 #ifdef V8_COMPRESS_POINTERS
3030 // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
3031 // is only kTaggedSize aligned but we can keep using unaligned access since
3032 // both x64 and arm64 architectures (where pointer compression is supported)
3033 // allow unaligned access to doubles.
3034 STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize));
3035 #else
3036 STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
3037 #endif
3038
3039 #ifdef V8_HOST_ARCH_32_BIT
3040 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
3041 #endif
3042
3043 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
3044 switch (alignment) {
3045 case kWordAligned:
3046 return 0;
3047 case kDoubleAligned:
3048 case kDoubleUnaligned:
3049 return kDoubleSize - kTaggedSize;
3050 default:
3051 UNREACHABLE();
3052 }
3053 }
3054
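// Returns the number of filler bytes needed in front of an allocation at
// |address| so that it satisfies |alignment|. For example, with 4-byte tagged
// words a kDoubleAligned request at an address that is 4 modulo 8 needs
// kTaggedSize bytes of fill, while a kDoubleUnaligned request at an already
// double-aligned address needs kDoubleSize - kTaggedSize bytes.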
3055 // static
3056 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
3057 if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
3058 return kTaggedSize;
3059 if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
3060 return kDoubleSize - kTaggedSize; // No fill if double is always aligned.
3061 return 0;
3062 }
3063
3064 size_t Heap::GetCodeRangeReservedAreaSize() {
3065 return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
3066 }
3067
3068 // static
3069 HeapObject Heap::PrecedeWithFiller(ReadOnlyRoots roots, HeapObject object,
3070 int filler_size) {
3071 CreateFillerObjectAt(roots, object.address(), filler_size,
3072 ClearFreedMemoryMode::kDontClearFreedMemory);
3073 return HeapObject::FromAddress(object.address() + filler_size);
3074 }
3075
3076 // static
3077 HeapObject Heap::AlignWithFiller(ReadOnlyRoots roots, HeapObject object,
3078 int object_size, int allocation_size,
3079 AllocationAlignment alignment) {
3080 int filler_size = allocation_size - object_size;
3081 DCHECK_LT(0, filler_size);
3082 int pre_filler = GetFillToAlign(object.address(), alignment);
3083 if (pre_filler) {
3084 object = PrecedeWithFiller(roots, object, pre_filler);
3085 filler_size -= pre_filler;
3086 }
3087 if (filler_size) {
3088 CreateFillerObjectAt(roots, object.address() + object_size, filler_size,
3089 ClearFreedMemoryMode::kDontClearFreedMemory);
3090 }
3091 return object;
3092 }
3093
3094 void* Heap::AllocateExternalBackingStore(
3095 const std::function<void*(size_t)>& allocate, size_t byte_length) {
3096 if (!always_allocate() && new_space()) {
3097 size_t new_space_backing_store_bytes =
3098 new_space()->ExternalBackingStoreBytes();
3099 if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
3100 new_space_backing_store_bytes >= byte_length) {
3101 // Performing a young generation GC amortizes over the allocated backing
3102 // store bytes and may free enough external bytes for this allocation.
3103 CollectGarbage(NEW_SPACE,
3104 GarbageCollectionReason::kExternalMemoryPressure);
3105 }
3106 }
3107 void* result = allocate(byte_length);
3108 if (result) return result;
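// The first attempt failed. Try to reclaim external memory with up to two
// full GCs, then fall back to collecting all available garbage before the
// final allocation attempt below.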
3109 if (!always_allocate()) {
3110 for (int i = 0; i < 2; i++) {
3111 CollectGarbage(OLD_SPACE,
3112 GarbageCollectionReason::kExternalMemoryPressure);
3113 result = allocate(byte_length);
3114 if (result) return result;
3115 }
3116 isolate()->counters()->gc_last_resort_from_handles()->Increment();
3117 CollectAllAvailableGarbage(
3118 GarbageCollectionReason::kExternalMemoryPressure);
3119 }
3120 return allocate(byte_length);
3121 }
3122
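// Shrinks the initial old generation allocation limit towards the current
// live size scaled by the average survival ratio. The limit is only lowered
// here; once the computed value would no longer shrink it, the initial old
// generation size is considered configured.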
3123 void Heap::ConfigureInitialOldGenerationSize() {
3124 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
3125 const size_t minimum_growing_step =
3126 MemoryController<V8HeapTrait>::MinimumAllocationLimitGrowingStep(
3127 CurrentHeapGrowingMode());
3128 const size_t new_old_generation_allocation_limit =
3129 std::max(OldGenerationSizeOfObjects() + minimum_growing_step,
3130 static_cast<size_t>(
3131 static_cast<double>(old_generation_allocation_limit()) *
3132 (tracer()->AverageSurvivalRatio() / 100)));
3133 if (new_old_generation_allocation_limit <
3134 old_generation_allocation_limit()) {
3135 set_old_generation_allocation_limit(new_old_generation_allocation_limit);
3136 } else {
3137 old_generation_size_configured_ = true;
3138 }
3139 if (UseGlobalMemoryScheduling()) {
3140 const size_t new_global_memory_limit = std::max(
3141 GlobalSizeOfObjects() + minimum_growing_step,
3142 static_cast<size_t>(static_cast<double>(global_allocation_limit_) *
3143 (tracer()->AverageSurvivalRatio() / 100)));
3144 if (new_global_memory_limit < global_allocation_limit_) {
3145 global_allocation_limit_ = new_global_memory_limit;
3146 }
3147 }
3148 }
3149 }
3150
3151 void Heap::FlushNumberStringCache() {
3152 // Flush the number to string cache.
3153 int len = number_string_cache().length();
3154 for (int i = 0; i < len; i++) {
3155 number_string_cache().set_undefined(i);
3156 }
3157 }
3158
3159 namespace {
3160
3161 HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
3162 ClearFreedMemoryMode clear_memory_mode) {
3163 if (size == 0) return HeapObject();
3164 HeapObject filler = HeapObject::FromAddress(addr);
3165 if (size == kTaggedSize) {
3166 filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(),
3167 SKIP_WRITE_BARRIER);
3168 } else if (size == 2 * kTaggedSize) {
3169 filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(),
3170 SKIP_WRITE_BARRIER);
3171 if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
3172 AtomicSlot slot(ObjectSlot(addr) + 1);
3173 *slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
3174 }
3175 } else {
3176 DCHECK_GT(size, 2 * kTaggedSize);
3177 filler.set_map_after_allocation(roots.unchecked_free_space_map(),
3178 SKIP_WRITE_BARRIER);
3179 FreeSpace::cast(filler).set_size(size, kRelaxedStore);
3180 if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
3181 MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
3182 (size / kTaggedSize) - 2);
3183 }
3184 }
3185
3186 // At this point, we may be deserializing the heap from a snapshot, and
3187 // none of the maps have been created yet and are nullptr.
3188 DCHECK((filler.map_slot().contains_map_value(kNullAddress) &&
3189 !Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
3190 filler.map().IsMap());
3191
3192 return filler;
3193 }
3194
3195 #ifdef DEBUG
3196 void VerifyNoNeedToClearSlots(Address start, Address end) {
3197 BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
3198 if (basic_chunk->InReadOnlySpace()) return;
3199 MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
3200 if (chunk->InYoungGeneration()) return;
3201 BaseSpace* space = chunk->owner();
3202 space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
3203 }
3204 #else
3205 void VerifyNoNeedToClearSlots(Address start, Address end) {}
3206 #endif // DEBUG
3207
3208 } // namespace
3209
3210 // static
3211 HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
3212 int size,
3213 ClearFreedMemoryMode clear_memory_mode) {
3214 // TODO(leszeks): Verify that no slots need to be recorded.
3215 HeapObject filler =
3216 CreateFillerObjectAtImpl(roots, addr, size, clear_memory_mode);
3217 VerifyNoNeedToClearSlots(addr, addr + size);
3218 return filler;
3219 }
3220
3221 void Heap::CreateFillerObjectAtBackground(
3222 Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
3223 CreateFillerObjectAtImpl(ReadOnlyRoots(this), addr, size, clear_memory_mode);
3224 // Do not verify whether slots are cleared here: the concurrent sweeper is not
3225 // allowed to access the main thread's remembered set.
3226 }
3227
3228 HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
3229 ClearRecordedSlots clear_slots_mode) {
3230 // TODO(mlippautz): It would be nice to DCHECK that we never call this
3231 // with {addr} pointing into large object space; however we currently
3232 // initialize LO allocations with a filler, see
3233 // LargeObjectSpace::AllocateLargePage.
3234 if (size == 0) return HeapObject();
3235 HeapObject filler = CreateFillerObjectAtImpl(
3236 ReadOnlyRoots(this), addr, size,
3237 clear_slots_mode == ClearRecordedSlots::kYes
3238 ? ClearFreedMemoryMode::kClearFreedMemory
3239 : ClearFreedMemoryMode::kDontClearFreedMemory);
3240 if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
3241 if (clear_slots_mode == ClearRecordedSlots::kYes) {
3242 ClearRecordedSlotRange(addr, addr + size);
3243 } else {
3244 VerifyNoNeedToClearSlots(addr, addr + size);
3245 }
3246 }
3247 return filler;
3248 }
3249
3250 bool Heap::CanMoveObjectStart(HeapObject object) {
3251 if (!FLAG_move_object_start) return false;
3252
3253 // The sampling heap profiler may have a reference to the object.
3254 if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
3255
3256 if (IsLargeObject(object)) return false;
3257
3258 // Compilation jobs may have references to the object.
3259 if (isolate()->concurrent_recompilation_enabled() &&
3260 isolate()->optimizing_compile_dispatcher()->HasJobs()) {
3261 return false;
3262 }
3263
3264 // We can move the object start if the page was already swept.
3265 return Page::FromHeapObject(object)->SweepingDone();
3266 }
3267
3268 bool Heap::IsImmovable(HeapObject object) {
3269 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
3270 return third_party_heap::Heap::IsImmovable(object);
3271
3272 BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
3273 return chunk->NeverEvacuate() || IsLargeObject(object);
3274 }
3275
3276 bool Heap::IsLargeObject(HeapObject object) {
3277 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
3278 return third_party_heap::Heap::InLargeObjectSpace(object.address()) ||
3279 third_party_heap::Heap::InSpace(object.address(), CODE_LO_SPACE);
3280 return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
3281 }
3282
3283 #ifdef ENABLE_SLOW_DCHECKS
3284 namespace {
3285
3286 class LeftTrimmerVerifierRootVisitor : public RootVisitor {
3287 public:
3288 explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
3289 : to_check_(to_check) {}
3290
3291 LeftTrimmerVerifierRootVisitor(const LeftTrimmerVerifierRootVisitor&) =
3292 delete;
3293 LeftTrimmerVerifierRootVisitor& operator=(
3294 const LeftTrimmerVerifierRootVisitor&) = delete;
3295
3296 void VisitRootPointers(Root root, const char* description,
3297 FullObjectSlot start, FullObjectSlot end) override {
3298 for (FullObjectSlot p = start; p < end; ++p) {
3299 DCHECK_NE(*p, to_check_);
3300 }
3301 }
3302
3303 void VisitRootPointers(Root root, const char* description,
3304 OffHeapObjectSlot start,
3305 OffHeapObjectSlot end) override {
3306 DCHECK_EQ(root, Root::kStringTable);
3307 // We can skip iterating the string table; it doesn't point to any fixed
3308 // arrays.
3309 }
3310
3311 private:
3312 FixedArrayBase to_check_;
3313 };
3314 } // namespace
3315 #endif // ENABLE_SLOW_DCHECKS
3316
3317 namespace {
3318 bool MayContainRecordedSlots(HeapObject object) {
3319 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
3320 // New space objects do not have recorded slots.
3321 if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
3322 return false;
3323 // Allowlist objects that definitely do not have pointers.
3324 if (object.IsByteArray() || object.IsFixedDoubleArray()) return false;
3325 // Conservatively return true for other objects.
3326 return true;
3327 }
3328 } // namespace
3329
3330 void Heap::OnMoveEvent(HeapObject target, HeapObject source,
3331 int size_in_bytes) {
3332 HeapProfiler* heap_profiler = isolate_->heap_profiler();
3333 if (heap_profiler->is_tracking_object_moves()) {
3334 heap_profiler->ObjectMoveEvent(source.address(), target.address(),
3335 size_in_bytes);
3336 }
3337 for (auto& tracker : allocation_trackers_) {
3338 tracker->MoveEvent(source.address(), target.address(), size_in_bytes);
3339 }
3340 if (target.IsSharedFunctionInfo()) {
3341 LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
3342 target.address()));
3343 } else if (target.IsNativeContext()) {
3344 PROFILE(isolate_,
3345 NativeContextMoveEvent(source.address(), target.address()));
3346 }
3347
3348 if (FLAG_verify_predictable) {
3349 ++allocations_count_;
3350 // Advance synthetic time by making a time request.
3351 MonotonicallyIncreasingTimeInMs();
3352
3353 UpdateAllocationsHash(source);
3354 UpdateAllocationsHash(target);
3355 UpdateAllocationsHash(size_in_bytes);
3356
3357 if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
3358 PrintAllocationsHash();
3359 }
3360 } else if (FLAG_fuzzer_gc_analysis) {
3361 ++allocations_count_;
3362 }
3363 }
3364
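// Left-trimming moves the object start forward: the trimmed prefix is turned
// into a filler object and the map plus the shortened length are written at
// the new start address, so the remaining elements are reused in place. This
// requires CanMoveObjectStart(object) to hold.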
3365 FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
3366 int elements_to_trim) {
3367 if (elements_to_trim == 0) {
3368 // This simplifies reasoning in the rest of the function.
3369 return object;
3370 }
3371 CHECK(!object.is_null());
3372 DCHECK(CanMoveObjectStart(object));
3373 // Add custom visitor to concurrent marker if new left-trimmable type
3374 // is added.
3375 DCHECK(object.IsFixedArray() || object.IsFixedDoubleArray());
3376 const int element_size = object.IsFixedArray() ? kTaggedSize : kDoubleSize;
3377 const int bytes_to_trim = elements_to_trim * element_size;
3378 Map map = object.map();
3379
3380 // For now this trick is only applied to fixed arrays which may be in new
3381 // space or old space. In a large object space the object's start must
3382 // coincide with the chunk start, and thus the trick is just not applicable.
3383 DCHECK(!IsLargeObject(object));
3384 DCHECK(object.map() != ReadOnlyRoots(this).fixed_cow_array_map());
3385
3386 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3387 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
3388 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
3389
3390 const int len = object.length();
3391 DCHECK(elements_to_trim <= len);
3392
3393 // Calculate location of new array start.
3394 Address old_start = object.address();
3395 Address new_start = old_start + bytes_to_trim;
3396
3397 if (incremental_marking()->IsMarking()) {
3398 incremental_marking()->NotifyLeftTrimming(
3399 object, HeapObject::FromAddress(new_start));
3400 }
3401
3402 #ifdef DEBUG
3403 if (MayContainRecordedSlots(object)) {
3404 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
3405 DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
3406 DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
3407 }
3408 #endif
3409
3410 // Technically in new space this write might be omitted (except for
3411 // debug mode, which iterates through the heap), but to be safe
3412 // we still do it.
3413 CreateFillerObjectAt(old_start, bytes_to_trim,
3414 MayContainRecordedSlots(object)
3415 ? ClearRecordedSlots::kYes
3416 : ClearRecordedSlots::kNo);
3417
3418 // Initialize header of the trimmed array. Since left trimming is only
3419 // performed on pages which are not concurrently swept creating a filler
3420 // object does not require synchronization.
3421 RELAXED_WRITE_FIELD(object, bytes_to_trim,
3422 Object(MapWord::FromMap(map).ptr()));
3423 RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
3424 Smi::FromInt(len - elements_to_trim));
3425
3426 FixedArrayBase new_object =
3427 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3428
3429 // Notify the heap profiler of the change in object layout.
3430 OnMoveEvent(new_object, object, new_object.Size());
3431
3432 #ifdef ENABLE_SLOW_DCHECKS
3433 if (FLAG_enable_slow_asserts) {
3434 // Make sure the stack or other roots (e.g., Handles) don't contain pointers
3435 // to the original FixedArray (which is now the filler object).
3436 SafepointScope scope(this);
3437 LeftTrimmerVerifierRootVisitor root_visitor(object);
3438 ReadOnlyRoots(this).Iterate(&root_visitor);
3439 IterateRoots(&root_visitor, {});
3440 }
3441 #endif // ENABLE_SLOW_DCHECKS
3442
3443 return new_object;
3444 }
3445
3446 void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
3447 const int len = object.length();
3448 DCHECK_LE(elements_to_trim, len);
3449 DCHECK_GE(elements_to_trim, 0);
3450
3451 int bytes_to_trim;
3452 if (object.IsByteArray()) {
3453 int new_size = ByteArray::SizeFor(len - elements_to_trim);
3454 bytes_to_trim = ByteArray::SizeFor(len) - new_size;
3455 DCHECK_GE(bytes_to_trim, 0);
3456 } else if (object.IsFixedArray()) {
3457 CHECK_NE(elements_to_trim, len);
3458 bytes_to_trim = elements_to_trim * kTaggedSize;
3459 } else {
3460 DCHECK(object.IsFixedDoubleArray());
3461 CHECK_NE(elements_to_trim, len);
3462 bytes_to_trim = elements_to_trim * kDoubleSize;
3463 }
3464
3465 CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
3466 }
3467
3468 void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
3469 int elements_to_trim) {
3470 // This function is safe to use only at the end of the mark compact
3471 // collection: When marking, we record the weak slots, and shrinking
3472 // invalidates them.
3473 DCHECK_EQ(gc_state(), MARK_COMPACT);
3474 CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
3475 elements_to_trim * kTaggedSize);
3476 }
3477
3478 template <typename T>
3479 void Heap::CreateFillerForArray(T object, int elements_to_trim,
3480 int bytes_to_trim) {
3481 DCHECK(object.IsFixedArrayBase() || object.IsByteArray() ||
3482 object.IsWeakFixedArray());
3483
3484 // For now this trick is only applied to objects in new and paged space.
3485 DCHECK(object.map() != ReadOnlyRoots(this).fixed_cow_array_map());
3486
3487 if (bytes_to_trim == 0) {
3488 DCHECK_EQ(elements_to_trim, 0);
3489 // No need to create filler and update live bytes counters.
3490 return;
3491 }
3492
3493 // Calculate location of new array end.
3494 int old_size = object.Size();
3495 Address old_end = object.address() + old_size;
3496 Address new_end = old_end - bytes_to_trim;
3497
3498 #ifdef DEBUG
3499 if (MayContainRecordedSlots(object)) {
3500 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
3501 DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
3502 DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
3503 }
3504 #endif
3505
3506 bool clear_slots = MayContainRecordedSlots(object);
3507
3508 // Technically in new space this write might be omitted (except for
3509 // debug mode, which iterates through the heap), but to be safe
3510 // we still do it.
3511 // We do not create a filler for objects in a large object space.
3512 if (!IsLargeObject(object)) {
3513 HeapObject filler = CreateFillerObjectAt(
3514 new_end, bytes_to_trim,
3515 clear_slots ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
3516 DCHECK(!filler.is_null());
3517 // Clear the mark bits of the black area that belongs now to the filler.
3518 // This is an optimization. The sweeper will release black fillers anyway.
3519 if (incremental_marking()->black_allocation() &&
3520 incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
3521 Page* page = Page::FromAddress(new_end);
3522 incremental_marking()->marking_state()->bitmap(page)->ClearRange(
3523 page->AddressToMarkbitIndex(new_end),
3524 page->AddressToMarkbitIndex(new_end + bytes_to_trim));
3525 }
3526 } else if (clear_slots) {
3527 // Large objects are not swept, so it is not necessary to clear the
3528 // recorded slot.
3529 MemsetTagged(ObjectSlot(new_end), Object(kClearedFreeMemoryValue),
3530 (old_end - new_end) / kTaggedSize);
3531 }
3532
3533 // Initialize header of the trimmed array. We are storing the new length
3534 // using release store after creating a filler for the left-over space to
3535 // avoid races with the sweeper thread.
3536 object.set_length(object.length() - elements_to_trim, kReleaseStore);
3537
3538 // Notify the heap object allocation tracker of the change in object layout.
3539 // The array may not be moved during GC, but its size has to be adjusted nevertheless.
3540 for (auto& tracker : allocation_trackers_) {
3541 tracker->UpdateObjectSizeEvent(object.address(), object.Size());
3542 }
3543 }
3544
3545 void Heap::MakeHeapIterable() {
3546 mark_compact_collector()->EnsureSweepingCompleted();
3547
3548 MakeLocalHeapLabsIterable();
3549 }
3550
3551 void Heap::MakeLocalHeapLabsIterable() {
3552 safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
3553 local_heap->MakeLinearAllocationAreaIterable();
3554 });
3555 }
3556
3557 namespace {
3558
3559 double ComputeMutatorUtilizationImpl(double mutator_speed, double gc_speed) {
3560 constexpr double kMinMutatorUtilization = 0.0;
3561 constexpr double kConservativeGcSpeedInBytesPerMillisecond = 200000;
3562 if (mutator_speed == 0) return kMinMutatorUtilization;
3563 if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
3564 // Derivation:
3565 // mutator_utilization = mutator_time / (mutator_time + gc_time)
3566 // mutator_time = 1 / mutator_speed
3567 // gc_time = 1 / gc_speed
3568 // mutator_utilization = (1 / mutator_speed) /
3569 // (1 / mutator_speed + 1 / gc_speed)
3570 // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
3571 return gc_speed / (mutator_speed + gc_speed);
3572 }
3573
3574 } // namespace
3575
3576 double Heap::ComputeMutatorUtilization(const char* tag, double mutator_speed,
3577 double gc_speed) {
3578 double result = ComputeMutatorUtilizationImpl(mutator_speed, gc_speed);
3579 if (FLAG_trace_mutator_utilization) {
3580 isolate()->PrintWithTimestamp(
3581 "%s mutator utilization = %.3f ("
3582 "mutator_speed=%.f, gc_speed=%.f)\n",
3583 tag, result, mutator_speed, gc_speed);
3584 }
3585 return result;
3586 }
3587
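// The allocation rate is considered low when the mutator utilization (share
// of time spent in the mutator rather than in GC) stays above
// kHighMutatorUtilization, i.e. 99.3%.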
3588 bool Heap::HasLowYoungGenerationAllocationRate() {
3589 double mu = ComputeMutatorUtilization(
3590 "Young generation",
3591 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond(),
3592 tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
3593 constexpr double kHighMutatorUtilization = 0.993;
3594 return mu > kHighMutatorUtilization;
3595 }
3596
3597 bool Heap::HasLowOldGenerationAllocationRate() {
3598 double mu = ComputeMutatorUtilization(
3599 "Old generation",
3600 tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond(),
3601 tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
3602 const double kHighMutatorUtilization = 0.993;
3603 return mu > kHighMutatorUtilization;
3604 }
3605
3606 bool Heap::HasLowEmbedderAllocationRate() {
3607 if (!UseGlobalMemoryScheduling()) return true;
3608
3609 DCHECK_NOT_NULL(local_embedder_heap_tracer());
3610 double mu = ComputeMutatorUtilization(
3611 "Embedder",
3612 tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond(),
3613 tracer()->EmbedderSpeedInBytesPerMillisecond());
3614 const double kHighMutatorUtilization = 0.993;
3615 return mu > kHighMutatorUtilization;
3616 }
3617
3618 bool Heap::HasLowAllocationRate() {
3619 return HasLowYoungGenerationAllocationRate() &&
3620 HasLowOldGenerationAllocationRate() && HasLowEmbedderAllocationRate();
3621 }
3622
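// A mark-compact is considered ineffective when the old generation is almost
// full (at least 80% of the limit) yet mutator utilization stays below 40%,
// i.e. the collector runs frequently without freeing much memory. After
// kMaxConsecutiveIneffectiveMarkCompacts such GCs the near-heap-limit
// callback is invoked, and if that does not raise the limit we fail with OOM.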
3623 bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
3624 double mutator_utilization) {
3625 const double kHighHeapPercentage = 0.8;
3626 const double kLowMutatorUtilization = 0.4;
3627 return old_generation_size >=
3628 kHighHeapPercentage * max_old_generation_size() &&
3629 mutator_utilization < kLowMutatorUtilization;
3630 }
3631
3632 void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
3633 double mutator_utilization) {
3634 const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
3635 if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
3636 if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
3637 consecutive_ineffective_mark_compacts_ = 0;
3638 return;
3639 }
3640 ++consecutive_ineffective_mark_compacts_;
3641 if (consecutive_ineffective_mark_compacts_ ==
3642 kMaxConsecutiveIneffectiveMarkCompacts) {
3643 if (InvokeNearHeapLimitCallback()) {
3644 // The callback increased the heap limit.
3645 consecutive_ineffective_mark_compacts_ = 0;
3646 return;
3647 }
3648 FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
3649 }
3650 }
3651
3652 bool Heap::HasHighFragmentation() {
3653 size_t used = OldGenerationSizeOfObjects();
3654 size_t committed = CommittedOldGenerationMemory();
3655 return HasHighFragmentation(used, committed);
3656 }
3657
3658 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
3659 const size_t kSlack = 16 * MB;
3660 // Fragmentation is high if committed > 2 * used + kSlack.
3661 // Rewrite the expression to avoid overflow.
3662 DCHECK_GE(committed, used);
3663 return committed - used > used + kSlack;
3664 }
3665
3666 bool Heap::ShouldOptimizeForMemoryUsage() {
3667 const size_t kOldGenerationSlack = max_old_generation_size() / 8;
3668 return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
3669 isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
3670 !CanExpandOldGeneration(kOldGenerationSlack);
3671 }
3672
3673 void Heap::ActivateMemoryReducerIfNeeded() {
3674 // Activate memory reducer when switching to background if
3675 // - there was no mark compact since the start.
3676 // - the committed memory can be potentially reduced.
3677 // 2 pages for the old, code, and map space + 1 page for new space.
3678 const int kMinCommittedMemory = 7 * Page::kPageSize;
3679 if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
3680 isolate()->IsIsolateInBackground()) {
3681 MemoryReducer::Event event;
3682 event.type = MemoryReducer::kPossibleGarbage;
3683 event.time_ms = MonotonicallyIncreasingTimeInMs();
3684 memory_reducer_->NotifyPossibleGarbage(event);
3685 }
3686 }
3687
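// Shrinks the new space (and the new large object space capacity along with
// it) when memory should be reduced or the current allocation throughput
// drops below kLowAllocationThroughput bytes/ms. Skipped under --predictable.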
3688 void Heap::ReduceNewSpaceSize() {
3689 static const size_t kLowAllocationThroughput = 1000;
3690 const double allocation_throughput =
3691 tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
3692
3693 if (FLAG_predictable) return;
3694
3695 if (ShouldReduceMemory() ||
3696 ((allocation_throughput != 0) &&
3697 (allocation_throughput < kLowAllocationThroughput))) {
3698 new_space_->Shrink();
3699 new_lo_space_->SetCapacity(new_space_->Capacity());
3700 UncommitFromSpace();
3701 }
3702 }
3703
3704 size_t Heap::NewSpaceSize() { return new_space() ? new_space()->Size() : 0; }
3705
3706 size_t Heap::NewSpaceCapacity() {
3707 return new_space() ? new_space()->Capacity() : 0;
3708 }
3709
3710 void Heap::FinalizeIncrementalMarkingIfComplete(
3711 GarbageCollectionReason gc_reason) {
3712 if (incremental_marking()->IsMarking() &&
3713 (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
3714 (!incremental_marking()->finalize_marking_completed() &&
3715 mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
3716 local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
3717 FinalizeIncrementalMarkingIncrementally(gc_reason);
3718 } else if (incremental_marking()->IsComplete() ||
3719 (incremental_marking()->IsMarking() &&
3720 mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
3721 local_embedder_heap_tracer()
3722 ->ShouldFinalizeIncrementalMarking())) {
3723 CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3724 }
3725 }
3726
3727 void Heap::FinalizeIncrementalMarkingAtomically(
3728 GarbageCollectionReason gc_reason) {
3729 DCHECK(!incremental_marking()->IsStopped());
3730 CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3731 }
3732
3733 void Heap::InvokeIncrementalMarkingPrologueCallbacks() {
3734 GCCallbacksScope scope(this);
3735 if (scope.CheckReenter()) {
3736 AllowGarbageCollection allow_allocation;
3737 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
3738 VMState<EXTERNAL> state(isolate_);
3739 HandleScope handle_scope(isolate_);
3740 CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
3741 }
3742 }
3743
3744 void Heap::InvokeIncrementalMarkingEpilogueCallbacks() {
3745 GCCallbacksScope scope(this);
3746 if (scope.CheckReenter()) {
3747 AllowGarbageCollection allow_allocation;
3748 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
3749 VMState<EXTERNAL> state(isolate_);
3750 HandleScope handle_scope(isolate_);
3751 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
3752 }
3753 }
3754
3755 void Heap::FinalizeIncrementalMarkingIncrementally(
3756 GarbageCollectionReason gc_reason) {
3757 if (FLAG_trace_incremental_marking) {
3758 isolate()->PrintWithTimestamp(
3759 "[IncrementalMarking] (%s).\n",
3760 Heap::GarbageCollectionReasonToString(gc_reason));
3761 }
3762
3763 DevToolsTraceEventScope devtools_trace_event_scope(
3764 this, "MajorGC", "incremental finalization step");
3765
3766 NestedTimedHistogramScope incremental_marking_scope(
3767 isolate()->counters()->gc_incremental_marking_finalize());
3768 TRACE_EVENT1("v8", "V8.GCIncrementalMarkingFinalize", "epoch", epoch_full());
3769 TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE,
3770 ThreadKind::kMain);
3771
3772 SafepointScope safepoint(this);
3773 InvokeIncrementalMarkingPrologueCallbacks();
3774 incremental_marking()->FinalizeIncrementally();
3775 InvokeIncrementalMarkingEpilogueCallbacks();
3776 }
3777
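// Must be called before an in-place layout change that is not safe for
// concurrent marking. It marks and re-visits the object under incremental
// marking and, when requested, registers the object so that recorded slots
// inside it are treated as invalidated.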
3778 void Heap::NotifyObjectLayoutChange(
3779 HeapObject object, const DisallowGarbageCollection&,
3780 InvalidateRecordedSlots invalidate_recorded_slots) {
3781 if (incremental_marking()->IsMarking()) {
3782 incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
3783 if (incremental_marking()->IsCompacting() &&
3784 invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
3785 MayContainRecordedSlots(object)) {
3786 MemoryChunk::FromHeapObject(object)
3787 ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
3788 }
3789 }
3790 if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
3791 MayContainRecordedSlots(object)) {
3792 MemoryChunk::FromHeapObject(object)
3793 ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
3794 }
3795 #ifdef VERIFY_HEAP
3796 if (FLAG_verify_heap) {
3797 DCHECK(pending_layout_change_object_.is_null());
3798 pending_layout_change_object_ = object;
3799 }
3800 #endif
3801 }
3802
3803 void Heap::NotifyCodeObjectChangeStart(Code code,
3804 const DisallowGarbageCollection&) {
3805 // Updating the code object will also trim the object size; this results in
3806 // free memory which we want to give back to the LAB. Sweeping that object's
3807 // page will ensure that we don't add that memory to the free list as well.
3808 EnsureSweepingCompleted(code);
3809 }
3810
3811 void Heap::NotifyCodeObjectChangeEnd(Code code,
3812 const DisallowGarbageCollection&) {
3813 // Ensure relocation_info is already initialized.
3814 DCHECK(code.relocation_info_or_undefined().IsByteArray());
3815
3816 if (incremental_marking()->IsMarking()) {
3817 // Object might have been marked already without relocation_info. Force
3818 // revisitation of the object such that we find all pointers in the
3819 // instruction stream.
3820 incremental_marking()->MarkBlackAndRevisitObject(code);
3821 }
3822 }
3823
3824 #ifdef VERIFY_HEAP
3825 // Helper class for collecting slot addresses.
3826 class SlotCollectingVisitor final : public ObjectVisitor {
3827 public:
3828 void VisitPointers(HeapObject host, ObjectSlot start,
3829 ObjectSlot end) override {
3830 VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
3831 }
3832 void VisitPointers(HeapObject host, MaybeObjectSlot start,
3833 MaybeObjectSlot end) final {
3834 for (MaybeObjectSlot p = start; p < end; ++p) {
3835 slots_.push_back(p);
3836 }
3837 }
3838
3839 void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
3840 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
3841 #if V8_EXTERNAL_CODE_SPACE
3842 code_slots_.push_back(slot);
3843 #endif
3844 }
3845
3846 void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
3847
3848 void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3849 UNREACHABLE();
3850 }
3851
3852 void VisitMapPointer(HeapObject object) override {}  // do nothing by default
3853
3854 int number_of_slots() { return static_cast<int>(slots_.size()); }
3855
3856 MaybeObjectSlot slot(int i) { return slots_[i]; }
3857 #if V8_EXTERNAL_CODE_SPACE
3858 CodeObjectSlot code_slot(int i) { return code_slots_[i]; }
3859 int number_of_code_slots() { return static_cast<int>(code_slots_.size()); }
3860 #endif
3861
3862 private:
3863 std::vector<MaybeObjectSlot> slots_;
3864 #if V8_EXTERNAL_CODE_SPACE
3865 std::vector<CodeObjectSlot> code_slots_;
3866 #endif
3867 };
3868
3869 void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
3870 if (!FLAG_verify_heap) return;
3871
3872 // Check that Heap::NotifyObjectLayoutChange was called for object transitions
3873 // that are not safe for concurrent marking.
3874 // If you see this check triggering for a freshly allocated object,
3875 // use object->set_map_after_allocation() to initialize its map.
3876 if (pending_layout_change_object_.is_null()) {
3877 if (object.IsJSObject()) {
3878 // Without double unboxing all in-object fields of a JSObject are tagged.
3879 return;
3880 }
3881 if (object.IsString() &&
3882 (new_map == ReadOnlyRoots(this).thin_string_map() ||
3883 new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
3884 // When transitioning a string to ThinString,
3885 // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
3886 // tagged fields are introduced.
3887 return;
3888 }
3889 // Check that the set of slots before and after the transition match.
3890 SlotCollectingVisitor old_visitor;
3891 object.IterateFast(&old_visitor);
3892 MapWord old_map_word = object.map_word(kRelaxedLoad);
3893 // Temporarily set the new map to iterate new slots.
3894 object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
3895 SlotCollectingVisitor new_visitor;
3896 object.IterateFast(&new_visitor);
3897 // Restore the old map.
3898 object.set_map_word(old_map_word, kRelaxedStore);
3899 DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
3900 for (int i = 0; i < new_visitor.number_of_slots(); i++) {
3901 DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
3902 }
3903 #if V8_EXTERNAL_CODE_SPACE
3904 DCHECK_EQ(new_visitor.number_of_code_slots(),
3905 old_visitor.number_of_code_slots());
3906 for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
3907 DCHECK_EQ(new_visitor.code_slot(i), old_visitor.code_slot(i));
3908 }
3909 #endif // V8_EXTERNAL_CODE_SPACE
3910 } else {
3911 DCHECK_EQ(pending_layout_change_object_, object);
3912 pending_layout_change_object_ = HeapObject();
3913 }
3914 }
3915 #endif
3916
3917 GCIdleTimeHeapState Heap::ComputeHeapState() {
3918 GCIdleTimeHeapState heap_state;
3919 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
3920 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
3921 return heap_state;
3922 }
3923
3924 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
3925 GCIdleTimeHeapState heap_state,
3926 double deadline_in_ms) {
3927 bool result = false;
3928 switch (action) {
3929 case GCIdleTimeAction::kDone:
3930 result = true;
3931 break;
3932 case GCIdleTimeAction::kIncrementalStep: {
3933 incremental_marking()->AdvanceWithDeadline(
3934 deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
3935 StepOrigin::kTask);
3936 FinalizeIncrementalMarkingIfComplete(
3937 GarbageCollectionReason::kFinalizeMarkingViaTask);
3938 result = incremental_marking()->IsStopped();
3939 break;
3940 }
3941 }
3942
3943 return result;
3944 }
3945
3946 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
3947 GCIdleTimeHeapState heap_state,
3948 double start_ms, double deadline_in_ms) {
3949 double idle_time_in_ms = deadline_in_ms - start_ms;
3950 double current_time = MonotonicallyIncreasingTimeInMs();
3951 last_idle_notification_time_ = current_time;
3952 double deadline_difference = deadline_in_ms - current_time;
3953
3954 if (FLAG_trace_idle_notification) {
3955 isolate_->PrintWithTimestamp(
3956 "Idle notification: requested idle time %.2f ms, used idle time %.2f "
3957 "ms, deadline usage %.2f ms [",
3958 idle_time_in_ms, idle_time_in_ms - deadline_difference,
3959 deadline_difference);
3960 switch (action) {
3961 case GCIdleTimeAction::kDone:
3962 PrintF("done");
3963 break;
3964 case GCIdleTimeAction::kIncrementalStep:
3965 PrintF("incremental step");
3966 break;
3967 }
3968 PrintF("]");
3969 if (FLAG_trace_idle_notification_verbose) {
3970 PrintF("[");
3971 heap_state.Print();
3972 PrintF("]");
3973 }
3974 PrintF("\n");
3975 }
3976 }
3977
3978 double Heap::MonotonicallyIncreasingTimeInMs() const {
3979 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
3980 static_cast<double>(base::Time::kMillisecondsPerSecond);
3981 }
3982
3983 void Heap::VerifyNewSpaceTop() {
3984 if (!new_space()) return;
3985 new_space()->VerifyTop();
3986 }
3987
3988 bool Heap::IdleNotification(int idle_time_in_ms) {
3989 return IdleNotification(
3990 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
3991 (static_cast<double>(idle_time_in_ms) /
3992 static_cast<double>(base::Time::kMillisecondsPerSecond)));
3993 }
3994
3995 bool Heap::IdleNotification(double deadline_in_seconds) {
3996 CHECK(HasBeenSetUp());
3997 double deadline_in_ms =
3998 deadline_in_seconds *
3999 static_cast<double>(base::Time::kMillisecondsPerSecond);
4000 NestedTimedHistogramScope idle_notification_scope(
4001 isolate_->counters()->gc_idle_notification());
4002 TRACE_EVENT0("v8", "V8.GCIdleNotification");
4003 double start_ms = MonotonicallyIncreasingTimeInMs();
4004 double idle_time_in_ms = deadline_in_ms - start_ms;
4005
4006 tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
4007 OldGenerationAllocationCounter(),
4008 EmbedderAllocationCounter());
4009
4010 GCIdleTimeHeapState heap_state = ComputeHeapState();
4011 GCIdleTimeAction action =
4012 gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
4013 bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
4014 IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
4015 return result;
4016 }
4017
4018 bool Heap::RecentIdleNotificationHappened() {
4019 return (last_idle_notification_time_ +
4020 GCIdleTimeHandler::kMaxScheduledIdleTime) >
4021 MonotonicallyIncreasingTimeInMs();
4022 }
4023
4024 class MemoryPressureInterruptTask : public CancelableTask {
4025 public:
4026 explicit MemoryPressureInterruptTask(Heap* heap)
4027 : CancelableTask(heap->isolate()), heap_(heap) {}
4028
4029 ~MemoryPressureInterruptTask() override = default;
4030 MemoryPressureInterruptTask(const MemoryPressureInterruptTask&) = delete;
4031 MemoryPressureInterruptTask& operator=(const MemoryPressureInterruptTask&) =
4032 delete;
4033
4034 private:
4035 // v8::internal::CancelableTask overrides.
4036 void RunInternal() override { heap_->CheckMemoryPressure(); }
4037
4038 Heap* heap_;
4039 };
4040
4041 void Heap::CheckMemoryPressure() {
4042 if (HighMemoryPressure()) {
4043 // The optimizing compiler may be unnecessarily holding on to memory.
4044 isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
4045 }
4046 // Reset the memory pressure level to avoid recursive GCs triggered by
4047 // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
4048 // the finalizers.
4049 MemoryPressureLevel memory_pressure_level = memory_pressure_level_.exchange(
4050 MemoryPressureLevel::kNone, std::memory_order_relaxed);
4051 if (memory_pressure_level == MemoryPressureLevel::kCritical) {
4052 TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
4053 CollectGarbageOnMemoryPressure();
4054 } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
4055 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4056 TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
4057 StartIncrementalMarking(kReduceMemoryFootprintMask,
4058 GarbageCollectionReason::kMemoryPressure);
4059 }
4060 }
4061 }
4062
4063 void Heap::CollectGarbageOnMemoryPressure() {
4064 const int kGarbageThresholdInBytes = 8 * MB;
4065 const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
4066 // This constant is the maximum response time in the RAIL performance model.
4067 const double kMaxMemoryPressurePauseMs = 100;
4068
4069 double start = MonotonicallyIncreasingTimeInMs();
4070 CollectAllGarbage(kReduceMemoryFootprintMask,
4071 GarbageCollectionReason::kMemoryPressure,
4072 kGCCallbackFlagCollectAllAvailableGarbage);
4073 EagerlyFreeExternalMemory();
4074 double end = MonotonicallyIncreasingTimeInMs();
4075
4076 // Estimate how much memory we can free.
4077 int64_t potential_garbage =
4078 (CommittedMemory() - SizeOfObjects()) + external_memory_.total();
4079 // If we can potentially free a large amount of memory, then start a GC right
4080 // away instead of waiting for the memory reducer.
4081 if (potential_garbage >= kGarbageThresholdInBytes &&
4082 potential_garbage >=
4083 CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
4084 // If we spent less than half of the time budget, then perform full GC
4085 // Otherwise, start incremental marking.
4086 if (end - start < kMaxMemoryPressurePauseMs / 2) {
4087 CollectAllGarbage(kReduceMemoryFootprintMask,
4088 GarbageCollectionReason::kMemoryPressure,
4089 kGCCallbackFlagCollectAllAvailableGarbage);
4090 } else {
4091 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4092 StartIncrementalMarking(kReduceMemoryFootprintMask,
4093 GarbageCollectionReason::kMemoryPressure);
4094 }
4095 }
4096 }
4097 }
4098
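// Records the new pressure level. A GC is only triggered when the level rises
// (to kCritical, or from kNone to kModerate); it runs directly when the
// isolate is locked by the caller, otherwise via a stack guard interrupt and
// a posted MemoryPressureInterruptTask.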
4099 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
4100 bool is_isolate_locked) {
4101 TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
4102 static_cast<int>(level));
4103 MemoryPressureLevel previous =
4104 memory_pressure_level_.exchange(level, std::memory_order_relaxed);
4105 if ((previous != MemoryPressureLevel::kCritical &&
4106 level == MemoryPressureLevel::kCritical) ||
4107 (previous == MemoryPressureLevel::kNone &&
4108 level == MemoryPressureLevel::kModerate)) {
4109 if (is_isolate_locked) {
4110 CheckMemoryPressure();
4111 } else {
4112 ExecutionAccess access(isolate());
4113 isolate()->stack_guard()->RequestGC();
4114 auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
4115 reinterpret_cast<v8::Isolate*>(isolate()));
4116 taskrunner->PostTask(std::make_unique<MemoryPressureInterruptTask>(this));
4117 }
4118 }
4119 }
4120
4121 void Heap::EagerlyFreeExternalMemory() {
4122 array_buffer_sweeper()->EnsureFinished();
4123 memory_allocator()->unmapper()->EnsureUnmappingCompleted();
4124 }
4125
4126 void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
4127 void* data) {
4128 const size_t kMaxCallbacks = 100;
4129 CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
4130 for (auto callback_data : near_heap_limit_callbacks_) {
4131 CHECK_NE(callback_data.first, callback);
4132 }
4133 near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
4134 }
4135
4136 void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
4137 size_t heap_limit) {
4138 for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
4139 if (near_heap_limit_callbacks_[i].first == callback) {
4140 near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
4141 if (heap_limit) {
4142 RestoreHeapLimit(heap_limit);
4143 }
4144 return;
4145 }
4146 }
4147 UNREACHABLE();
4148 }
4149
4150 void Heap::AppendArrayBufferExtension(JSArrayBuffer object,
4151 ArrayBufferExtension* extension) {
4152 // ArrayBufferSweeper is managing all counters and updating Heap counters.
4153 array_buffer_sweeper_->Append(object, extension);
4154 }
4155
4156 void Heap::DetachArrayBufferExtension(JSArrayBuffer object,
4157 ArrayBufferExtension* extension) {
4158 // ArrayBufferSweeper is managing all counters and updating Heap counters.
4159 return array_buffer_sweeper_->Detach(object, extension);
4160 }
4161
4162 void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
4163 initial_max_old_generation_size_threshold_ =
4164 initial_max_old_generation_size_ * threshold_percent;
4165 }
4166
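// Invokes the most recently added near-heap-limit callback. The callback may
// return a new, larger limit; in that case the limit is raised (capped by the
// allocator limit) and true is returned.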
4167 bool Heap::InvokeNearHeapLimitCallback() {
4168 if (near_heap_limit_callbacks_.size() > 0) {
4169 HandleScope scope(isolate());
4170 v8::NearHeapLimitCallback callback =
4171 near_heap_limit_callbacks_.back().first;
4172 void* data = near_heap_limit_callbacks_.back().second;
4173 size_t heap_limit = callback(data, max_old_generation_size(),
4174 initial_max_old_generation_size_);
4175 if (heap_limit > max_old_generation_size()) {
4176 set_max_old_generation_size(
4177 std::min(heap_limit, AllocatorLimitOnMaxOldGenerationSize()));
4178 return true;
4179 }
4180 }
4181 return false;
4182 }
4183
4184 bool Heap::MeasureMemory(std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
4185 v8::MeasureMemoryExecution execution) {
4186 HandleScope handle_scope(isolate());
4187 std::vector<Handle<NativeContext>> contexts = FindAllNativeContexts();
4188 std::vector<Handle<NativeContext>> to_measure;
4189 for (auto& current : contexts) {
4190 if (delegate->ShouldMeasure(
4191 v8::Utils::ToLocal(Handle<Context>::cast(current)))) {
4192 to_measure.push_back(current);
4193 }
4194 }
4195 return memory_measurement_->EnqueueRequest(std::move(delegate), execution,
4196 to_measure);
4197 }
4198
4199 std::unique_ptr<v8::MeasureMemoryDelegate> Heap::MeasureMemoryDelegate(
4200 Handle<NativeContext> context, Handle<JSPromise> promise,
4201 v8::MeasureMemoryMode mode) {
4202 return i::MemoryMeasurement::DefaultDelegate(isolate_, context, promise,
4203 mode);
4204 }
4205
4206 void Heap::CollectCodeStatistics() {
4207 TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
4208 CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
4209 // We do not look for code in new space or map space. If code
4210 // somehow ends up in those spaces, we would miss it here.
4211 CodeStatistics::CollectCodeStatistics(code_space_, isolate());
4212 CodeStatistics::CollectCodeStatistics(old_space_, isolate());
4213 CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
4214 }
4215
4216 #ifdef DEBUG
4217
4218 void Heap::Print() {
4219 if (!HasBeenSetUp()) return;
4220 isolate()->PrintStack(stdout);
4221
4222 for (SpaceIterator it(this); it.HasNext();) {
4223 it.Next()->Print();
4224 }
4225 }
4226
4227 void Heap::ReportCodeStatistics(const char* title) {
4228 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4229 CollectCodeStatistics();
4230 CodeStatistics::ReportCodeStatistics(isolate());
4231 }
4232
4233 #endif // DEBUG
4234
4235 const char* Heap::GarbageCollectionReasonToString(
4236 GarbageCollectionReason gc_reason) {
4237 switch (gc_reason) {
4238 case GarbageCollectionReason::kAllocationFailure:
4239 return "allocation failure";
4240 case GarbageCollectionReason::kAllocationLimit:
4241 return "allocation limit";
4242 case GarbageCollectionReason::kContextDisposal:
4243 return "context disposal";
4244 case GarbageCollectionReason::kCountersExtension:
4245 return "counters extension";
4246 case GarbageCollectionReason::kDebugger:
4247 return "debugger";
4248 case GarbageCollectionReason::kDeserializer:
4249 return "deserialize";
4250 case GarbageCollectionReason::kExternalMemoryPressure:
4251 return "external memory pressure";
4252 case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
4253 return "finalize incremental marking via stack guard";
4254 case GarbageCollectionReason::kFinalizeMarkingViaTask:
4255 return "finalize incremental marking via task";
4256 case GarbageCollectionReason::kFullHashtable:
4257 return "full hash-table";
4258 case GarbageCollectionReason::kHeapProfiler:
4259 return "heap profiler";
4260 case GarbageCollectionReason::kTask:
4261 return "task";
4262 case GarbageCollectionReason::kLastResort:
4263 return "last resort";
4264 case GarbageCollectionReason::kLowMemoryNotification:
4265 return "low memory notification";
4266 case GarbageCollectionReason::kMakeHeapIterable:
4267 return "make heap iterable";
4268 case GarbageCollectionReason::kMemoryPressure:
4269 return "memory pressure";
4270 case GarbageCollectionReason::kMemoryReducer:
4271 return "memory reducer";
4272 case GarbageCollectionReason::kRuntime:
4273 return "runtime";
4274 case GarbageCollectionReason::kSamplingProfiler:
4275 return "sampling profiler";
4276 case GarbageCollectionReason::kSnapshotCreator:
4277 return "snapshot creator";
4278 case GarbageCollectionReason::kTesting:
4279 return "testing";
4280 case GarbageCollectionReason::kExternalFinalize:
4281 return "external finalize";
4282 case GarbageCollectionReason::kGlobalAllocationLimit:
4283 return "global allocation limit";
4284 case GarbageCollectionReason::kMeasureMemory:
4285 return "measure memory";
4286 case GarbageCollectionReason::kUnknown:
4287 return "unknown";
4288 case GarbageCollectionReason::kBackgroundAllocationFailure:
4289 return "background allocation failure";
4290 }
4291 UNREACHABLE();
4292 }
4293
bool Heap::Contains(HeapObject value) const {
4295 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
4296 return true;
4297 }
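  // Objects in the read-only heap are never considered part of this heap.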
4298 if (ReadOnlyHeap::Contains(value)) {
4299 return false;
4300 }
4301 if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
4302 return false;
4303 }
4304 return HasBeenSetUp() &&
4305 ((new_space_ && new_space_->ToSpaceContains(value)) ||
4306 old_space_->Contains(value) || code_space_->Contains(value) ||
4307 map_space_->Contains(value) || lo_space_->Contains(value) ||
4308 code_lo_space_->Contains(value) ||
4309 (new_lo_space_ && new_lo_space_->Contains(value)));
4310 }
4311
bool Heap::ContainsCode(HeapObject value) const {
4313 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
4314 return true;
4315 }
4316 // TODO(v8:11880): support external code space.
4317 if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
4318 return false;
4319 }
4320 return HasBeenSetUp() &&
4321 (code_space_->Contains(value) || code_lo_space_->Contains(value));
4322 }
4323
bool Heap::SharedHeapContains(HeapObject value) const {
4325 if (shared_old_space_)
4326 return shared_old_space_->Contains(value) ||
4327 shared_map_space_->Contains(value);
4328 return false;
4329 }
4330
bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
4332 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
4333 return third_party_heap::Heap::InSpace(value.address(), space);
4334 if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
4335 return false;
4336 }
4337 if (!HasBeenSetUp()) return false;
4338
4339 switch (space) {
4340 case NEW_SPACE:
4341 return new_space_->ToSpaceContains(value);
4342 case OLD_SPACE:
4343 return old_space_->Contains(value);
4344 case CODE_SPACE:
4345 return code_space_->Contains(value);
4346 case MAP_SPACE:
4347 return map_space_->Contains(value);
4348 case LO_SPACE:
4349 return lo_space_->Contains(value);
4350 case CODE_LO_SPACE:
4351 return code_lo_space_->Contains(value);
4352 case NEW_LO_SPACE:
4353 return new_lo_space_->Contains(value);
4354 case RO_SPACE:
4355 return ReadOnlyHeap::Contains(value);
4356 }
4357 UNREACHABLE();
4358 }
4359
bool Heap::IsShared() { return isolate()->is_shared(); }
4361
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
4363 if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
4364 return false;
4365 }
4366 if (!HasBeenSetUp()) return false;
4367
4368 switch (space) {
4369 case NEW_SPACE:
4370 return new_space_->ToSpaceContainsSlow(addr);
4371 case OLD_SPACE:
4372 return old_space_->ContainsSlow(addr);
4373 case CODE_SPACE:
4374 return code_space_->ContainsSlow(addr);
4375 case MAP_SPACE:
4376 return map_space_->ContainsSlow(addr);
4377 case LO_SPACE:
4378 return lo_space_->ContainsSlow(addr);
4379 case CODE_LO_SPACE:
4380 return code_lo_space_->ContainsSlow(addr);
4381 case NEW_LO_SPACE:
4382 return new_lo_space_->ContainsSlow(addr);
4383 case RO_SPACE:
4384 return read_only_space_->ContainsSlow(addr);
4385 }
4386 UNREACHABLE();
4387 }
4388
bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4390 switch (space) {
4391 case NEW_SPACE:
4392 case OLD_SPACE:
4393 case CODE_SPACE:
4394 case MAP_SPACE:
4395 case LO_SPACE:
4396 case NEW_LO_SPACE:
4397 case CODE_LO_SPACE:
4398 case RO_SPACE:
4399 return true;
4400 default:
4401 return false;
4402 }
4403 }
4404
4405 #ifdef VERIFY_HEAP
void Heap::Verify() {
4407 CHECK(HasBeenSetUp());
4408 SafepointScope safepoint_scope(this);
4409 HandleScope scope(isolate());
4410
4411 MakeHeapIterable();
4412
4413 array_buffer_sweeper()->EnsureFinished();
4414
4415 VerifyPointersVisitor visitor(this);
4416 IterateRoots(&visitor, {});
4417
4418 if (!isolate()->context().is_null() &&
4419 !isolate()->normalized_map_cache()->IsUndefined(isolate())) {
4420 NormalizedMapCache::cast(*isolate()->normalized_map_cache())
4421 .NormalizedMapCacheVerify(isolate());
4422 }
4423
4424 // The heap verifier can't deal with partially deserialized objects, so
4425 // disable it if a deserializer is active.
4426 // TODO(leszeks): Enable verification during deserialization, e.g. by only
4427 // blocklisting objects that are in a partially deserialized state.
4428 if (isolate()->has_active_deserializer()) return;
4429
4430 VerifySmisVisitor smis_visitor;
4431 IterateSmiRoots(&smis_visitor);
4432
4433 if (new_space_) new_space_->Verify(isolate());
4434
4435 old_space_->Verify(isolate(), &visitor);
4436 map_space_->Verify(isolate(), &visitor);
4437
4438 VerifyPointersVisitor no_dirty_regions_visitor(this);
4439 code_space_->Verify(isolate(), &no_dirty_regions_visitor);
4440
4441 lo_space_->Verify(isolate());
4442 code_lo_space_->Verify(isolate());
4443 if (new_lo_space_) new_lo_space_->Verify(isolate());
4444 VerifyStringTable(isolate());
4445 }
4446
void Heap::VerifyReadOnlyHeap() {
4448 CHECK(!read_only_space_->writable());
4449 read_only_space_->Verify(isolate());
4450 }
4451
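// Visitor that checks that every slot for which ShouldHaveBeenRecorded()
// returns true is present in the given untyped/typed slot sets, which were
// previously collected from the remembered sets.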
4452 class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
4453 public:
  SlotVerifyingVisitor(Isolate* isolate, std::set<Address>* untyped,
4455 std::set<std::pair<SlotType, Address>>* typed)
4456 : ObjectVisitorWithCageBases(isolate), untyped_(untyped), typed_(typed) {}
4457
4458 virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
4459
  void VisitPointers(HeapObject host, ObjectSlot start,
4461 ObjectSlot end) override {
4462 #ifdef DEBUG
4463 for (ObjectSlot slot = start; slot < end; ++slot) {
4464 Object obj = slot.load(cage_base());
4465 CHECK(!MapWord::IsPacked(obj.ptr()) || !HasWeakHeapObjectTag(obj));
4466 }
4467 #endif // DEBUG
4468 VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
4469 }
4470
  void VisitPointers(HeapObject host, MaybeObjectSlot start,
4472 MaybeObjectSlot end) final {
4473 for (MaybeObjectSlot slot = start; slot < end; ++slot) {
4474 if (ShouldHaveBeenRecorded(host, slot.load(cage_base()))) {
4475 CHECK_GT(untyped_->count(slot.address()), 0);
4476 }
4477 }
4478 }
4479
  void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
4481 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
4482 if (ShouldHaveBeenRecorded(
4483 host, MaybeObject::FromObject(slot.load(code_cage_base())))) {
4484 CHECK_GT(untyped_->count(slot.address()), 0);
4485 }
4486 }
4487
  void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4489 Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4490 if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
4491 CHECK(
4492 InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
4493 (rinfo->IsInConstantPool() &&
4494 InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
4495 }
4496 }
4497
  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4499 Object target = rinfo->target_object_no_host(cage_base());
4500 if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
4501 CHECK(
4502 InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
4503 InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
4504 InTypedSet(DATA_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
4505 (rinfo->IsInConstantPool() &&
4506 InTypedSet(COMPRESSED_OBJECT_SLOT,
4507 rinfo->constant_pool_entry_address())) ||
4508 (rinfo->IsInConstantPool() &&
4509 InTypedSet(FULL_OBJECT_SLOT, rinfo->constant_pool_entry_address())));
4510 }
4511 }
4512
4513 protected:
  bool InUntypedSet(ObjectSlot slot) {
4515 return untyped_->count(slot.address()) > 0;
4516 }
4517
4518 private:
  bool InTypedSet(SlotType type, Address slot) {
4520 return typed_->count(std::make_pair(type, slot)) > 0;
4521 }
4522 std::set<Address>* untyped_;
4523 std::set<std::pair<SlotType, Address>>* typed_;
4524 };
4525
4526 class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
4527 public:
  OldToNewSlotVerifyingVisitor(Isolate* isolate, std::set<Address>* untyped,
4529 std::set<std::pair<SlotType, Address>>* typed,
4530 EphemeronRememberedSet* ephemeron_remembered_set)
4531 : SlotVerifyingVisitor(isolate, untyped, typed),
4532 ephemeron_remembered_set_(ephemeron_remembered_set) {}
4533
  bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
4535 DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InYoungGeneration(target),
4536 Heap::InToPage(target));
4537 return target->IsStrongOrWeak() && Heap::InYoungGeneration(target) &&
4538 !Heap::InYoungGeneration(host);
4539 }
4540
  void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
4542 ObjectSlot target) override {
4543 VisitPointer(host, target);
4544 #ifdef ENABLE_MINOR_MC
4545 if (FLAG_minor_mc) return VisitPointer(host, target);
4546 #endif
4547 // Keys are handled separately and should never appear in this set.
4548 CHECK(!InUntypedSet(key));
4549 Object k = *key;
4550 if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
4551 EphemeronHashTable table = EphemeronHashTable::cast(host);
4552 auto it = ephemeron_remembered_set_->find(table);
4553 CHECK(it != ephemeron_remembered_set_->end());
4554 int slot_index =
4555 EphemeronHashTable::SlotToIndex(table.address(), key.address());
4556 InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
4557 CHECK(it->second.find(entry.as_int()) != it->second.end());
4558 }
4559 }
4560
4561 private:
4562 EphemeronRememberedSet* ephemeron_remembered_set_;
4563 };
4564
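// Collects all slots of the given remembered set type that were recorded for
// the address range [start, end) on |chunk|, split into untyped and typed
// slots, for use by the verification code below.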
4565 template <RememberedSetType direction>
void CollectSlots(MemoryChunk* chunk, Address start, Address end,
4567 std::set<Address>* untyped,
4568 std::set<std::pair<SlotType, Address>>* typed) {
4569 RememberedSet<direction>::Iterate(
4570 chunk,
4571 [start, end, untyped](MaybeObjectSlot slot) {
4572 if (start <= slot.address() && slot.address() < end) {
4573 untyped->insert(slot.address());
4574 }
4575 return KEEP_SLOT;
4576 },
4577 SlotSet::FREE_EMPTY_BUCKETS);
4578 if (direction == OLD_TO_NEW) {
4579 CHECK(chunk->SweepingDone());
4580 RememberedSetSweeping::Iterate(
4581 chunk,
4582 [start, end, untyped](MaybeObjectSlot slot) {
4583 if (start <= slot.address() && slot.address() < end) {
4584 untyped->insert(slot.address());
4585 }
4586 return KEEP_SLOT;
4587 },
4588 SlotSet::FREE_EMPTY_BUCKETS);
4589 }
4590 RememberedSet<direction>::IterateTyped(
4591 chunk, [=](SlotType type, Address slot) {
4592 if (start <= slot && slot < end) {
4593 typed->insert(std::make_pair(type, slot));
4594 }
4595 return KEEP_SLOT;
4596 });
4597 }
4598
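// Verifies that every old-to-new pointer in |object| has a matching entry in
// the remembered set of the page the object lives on.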
void Heap::VerifyRememberedSetFor(HeapObject object) {
4600 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
4601 DCHECK_IMPLIES(chunk->mutex() == nullptr, ReadOnlyHeap::Contains(object));
4602 // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
4603 base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
4604 chunk->mutex());
4605 Address start = object.address();
4606 Address end = start + object.Size();
4607 std::set<Address> old_to_new;
4608 std::set<std::pair<SlotType, Address>> typed_old_to_new;
4609 if (!InYoungGeneration(object)) {
4610 CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
4611 OldToNewSlotVerifyingVisitor visitor(isolate(), &old_to_new,
4612 &typed_old_to_new,
4613 &this->ephemeron_remembered_set_);
4614 object.IterateBody(&visitor);
4615 }
4616 // TODO(v8:11797): Add old to old slot set verification once all weak objects
4617 // have their own instance types and slots are recorded for all weak fields.
4618 }
4619 #endif
4620
4621 #ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
4623 MakeLocalHeapLabsIterable();
4624
4625 PagedSpaceIterator spaces(this);
4626 for (PagedSpace* space = spaces.Next(); space != nullptr;
4627 space = spaces.Next()) {
4628 space->VerifyCountersAfterSweeping(this);
4629 }
4630 }
4631
void Heap::VerifyCountersBeforeConcurrentSweeping() {
4633 PagedSpaceIterator spaces(this);
4634 for (PagedSpace* space = spaces.Next(); space != nullptr;
4635 space = spaces.Next()) {
4636 space->VerifyCountersBeforeConcurrentSweeping();
4637 }
4638 }
4639 #endif
4640
void Heap::ZapFromSpace() {
4642 if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
4643 for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
4644 memory_allocator()->ZapBlock(page->area_start(),
4645 page->HighWaterMark() - page->area_start(),
4646 ZapValue());
4647 }
4648 }
4649
void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
4651 #ifdef DEBUG
4652 DCHECK(IsAligned(start_address, kIntSize));
4653 for (int i = 0; i < size_in_bytes / kIntSize; i++) {
4654 Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
4655 }
4656 #endif
4657 }
4658
4659 // TODO(ishell): move builtin accessors out from Heap.
Code Heap::builtin(Builtin builtin) {
4661 DCHECK(Builtins::IsBuiltinId(builtin));
4662 return Code::cast(
4663 Object(isolate()->builtin_table()[static_cast<int>(builtin)]));
4664 }
4665
Address Heap::builtin_address(Builtin builtin) {
4667 const int index = Builtins::ToInt(builtin);
4668 DCHECK(Builtins::IsBuiltinId(builtin) || index == Builtins::kBuiltinCount);
4669 // Note: Must return an address within the full builtin_table for
4670 // IterateBuiltins to work.
4671 return reinterpret_cast<Address>(&isolate()->builtin_table()[index]);
4672 }
4673
Address Heap::builtin_tier0_address(Builtin builtin) {
4675 const int index = static_cast<int>(builtin);
4676 DCHECK(Builtins::IsBuiltinId(builtin) || index == Builtins::kBuiltinCount);
4677 return reinterpret_cast<Address>(
4678 &isolate()->isolate_data()->builtin_tier0_table()[index]);
4679 }
4680
void Heap::set_builtin(Builtin builtin, Code code) {
4682 DCHECK(Builtins::IsBuiltinId(builtin));
4683 DCHECK(Internals::HasHeapObjectTag(code.ptr()));
4684 // The given builtin may be uninitialized thus we cannot check its type here.
4685 isolate()->builtin_table()[Builtins::ToInt(builtin)] = code.ptr();
4686 }
4687
void Heap::IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
4689 DCHECK(!options.contains(SkipRoot::kWeak));
4690
4691 if (!options.contains(SkipRoot::kOldGeneration) &&
4692 !options.contains(SkipRoot::kUnserializable)) {
4693 // Do not visit for serialization, since the string table is custom
4694 // serialized. Also do not visit if we are skipping old generation.
4695 isolate()->string_table()->IterateElements(v);
4696 }
4697 v->Synchronize(VisitorSynchronization::kStringTable);
4698 if (!options.contains(SkipRoot::kExternalStringTable) &&
4699 !options.contains(SkipRoot::kUnserializable)) {
4700 // Scavenge collections have special processing for this.
4701 // Do not visit for serialization, since the external string table will
4702 // be populated from scratch upon deserialization.
4703 external_string_table_.IterateAll(v);
4704 }
4705 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4706 }
4707
void Heap::IterateSmiRoots(RootVisitor* v) {
4709 // Acquire execution access since we are going to read stack limit values.
4710 ExecutionAccess access(isolate());
4711 v->VisitRootPointers(Root::kSmiRootList, nullptr,
4712 roots_table().smi_roots_begin(),
4713 roots_table().smi_roots_end());
4714 v->Synchronize(VisitorSynchronization::kSmiRootList);
4715 }
4716
4717 // We cannot avoid stale handles to left-trimmed objects, but can only make
4718 // sure all handles still needed are updated. Filter out a stale pointer
4719 // and clear the slot to allow post processing of handles (needed because
4720 // the sweeper might actually free the underlying page).
4721 class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
4722 public:
  explicit ClearStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
4724 USE(heap_);
4725 }
4726
  void VisitRootPointer(Root root, const char* description,
4728 FullObjectSlot p) override {
4729 FixHandle(p);
4730 }
4731
  void VisitRootPointers(Root root, const char* description,
4733 FullObjectSlot start, FullObjectSlot end) override {
4734 for (FullObjectSlot p = start; p < end; ++p) {
4735 FixHandle(p);
4736 }
4737 }
4738
4739 private:
  inline void FixHandle(FullObjectSlot p) {
4741 if (!(*p).IsHeapObject()) return;
4742 HeapObject current = HeapObject::cast(*p);
4743 if (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
4744 current.IsFreeSpaceOrFiller()) {
4745 #ifdef DEBUG
4746 // We need to find a FixedArrayBase map after walking the fillers.
4747 while (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
4748 current.IsFreeSpaceOrFiller()) {
4749 Address next = current.ptr();
4750 if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
4751 next += kTaggedSize;
4752 } else if (current.map() ==
4753 ReadOnlyRoots(heap_).two_pointer_filler_map()) {
4754 next += 2 * kTaggedSize;
4755 } else {
4756 next += current.Size();
4757 }
4758 current = HeapObject::cast(Object(next));
4759 }
4760 DCHECK(current.map_word(kRelaxedLoad).IsForwardingAddress() ||
4761 current.IsFixedArrayBase());
4762 #endif // DEBUG
4763 p.store(Smi::zero());
4764 }
4765 }
4766
4767 Heap* heap_;
4768 };
4769
void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
4771 v->VisitRootPointers(Root::kStrongRootList, nullptr,
4772 roots_table().strong_roots_begin(),
4773 roots_table().strong_roots_end());
4774 v->Synchronize(VisitorSynchronization::kStrongRootList);
4775
4776 isolate_->bootstrapper()->Iterate(v);
4777 v->Synchronize(VisitorSynchronization::kBootstrapper);
4778 Relocatable::Iterate(isolate_, v);
4779 v->Synchronize(VisitorSynchronization::kRelocatable);
4780 isolate_->debug()->Iterate(v);
4781 v->Synchronize(VisitorSynchronization::kDebug);
4782
4783 isolate_->compilation_cache()->Iterate(v);
4784 v->Synchronize(VisitorSynchronization::kCompilationCache);
4785
4786 if (!options.contains(SkipRoot::kOldGeneration)) {
4787 IterateBuiltins(v);
4788 v->Synchronize(VisitorSynchronization::kBuiltins);
4789 }
4790
4791 // Iterate over pointers being held by inactive threads.
4792 isolate_->thread_manager()->Iterate(v);
4793 v->Synchronize(VisitorSynchronization::kThreadManager);
4794
4795 // Visitors in this block only run when not serializing. These include:
4796 //
4797 // - Thread-local and stack.
4798 // - Handles.
4799 // - Microtasks.
4800 // - The startup object cache.
4801 //
  // When creating a real startup snapshot, these areas are expected to be empty.
4803 // It is also possible to create a snapshot of a *running* isolate for testing
4804 // purposes. In this case, these areas are likely not empty and will simply be
4805 // skipped.
4806 //
4807 // The general guideline for adding visitors to this section vs. adding them
4808 // above is that non-transient heap state is always visited, transient heap
4809 // state is visited only when not serializing.
4810 if (!options.contains(SkipRoot::kUnserializable)) {
4811 if (!options.contains(SkipRoot::kGlobalHandles)) {
4812 if (options.contains(SkipRoot::kWeak)) {
4813 if (options.contains(SkipRoot::kOldGeneration)) {
4814 // Skip handles that are either weak or old.
4815 isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
4816 } else {
4817 // Skip handles that are weak.
4818 isolate_->global_handles()->IterateStrongRoots(v);
4819 }
4820 } else {
4821 // Do not skip weak handles.
4822 if (options.contains(SkipRoot::kOldGeneration)) {
4823 // Skip handles that are old.
4824 isolate_->global_handles()->IterateAllYoungRoots(v);
4825 } else {
4826 // Do not skip any handles.
4827 isolate_->global_handles()->IterateAllRoots(v);
4828 }
4829 }
4830 }
4831 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4832
4833 if (!options.contains(SkipRoot::kStack)) {
4834 IterateStackRoots(v);
4835 v->Synchronize(VisitorSynchronization::kStackRoots);
4836 }
4837
4838 #ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
4839 // Iterate over main thread handles in handle scopes.
4840 if (!options.contains(SkipRoot::kMainThreadHandles)) {
4841 // Clear main thread handles with stale references to left-trimmed
4842 // objects. The GC would crash on such stale references.
4843 ClearStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
4844 isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
4845
4846 isolate_->handle_scope_implementer()->Iterate(v);
4847 }
4848 #endif
4849
4850 // Iterate local handles for all local heaps.
4851 safepoint_->Iterate(v);
4852
4853 // Iterates all persistent handles.
4854 isolate_->persistent_handles_list()->Iterate(v, isolate_);
4855
4856 v->Synchronize(VisitorSynchronization::kHandleScope);
4857
4858 if (options.contains(SkipRoot::kOldGeneration)) {
4859 isolate_->eternal_handles()->IterateYoungRoots(v);
4860 } else {
4861 isolate_->eternal_handles()->IterateAllRoots(v);
4862 }
4863 v->Synchronize(VisitorSynchronization::kEternalHandles);
4864
4865 // Iterate over pending Microtasks stored in MicrotaskQueues.
4866 MicrotaskQueue* default_microtask_queue =
4867 isolate_->default_microtask_queue();
4868 if (default_microtask_queue) {
4869 MicrotaskQueue* microtask_queue = default_microtask_queue;
4870 do {
4871 microtask_queue->IterateMicrotasks(v);
4872 microtask_queue = microtask_queue->next();
4873 } while (microtask_queue != default_microtask_queue);
4874 }
4875
4876 // Iterate over other strong roots (currently only identity maps and
4877 // deoptimization entries).
4878 for (StrongRootsEntry* current = strong_roots_head_; current;
4879 current = current->next) {
4880 v->VisitRootPointers(Root::kStrongRoots, current->label, current->start,
4881 current->end);
4882 }
4883 v->Synchronize(VisitorSynchronization::kStrongRoots);
4884
4885 // Iterate over the startup object cache unless serializing or
4886 // deserializing.
4887 SerializerDeserializer::Iterate(isolate_, v);
4888 v->Synchronize(VisitorSynchronization::kStartupObjectCache);
4889 }
4890
4891 if (!options.contains(SkipRoot::kWeak)) {
4892 IterateWeakRoots(v, options);
4893 }
4894 }
4895
void Heap::IterateRootsIncludingClients(RootVisitor* v,
4897 base::EnumSet<SkipRoot> options) {
4898 IterateRoots(v, options);
4899
4900 isolate()->IterateClientIsolates([v, options](Isolate* client) {
4901 client->heap()->IterateRoots(v, options);
4902 });
4903 }
4904
void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
4906 isolate_->global_handles()->IterateWeakRoots(v);
4907 }
4908
void Heap::IterateBuiltins(RootVisitor* v) {
4910 for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
4911 ++builtin) {
4912 v->VisitRootPointer(Root::kBuiltins, Builtins::name(builtin),
4913 FullObjectSlot(builtin_address(builtin)));
4914 }
4915
4916 for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLastTier0;
4917 ++builtin) {
4918 v->VisitRootPointer(Root::kBuiltins, Builtins::name(builtin),
4919 FullObjectSlot(builtin_tier0_address(builtin)));
4920 }
4921
4922 // The entry table doesn't need to be updated since all builtins are embedded.
4923 STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
4924 }
4925
void Heap::IterateStackRoots(RootVisitor* v) {
4927 isolate_->Iterate(v);
4928 isolate_->global_handles()->IterateStrongStackRoots(v);
4929 }
4930
4931 namespace {
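// Derives the global (V8 + embedder) memory limit from a V8 heap size using a
// fixed ratio, saturating at the largest representable size_t.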
size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
4933 const size_t kGlobalMemoryToV8Ratio = 2;
4934 return std::min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
4935 static_cast<uint64_t>(v8_size) * kGlobalMemoryToV8Ratio);
4936 }
4937 } // anonymous namespace
4938
void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
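  // Each limit below is derived from the embedder-provided constraints first
  // and is then overridden by the corresponding command-line flags when set.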
4940 // Initialize max_semi_space_size_.
4941 {
4942 max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
4943 if (constraints.max_young_generation_size_in_bytes() > 0) {
4944 max_semi_space_size_ = SemiSpaceSizeFromYoungGenerationSize(
4945 constraints.max_young_generation_size_in_bytes());
4946 }
4947 if (FLAG_max_semi_space_size > 0) {
4948 max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
4949 } else if (FLAG_max_heap_size > 0) {
4950 size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
4951 size_t young_generation_size, old_generation_size;
4952 if (FLAG_max_old_space_size > 0) {
4953 old_generation_size = static_cast<size_t>(FLAG_max_old_space_size) * MB;
4954 young_generation_size = max_heap_size > old_generation_size
4955 ? max_heap_size - old_generation_size
4956 : 0;
4957 } else {
4958 GenerationSizesFromHeapSize(max_heap_size, &young_generation_size,
4959 &old_generation_size);
4960 }
4961 max_semi_space_size_ =
4962 SemiSpaceSizeFromYoungGenerationSize(young_generation_size);
4963 }
4964 if (FLAG_stress_compaction) {
4965 // This will cause more frequent GCs when stressing.
4966 max_semi_space_size_ = MB;
4967 }
    // TODO(dinfuehr): Rounding to a power of 2 is no longer needed. Remove it.
4969 max_semi_space_size_ =
4970 static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
4971 static_cast<uint64_t>(max_semi_space_size_)));
4972 max_semi_space_size_ = std::max({max_semi_space_size_, kMinSemiSpaceSize});
4973 max_semi_space_size_ = RoundDown<Page::kPageSize>(max_semi_space_size_);
4974 }
4975
4976 // Initialize max_old_generation_size_ and max_global_memory_.
4977 {
4978 size_t max_old_generation_size = 700ul * (kSystemPointerSize / 4) * MB;
4979 if (constraints.max_old_generation_size_in_bytes() > 0) {
4980 max_old_generation_size = constraints.max_old_generation_size_in_bytes();
4981 }
4982 if (FLAG_max_old_space_size > 0) {
4983 max_old_generation_size =
4984 static_cast<size_t>(FLAG_max_old_space_size) * MB;
4985 } else if (FLAG_max_heap_size > 0) {
4986 size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
4987 size_t young_generation_size =
4988 YoungGenerationSizeFromSemiSpaceSize(max_semi_space_size_);
4989 max_old_generation_size = max_heap_size > young_generation_size
4990 ? max_heap_size - young_generation_size
4991 : 0;
4992 }
4993 max_old_generation_size =
4994 std::max(max_old_generation_size, MinOldGenerationSize());
4995 max_old_generation_size = std::min(max_old_generation_size,
4996 AllocatorLimitOnMaxOldGenerationSize());
4997 max_old_generation_size =
4998 RoundDown<Page::kPageSize>(max_old_generation_size);
4999
5000 max_global_memory_size_ =
5001 GlobalMemorySizeFromV8Size(max_old_generation_size);
5002 set_max_old_generation_size(max_old_generation_size);
5003 }
5004
5005 CHECK_IMPLIES(FLAG_max_heap_size > 0,
5006 FLAG_max_semi_space_size == 0 || FLAG_max_old_space_size == 0);
5007
5008 // Initialize initial_semispace_size_.
5009 {
5010 initial_semispace_size_ = kMinSemiSpaceSize;
5011 if (max_semi_space_size_ == kMaxSemiSpaceSize) {
5012 // Start with at least 1*MB semi-space on machines with a lot of memory.
5013 initial_semispace_size_ =
5014 std::max(initial_semispace_size_, static_cast<size_t>(1 * MB));
5015 }
5016 if (constraints.initial_young_generation_size_in_bytes() > 0) {
5017 initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(
5018 constraints.initial_young_generation_size_in_bytes());
5019 }
5020 if (FLAG_initial_heap_size > 0) {
5021 size_t young_generation, old_generation;
5022 Heap::GenerationSizesFromHeapSize(
5023 static_cast<size_t>(FLAG_initial_heap_size) * MB, &young_generation,
5024 &old_generation);
5025 initial_semispace_size_ =
5026 SemiSpaceSizeFromYoungGenerationSize(young_generation);
5027 }
5028 if (FLAG_min_semi_space_size > 0) {
5029 initial_semispace_size_ =
5030 static_cast<size_t>(FLAG_min_semi_space_size) * MB;
5031 }
5032 initial_semispace_size_ =
5033 std::min(initial_semispace_size_, max_semi_space_size_);
5034 initial_semispace_size_ =
5035 RoundDown<Page::kPageSize>(initial_semispace_size_);
5036 }
5037
5038 if (FLAG_lazy_new_space_shrinking) {
5039 initial_semispace_size_ = max_semi_space_size_;
5040 }
5041
5042 // Initialize initial_old_space_size_.
5043 {
5044 initial_old_generation_size_ = kMaxInitialOldGenerationSize;
5045 if (constraints.initial_old_generation_size_in_bytes() > 0) {
5046 initial_old_generation_size_ =
5047 constraints.initial_old_generation_size_in_bytes();
5048 old_generation_size_configured_ = true;
5049 }
5050 if (FLAG_initial_heap_size > 0) {
5051 size_t initial_heap_size =
5052 static_cast<size_t>(FLAG_initial_heap_size) * MB;
5053 size_t young_generation_size =
5054 YoungGenerationSizeFromSemiSpaceSize(initial_semispace_size_);
5055 initial_old_generation_size_ =
5056 initial_heap_size > young_generation_size
5057 ? initial_heap_size - young_generation_size
5058 : 0;
5059 old_generation_size_configured_ = true;
5060 }
5061 if (FLAG_initial_old_space_size > 0) {
5062 initial_old_generation_size_ =
5063 static_cast<size_t>(FLAG_initial_old_space_size) * MB;
5064 old_generation_size_configured_ = true;
5065 }
5066 initial_old_generation_size_ =
5067 std::min(initial_old_generation_size_, max_old_generation_size() / 2);
5068 initial_old_generation_size_ =
5069 RoundDown<Page::kPageSize>(initial_old_generation_size_);
5070 }
5071
5072 if (old_generation_size_configured_) {
5073 // If the embedder pre-configures the initial old generation size,
5074 // then allow V8 to skip full GCs below that threshold.
5075 min_old_generation_size_ = initial_old_generation_size_;
5076 min_global_memory_size_ =
5077 GlobalMemorySizeFromV8Size(min_old_generation_size_);
5078 }
5079
5080 if (FLAG_semi_space_growth_factor < 2) {
5081 FLAG_semi_space_growth_factor = 2;
5082 }
5083
5084 set_old_generation_allocation_limit(initial_old_generation_size_);
5085 global_allocation_limit_ =
5086 GlobalMemorySizeFromV8Size(old_generation_allocation_limit());
5087 initial_max_old_generation_size_ = max_old_generation_size();
5088
5089 // We rely on being able to allocate new arrays in paged spaces.
5090 DCHECK(kMaxRegularHeapObjectSize >=
5091 (JSArray::kHeaderSize +
5092 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
5093 AllocationMemento::kSize));
5094
5095 code_range_size_ = constraints.code_range_size_in_bytes();
5096
5097 configured_ = true;
5098 }
5099
void Heap::AddToRingBuffer(const char* string) {
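  // Copy as much of the string as fits before the end of the buffer; wrap any
  // remainder around to the front and remember that the buffer has filled up.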
5101 size_t first_part =
5102 std::min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
5103 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
5104 ring_buffer_end_ += first_part;
5105 if (first_part < strlen(string)) {
5106 ring_buffer_full_ = true;
5107 size_t second_part = strlen(string) - first_part;
5108 memcpy(trace_ring_buffer_, string + first_part, second_part);
5109 ring_buffer_end_ = second_part;
5110 }
5111 }
5112
void Heap::GetFromRingBuffer(char* buffer) {
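  // Reassemble the trace in chronological order: once the buffer has wrapped,
  // the oldest data starts at ring_buffer_end_.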
5114 size_t copied = 0;
5115 if (ring_buffer_full_) {
5116 copied = kTraceRingBufferSize - ring_buffer_end_;
5117 memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
5118 }
5119 memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
5120 }
5121
void Heap::ConfigureHeapDefault() {
5123 v8::ResourceConstraints constraints;
5124 ConfigureHeap(constraints);
5125 }
5126
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5128 *stats->start_marker = HeapStats::kStartMarker;
5129 *stats->end_marker = HeapStats::kEndMarker;
5130 *stats->ro_space_size = read_only_space_->Size();
5131 *stats->ro_space_capacity = read_only_space_->Capacity();
5132 *stats->new_space_size = NewSpaceSize();
5133 *stats->new_space_capacity = NewSpaceCapacity();
5134 *stats->old_space_size = old_space_->SizeOfObjects();
5135 *stats->old_space_capacity = old_space_->Capacity();
5136 *stats->code_space_size = code_space_->SizeOfObjects();
5137 *stats->code_space_capacity = code_space_->Capacity();
5138 *stats->map_space_size = map_space_->SizeOfObjects();
5139 *stats->map_space_capacity = map_space_->Capacity();
5140 *stats->lo_space_size = lo_space_->Size();
5141 *stats->code_lo_space_size = code_lo_space_->Size();
5142 isolate_->global_handles()->RecordStats(stats);
5143 *stats->memory_allocator_size = memory_allocator()->Size();
5144 *stats->memory_allocator_capacity =
5145 memory_allocator()->Size() + memory_allocator()->Available();
5146 *stats->os_error = base::OS::GetLastError();
5147 // TODO(leszeks): Include the string table in both current and peak usage.
5148 *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
5149 *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
5150 if (take_snapshot) {
5151 HeapObjectIterator iterator(this);
5152 for (HeapObject obj = iterator.Next(); !obj.is_null();
5153 obj = iterator.Next()) {
5154 InstanceType type = obj.map().instance_type();
5155 DCHECK(0 <= type && type <= LAST_TYPE);
5156 stats->objects_per_type[type]++;
5157 stats->size_per_type[type] += obj.Size();
5158 }
5159 }
5160 if (stats->last_few_messages != nullptr)
5161 GetFromRingBuffer(stats->last_few_messages);
5162 }
5163
size_t Heap::OldGenerationSizeOfObjects() {
5165 PagedSpaceIterator spaces(this);
5166 size_t total = 0;
5167 for (PagedSpace* space = spaces.Next(); space != nullptr;
5168 space = spaces.Next()) {
5169 total += space->SizeOfObjects();
5170 }
5171 return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
5172 }
5173
size_t Heap::EmbedderSizeOfObjects() const {
5175 return local_embedder_heap_tracer()
5176 ? local_embedder_heap_tracer()->used_size()
5177 : 0;
5178 }
5179
size_t Heap::GlobalSizeOfObjects() {
5181 return OldGenerationSizeOfObjects() + EmbedderSizeOfObjects();
5182 }
5183
uint64_t Heap::AllocatedExternalMemorySinceMarkCompact() {
5185 return external_memory_.AllocatedSinceMarkCompact();
5186 }
5187
bool Heap::AllocationLimitOvershotByLargeMargin() {
5189 // This guards against too eager finalization in small heaps.
5190 // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
5191 constexpr size_t kMarginForSmallHeaps = 32u * MB;
5192
5193 uint64_t size_now =
5194 OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
5195
5196 const size_t v8_overshoot = old_generation_allocation_limit() < size_now
5197 ? size_now - old_generation_allocation_limit()
5198 : 0;
5199 const size_t global_overshoot =
5200 global_allocation_limit_ < GlobalSizeOfObjects()
5201 ? GlobalSizeOfObjects() - global_allocation_limit_
5202 : 0;
5203
5204 // Bail out if the V8 and global sizes are still below their respective
5205 // limits.
5206 if (v8_overshoot == 0 && global_overshoot == 0) {
5207 return false;
5208 }
5209
5210 // Overshoot margin is 50% of allocation limit or half-way to the max heap
5211 // with special handling of small heaps.
5212 const size_t v8_margin = std::min(
5213 std::max(old_generation_allocation_limit() / 2, kMarginForSmallHeaps),
5214 (max_old_generation_size() - old_generation_allocation_limit()) / 2);
5215 const size_t global_margin =
5216 std::min(std::max(global_allocation_limit_ / 2, kMarginForSmallHeaps),
5217 (max_global_memory_size_ - global_allocation_limit_) / 2);
5218
5219 return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
5220 }
5221
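// While the embedder signals a RAIL "load" phase, GC work is deferred unless
// the allocation limit was overshot by a large margin or the load window
// (kMaxLoadTimeMs after LoadStartTimeMs()) has expired.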
bool Heap::ShouldOptimizeForLoadTime() {
5223 return isolate()->rail_mode() == PERFORMANCE_LOAD &&
5224 !AllocationLimitOvershotByLargeMargin() &&
5225 MonotonicallyIncreasingTimeInMs() <
5226 isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
5227 }
5228
// This predicate is called when an old generation space cannot allocate from
5230 // the free list and is about to add a new page. Returning false will cause a
5231 // major GC. It happens when the old generation allocation limit is reached and
5232 // - either we need to optimize for memory usage,
5233 // - or the incremental marking is not in progress and we cannot start it.
bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
5235 if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
5236 // We reached the old generation allocation limit.
5237
5238 // Background threads need to be allowed to allocate without GC after teardown
5239 // was initiated.
5240 if (gc_state() == TEAR_DOWN) return true;
5241
5242 // If main thread is parked, it can't perform the GC. Fix the deadlock by
5243 // allowing the allocation.
5244 if (IsMainThreadParked(local_heap)) return true;
5245
  // Make it more likely that a retried allocation on a background thread succeeds.
5247 if (IsRetryOfFailedAllocation(local_heap)) return true;
5248
  // A background thread requested a GC; the allocation should fail.
5250 if (CollectionRequested()) return false;
5251
5252 if (ShouldOptimizeForMemoryUsage()) return false;
5253
5254 if (ShouldOptimizeForLoadTime()) return true;
5255
5256 if (incremental_marking()->NeedsFinalization()) {
5257 return !AllocationLimitOvershotByLargeMargin();
5258 }
5259
5260 if (incremental_marking()->IsStopped() &&
5261 IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
5262 // We cannot start incremental marking.
5263 return false;
5264 }
5265 return true;
5266 }
5267
bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
5269 if (!local_heap) return false;
5270 return local_heap->allocation_failed_;
5271 }
5272
bool Heap::IsMainThreadParked(LocalHeap* local_heap) {
5274 if (!local_heap) return false;
5275 return local_heap->main_thread_parked_;
5276 }
5277
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
5279 if (ShouldReduceMemory() || FLAG_stress_compaction) {
5280 return Heap::HeapGrowingMode::kMinimal;
5281 }
5282
5283 if (ShouldOptimizeForMemoryUsage()) {
5284 return Heap::HeapGrowingMode::kConservative;
5285 }
5286
5287 if (memory_reducer()->ShouldGrowHeapSlowly()) {
5288 return Heap::HeapGrowingMode::kSlow;
5289 }
5290
5291 return Heap::HeapGrowingMode::kDefault;
5292 }
5293
base::Optional<size_t> Heap::GlobalMemoryAvailable() {
5295 if (!UseGlobalMemoryScheduling()) return {};
5296
5297 size_t global_size = GlobalSizeOfObjects();
5298
5299 if (global_size < global_allocation_limit_)
5300 return global_allocation_limit_ - global_size;
5301
5302 return 0;
5303 }
5304
double Heap::PercentToOldGenerationLimit() {
5306 double size_at_gc = old_generation_size_at_last_gc_;
5307 double size_now =
5308 OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
5309 double current_bytes = size_now - size_at_gc;
5310 double total_bytes = old_generation_allocation_limit() - size_at_gc;
5311 return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
5312 }
5313
double Heap::PercentToGlobalMemoryLimit() {
5315 double size_at_gc = old_generation_size_at_last_gc_;
5316 double size_now =
5317 OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
5318 double current_bytes = size_now - size_at_gc;
5319 double total_bytes = old_generation_allocation_limit() - size_at_gc;
5320 return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
5321 }
5322
5323 // - kNoLimit means that either incremental marking is disabled or it is too
5324 // early to start incremental marking.
5325 // - kSoftLimit means that incremental marking should be started soon.
5326 // - kHardLimit means that incremental marking should be started immediately.
5327 // - kFallbackForEmbedderLimit means that incremental marking should be
5328 // started as soon as the embedder does not allocate with high throughput
5329 // anymore.
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
5331 // Code using an AlwaysAllocateScope assumes that the GC state does not
5332 // change; that implies that no marking steps must be performed.
5333 if (!incremental_marking()->CanBeActivated() || always_allocate()) {
5334 // Incremental marking is disabled or it is too early to start.
5335 return IncrementalMarkingLimit::kNoLimit;
5336 }
5337 if (FLAG_stress_incremental_marking) {
5338 return IncrementalMarkingLimit::kHardLimit;
5339 }
5340 if (incremental_marking()->IsBelowActivationThresholds()) {
    // Still below the activation thresholds; it is too early to start marking.
5342 return IncrementalMarkingLimit::kNoLimit;
5343 }
5344 if (ShouldStressCompaction() || HighMemoryPressure()) {
5345 // If there is high memory pressure or stress testing is enabled, then
5346 // start marking immediately.
5347 return IncrementalMarkingLimit::kHardLimit;
5348 }
5349
5350 if (FLAG_stress_marking > 0) {
5351 int current_percent = static_cast<int>(
5352 std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
5353 if (current_percent > 0) {
5354 if (FLAG_trace_stress_marking) {
5355 isolate()->PrintWithTimestamp(
5356 "[IncrementalMarking] %d%% of the memory limit reached\n",
5357 current_percent);
5358 }
5359 if (FLAG_fuzzer_gc_analysis) {
5360 // Skips values >=100% since they already trigger marking.
5361 if (current_percent < 100) {
5362 max_marking_limit_reached_ =
5363 std::max<double>(max_marking_limit_reached_, current_percent);
5364 }
5365 } else if (current_percent >= stress_marking_percentage_) {
5366 stress_marking_percentage_ = NextStressMarkingLimit();
5367 return IncrementalMarkingLimit::kHardLimit;
5368 }
5369 }
5370 }
5371
5372 if (FLAG_incremental_marking_soft_trigger > 0 ||
5373 FLAG_incremental_marking_hard_trigger > 0) {
5374 int current_percent = static_cast<int>(
5375 std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
5376 if (current_percent > FLAG_incremental_marking_hard_trigger &&
5377 FLAG_incremental_marking_hard_trigger > 0) {
5378 return IncrementalMarkingLimit::kHardLimit;
5379 }
5380 if (current_percent > FLAG_incremental_marking_soft_trigger &&
5381 FLAG_incremental_marking_soft_trigger > 0) {
5382 return IncrementalMarkingLimit::kSoftLimit;
5383 }
5384 return IncrementalMarkingLimit::kNoLimit;
5385 }
5386
5387 size_t old_generation_space_available = OldGenerationSpaceAvailable();
5388 const base::Optional<size_t> global_memory_available =
5389 GlobalMemoryAvailable();
5390
5391 if (old_generation_space_available > NewSpaceCapacity() &&
5392 (!global_memory_available ||
5393 global_memory_available > NewSpaceCapacity())) {
5394 if (local_embedder_heap_tracer()->InUse() &&
5395 !old_generation_size_configured_ && gc_count_ == 0) {
5396 // At this point the embedder memory is above the activation
5397 // threshold. No GC happened so far and it's thus unlikely to get a
5398 // configured heap any time soon. Start a memory reducer in this case
5399 // which will wait until the allocation rate is low to trigger garbage
5400 // collection.
5401 return IncrementalMarkingLimit::kFallbackForEmbedderLimit;
5402 }
5403 return IncrementalMarkingLimit::kNoLimit;
5404 }
5405 if (ShouldOptimizeForMemoryUsage()) {
5406 return IncrementalMarkingLimit::kHardLimit;
5407 }
5408 if (ShouldOptimizeForLoadTime()) {
5409 return IncrementalMarkingLimit::kNoLimit;
5410 }
5411 if (old_generation_space_available == 0) {
5412 return IncrementalMarkingLimit::kHardLimit;
5413 }
5414 if (global_memory_available && *global_memory_available == 0) {
5415 return IncrementalMarkingLimit::kHardLimit;
5416 }
5417 return IncrementalMarkingLimit::kSoftLimit;
5418 }
5419
bool Heap::ShouldStressCompaction() const {
5421 return FLAG_stress_compaction && (gc_count_ & 1) != 0;
5422 }
5423
void Heap::EnableInlineAllocation() {
5425 if (!inline_allocation_disabled_) return;
5426 inline_allocation_disabled_ = false;
5427
5428 // Update inline allocation limit for new space.
5429 if (new_space()) {
5430 new_space()->AdvanceAllocationObservers();
5431 new_space()->UpdateInlineAllocationLimit(0);
5432 }
5433 }
5434
void Heap::DisableInlineAllocation() {
5436 if (inline_allocation_disabled_) return;
5437 inline_allocation_disabled_ = true;
5438
5439 // Update inline allocation limit for new space.
5440 if (new_space()) {
5441 new_space()->UpdateInlineAllocationLimit(0);
5442 }
5443
5444 // Update inline allocation limit for old spaces.
5445 PagedSpaceIterator spaces(this);
5446 CodeSpaceMemoryModificationScope modification_scope(this);
5447 for (PagedSpace* space = spaces.Next(); space != nullptr;
5448 space = spaces.Next()) {
5449 base::MutexGuard guard(space->mutex());
5450 space->FreeLinearAllocationArea();
5451 }
5452 }
5453
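// Retries a failed allocation after at most two garbage collections and
// returns an empty HeapObject if the allocation still cannot be satisfied.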
HeapObject Heap::AllocateRawWithLightRetrySlowPath(
5455 int size, AllocationType allocation, AllocationOrigin origin,
5456 AllocationAlignment alignment) {
5457 HeapObject result;
5458 AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
5459 if (alloc.To(&result)) {
5460 // DCHECK that the successful allocation is not "exception". The one
5461 // exception to this is when allocating the "exception" object itself, in
5462 // which case this must be an ROSpace allocation and the exception object
5463 // in the roots has to be unset.
5464 DCHECK((CanAllocateInReadOnlySpace() &&
5465 allocation == AllocationType::kReadOnly &&
5466 ReadOnlyRoots(this).unchecked_exception() == Smi::zero()) ||
5467 result != ReadOnlyRoots(this).exception());
5468 return result;
5469 }
  // Try two GCs before panicking. Allocation in new space will almost always succeed.
5471 for (int i = 0; i < 2; i++) {
5472 if (IsSharedAllocationType(allocation)) {
5473 CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
5474 } else {
5475 CollectGarbage(alloc.RetrySpace(),
5476 GarbageCollectionReason::kAllocationFailure);
5477 }
5478 alloc = AllocateRaw(size, allocation, origin, alignment);
5479 if (alloc.To(&result)) {
5480 DCHECK(result != ReadOnlyRoots(this).exception());
5481 return result;
5482 }
5483 }
5484 return HeapObject();
5485 }
5486
HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
5488 int size, AllocationType allocation, AllocationOrigin origin,
5489 AllocationAlignment alignment) {
5490 AllocationResult alloc;
5491 HeapObject result =
5492 AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
5493 if (!result.is_null()) return result;
5494
5495 isolate()->counters()->gc_last_resort_from_handles()->Increment();
5496 if (IsSharedAllocationType(allocation)) {
5497 CollectSharedGarbage(GarbageCollectionReason::kLastResort);
5498 } else {
5499 CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
5500 }
5501
5502 {
5503 AlwaysAllocateScope scope(this);
5504 alloc = AllocateRaw(size, allocation, origin, alignment);
5505 }
5506 if (alloc.To(&result)) {
5507 DCHECK(result != ReadOnlyRoots(this).exception());
5508 return result;
5509 }
5510 // TODO(1181417): Fix this.
5511 FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
5512 }
5513
void Heap::SetUp() {
5515 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
5516 allocation_timeout_ = NextAllocationTimeout();
5517 #endif
5518
5519 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
5520 tp_heap_ = third_party_heap::Heap::New(isolate());
5521 #endif
5522
5523 // Initialize heap spaces and initial maps and objects.
5524 //
5525 // If the heap is not yet configured (e.g. through the API), configure it.
5526 // Configuration is based on the flags new-space-size (really the semispace
5527 // size) and old-space-size if set or the initial values of semispace_size_
5528 // and old_generation_size_ otherwise.
5529 if (!configured_) ConfigureHeapDefault();
5530
5531 mmap_region_base_ =
5532 reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
5533 ~kMmapRegionMask;
5534
5535 v8::PageAllocator* code_page_allocator;
5536 if (isolate_->RequiresCodeRange() || code_range_size_ != 0) {
5537 const size_t requested_size =
5538 code_range_size_ == 0 ? kMaximalCodeRangeSize : code_range_size_;
5539 // When a target requires the code range feature, we put all code objects in
5540 // a contiguous range of virtual address space, so that they can call each
5541 // other with near calls.
5542 if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
5543 // When sharing a pointer cage among Isolates, also share the
5544 // CodeRange. isolate_->page_allocator() is the process-wide pointer
5545 // compression cage's PageAllocator.
5546 code_range_ = CodeRange::EnsureProcessWideCodeRange(
5547 isolate_->page_allocator(), requested_size);
5548 } else {
5549 code_range_ = std::make_shared<CodeRange>();
5550 if (!code_range_->InitReservation(isolate_->page_allocator(),
5551 requested_size)) {
5552 V8::FatalProcessOutOfMemory(
5553 isolate_, "Failed to reserve virtual memory for CodeRange");
5554 }
5555 }
5556
5557 LOG(isolate_,
5558 NewEvent("CodeRange",
5559 reinterpret_cast<void*>(code_range_->reservation()->address()),
5560 code_range_size_));
5561
5562 isolate_->AddCodeRange(code_range_->reservation()->region().begin(),
5563 code_range_->reservation()->region().size());
5564 code_page_allocator = code_range_->page_allocator();
5565 } else {
5566 code_page_allocator = isolate_->page_allocator();
5567 }
5568
5569 // Set up memory allocator.
5570 memory_allocator_.reset(
5571 new MemoryAllocator(isolate_, code_page_allocator, MaxReserved()));
5572
5573 mark_compact_collector_.reset(new MarkCompactCollector(this));
5574
5575 scavenger_collector_.reset(new ScavengerCollector(this));
5576
5577 incremental_marking_.reset(
5578 new IncrementalMarking(this, mark_compact_collector_->weak_objects()));
5579
5580 if (FLAG_concurrent_marking || FLAG_parallel_marking) {
5581 concurrent_marking_.reset(new ConcurrentMarking(
5582 this, mark_compact_collector_->marking_worklists(),
5583 mark_compact_collector_->weak_objects()));
5584 } else {
5585 concurrent_marking_.reset(new ConcurrentMarking(this, nullptr, nullptr));
5586 }
5587
5588 marking_barrier_.reset(new MarkingBarrier(this));
5589
5590 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
5591 space_[i] = nullptr;
5592 }
5593 }
5594
void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
5596 DCHECK_NOT_NULL(ro_heap);
5597 DCHECK_IMPLIES(read_only_space_ != nullptr,
5598 read_only_space_ == ro_heap->read_only_space());
5599 space_[RO_SPACE] = nullptr;
5600 read_only_space_ = ro_heap->read_only_space();
5601 }
5602
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
5604 CHECK(V8_SHARED_RO_HEAP_BOOL);
5605 if (read_only_space_) {
5606 read_only_space_->TearDown(memory_allocator());
5607 delete read_only_space_;
5608 }
5609
5610 read_only_space_ = space;
5611 }
5612
5613 class StressConcurrentAllocationObserver : public AllocationObserver {
5614 public:
  explicit StressConcurrentAllocationObserver(Heap* heap)
5616 : AllocationObserver(1024), heap_(heap) {}
5617
  void Step(int bytes_allocated, Address, size_t) override {
5619 DCHECK(heap_->deserialization_complete());
5620 if (FLAG_stress_concurrent_allocation) {
      // Only schedule the task if --stress-concurrent-allocation is enabled.
      // This allows tests to disable the flag even when the Isolate was
      // already initialized.
5623 StressConcurrentAllocatorTask::Schedule(heap_->isolate());
5624 }
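    // The observer only needs to trigger once: remove it after the first step
    // so that it does not keep intercepting allocations.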
5625 heap_->RemoveAllocationObserversFromAllSpaces(this, this);
5626 heap_->need_to_remove_stress_concurrent_allocation_observer_ = false;
5627 }
5628
5629 private:
5630 Heap* heap_;
5631 };
5632
void Heap::SetUpSpaces() {
  // Ensure SetUpFromReadOnlyHeap has been run.
5635 DCHECK_NOT_NULL(read_only_space_);
5636 const bool has_young_gen = !FLAG_single_generation && !IsShared();
5637 if (has_young_gen) {
5638 space_[NEW_SPACE] = new_space_ =
5639 new NewSpace(this, memory_allocator_->data_page_allocator(),
5640 initial_semispace_size_, max_semi_space_size_);
5641 }
5642 space_[OLD_SPACE] = old_space_ = new OldSpace(this);
5643 space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
5644 space_[MAP_SPACE] = map_space_ = new MapSpace(this);
5645 space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
5646 if (has_young_gen) {
5647 space_[NEW_LO_SPACE] = new_lo_space_ =
5648 new NewLargeObjectSpace(this, NewSpaceCapacity());
5649 }
5650 space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
5651
5652 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
5653 i++) {
5654 deferred_counters_[i] = 0;
5655 }
5656
5657 tracer_.reset(new GCTracer(this));
5658 #ifdef ENABLE_MINOR_MC
5659 minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
5660 #else
5661 minor_mark_compact_collector_ = nullptr;
5662 #endif // ENABLE_MINOR_MC
5663 array_buffer_sweeper_.reset(new ArrayBufferSweeper(this));
5664 gc_idle_time_handler_.reset(new GCIdleTimeHandler());
5665 memory_measurement_.reset(new MemoryMeasurement(isolate()));
5666 memory_reducer_.reset(new MemoryReducer(this));
5667 if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
5668 live_object_stats_.reset(new ObjectStats(this));
5669 dead_object_stats_.reset(new ObjectStats(this));
5670 }
5671 local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
5672 embedder_roots_handler_ =
5673 &local_embedder_heap_tracer()->default_embedder_roots_handler();
5674
5675 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5676 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5677
5678 mark_compact_collector()->SetUp();
5679 #ifdef ENABLE_MINOR_MC
5680 if (minor_mark_compact_collector() != nullptr) {
5681 minor_mark_compact_collector()->SetUp();
5682 }
5683 #endif // ENABLE_MINOR_MC
5684
5685 if (new_space()) {
5686 scavenge_job_.reset(new ScavengeJob());
5687 scavenge_task_observer_.reset(new ScavengeTaskObserver(
5688 this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
5689 new_space()->AddAllocationObserver(scavenge_task_observer_.get());
5690 }
5691
5692 SetGetExternallyAllocatedMemoryInBytesCallback(
5693 DefaultGetExternallyAllocatedMemoryInBytesCallback);
5694
5695 if (FLAG_stress_marking > 0) {
5696 stress_marking_percentage_ = NextStressMarkingLimit();
5697 stress_marking_observer_ = new StressMarkingObserver(this);
5698 AddAllocationObserversToAllSpaces(stress_marking_observer_,
5699 stress_marking_observer_);
5700 }
5701 if (FLAG_stress_scavenge > 0 && new_space()) {
5702 stress_scavenge_observer_ = new StressScavengeObserver(this);
5703 new_space()->AddAllocationObserver(stress_scavenge_observer_);
5704 }
5705
5706 write_protect_code_memory_ = FLAG_write_protect_code_memory;
5707 }
5708
5709 void Heap::InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap) {
5710 DCHECK_NULL(main_thread_local_heap_);
5711 main_thread_local_heap_ = main_thread_local_heap;
5712 }
5713
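// Seeds the string hash seed root: random when --hash-seed is 0, otherwise
// the fixed value given by the flag.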
5714 void Heap::InitializeHashSeed() {
5715 DCHECK(!deserialization_complete_);
5716 uint64_t new_hash_seed;
5717 if (FLAG_hash_seed == 0) {
5718 int64_t rnd = isolate()->random_number_generator()->NextInt64();
5719 new_hash_seed = static_cast<uint64_t>(rnd);
5720 } else {
5721 new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
5722 }
5723 ReadOnlyRoots(this).hash_seed().copy_in(
5724 0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
5725 }
5726
5727 int Heap::NextAllocationTimeout(int current_timeout) {
5728 if (FLAG_random_gc_interval > 0) {
5729 // If the current timeout hasn't reached 0, the GC was caused by something
5730 // other than the --stress-atomic-gc flag and we don't update the timeout.
5731 if (current_timeout <= 0) {
5732 return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
5733 } else {
5734 return current_timeout;
5735 }
5736 }
5737 return FLAG_gc_interval;
5738 }
5739
5740 void Heap::PrintAllocationsHash() {
5741 uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
5742 PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
5743 }
5744
5745 void Heap::PrintMaxMarkingLimitReached() {
5746 PrintF("\n### Maximum marking limit reached = %.02lf\n",
5747 max_marking_limit_reached_);
5748 }
5749
5750 void Heap::PrintMaxNewSpaceSizeReached() {
5751 PrintF("\n### Maximum new space size reached = %.02lf\n",
5752 stress_scavenge_observer_->MaxNewSpaceSizeReached());
5753 }
5754
5755 int Heap::NextStressMarkingLimit() {
5756 return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
5757 }
5758
5759 void Heap::NotifyDeserializationComplete() {
5760 PagedSpaceIterator spaces(this);
5761 for (PagedSpace* s = spaces.Next(); s != nullptr; s = spaces.Next()) {
5762 if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
5763 #ifdef DEBUG
5764 // All pages right after bootstrapping must be marked as never-evacuate.
5765 for (Page* p : *s) {
5766 DCHECK(p->NeverEvacuate());
5767 }
5768 #endif // DEBUG
5769 }
5770
5771 if (FLAG_stress_concurrent_allocation) {
5772 stress_concurrent_allocation_observer_.reset(
5773 new StressConcurrentAllocationObserver(this));
5774 AddAllocationObserversToAllSpaces(
5775 stress_concurrent_allocation_observer_.get(),
5776 stress_concurrent_allocation_observer_.get());
5777 need_to_remove_stress_concurrent_allocation_observer_ = true;
5778 }
5779
5780 deserialization_complete_ = true;
5781 }
5782
5783 void Heap::NotifyBootstrapComplete() {
5784 // This function is invoked for each native context creation. We are
5785 // interested only in the first native context.
5786 if (old_generation_capacity_after_bootstrap_ == 0) {
5787 old_generation_capacity_after_bootstrap_ = OldGenerationCapacity();
5788 }
5789 }
5790
5791 void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
5792 MemoryChunk* chunk) {
5793 // Pages created during bootstrapping may contain immortal immovable objects.
5794 if (!deserialization_complete()) {
5795 chunk->MarkNeverEvacuate();
5796 }
5797 if (space == CODE_SPACE || space == CODE_LO_SPACE) {
5798 isolate()->AddCodeMemoryChunk(chunk);
5799 }
5800 const size_t kMemoryReducerActivationThreshold = 1 * MB;
5801 if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
5802 OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
5803 kMemoryReducerActivationThreshold &&
5804 FLAG_memory_reducer_for_small_heaps) {
5805 MemoryReducer::Event event;
5806 event.type = MemoryReducer::kPossibleGarbage;
5807 event.time_ms = MonotonicallyIncreasingTimeInMs();
5808 memory_reducer()->NotifyPossibleGarbage(event);
5809 }
5810 }
5811
5812 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
5813 DCHECK_EQ(gc_state(), HeapState::NOT_IN_GC);
5814 // Setting a tracer is only supported when CppHeap is not used.
5815 DCHECK_IMPLIES(tracer, !cpp_heap_);
5816 local_embedder_heap_tracer()->SetRemoteTracer(tracer);
5817 }
5818
5819 void Heap::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
5820 embedder_roots_handler_ = handler;
5821 }
5822
5823 EmbedderRootsHandler* Heap::GetEmbedderRootsHandler() const {
5824 return embedder_roots_handler_;
5825 }
5826
5827 EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
5828 return local_embedder_heap_tracer()->remote_tracer();
5829 }
5830
5831 void Heap::AttachCppHeap(v8::CppHeap* cpp_heap) {
5832 CppHeap::From(cpp_heap)->AttachIsolate(isolate());
5833 cpp_heap_ = cpp_heap;
5834 }
5835
5836 void Heap::DetachCppHeap() {
5837 CppHeap::From(cpp_heap_)->DetachIsolate();
5838 cpp_heap_ = nullptr;
5839 }
5840
5841 EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
5842 if (is_current_gc_forced()) {
5843 return EmbedderHeapTracer::TraceFlags::kForced;
5844 } else if (ShouldReduceMemory()) {
5845 return EmbedderHeapTracer::TraceFlags::kReduceMemory;
5846 }
5847 return EmbedderHeapTracer::TraceFlags::kNoFlags;
5848 }
5849
5850 const cppgc::EmbedderStackState* Heap::overriden_stack_state() const {
5851 const auto* cpp_heap = CppHeap::From(cpp_heap_);
5852 return cpp_heap ? cpp_heap->override_stack_state() : nullptr;
5853 }
5854
5855 void Heap::RegisterExternallyReferencedObject(Address* location) {
5856 GlobalHandles::MarkTraced(location);
5857 Object object(*location);
5858 if (!object.IsHeapObject()) {
5859 // The embedder is not aware of whether numbers are materialized as heap
5860 // objects or just passed around as Smis.
5861 return;
5862 }
5863 HeapObject heap_object = HeapObject::cast(object);
5864 DCHECK(IsValidHeapObject(this, heap_object));
5865 if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
5866 incremental_marking()->WhiteToGreyAndPush(heap_object);
5867 } else {
5868 DCHECK(mark_compact_collector()->in_use());
5869 mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
5870 }
5871 }
5872
5873 void Heap::StartTearDown() {
5874 // Finish any ongoing sweeping to avoid stray background tasks still accessing
5875 // the heap during teardown.
5876 CompleteSweepingFull();
5877
5878 memory_allocator()->unmapper()->EnsureUnmappingCompleted();
5879
5880 SetGCState(TEAR_DOWN);
5881
5882 // Background threads may allocate and block until GC is performed. However
5883 // this might never happen when the main thread tries to quit and doesn't
5884 // process the event queue anymore. Avoid this deadlock by allowing all
5885 // allocations after tear down was requested to make sure all background
5886 // threads finish.
5887 collection_barrier_->NotifyShutdownRequested();
5888
5889 #ifdef VERIFY_HEAP
5890 // {StartTearDown} is called fairly early during Isolate teardown, so it's
5891 // a good time to run heap verification (if requested), before starting to
5892 // tear down parts of the Isolate.
5893 if (FLAG_verify_heap) {
5894 SafepointScope scope(this);
5895 Verify();
5896 }
5897 #endif
5898 }
5899
5900 void Heap::TearDown() {
5901 DCHECK_EQ(gc_state(), TEAR_DOWN);
5902
5903 if (FLAG_concurrent_marking || FLAG_parallel_marking)
5904 concurrent_marking_->Pause();
5905
5906 // It's too late for Heap::Verify() here, as parts of the Isolate are
5907 // already gone by the time this is called.
5908
5909 UpdateMaximumCommitted();
5910
5911 if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
5912 PrintAllocationsHash();
5913 }
5914
5915 if (FLAG_fuzzer_gc_analysis) {
5916 if (FLAG_stress_marking > 0) {
5917 PrintMaxMarkingLimitReached();
5918 }
5919 if (FLAG_stress_scavenge > 0) {
5920 PrintMaxNewSpaceSizeReached();
5921 }
5922 }
5923
5924 if (new_space()) {
5925 new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
5926 }
5927
5928 scavenge_task_observer_.reset();
5929 scavenge_job_.reset();
5930
5931 if (need_to_remove_stress_concurrent_allocation_observer_) {
5932 RemoveAllocationObserversFromAllSpaces(
5933 stress_concurrent_allocation_observer_.get(),
5934 stress_concurrent_allocation_observer_.get());
5935 }
5936 stress_concurrent_allocation_observer_.reset();
5937
5938 if (FLAG_stress_marking > 0) {
5939 RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
5940 stress_marking_observer_);
5941 delete stress_marking_observer_;
5942 stress_marking_observer_ = nullptr;
5943 }
5944 if (FLAG_stress_scavenge > 0 && new_space()) {
5945 new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
5946 delete stress_scavenge_observer_;
5947 stress_scavenge_observer_ = nullptr;
5948 }
5949
5950 if (mark_compact_collector_) {
5951 mark_compact_collector_->TearDown();
5952 mark_compact_collector_.reset();
5953 }
5954
5955 #ifdef ENABLE_MINOR_MC
5956 if (minor_mark_compact_collector_ != nullptr) {
5957 minor_mark_compact_collector_->TearDown();
5958 delete minor_mark_compact_collector_;
5959 minor_mark_compact_collector_ = nullptr;
5960 }
5961 #endif // ENABLE_MINOR_MC
5962
5963 scavenger_collector_.reset();
5964 array_buffer_sweeper_.reset();
5965 incremental_marking_.reset();
5966 concurrent_marking_.reset();
5967
5968 gc_idle_time_handler_.reset();
5969
5970 memory_measurement_.reset();
5971
5972 if (memory_reducer_ != nullptr) {
5973 memory_reducer_->TearDown();
5974 memory_reducer_.reset();
5975 }
5976
5977 live_object_stats_.reset();
5978 dead_object_stats_.reset();
5979
5980 local_embedder_heap_tracer_.reset();
5981 embedder_roots_handler_ = nullptr;
5982
5983 if (cpp_heap_) {
5984 CppHeap::From(cpp_heap_)->DetachIsolate();
5985 cpp_heap_ = nullptr;
5986 }
5987
5988 external_string_table_.TearDown();
5989
5990 tracer_.reset();
5991
5992 allocation_sites_to_pretenure_.reset();
5993
5994 for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
5995 delete space_[i];
5996 space_[i] = nullptr;
5997 }
5998
5999 isolate()->read_only_heap()->OnHeapTearDown(this);
6000 read_only_space_ = nullptr;
6001
6002 memory_allocator()->TearDown();
6003
6004 StrongRootsEntry* next = nullptr;
6005 for (StrongRootsEntry* current = strong_roots_head_; current;
6006 current = next) {
6007 next = current->next;
6008 delete current;
6009 }
6010 strong_roots_head_ = nullptr;
6011
6012 memory_allocator_.reset();
6013 }
6014
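// Points this heap at the shared Isolate's old and map spaces and creates
// concurrent allocators for allocating into them from this Isolate.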
6015 void Heap::InitSharedSpaces() {
6016 shared_old_space_ = isolate()->shared_isolate()->heap()->old_space();
6017 shared_old_allocator_.reset(
6018 new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
6019
6020 shared_map_space_ = isolate()->shared_isolate()->heap()->map_space();
6021 shared_map_allocator_.reset(
6022 new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
6023 }
6024
6025 void Heap::DeinitSharedSpaces() {
6026 shared_old_space_ = nullptr;
6027 shared_old_allocator_.reset();
6028
6029 shared_map_space_ = nullptr;
6030 shared_map_allocator_.reset();
6031 }
6032
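// GC prologue/epilogue callbacks are stored as (callback, gc_type, data)
// tuples; removal matches on the exact (callback, data) pair.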
6033 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
6034 GCType gc_type, void* data) {
6035 DCHECK_NOT_NULL(callback);
6036 DCHECK(gc_prologue_callbacks_.end() ==
6037 std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
6038 GCCallbackTuple(callback, gc_type, data)));
6039 gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
6040 }
6041
6042 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
6043 void* data) {
6044 DCHECK_NOT_NULL(callback);
6045 for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
6046 if (gc_prologue_callbacks_[i].callback == callback &&
6047 gc_prologue_callbacks_[i].data == data) {
6048 gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
6049 gc_prologue_callbacks_.pop_back();
6050 return;
6051 }
6052 }
6053 UNREACHABLE();
6054 }
6055
6056 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
6057 GCType gc_type, void* data) {
6058 DCHECK_NOT_NULL(callback);
6059 DCHECK(gc_epilogue_callbacks_.end() ==
6060 std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
6061 GCCallbackTuple(callback, gc_type, data)));
6062 gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
6063 }
6064
6065 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
6066 void* data) {
6067 DCHECK_NOT_NULL(callback);
6068 for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
6069 if (gc_epilogue_callbacks_[i].callback == callback &&
6070 gc_epilogue_callbacks_[i].data == data) {
6071 gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
6072 gc_epilogue_callbacks_.pop_back();
6073 return;
6074 }
6075 }
6076 UNREACHABLE();
6077 }
6078
6079 namespace {
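// Copies the live (non-cleared) references of a WeakArrayList into a newly
// allocated list; returns the input list unchanged when nothing was cleared.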
6080 Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
6081 Handle<WeakArrayList> array,
6082 AllocationType allocation) {
6083 if (array->length() == 0) {
6084 return array;
6085 }
6086 int new_length = array->CountLiveWeakReferences();
6087 if (new_length == array->length()) {
6088 return array;
6089 }
6090
6091 Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
6092 heap->isolate(),
6093 handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
6094 new_length, allocation);
6095 // Allocation might have caused GC and turned some of the elements into
6096 // cleared weak heap objects. Count the number of live references again and
6097 // fill in the new array.
6098 int copy_to = 0;
6099 for (int i = 0; i < array->length(); i++) {
6100 MaybeObject element = array->Get(i);
6101 if (element->IsCleared()) continue;
6102 new_array->Set(copy_to++, element);
6103 }
6104 new_array->set_length(copy_to);
6105 return new_array;
6106 }
6107
6108 } // anonymous namespace
6109
6110 void Heap::CompactWeakArrayLists() {
6111 // Find known PrototypeUsers and compact them.
6112 std::vector<Handle<PrototypeInfo>> prototype_infos;
6113 {
6114 HeapObjectIterator iterator(this);
6115 for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
6116 if (o.IsPrototypeInfo()) {
6117 PrototypeInfo prototype_info = PrototypeInfo::cast(o);
6118 if (prototype_info.prototype_users().IsWeakArrayList()) {
6119 prototype_infos.emplace_back(handle(prototype_info, isolate()));
6120 }
6121 }
6122 }
6123 }
6124 for (auto& prototype_info : prototype_infos) {
6125 Handle<WeakArrayList> array(
6126 WeakArrayList::cast(prototype_info->prototype_users()), isolate());
6127 DCHECK(InOldSpace(*array) ||
6128 *array == ReadOnlyRoots(this).empty_weak_array_list());
6129 WeakArrayList new_array = PrototypeUsers::Compact(
6130 array, this, JSObject::PrototypeRegistryCompactionCallback,
6131 AllocationType::kOld);
6132 prototype_info->set_prototype_users(new_array);
6133 }
6134
6135 // Find known WeakArrayLists and compact them.
6136 Handle<WeakArrayList> scripts(script_list(), isolate());
6137 DCHECK_IMPLIES(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL, InOldSpace(*scripts));
6138 scripts = CompactWeakArrayList(this, scripts, AllocationType::kOld);
6139 set_script_list(*scripts);
6140 }
6141
6142 void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
6143 if (map->is_in_retained_map_list()) {
6144 return;
6145 }
6146 Handle<WeakArrayList> array(context->retained_maps(), isolate());
6147 if (array->IsFull()) {
6148 CompactRetainedMaps(*array);
6149 }
6150 array =
6151 WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
6152 array = WeakArrayList::AddToEnd(
6153 isolate(), array,
6154 MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
6155 if (*array != context->retained_maps()) {
6156 context->set_retained_maps(*array);
6157 }
6158 map->set_is_in_retained_map_list(true);
6159 }
6160
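// The retained maps list stores (weak map, age) pairs, so entries are visited
// in steps of two; pairs whose map reference has been cleared are dropped and
// the unused tail is overwritten with undefined.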
6161 void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
6162 int length = retained_maps.length();
6163 int new_length = 0;
6164 // This loop compacts the array by removing cleared weak cells.
6165 for (int i = 0; i < length; i += 2) {
6166 MaybeObject maybe_object = retained_maps.Get(i);
6167 if (maybe_object->IsCleared()) {
6168 continue;
6169 }
6170
6171 DCHECK(maybe_object->IsWeak());
6172
6173 MaybeObject age = retained_maps.Get(i + 1);
6174 DCHECK(age->IsSmi());
6175 if (i != new_length) {
6176 retained_maps.Set(new_length, maybe_object);
6177 retained_maps.Set(new_length + 1, age);
6178 }
6179 new_length += 2;
6180 }
6181 HeapObject undefined = ReadOnlyRoots(this).undefined_value();
6182 for (int i = new_length; i < length; i++) {
6183 retained_maps.Set(i, HeapObjectReference::Strong(undefined));
6184 }
6185 if (new_length != length) retained_maps.set_length(new_length);
6186 }
6187
6188 void Heap::FatalProcessOutOfMemory(const char* location) {
6189 v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
6190 }
6191
6192 #ifdef DEBUG
6193
6194 class PrintHandleVisitor : public RootVisitor {
6195 public:
6196 void VisitRootPointers(Root root, const char* description,
6197 FullObjectSlot start, FullObjectSlot end) override {
6198 for (FullObjectSlot p = start; p < end; ++p)
6199 PrintF(" handle %p to %p\n", p.ToVoidPtr(),
6200 reinterpret_cast<void*>((*p).ptr()));
6201 }
6202 };
6203
6204 void Heap::PrintHandles() {
6205 PrintF("Handles:\n");
6206 PrintHandleVisitor v;
6207 isolate_->handle_scope_implementer()->Iterate(&v);
6208 }
6209
6210 #endif
6211
6212 class CheckHandleCountVisitor : public RootVisitor {
6213 public:
6214 CheckHandleCountVisitor() : handle_count_(0) {}
6215 ~CheckHandleCountVisitor() override {
6216 CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
6217 }
6218 void VisitRootPointers(Root root, const char* description,
6219 FullObjectSlot start, FullObjectSlot end) override {
6220 handle_count_ += end - start;
6221 }
6222
6223 private:
6224 ptrdiff_t handle_count_;
6225 };
6226
6227 void Heap::CheckHandleCount() {
6228 CheckHandleCountVisitor v;
6229 isolate_->handle_scope_implementer()->Iterate(&v);
6230 }
6231
6232 void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
6233 #ifndef V8_DISABLE_WRITE_BARRIERS
6234 DCHECK(!IsLargeObject(object));
6235 Page* page = Page::FromAddress(slot.address());
6236 if (!page->InYoungGeneration()) {
6237 DCHECK_EQ(page->owner_identity(), OLD_SPACE);
6238
6239 if (!page->SweepingDone()) {
6240 RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
6241 }
6242 }
6243 #endif
6244 }
6245
6246 // static
6247 int Heap::InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot) {
6248 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
6249 return 0;
6250 }
6251
6252 #ifdef DEBUG
6253 void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
6254 #ifndef V8_DISABLE_WRITE_BARRIERS
6255 DCHECK(!IsLargeObject(object));
6256 if (InYoungGeneration(object)) return;
6257 Page* page = Page::FromAddress(slot.address());
6258 DCHECK_EQ(page->owner_identity(), OLD_SPACE);
6259 // Slots are filtered with invalidated slots.
6260 CHECK_IMPLIES(RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()),
6261 page->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
6262 CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
6263 page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
6264 #endif
6265 }
6266
6267 void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
6268 #ifndef V8_DISABLE_WRITE_BARRIERS
6269 Page* page = Page::FromAddress(start);
6270 DCHECK(!page->InYoungGeneration());
6271 RememberedSet<OLD_TO_NEW>::CheckNoneInRange(page, start, end);
6272 #endif
6273 }
6274 #endif
6275
6276 void Heap::ClearRecordedSlotRange(Address start, Address end) {
6277 #ifndef V8_DISABLE_WRITE_BARRIERS
6278 Page* page = Page::FromAddress(start);
6279 DCHECK(!page->IsLargePage());
6280 if (!page->InYoungGeneration()) {
6281 DCHECK_EQ(page->owner_identity(), OLD_SPACE);
6282
6283 if (!page->SweepingDone()) {
6284 RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
6285 SlotSet::KEEP_EMPTY_BUCKETS);
6286 }
6287 }
6288 #endif
6289 }
6290
6291 PagedSpace* PagedSpaceIterator::Next() {
6292 int space = counter_++;
6293 switch (space) {
6294 case RO_SPACE:
6295 UNREACHABLE();
6296 case OLD_SPACE:
6297 return heap_->old_space();
6298 case CODE_SPACE:
6299 return heap_->code_space();
6300 case MAP_SPACE:
6301 return heap_->map_space();
6302 default:
6303 DCHECK_GT(space, LAST_GROWABLE_PAGED_SPACE);
6304 return nullptr;
6305 }
6306 }
6307
6308 SpaceIterator::SpaceIterator(Heap* heap)
6309 : heap_(heap), current_space_(FIRST_MUTABLE_SPACE) {}
6310
6311 SpaceIterator::~SpaceIterator() = default;
6312
6313 bool SpaceIterator::HasNext() {
6314 while (current_space_ <= LAST_MUTABLE_SPACE) {
6315 Space* space = heap_->space(current_space_);
6316 if (space) return true;
6317 ++current_space_;
6318 }
6319
6320 // No more spaces left.
6321 return false;
6322 }
6323
6324 Space* SpaceIterator::Next() {
6325 DCHECK(HasNext());
6326 Space* space = heap_->space(current_space_++);
6327 DCHECK_NOT_NULL(space);
6328 return space;
6329 }
6330
6331 class HeapObjectsFilter {
6332 public:
6333 virtual ~HeapObjectsFilter() = default;
6334 virtual bool SkipObject(HeapObject object) = 0;
6335 };
6336
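// Filter that front-loads a full marking pass: it computes the set of objects
// transitively reachable from the roots and later reports everything outside
// that set (as well as free space and fillers) as skippable.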
6337 class UnreachableObjectsFilter : public HeapObjectsFilter {
6338 public:
6339 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
6340 MarkReachableObjects();
6341 }
6342
6343 ~UnreachableObjectsFilter() override {
6344 for (auto it : reachable_) {
6345 delete it.second;
6346 it.second = nullptr;
6347 }
6348 }
6349
6350 bool SkipObject(HeapObject object) override {
6351 if (object.IsFreeSpaceOrFiller()) return true;
6352 Address chunk = object.ptr() & ~kLogicalChunkAlignmentMask;
6353 if (reachable_.count(chunk) == 0) return true;
6354 return reachable_[chunk]->count(object) == 0;
6355 }
6356
6357 private:
6358 bool MarkAsReachable(HeapObject object) {
6359 Address chunk = object.ptr() & ~kLogicalChunkAlignmentMask;
6360 if (reachable_.count(chunk) == 0) {
6361 reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
6362 }
6363 if (reachable_[chunk]->count(object)) return false;
6364 reachable_[chunk]->insert(object);
6365 return true;
6366 }
6367
6368 static constexpr intptr_t kLogicalChunkAlignment =
6369 (static_cast<uintptr_t>(1) << kPageSizeBits);
6370
6371 static constexpr intptr_t kLogicalChunkAlignmentMask =
6372 kLogicalChunkAlignment - 1;
6373
6374 class MarkingVisitor : public ObjectVisitorWithCageBases, public RootVisitor {
6375 public:
6376 explicit MarkingVisitor(UnreachableObjectsFilter* filter)
6377 : ObjectVisitorWithCageBases(filter->heap_), filter_(filter) {}
6378
6379 void VisitMapPointer(HeapObject object) override {
6380 MarkHeapObject(Map::unchecked_cast(object.map()));
6381 }
6382 void VisitPointers(HeapObject host, ObjectSlot start,
6383 ObjectSlot end) override {
6384 MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
6385 }
6386
6387 void VisitPointers(HeapObject host, MaybeObjectSlot start,
6388 MaybeObjectSlot end) final {
6389 MarkPointers(start, end);
6390 }
6391
6392 void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
6393 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
6394 HeapObject code = HeapObject::unchecked_cast(slot.load(code_cage_base()));
6395 MarkHeapObject(code);
6396 }
6397
6398 void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
6399 Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
6400 MarkHeapObject(target);
6401 }
6402 void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
6403 MarkHeapObject(rinfo->target_object_no_host(cage_base()));
6404 }
6405
6406 void VisitRootPointers(Root root, const char* description,
6407 FullObjectSlot start, FullObjectSlot end) override {
6408 MarkPointersImpl(start, end);
6409 }
6410 void VisitRootPointers(Root root, const char* description,
6411 OffHeapObjectSlot start,
6412 OffHeapObjectSlot end) override {
6413 MarkPointersImpl(start, end);
6414 }
6415
6416 void TransitiveClosure() {
6417 while (!marking_stack_.empty()) {
6418 HeapObject obj = marking_stack_.back();
6419 marking_stack_.pop_back();
6420 obj.Iterate(this);
6421 }
6422 }
6423
6424 private:
6425 void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
6426 MarkPointersImpl(start, end);
6427 }
6428
6429 template <typename TSlot>
6430 V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
6431 // Treat weak references as strong.
6432 for (TSlot p = start; p < end; ++p) {
6433 typename TSlot::TObject object = p.load(cage_base());
6434 HeapObject heap_object;
6435 if (object.GetHeapObject(&heap_object)) {
6436 MarkHeapObject(heap_object);
6437 }
6438 }
6439 }
6440
6441 V8_INLINE void MarkHeapObject(HeapObject heap_object) {
6442 if (filter_->MarkAsReachable(heap_object)) {
6443 marking_stack_.push_back(heap_object);
6444 }
6445 }
6446
6447 UnreachableObjectsFilter* filter_;
6448 std::vector<HeapObject> marking_stack_;
6449 };
6450
6451 friend class MarkingVisitor;
6452
6453 void MarkReachableObjects() {
6454 MarkingVisitor visitor(this);
6455 heap_->IterateRoots(&visitor, {});
6456 visitor.TransitiveClosure();
6457 }
6458
6459 Heap* heap_;
6460 DISALLOW_GARBAGE_COLLECTION(no_gc_)
6461 std::unordered_map<Address, std::unordered_set<HeapObject, Object::Hasher>*>
6462 reachable_;
6463 };
6464
6465 HeapObjectIterator::HeapObjectIterator(
6466 Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
6467 : heap_(heap),
6468 safepoint_scope_(std::make_unique<SafepointScope>(heap)),
6469 filtering_(filtering),
6470 filter_(nullptr),
6471 space_iterator_(nullptr),
6472 object_iterator_(nullptr) {
6473 heap_->MakeHeapIterable();
6474 // Start the iteration.
6475 space_iterator_ = new SpaceIterator(heap_);
6476 switch (filtering_) {
6477 case kFilterUnreachable:
6478 filter_ = new UnreachableObjectsFilter(heap_);
6479 break;
6480 default:
6481 break;
6482 }
6483 object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
6484 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) heap_->tp_heap_->ResetIterator();
6485 }
6486
6487 HeapObjectIterator::~HeapObjectIterator() {
6488 #ifdef DEBUG
6489 // Assert that in filtering mode we have iterated through all
6490 // objects. Otherwise, the heap will be left in an inconsistent state.
6491 if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL && filtering_ != kNoFiltering) {
6492 DCHECK_NULL(object_iterator_);
6493 }
6494 #endif
6495 delete space_iterator_;
6496 delete filter_;
6497 }
6498
6499 HeapObject HeapObjectIterator::Next() {
6500 if (filter_ == nullptr) return NextObject();
6501
6502 HeapObject obj = NextObject();
6503 while (!obj.is_null() && (filter_->SkipObject(obj))) obj = NextObject();
6504 return obj;
6505 }
6506
6507 HeapObject HeapObjectIterator::NextObject() {
6508 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return heap_->tp_heap_->NextObject();
6509 // No iterator means we are done.
6510 if (object_iterator_.get() == nullptr) return HeapObject();
6511
6512 HeapObject obj = object_iterator_.get()->Next();
6513 if (!obj.is_null()) {
6514 // If the current iterator has more objects we are fine.
6515 return obj;
6516 } else {
6517 // Go through the spaces looking for one that has objects.
6518 while (space_iterator_->HasNext()) {
6519 object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
6520 obj = object_iterator_.get()->Next();
6521 if (!obj.is_null()) {
6522 return obj;
6523 }
6524 }
6525 }
6526 // Done with the last space.
6527 object_iterator_.reset(nullptr);
6528 return HeapObject();
6529 }
6530
6531 void Heap::UpdateTotalGCTime(double duration) {
6532 if (FLAG_trace_gc_verbose) {
6533 total_gc_time_ms_ += duration;
6534 }
6535 }
6536
6537 void Heap::ExternalStringTable::CleanUpYoung() {
6538 int last = 0;
6539 Isolate* isolate = heap_->isolate();
6540 for (size_t i = 0; i < young_strings_.size(); ++i) {
6541 Object o = young_strings_[i];
6542 if (o.IsTheHole(isolate)) {
6543 continue;
6544 }
6545 // The real external string is already in one of these vectors and was or
6546 // will be processed. Re-processing it will add a duplicate to the vector.
6547 if (o.IsThinString()) continue;
6548 DCHECK(o.IsExternalString());
6549 if (InYoungGeneration(o)) {
6550 young_strings_[last++] = o;
6551 } else {
6552 old_strings_.push_back(o);
6553 }
6554 }
6555 young_strings_.resize(last);
6556 }
6557
6558 void Heap::ExternalStringTable::CleanUpAll() {
6559 CleanUpYoung();
6560 int last = 0;
6561 Isolate* isolate = heap_->isolate();
6562 for (size_t i = 0; i < old_strings_.size(); ++i) {
6563 Object o = old_strings_[i];
6564 if (o.IsTheHole(isolate)) {
6565 continue;
6566 }
6567 // The real external string is already in one of these vectors and was or
6568 // will be processed. Re-processing it will add a duplicate to the vector.
6569 if (o.IsThinString()) continue;
6570 DCHECK(o.IsExternalString());
6571 DCHECK(!InYoungGeneration(o));
6572 old_strings_[last++] = o;
6573 }
6574 old_strings_.resize(last);
6575 #ifdef VERIFY_HEAP
6576 if (FLAG_verify_heap && !FLAG_enable_third_party_heap) {
6577 Verify();
6578 }
6579 #endif
6580 }
6581
6582 void Heap::ExternalStringTable::TearDown() {
6583 for (size_t i = 0; i < young_strings_.size(); ++i) {
6584 Object o = young_strings_[i];
6585 // Don't finalize thin strings.
6586 if (o.IsThinString()) continue;
6587 heap_->FinalizeExternalString(ExternalString::cast(o));
6588 }
6589 young_strings_.clear();
6590 for (size_t i = 0; i < old_strings_.size(); ++i) {
6591 Object o = old_strings_[i];
6592 // Don't finalize thin strings.
6593 if (o.IsThinString()) continue;
6594 heap_->FinalizeExternalString(ExternalString::cast(o));
6595 }
6596 old_strings_.clear();
6597 }
6598
6599 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6600 // Tag the page pointer to make it findable in the dump file.
6601 if (compacted) {
6602 page ^= 0xC1EAD & (Page::kPageSize - 1); // Cleared.
6603 } else {
6604 page ^= 0x1D1ED & (Page::kPageSize - 1); // I died.
6605 }
6606 remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
6607 remembered_unmapped_pages_index_++;
6608 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6609 }
6610
6611 size_t Heap::YoungArrayBufferBytes() {
6612 return array_buffer_sweeper()->YoungBytes();
6613 }
6614
6615 size_t Heap::OldArrayBufferBytes() {
6616 return array_buffer_sweeper()->OldBytes();
6617 }
6618
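// Prepends a new entry to the doubly-linked list of strong root ranges,
// guarded by the strong roots mutex.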
6619 StrongRootsEntry* Heap::RegisterStrongRoots(const char* label,
6620 FullObjectSlot start,
6621 FullObjectSlot end) {
6622 base::MutexGuard guard(&strong_roots_mutex_);
6623
6624 StrongRootsEntry* entry = new StrongRootsEntry(label);
6625 entry->start = start;
6626 entry->end = end;
6627 entry->prev = nullptr;
6628 entry->next = strong_roots_head_;
6629
6630 if (strong_roots_head_) {
6631 DCHECK_NULL(strong_roots_head_->prev);
6632 strong_roots_head_->prev = entry;
6633 }
6634 strong_roots_head_ = entry;
6635
6636 return entry;
6637 }
6638
6639 void Heap::UpdateStrongRoots(StrongRootsEntry* entry, FullObjectSlot start,
6640 FullObjectSlot end) {
6641 entry->start = start;
6642 entry->end = end;
6643 }
6644
6645 void Heap::UnregisterStrongRoots(StrongRootsEntry* entry) {
6646 base::MutexGuard guard(&strong_roots_mutex_);
6647
6648 StrongRootsEntry* prev = entry->prev;
6649 StrongRootsEntry* next = entry->next;
6650
6651 if (prev) prev->next = next;
6652 if (next) next->prev = prev;
6653
6654 if (strong_roots_head_ == entry) {
6655 DCHECK_NULL(prev);
6656 strong_roots_head_ = next;
6657 }
6658
6659 delete entry;
6660 }
6661
6662 void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
6663 set_builtins_constants_table(cache);
6664 }
6665
6666 void Heap::SetDetachedContexts(WeakArrayList detached_contexts) {
6667 set_detached_contexts(detached_contexts);
6668 }
6669
6670 void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
6671 DCHECK_EQ(Builtin::kInterpreterEntryTrampoline, code.builtin_id());
6672 set_interpreter_entry_trampoline_for_profiling(code);
6673 }
6674
6675 void Heap::PostFinalizationRegistryCleanupTaskIfNeeded() {
6676 // Only one cleanup task is posted at a time.
6677 if (!HasDirtyJSFinalizationRegistries() ||
6678 is_finalization_registry_cleanup_task_posted_) {
6679 return;
6680 }
6681 auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
6682 reinterpret_cast<v8::Isolate*>(isolate()));
6683 auto task = std::make_unique<FinalizationRegistryCleanupTask>(this);
6684 taskrunner->PostNonNestableTask(std::move(task));
6685 is_finalization_registry_cleanup_task_posted_ = true;
6686 }
6687
6688 void Heap::EnqueueDirtyJSFinalizationRegistry(
6689 JSFinalizationRegistry finalization_registry,
6690 std::function<void(HeapObject object, ObjectSlot slot, Object target)>
6691 gc_notify_updated_slot) {
6692 // Add a FinalizationRegistry to the tail of the dirty list.
6693 DCHECK(!HasDirtyJSFinalizationRegistries() ||
6694 dirty_js_finalization_registries_list().IsJSFinalizationRegistry());
6695 DCHECK(finalization_registry.next_dirty().IsUndefined(isolate()));
6696 DCHECK(!finalization_registry.scheduled_for_cleanup());
6697 finalization_registry.set_scheduled_for_cleanup(true);
6698 if (dirty_js_finalization_registries_list_tail().IsUndefined(isolate())) {
6699 DCHECK(dirty_js_finalization_registries_list().IsUndefined(isolate()));
6700 set_dirty_js_finalization_registries_list(finalization_registry);
6701 // dirty_js_finalization_registries_list_ is rescanned by
6702 // ProcessWeakListRoots.
6703 } else {
6704 JSFinalizationRegistry tail = JSFinalizationRegistry::cast(
6705 dirty_js_finalization_registries_list_tail());
6706 tail.set_next_dirty(finalization_registry);
6707 gc_notify_updated_slot(
6708 tail, tail.RawField(JSFinalizationRegistry::kNextDirtyOffset),
6709 finalization_registry);
6710 }
6711 set_dirty_js_finalization_registries_list_tail(finalization_registry);
6712 // dirty_js_finalization_registries_list_tail_ is rescanned by
6713 // ProcessWeakListRoots.
6714 }
6715
6716 MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
6717 // Take a FinalizationRegistry from the head of the dirty list for fairness.
6718 if (HasDirtyJSFinalizationRegistries()) {
6719 Handle<JSFinalizationRegistry> head(
6720 JSFinalizationRegistry::cast(dirty_js_finalization_registries_list()),
6721 isolate());
6722 set_dirty_js_finalization_registries_list(head->next_dirty());
6723 head->set_next_dirty(ReadOnlyRoots(this).undefined_value());
6724 if (*head == dirty_js_finalization_registries_list_tail()) {
6725 set_dirty_js_finalization_registries_list_tail(
6726 ReadOnlyRoots(this).undefined_value());
6727 }
6728 return head;
6729 }
6730 return {};
6731 }
6732
6733 void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
6734 DisallowGarbageCollection no_gc;
6735
6736 Isolate* isolate = this->isolate();
6737 Object prev = ReadOnlyRoots(isolate).undefined_value();
6738 Object current = dirty_js_finalization_registries_list();
6739 while (!current.IsUndefined(isolate)) {
6740 JSFinalizationRegistry finalization_registry =
6741 JSFinalizationRegistry::cast(current);
6742 if (finalization_registry.native_context() == context) {
6743 if (prev.IsUndefined(isolate)) {
6744 set_dirty_js_finalization_registries_list(
6745 finalization_registry.next_dirty());
6746 } else {
6747 JSFinalizationRegistry::cast(prev).set_next_dirty(
6748 finalization_registry.next_dirty());
6749 }
6750 finalization_registry.set_scheduled_for_cleanup(false);
6751 current = finalization_registry.next_dirty();
6752 finalization_registry.set_next_dirty(
6753 ReadOnlyRoots(isolate).undefined_value());
6754 } else {
6755 prev = current;
6756 current = finalization_registry.next_dirty();
6757 }
6758 }
6759 set_dirty_js_finalization_registries_list_tail(prev);
6760 }
6761
6762 void Heap::KeepDuringJob(Handle<JSReceiver> target) {
6763 DCHECK(weak_refs_keep_during_job().IsUndefined() ||
6764 weak_refs_keep_during_job().IsOrderedHashSet());
6765 Handle<OrderedHashSet> table;
6766 if (weak_refs_keep_during_job().IsUndefined(isolate())) {
6767 table = isolate()->factory()->NewOrderedHashSet();
6768 } else {
6769 table =
6770 handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
6771 }
6772 table = OrderedHashSet::Add(isolate(), table, target).ToHandleChecked();
6773 set_weak_refs_keep_during_job(*table);
6774 }
6775
6776 void Heap::ClearKeptObjects() {
6777 set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
6778 }
6779
6780 size_t Heap::NumberOfTrackedHeapObjectTypes() {
6781 return ObjectStats::OBJECT_STATS_COUNT;
6782 }
6783
6784 size_t Heap::ObjectCountAtLastGC(size_t index) {
6785 if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6786 return 0;
6787 return live_object_stats_->object_count_last_gc(index);
6788 }
6789
6790 size_t Heap::ObjectSizeAtLastGC(size_t index) {
6791 if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6792 return 0;
6793 return live_object_stats_->object_size_last_gc(index);
6794 }
6795
6796 bool Heap::GetObjectTypeName(size_t index, const char** object_type,
6797 const char** object_sub_type) {
6798 if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
6799
6800 switch (static_cast<int>(index)) {
6801 #define COMPARE_AND_RETURN_NAME(name) \
6802 case name: \
6803 *object_type = #name; \
6804 *object_sub_type = ""; \
6805 return true;
6806 INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6807 #undef COMPARE_AND_RETURN_NAME
6808
6809 #define COMPARE_AND_RETURN_NAME(name) \
6810 case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
6811 *object_type = #name; \
6812 *object_sub_type = ""; \
6813 return true;
6814 VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6815 #undef COMPARE_AND_RETURN_NAME
6816 }
6817 return false;
6818 }
6819
6820 size_t Heap::NumberOfNativeContexts() {
6821 int result = 0;
6822 Object context = native_contexts_list();
6823 while (!context.IsUndefined(isolate())) {
6824 ++result;
6825 Context native_context = Context::cast(context);
6826 context = native_context.next_context_link();
6827 }
6828 return result;
6829 }
6830
6831 std::vector<Handle<NativeContext>> Heap::FindAllNativeContexts() {
6832 std::vector<Handle<NativeContext>> result;
6833 Object context = native_contexts_list();
6834 while (!context.IsUndefined(isolate())) {
6835 NativeContext native_context = NativeContext::cast(context);
6836 result.push_back(handle(native_context, isolate()));
6837 context = native_context.next_context_link();
6838 }
6839 return result;
6840 }
6841
6842 std::vector<WeakArrayList> Heap::FindAllRetainedMaps() {
6843 std::vector<WeakArrayList> result;
6844 Object context = native_contexts_list();
6845 while (!context.IsUndefined(isolate())) {
6846 NativeContext native_context = NativeContext::cast(context);
6847 result.push_back(native_context.retained_maps());
6848 context = native_context.next_context_link();
6849 }
6850 return result;
6851 }
6852
6853 size_t Heap::NumberOfDetachedContexts() {
6854 // The detached_contexts() array has two entries per detached context.
6855 return detached_contexts().length() / 2;
6856 }
6857
6858 void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
6859 ObjectSlot end) {
6860 VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
6861 }
6862
6863 void VerifyPointersVisitor::VisitPointers(HeapObject host,
6864 MaybeObjectSlot start,
6865 MaybeObjectSlot end) {
6866 VerifyPointers(host, start, end);
6867 }
6868
6869 void VerifyPointersVisitor::VisitCodePointer(HeapObject host,
6870 CodeObjectSlot slot) {
6871 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
6872 Object maybe_code = slot.load(code_cage_base());
6873 HeapObject code;
6874 if (maybe_code.GetHeapObject(&code)) {
6875 VerifyCodeObjectImpl(code);
6876 } else {
6877 CHECK(maybe_code.IsSmi());
6878 }
6879 }
6880
6881 void VerifyPointersVisitor::VisitRootPointers(Root root,
6882 const char* description,
6883 FullObjectSlot start,
6884 FullObjectSlot end) {
6885 VerifyPointersImpl(start, end);
6886 }
6887
6888 void VerifyPointersVisitor::VisitRootPointers(Root root,
6889 const char* description,
6890 OffHeapObjectSlot start,
6891 OffHeapObjectSlot end) {
6892 VerifyPointersImpl(start, end);
6893 }
6894
6895 void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
6896 CHECK(IsValidHeapObject(heap_, heap_object));
6897 CHECK(heap_object.map(cage_base()).IsMap());
6898 }
6899
6900 void VerifyPointersVisitor::VerifyCodeObjectImpl(HeapObject heap_object) {
6901 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
6902 CHECK(IsValidCodeObject(heap_, heap_object));
6903 CHECK(heap_object.map(cage_base()).IsMap());
6904 CHECK(heap_object.map(cage_base()).instance_type() == CODE_TYPE);
6905 }
6906
6907 template <typename TSlot>
6908 void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
6909 for (TSlot slot = start; slot < end; ++slot) {
6910 typename TSlot::TObject object = slot.load(cage_base());
6911 HeapObject heap_object;
6912 if (object.GetHeapObject(&heap_object)) {
6913 VerifyHeapObjectImpl(heap_object);
6914 } else {
6915 CHECK(object.IsSmi() || object.IsCleared() ||
6916 MapWord::IsPacked(object.ptr()));
6917 }
6918 }
6919 }
6920
6921 void VerifyPointersVisitor::VerifyPointers(HeapObject host,
6922 MaybeObjectSlot start,
6923 MaybeObjectSlot end) {
6924 // If this DCHECK fires then you probably added a pointer field
6925 // to one of the objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
6926 // this by moving that object to POINTER_VISITOR_ID_LIST.
6927 DCHECK_EQ(ObjectFields::kMaybePointers,
6928 Map::ObjectFieldsFrom(host.map().visitor_id()));
6929 VerifyPointersImpl(start, end);
6930 }
6931
6932 void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
6933 Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
6934 VerifyHeapObjectImpl(target);
6935 }
6936
6937 void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
6938 VerifyHeapObjectImpl(rinfo->target_object_no_host(cage_base()));
6939 }
6940
6941 void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
6942 FullObjectSlot start,
6943 FullObjectSlot end) {
6944 for (FullObjectSlot current = start; current < end; ++current) {
6945 CHECK((*current).IsSmi());
6946 }
6947 }
6948
6949 bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
6950 // Object migration is governed by the following rules:
6951 //
6952 // 1) Objects in new-space can be migrated to the old space
6953 // that matches their target space or they stay in new-space.
6954 // 2) Objects in old-space stay in the same space when migrating.
6955 // 3) Fillers (two or more words) can migrate due to left-trimming of
6956 // fixed arrays in new-space or old space.
6957 // 4) Fillers (one word) can never migrate; they are skipped by
6958 // incremental marking explicitly to prevent an invalid pattern.
6959 //
6960 // Since this function is used for debugging only, we do not place
6961 // asserts here, but check everything explicitly.
6962 if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
6963 InstanceType type = map.instance_type();
6964 MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
6965 AllocationSpace src = chunk->owner_identity();
6966 switch (src) {
6967 case NEW_SPACE:
6968 return dst == NEW_SPACE || dst == OLD_SPACE;
6969 case OLD_SPACE:
6970 return dst == OLD_SPACE;
6971 case CODE_SPACE:
6972 return dst == CODE_SPACE && type == CODE_TYPE;
6973 case MAP_SPACE:
6974 case LO_SPACE:
6975 case CODE_LO_SPACE:
6976 case NEW_LO_SPACE:
6977 case RO_SPACE:
6978 return false;
6979 }
6980 UNREACHABLE();
6981 }
6982
6983 size_t Heap::EmbedderAllocationCounter() const {
6984 return local_embedder_heap_tracer()
6985 ? local_embedder_heap_tracer()->allocated_size()
6986 : 0;
6987 }
6988
6989 void Heap::CreateObjectStats() {
6990 if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
6991 if (!live_object_stats_) {
6992 live_object_stats_.reset(new ObjectStats(this));
6993 }
6994 if (!dead_object_stats_) {
6995 dead_object_stats_.reset(new ObjectStats(this));
6996 }
6997 }
6998
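// Returns the map of a code-space object even when the object has already
// been evacuated and its map word holds a forwarding address.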
6999 Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
7000 PtrComprCageBase cage_base(isolate());
7001 MapWord map_word = object.map_word(cage_base, kRelaxedLoad);
7002 if (map_word.IsForwardingAddress()) {
7003 #if V8_EXTERNAL_CODE_SPACE
7004 PtrComprCageBase code_cage_base(isolate()->code_cage_base());
7005 #else
7006 PtrComprCageBase code_cage_base = cage_base;
7007 #endif
7008 return map_word.ToForwardingAddress(code_cage_base).map(cage_base);
7009 }
7010 return map_word.ToMap();
7011 }
7012
7013 Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
7014 Code code = Code::unchecked_cast(object);
7015 DCHECK(!code.is_null());
7016 DCHECK(GcSafeCodeContains(code, inner_pointer));
7017 return code;
7018 }
7019
7020 bool Heap::GcSafeCodeContains(Code code, Address addr) {
7021 Map map = GcSafeMapOfCodeSpaceObject(code);
7022 DCHECK(map == ReadOnlyRoots(this).code_map());
7023 Builtin maybe_builtin = InstructionStream::TryLookupCode(isolate(), addr);
7024 if (Builtins::IsBuiltinId(maybe_builtin) &&
7025 code.builtin_id() == maybe_builtin) {
7026 return true;
7027 }
7028 Address start = code.address();
7029 Address end = code.address() + code.SizeFromMap(map);
7030 return start <= addr && addr < end;
7031 }
7032
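// Resolves an arbitrary instruction address to the Code object containing it,
// checking builtins first and then the code spaces.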
7033 Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
7034 Builtin maybe_builtin =
7035 InstructionStream::TryLookupCode(isolate(), inner_pointer);
7036 if (Builtins::IsBuiltinId(maybe_builtin)) {
7037 return builtin(maybe_builtin);
7038 }
7039
7040 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
7041 Address start = tp_heap_->GetObjectFromInnerPointer(inner_pointer);
7042 return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
7043 }
7044
7045 // Check if the inner pointer points into a large object chunk.
7046 LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
7047 if (large_page != nullptr) {
7048 return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
7049 }
7050
7051 if (V8_LIKELY(code_space()->Contains(inner_pointer))) {
7052 // Iterate through the page until we reach the end or find an object
7053 // starting after the inner pointer.
7054 Page* page = Page::FromAddress(inner_pointer);
7055
7056 Address start =
7057 page->GetCodeObjectRegistry()->GetCodeObjectStartFromInnerAddress(
7058 inner_pointer);
7059 return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
7060 }
7061
7062 // It can only fall through to here during debugging, where for instance "jco"
7063 // was called on an address within a RO_SPACE builtin. It cannot reach here
7064 // during stack iteration as RO_SPACE memory is not executable so cannot
7065 // appear on the stack as an instruction address.
7066 DCHECK(ReadOnlyHeap::Contains(
7067 HeapObject::FromAddress(inner_pointer & ~kHeapObjectTagMask)));
7068
7069 // TODO(delphick): Possibly optimize this as it iterates over all pages in
7070 // RO_SPACE instead of just the one containing the address.
7071 ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
7072 for (HeapObject object = iterator.Next(); !object.is_null();
7073 object = iterator.Next()) {
7074 if (!object.IsCode()) continue;
7075 Code code = Code::cast(object);
7076 if (inner_pointer >= code.address() &&
7077 inner_pointer < code.address() + code.Size()) {
7078 return code;
7079 }
7080 }
7081 UNREACHABLE();
7082 }
7083
7084 void Heap::WriteBarrierForCodeSlow(Code code) {
7085 for (RelocIterator it(code, RelocInfo::EmbeddedObjectModeMask()); !it.done();
7086 it.next()) {
7087 GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
7088 WriteBarrier::Marking(code, it.rinfo(), it.rinfo()->target_object());
7089 }
7090 }
7091
7092 void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
7093 HeapObject value) {
7094 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
7095 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
7096 }
7097
7098 void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
7099 DCHECK(ObjectInYoungGeneration(HeapObjectSlot(slot).ToHeapObject()));
7100 if (FLAG_minor_mc) {
7101 // Minor MC lacks support for specialized generational ephemeron barriers.
7102 // The regular write barrier works as well but keeps more memory alive.
7103 MemoryChunk* chunk = MemoryChunk::FromHeapObject(table);
7104 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
7105 } else {
7106 int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
7107 InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
7108 auto it =
7109 ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
7110 it.first->second.insert(entry.as_int());
7111 }
7112 }
7113
7114 void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
7115 Address key_slot_address,
7116 Isolate* isolate) {
7117 EphemeronHashTable table = EphemeronHashTable::cast(Object(raw_object));
7118 MaybeObjectSlot key_slot(key_slot_address);
7119 MaybeObject maybe_key = *key_slot;
7120 HeapObject key;
7121 if (!maybe_key.GetHeapObject(&key)) return;
7122 if (!ObjectInYoungGeneration(table) && ObjectInYoungGeneration(key)) {
7123 isolate->heap()->RecordEphemeronKeyWrite(table, key_slot_address);
7124 }
7125 WriteBarrier::Marking(table, key_slot, maybe_key);
7126 }
7127
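// Bit flags describing which write barriers WriteBarrierForRangeImpl has to
// apply for a slot range; used to instantiate the specializations below.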
7128 enum RangeWriteBarrierMode {
7129 kDoGenerational = 1 << 0,
7130 kDoMarking = 1 << 1,
7131 kDoEvacuationSlotRecording = 1 << 2,
7132 };
7133
7134 template <int kModeMask, typename TSlot>
7135 void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
7136 TSlot start_slot, TSlot end_slot) {
7137 // At least one of generational or marking write barrier should be requested.
7138 STATIC_ASSERT(kModeMask & (kDoGenerational | kDoMarking));
7139 // kDoEvacuationSlotRecording implies kDoMarking.
7140 STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
7141 (kModeMask & kDoMarking));
7142
7143 MarkingBarrier* marking_barrier = WriteBarrier::CurrentMarkingBarrier(this);
7144 MarkCompactCollector* collector = this->mark_compact_collector();
7145
7146 for (TSlot slot = start_slot; slot < end_slot; ++slot) {
7147 typename TSlot::TObject value = *slot;
7148 HeapObject value_heap_object;
7149 if (!value.GetHeapObject(&value_heap_object)) continue;
7150
7151 if ((kModeMask & kDoGenerational) &&
7152 Heap::InYoungGeneration(value_heap_object)) {
7153 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(source_page,
7154 slot.address());
7155 }
7156
7157 if ((kModeMask & kDoMarking) &&
7158 marking_barrier->MarkValue(object, value_heap_object)) {
7159 if (kModeMask & kDoEvacuationSlotRecording) {
7160 collector->RecordSlot(source_page, HeapObjectSlot(slot),
7161 value_heap_object);
7162 }
7163 }
7164 }
7165 }
7166
7167 // Instantiate Heap::WriteBarrierForRange() for ObjectSlot and MaybeObjectSlot.
7168 template void Heap::WriteBarrierForRange<ObjectSlot>(HeapObject object,
7169 ObjectSlot start_slot,
7170 ObjectSlot end_slot);
7171 template void Heap::WriteBarrierForRange<MaybeObjectSlot>(
7172 HeapObject object, MaybeObjectSlot start_slot, MaybeObjectSlot end_slot);
7173
7174 template <typename TSlot>
7175 void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
7176 TSlot end_slot) {
7177 if (FLAG_disable_write_barriers) return;
7178 MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
7179 base::Flags<RangeWriteBarrierMode> mode;
7180
7181 if (!source_page->InYoungGeneration()) {
7182 mode |= kDoGenerational;
7183 }
7184
7185 if (incremental_marking()->IsMarking()) {
7186 mode |= kDoMarking;
7187 if (!source_page->ShouldSkipEvacuationSlotRecording()) {
7188 mode |= kDoEvacuationSlotRecording;
7189 }
7190 }
7191
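// Dispatch on the runtime-computed mode so that WriteBarrierForRangeImpl is
// instantiated with a compile-time mask; the per-slot loop then only contains
// the barriers that are actually needed for this page and marking state.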
7192 switch (mode) {
7193 // Nothing to be done.
7194 case 0:
7195 return;
7196
7197 // Generational only.
7198 case kDoGenerational:
7199 return WriteBarrierForRangeImpl<kDoGenerational>(source_page, object,
7200 start_slot, end_slot);
7201 // Marking, no evacuation slot recording.
7202 case kDoMarking:
7203 return WriteBarrierForRangeImpl<kDoMarking>(source_page, object,
7204 start_slot, end_slot);
7205 // Marking with evacuation slot recording.
7206 case kDoMarking | kDoEvacuationSlotRecording:
7207 return WriteBarrierForRangeImpl<kDoMarking | kDoEvacuationSlotRecording>(
7208 source_page, object, start_slot, end_slot);
7209
7210 // Generational and marking, no evacuation slot recording.
7211 case kDoGenerational | kDoMarking:
7212 return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking>(
7213 source_page, object, start_slot, end_slot);
7214
7215 // Generational and marking with evacuation slot recording.
7216 case kDoGenerational | kDoMarking | kDoEvacuationSlotRecording:
7217 return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking |
7218 kDoEvacuationSlotRecording>(
7219 source_page, object, start_slot, end_slot);
7220
7221 default:
7222 UNREACHABLE();
7223 }
7224 }
7225
7226 void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
7227 HeapObject object) {
7228 DCHECK(InYoungGeneration(object));
7229 Page* source_page = Page::FromHeapObject(host);
7230 RelocInfo::Mode rmode = rinfo->rmode();
7231 Address addr = rinfo->pc();
7232 SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
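// On architectures that place embedded constants in a constant pool, the
// referenced object is reached through the constant pool entry rather than
// the instruction itself, so record that address with a matching slot type.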
7233 if (rinfo->IsInConstantPool()) {
7234 addr = rinfo->constant_pool_entry_address();
7235 if (RelocInfo::IsCodeTargetMode(rmode)) {
7236 slot_type = CODE_ENTRY_SLOT;
7237 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
7238 slot_type = COMPRESSED_OBJECT_SLOT;
7239 } else {
7240 DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
7241 slot_type = FULL_OBJECT_SLOT;
7242 }
7243 }
7244 uintptr_t offset = addr - source_page->address();
7245 DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
7246 RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
7247 static_cast<uint32_t>(offset));
7248 }
7249
7250 bool Heap::PageFlagsAreConsistent(HeapObject object) {
7251 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
7252 return true;
7253 }
7254 BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
7255 heap_internals::MemoryChunk* slim_chunk =
7256 heap_internals::MemoryChunk::FromHeapObject(object);
7257
7258 // Slim chunk flags consistency.
7259 CHECK_EQ(chunk->InYoungGeneration(), slim_chunk->InYoungGeneration());
7260 CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
7261 slim_chunk->IsMarking());
7262
7263 AllocationSpace identity = chunk->owner()->identity();
7264
7265 // Generation consistency.
7266 CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
7267 slim_chunk->InYoungGeneration());
7268 // Read-only consistency.
7269 CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
7270
7271 // Marking consistency.
7272 if (chunk->IsWritable()) {
7273 // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
7274 // find a heap. The exception is during bootstrapping, when the ReadOnlySpace
7275 // is still writable, so explicitly allow this case.
7276 Heap* heap = Heap::FromWritableHeapObject(object);
7277 CHECK_EQ(slim_chunk->IsMarking(), heap->incremental_marking()->IsMarking());
7278 } else {
7279 // Non-writable RO_SPACE must never have marking flag set.
7280 CHECK(!slim_chunk->IsMarking());
7281 }
7282 return true;
7283 }
7284
7285 void Heap::SetEmbedderStackStateForNextFinalization(
7286 EmbedderHeapTracer::EmbedderStackState stack_state) {
7287 local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
7288 stack_state);
7289 }
7290
7291 #ifdef DEBUG
7292 void Heap::IncrementObjectCounters() {
7293 isolate_->counters()->objs_since_last_full()->Increment();
7294 isolate_->counters()->objs_since_last_young()->Increment();
7295 }
7296 #endif // DEBUG
7297
7298 // StrongRootBlocks are allocated as a block of addresses, prefixed with a
7299 // StrongRootsEntry pointer:
7300 //
7301 // | StrongRootsEntry*
7302 // | Address 1
7303 // | ...
7304 // | Address N
7305 //
7306 // The allocate method registers the range "Address 1" to "Address N" with the
7307 // heap as a strong root array, saves that entry in StrongRootsEntry*, and
7308 // returns a pointer to Address 1.
7309 Address* StrongRootBlockAllocator::allocate(size_t n) {
7310 void* block = base::Malloc(sizeof(StrongRootsEntry*) + n * sizeof(Address));
7311
7312 StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
7313 Address* ret = reinterpret_cast<Address*>(reinterpret_cast<char*>(block) +
7314 sizeof(StrongRootsEntry*));
7315
7316 memset(ret, kNullAddress, n * sizeof(Address));
7317 *header = heap_->RegisterStrongRoots(
7318 "StrongRootBlockAllocator", FullObjectSlot(ret), FullObjectSlot(ret + n));
7319
7320 return ret;
7321 }
7322
7323 void StrongRootBlockAllocator::deallocate(Address* p, size_t n) noexcept {
7324 // The allocate method returns a pointer to Address 1, so the deallocate
7325 // method has to offset that pointer back by sizeof(StrongRootsEntry*).
7326 void* block = reinterpret_cast<char*>(p) - sizeof(StrongRootsEntry*);
7327 StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
7328
7329 heap_->UnregisterStrongRoots(*header);
7330
7331 base::Free(block);
7332 }
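// A minimal usage sketch (hypothetical, not a call site in this file; it
// assumes StrongRootBlockAllocator satisfies the standard allocator interface
// for Address elements as declared in heap.h):
//
//   StrongRootBlockAllocator alloc(heap);
//   std::vector<Address, StrongRootBlockAllocator> roots(alloc);
//   roots.push_back(object.ptr());  // address is now a strong GC root
//
// Every backing block the vector allocates is registered via
// RegisterStrongRoots above, and deallocate() unregisters it again before
// freeing, so the addresses stay strongly reachable exactly as long as the
// container owns the block.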
7333
7334 } // namespace internal
7335 } // namespace v8
7336