// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/heap.h"

#include <unordered_map>
#include <unordered_set>

#include "src/accessors.h"
#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/feedback-vector.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
#include "src/heap/code-stats.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/data-handler.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"

// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
  DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
  set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
  DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
  set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
  DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
  set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
  DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
  set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetSerializedObjects(FixedArray* objects) {
  DCHECK(isolate()->serializer_enabled());
  set_serialized_objects(objects);
}

void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
  DCHECK(isolate()->serializer_enabled());
  set_serialized_global_proxy_sizes(sizes);
}

bool Heap::GCCallbackTuple::operator==(
    const Heap::GCCallbackTuple& other) const {
  return other.callback == callback && other.data == data;
}

Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
    const Heap::GCCallbackTuple& other) {
  callback = other.callback;
  gc_type = other.gc_type;
  data = other.data;
  return *this;
}

struct Heap::StrongRootsList {
  Object** start;
  Object** end;
  StrongRootsList* next;
};

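// Allocation observer that asks the scavenge job to consider scheduling an
// idle-time scavenge after every |step_size| bytes of new-space allocation.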
class IdleScavengeObserver : public AllocationObserver {
 public:
  IdleScavengeObserver(Heap& heap, intptr_t step_size)
      : AllocationObserver(step_size), heap_(heap) {}

  void Step(int bytes_allocated, Address, size_t) override {
    heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
  }

 private:
  Heap& heap_;
};

Heap::Heap()
    : external_memory_(0),
      external_memory_limit_(kExternalAllocationSoftLimit),
      external_memory_at_last_mark_compact_(0),
      external_memory_concurrently_freed_(0),
      isolate_(nullptr),
      code_range_size_(0),
      // semispace_size_ should be a power of 2 and old_generation_size_ should
      // be a multiple of Page::kPageSize.
      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
      initial_semispace_size_(kMinSemiSpaceSizeInKB * KB),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      initial_max_old_generation_size_(max_old_generation_size_),
      initial_old_generation_size_(max_old_generation_size_ /
                                   kInitalOldGenerationLimitFactor),
      old_generation_size_configured_(false),
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
      // Will be 4 * reserved_semispace_size_ to ensure that young
      // generation can be aligned to its size.
      maximum_committed_(0),
      survived_since_last_expansion_(0),
      survived_last_scavenge_(0),
      always_allocate_scope_count_(0),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      contexts_disposed_(0),
      number_of_disposed_maps_(0),
      new_space_(nullptr),
      old_space_(nullptr),
      code_space_(nullptr),
      map_space_(nullptr),
      lo_space_(nullptr),
      read_only_space_(nullptr),
      write_protect_code_memory_(false),
      code_space_memory_modification_scope_depth_(0),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      allocations_count_(0),
      raw_allocations_hash_(0),
      stress_marking_observer_(nullptr),
      stress_scavenge_observer_(nullptr),
      allocation_step_in_progress_(false),
      max_marking_limit_reached_(0.0),
      ms_count_(0),
      gc_count_(0),
      consecutive_ineffective_mark_compacts_(0),
      mmap_region_base_(0),
      remembered_unmapped_pages_index_(0),
      old_generation_allocation_limit_(initial_old_generation_size_),
      inline_allocation_disabled_(false),
      tracer_(nullptr),
      promoted_objects_size_(0),
      promotion_ratio_(0),
      semi_space_copied_object_size_(0),
      previous_semi_space_copied_object_size_(0),
      semi_space_copied_rate_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      maximum_size_scavenges_(0),
      last_idle_notification_time_(0.0),
      last_gc_time_(0.0),
      mark_compact_collector_(nullptr),
      minor_mark_compact_collector_(nullptr),
      array_buffer_collector_(nullptr),
      memory_allocator_(nullptr),
      store_buffer_(nullptr),
      incremental_marking_(nullptr),
      concurrent_marking_(nullptr),
      gc_idle_time_handler_(nullptr),
      memory_reducer_(nullptr),
      live_object_stats_(nullptr),
      dead_object_stats_(nullptr),
      scavenge_job_(nullptr),
      parallel_scavenge_semaphore_(0),
      idle_scavenge_observer_(nullptr),
      new_space_allocation_counter_(0),
      old_generation_allocation_counter_at_last_gc_(0),
      old_generation_size_at_last_gc_(0),
      global_pretenuring_feedback_(kInitialFeedbackCapacity),
      is_marking_flag_(false),
      ring_buffer_full_(false),
      ring_buffer_end_(0),
      configured_(false),
      current_gc_flags_(Heap::kNoGCFlags),
      current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
      external_string_table_(this),
      gc_callbacks_depth_(0),
      deserialization_complete_(false),
      strong_roots_list_(nullptr),
      heap_iterator_depth_(0),
      local_embedder_heap_tracer_(nullptr),
      fast_promotion_mode_(false),
      force_oom_(false),
      delay_sweeper_tasks_for_testing_(false),
      pending_layout_change_object_(nullptr),
      unprotected_memory_chunks_registry_enabled_(false)
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
      ,
      allocation_timeout_(0)
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT
{
  // Ensure old_generation_size_ is a multiple of kPageSize.
  DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  set_native_contexts_list(nullptr);
  set_allocation_sites_list(Smi::kZero);
  set_encountered_weak_collections(Smi::kZero);
  // Put a dummy entry in the remembered pages so we can find the list in a
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(kNullAddress, false);
}

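// Upper bound on the memory the heap may ever reserve: both semispaces plus
// the old generation, scaled by the page-size / allocatable-area ratio to
// account for per-page bookkeeping overhead.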
size_t Heap::MaxReserved() {
  const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
  return static_cast<size_t>(
      (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
}

size_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_->Capacity() + OldGenerationCapacity();
}

size_t Heap::OldGenerationCapacity() {
  if (!HasBeenSetUp()) return 0;
  PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
  size_t total = 0;
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    total += space->Capacity();
  }
  return total + lo_space_->SizeOfObjects();
}

size_t Heap::CommittedOldGenerationMemory() {
  if (!HasBeenSetUp()) return 0;

  PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
  size_t total = 0;
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    total += space->CommittedMemory();
  }
  return total + lo_space_->Size();
}

size_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  size_t total = 0;
  for (SpaceIterator it(this); it.has_next();) {
    total += it.next()->CommittedPhysicalMemory();
  }

  return total;
}

size_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return static_cast<size_t>(memory_allocator()->SizeExecutable());
}


void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  const size_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}

size_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  size_t total = 0;

  for (SpaceIterator it(this); it.has_next();) {
    total += it.next()->Available();
  }
  return total;
}

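// Returns true if the old generation may grow by |size| bytes without
// exceeding the configured old-generation limit or the overall reserved
// heap size.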
bool Heap::CanExpandOldGeneration(size_t size) {
  if (force_oom_) return false;
  if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
  // OldGenerationCapacity does not account for the compaction spaces used
  // during evacuation. Ensure that expanding the old generation does not push
  // the total allocated memory size over the maximum heap size.
  return memory_allocator()->Size() + size <= MaxReserved();
}

bool Heap::HasBeenSetUp() {
  return old_space_ != nullptr && code_space_ != nullptr &&
         map_space_ != nullptr && lo_space_ != nullptr &&
         read_only_space_ != nullptr;
}


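// Picks the collector for this GC: a full MARK_COMPACTOR when old space is
// requested, forced by flags, in need of incremental-marking finalization, or
// unable to absorb a potential scavenge; otherwise the young-generation
// collector.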
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  if (incremental_marking()->NeedsFinalization() &&
      AllocationLimitOvershotByLargeMargin()) {
    *reason = "Incremental marking needs finalization";
    return MARK_COMPACTOR;
  }

  // Over-estimate the new space size using capacity to allow some slack.
  if (!CanExpandOldGeneration(new_space_->TotalCapacity())) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = nullptr;
  return YoungGenerationCollector();
}

void Heap::SetGCState(HeapState state) {
  gc_state_ = state;
}

void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintIsolate(isolate_, "Memory allocator,   used: %6" PRIuS
                         " KB,"
                         " available: %6" PRIuS " KB\n",
               memory_allocator()->Size() / KB,
               memory_allocator()->Available() / KB);
  PrintIsolate(isolate_,
               "Read-only space,    used: %6" PRIuS
               " KB"
               ", available: %6" PRIuS
               " KB"
               ", committed: %6" PRIuS " KB\n",
               read_only_space_->Size() / KB,
               read_only_space_->Available() / KB,
               read_only_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "New space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               new_space_->Size() / KB, new_space_->Available() / KB,
               new_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Old space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Code space,         used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Map space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "All spaces,         used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
  PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
               external_memory_ / KB);
  PrintIsolate(isolate_, "External memory global %zu KB\n",
               external_memory_callback_() / KB);
  PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
               total_gc_time_ms_);
}

void Heap::ReportStatisticsAfterGC() {
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       ++i) {
    int count = deferred_counters_[i];
    deferred_counters_[i] = 0;
    while (count > 0) {
      count--;
      isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
    }
  }
}

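// Heap-object allocation trackers need to observe every allocation, so inline
// (bump-pointer) allocation is disabled while at least one tracker is
// installed and re-enabled once the last one is removed.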
void Heap::AddHeapObjectAllocationTracker(
    HeapObjectAllocationTracker* tracker) {
  if (allocation_trackers_.empty()) DisableInlineAllocation();
  allocation_trackers_.push_back(tracker);
}

void Heap::RemoveHeapObjectAllocationTracker(
    HeapObjectAllocationTracker* tracker) {
  allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
                                         allocation_trackers_.end(), tracker),
                             allocation_trackers_.end());
  if (allocation_trackers_.empty()) EnableInlineAllocation();
}

void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
                                  RetainingPathOption option) {
  if (!FLAG_track_retaining_path) {
    PrintF("Retaining path tracking requires --track-retaining-path\n");
  } else {
    int index = 0;
    Handle<FixedArrayOfWeakCells> array = FixedArrayOfWeakCells::Add(
        handle(retaining_path_targets(), isolate()), object, &index);
    set_retaining_path_targets(*array);
    retaining_path_target_option_[index] = option;
  }
}

bool Heap::IsRetainingPathTarget(HeapObject* object,
                                 RetainingPathOption* option) {
  if (!retaining_path_targets()->IsFixedArrayOfWeakCells()) return false;
  FixedArrayOfWeakCells* targets =
      FixedArrayOfWeakCells::cast(retaining_path_targets());
  int length = targets->Length();
  for (int i = 0; i < length; i++) {
    if (targets->Get(i) == object) {
      DCHECK(retaining_path_target_option_.count(i));
      *option = retaining_path_target_option_[i];
      return true;
    }
  }
  return false;
}

void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
  PrintF("\n\n\n");
  PrintF("#################################################\n");
  PrintF("Retaining path for %p:\n", static_cast<void*>(target));
  HeapObject* object = target;
  std::vector<std::pair<HeapObject*, bool>> retaining_path;
  Root root = Root::kUnknown;
  bool ephemeral = false;
  while (true) {
    retaining_path.push_back(std::make_pair(object, ephemeral));
    if (option == RetainingPathOption::kTrackEphemeralPath &&
        ephemeral_retainer_.count(object)) {
      object = ephemeral_retainer_[object];
      ephemeral = true;
    } else if (retainer_.count(object)) {
      object = retainer_[object];
      ephemeral = false;
    } else {
      if (retaining_root_.count(object)) {
        root = retaining_root_[object];
      }
      break;
    }
  }
  int distance = static_cast<int>(retaining_path.size());
  for (auto node : retaining_path) {
    HeapObject* object = node.first;
    bool ephemeral = node.second;
    PrintF("\n");
    PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
    PrintF("Distance from root %d%s: ", distance,
           ephemeral ? " (ephemeral)" : "");
    object->ShortPrint();
    PrintF("\n");
#ifdef OBJECT_PRINT
    object->Print();
    PrintF("\n");
#endif
    --distance;
  }
  PrintF("\n");
  PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
  PrintF("Root: %s\n", RootVisitor::RootName(root));
  PrintF("-------------------------------------------------\n");
}

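// Records the first retainer discovered for |object| during marking and, if
// the object is a tracked retaining-path target, prints its retaining path.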
void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
  if (retainer_.count(object)) return;
  retainer_[object] = retainer;
  RetainingPathOption option = RetainingPathOption::kDefault;
  if (IsRetainingPathTarget(object, &option)) {
    // Check if the retaining path was already printed in
    // AddEphemeralRetainer().
    if (ephemeral_retainer_.count(object) == 0 ||
        option == RetainingPathOption::kDefault) {
      PrintRetainingPath(object, option);
    }
  }
}

void Heap::AddEphemeralRetainer(HeapObject* retainer, HeapObject* object) {
  if (ephemeral_retainer_.count(object)) return;
  ephemeral_retainer_[object] = retainer;
  RetainingPathOption option = RetainingPathOption::kDefault;
  if (IsRetainingPathTarget(object, &option) &&
      option == RetainingPathOption::kTrackEphemeralPath) {
    // Check if the retaining path was already printed in AddRetainer().
    if (retainer_.count(object) == 0) {
      PrintRetainingPath(object, option);
    }
  }
}

void Heap::AddRetainingRoot(Root root, HeapObject* object) {
  if (retaining_root_.count(object)) return;
  retaining_root_[object] = root;
  RetainingPathOption option = RetainingPathOption::kDefault;
  if (IsRetainingPathTarget(object, &option)) {
    PrintRetainingPath(object, option);
  }
}

void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
  deferred_counters_[feature]++;
}

bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }

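// Runs before every collection: bumps the GC count, optionally verifies the
// heap, resets the per-cycle promotion and survival statistics, and clears
// retaining-path bookkeeping when --track-retaining-path is enabled.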
void Heap::GarbageCollectionPrologue() {
  TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
  {
    AllowHeapAllocation for_the_first_part_of_prologue;
    gc_count_++;

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

  // Reset GC statistics.
  promoted_objects_size_ = 0;
  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
  semi_space_copied_object_size_ = 0;
  nodes_died_in_new_space_ = 0;
  nodes_copied_in_new_space_ = 0;
  nodes_promoted_ = 0;

  UpdateMaximumCommitted();

#ifdef DEBUG
  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();
#endif  // DEBUG

  if (new_space_->IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  CheckNewSpaceExpansionCriteria();
  UpdateNewSpaceAllocationCounter();
  if (FLAG_track_retaining_path) {
    retainer_.clear();
    ephemeral_retainer_.clear();
    retaining_root_.clear();
  }
}

size_t Heap::SizeOfObjects() {
  size_t total = 0;

  for (SpaceIterator it(this); it.has_next();) {
    total += it.next()->SizeOfObjects();
  }
  return total;
}


const char* Heap::GetSpaceName(int idx) {
  switch (idx) {
    case NEW_SPACE:
      return "new_space";
    case OLD_SPACE:
      return "old_space";
    case MAP_SPACE:
      return "map_space";
    case CODE_SPACE:
      return "code_space";
    case LO_SPACE:
      return "large_object_space";
    case RO_SPACE:
      return "read_only_space";
    default:
      UNREACHABLE();
  }
  return nullptr;
}

void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
  roots_[kCodeStubsRootIndex] = value;
}

void Heap::RepairFreeListsAfterDeserialization() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    space->RepairFreeListsAfterDeserialization();
  }
}

void Heap::MergeAllocationSitePretenuringFeedback(
    const PretenuringFeedbackMap& local_pretenuring_feedback) {
  AllocationSite* site = nullptr;
  for (auto& site_and_count : local_pretenuring_feedback) {
    site = site_and_count.first;
    MapWord map_word = site_and_count.first->map_word();
    if (map_word.IsForwardingAddress()) {
      site = AllocationSite::cast(map_word.ToForwardingAddress());
    }

    // We have not validated the allocation site yet, since we did not
    // dereference the site while collecting the information.
    // This is an inlined check of AllocationMemento::IsValid.
    if (!site->IsAllocationSite() || site->IsZombie()) continue;

    const int value = static_cast<int>(site_and_count.second);
    DCHECK_LT(0, value);
    if (site->IncrementMementoFoundCount(value)) {
      // For sites in the global map the count is accessed through the site.
      global_pretenuring_feedback_.insert(std::make_pair(site, 0));
    }
  }
}

void Heap::AddAllocationObserversToAllSpaces(
    AllocationObserver* observer, AllocationObserver* new_space_observer) {
  DCHECK(observer && new_space_observer);

  for (SpaceIterator it(this); it.has_next();) {
    Space* space = it.next();
    if (space == new_space()) {
      space->AddAllocationObserver(new_space_observer);
    } else {
      space->AddAllocationObserver(observer);
    }
  }
}

void Heap::RemoveAllocationObserversFromAllSpaces(
    AllocationObserver* observer, AllocationObserver* new_space_observer) {
  DCHECK(observer && new_space_observer);

  for (SpaceIterator it(this); it.has_next();) {
    Space* space = it.next();
    if (space == new_space()) {
      space->RemoveAllocationObserver(new_space_observer);
    } else {
      space->RemoveAllocationObserver(observer);
    }
  }
}

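// RAII scope used during garbage collection: it flushes all pending store
// buffer entries into the remembered set and switches the store buffer to
// IN_GC mode, restoring NOT_IN_GC mode (and checking emptiness) on exit.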
class Heap::SkipStoreBufferScope {
 public:
  explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {
    store_buffer_->MoveAllEntriesToRememberedSet();
    store_buffer_->SetMode(StoreBuffer::IN_GC);
  }

  ~SkipStoreBufferScope() {
    DCHECK(store_buffer_->Empty());
    store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
  }

 private:
  StoreBuffer* store_buffer_;
};

namespace {
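// Updates the pretenuring decision of |site| based on the memento-survival
// ratio observed in the last scavenge. Returns true if dependent code must be
// deoptimized, i.e. when the site transitions into the tenured state.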
inline bool MakePretenureDecision(
    AllocationSite* site, AllocationSite::PretenureDecision current_decision,
    double ratio, bool maximum_size_scavenge) {
  // Here we just allow state transitions from undecided or maybe tenure
  // to don't tenure, maybe tenure, or tenure.
  if ((current_decision == AllocationSite::kUndecided ||
       current_decision == AllocationSite::kMaybeTenure)) {
    if (ratio >= AllocationSite::kPretenureRatio) {
      // We only transition into the tenure state when the semi-space was at
      // maximum capacity.
      if (maximum_size_scavenge) {
        site->set_deopt_dependent_code(true);
        site->set_pretenure_decision(AllocationSite::kTenure);
        // Currently we just need to deopt when we make a state transition to
        // tenure.
        return true;
      }
      site->set_pretenure_decision(AllocationSite::kMaybeTenure);
    } else {
      site->set_pretenure_decision(AllocationSite::kDontTenure);
    }
  }
  return false;
}

inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
                                      bool maximum_size_scavenge) {
  bool deopt = false;
  int create_count = site->memento_create_count();
  int found_count = site->memento_found_count();
  bool minimum_mementos_created =
      create_count >= AllocationSite::kPretenureMinimumCreated;
  double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
                     ? static_cast<double>(found_count) / create_count
                     : 0.0;
  AllocationSite::PretenureDecision current_decision =
      site->pretenure_decision();

  if (minimum_mementos_created) {
    deopt = MakePretenureDecision(site, current_decision, ratio,
                                  maximum_size_scavenge);
  }

  if (FLAG_trace_pretenuring_statistics) {
    PrintIsolate(isolate,
                 "pretenuring: AllocationSite(%p): (created, found, ratio) "
                 "(%d, %d, %f) %s => %s\n",
                 static_cast<void*>(site), create_count, found_count, ratio,
                 site->PretenureDecisionName(current_decision),
                 site->PretenureDecisionName(site->pretenure_decision()));
  }

  // Clear feedback calculation fields until the next gc.
  site->set_memento_found_count(0);
  site->set_memento_create_count(0);
  return deopt;
}
}  // namespace

void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
  global_pretenuring_feedback_.erase(site);
}

bool Heap::DeoptMaybeTenuredAllocationSites() {
  return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
}

void Heap::ProcessPretenuringFeedback() {
  bool trigger_deoptimization = false;
  if (FLAG_allocation_site_pretenuring) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    AllocationSite* site = nullptr;

    // Step 1: Digest feedback for recorded allocation sites.
    bool maximum_size_scavenge = MaximumSizeScavenge();
    for (auto& site_and_count : global_pretenuring_feedback_) {
      allocation_sites++;
      site = site_and_count.first;
      // The count is always accessed through the site.
      DCHECK_EQ(0, site_and_count.second);
      int found_count = site->memento_found_count();
      // An entry in the storage does not imply that the count is > 0 because
      // allocation sites might have been reset due to too many objects dying
      // in old space.
      if (found_count > 0) {
        DCHECK(site->IsAllocationSite());
        active_allocation_sites++;
        allocation_mementos_found += found_count;
        if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
          trigger_deoptimization = true;
        }
        if (site->GetPretenureMode() == TENURED) {
          tenure_decisions++;
        } else {
          dont_tenure_decisions++;
        }
      }
    }

    // Step 2: Deopt maybe tenured allocation sites if necessary.
    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
    if (deopt_maybe_tenured) {
      Object* list_element = allocation_sites_list();
      while (list_element->IsAllocationSite()) {
        site = AllocationSite::cast(list_element);
        DCHECK(site->IsAllocationSite());
        allocation_sites++;
        if (site->IsMaybeTenure()) {
          site->set_deopt_dependent_code(true);
          trigger_deoptimization = true;
        }
        list_element = site->weak_next();
      }
    }

    if (trigger_deoptimization) {
      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    }

    if (FLAG_trace_pretenuring_statistics &&
        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintIsolate(isolate(),
                   "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
                   "active_sites=%d "
                   "mementos=%d tenured=%d not_tenured=%d\n",
                   deopt_maybe_tenured ? 1 : 0, allocation_sites,
                   active_allocation_sites, allocation_mementos_found,
                   tenure_decisions, dont_tenure_decisions);
    }

    global_pretenuring_feedback_.clear();
    global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
  }
}

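// Both invalidation helpers below open a CodePageMemoryModificationScope so
// that the (potentially write-protected) code page can be written to.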
void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
  CodePageMemoryModificationScope modification_scope(chunk);
  code->InvalidateEmbeddedObjects();
}

void Heap::InvalidateCodeDeoptimizationData(Code* code) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
  CodePageMemoryModificationScope modification_scope(chunk);
  code->set_deoptimization_data(empty_fixed_array());
}

void Heap::DeoptMarkedAllocationSites() {
  // TODO(hpayer): If iterating over the allocation sites list becomes a
  // performance issue, use a cache data structure in heap instead.
  Object* list_element = allocation_sites_list();
  while (list_element->IsAllocationSite()) {
    AllocationSite* site = AllocationSite::cast(list_element);
    if (site->deopt_dependent_code()) {
      site->dependent_code()->MarkCodeForDeoptimization(
          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
      site->set_deopt_dependent_code(false);
    }
    list_element = site->weak_next();
  }
  Deoptimizer::DeoptimizeMarkedCode(isolate_);
}

void Heap::GarbageCollectionEpilogue() {
  TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
  if (FLAG_check_handle_count) CheckHandleCount();
#endif

  UpdateMaximumCommitted();

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
        static_cast<int>(code_space()->CommittedMemory() / KB));

    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space)                \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available()));          \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory()));    \
  isolate_->counters()->space##_bytes_used()->Set(      \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
  if (space()->CommittedMemory() > 0) {                                \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 -                                         \
                         (space()->SizeOfObjects() * 100.0) /          \
                             space()->CommittedMemory()));             \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space)                         \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#ifdef DEBUG
  ReportStatisticsAfterGC();
#endif  // DEBUG

  last_gc_time_ = MonotonicallyIncreasingTimeInMs();

  {
    TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
    ReduceNewSpaceSize();
  }
}


void Heap::PreprocessStackTraces() {
  FixedArrayOfWeakCells::Iterator iterator(weak_stack_trace_list());
  FixedArray* elements;
  while ((elements = iterator.Next<FixedArray>()) != nullptr) {
    for (int j = 1; j < elements->length(); j += 4) {
      Object* maybe_code = elements->get(j + 2);
      // If GC happens while adding a stack trace to the weak fixed array,
      // which has been copied into a larger backing store, we may run into
      // a stack trace that has already been preprocessed. Guard against this.
      if (!maybe_code->IsAbstractCode()) break;
      AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
      int offset = Smi::ToInt(elements->get(j + 3));
      int pos = abstract_code->SourcePosition(offset);
      elements->set(j + 2, Smi::FromInt(pos));
    }
  }
  // We must not compact the weak fixed list here, as we may be in the middle
  // of writing to it when the GC was triggered. Instead, we reset the root
  // value.
  set_weak_stack_trace_list(Smi::kZero);
}


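// Tracks the nesting depth of embedder GC callbacks; CheckReenter() is true
// only for the outermost scope, so prologue/epilogue callbacks are not
// re-entered from within a callback-triggered GC.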
class GCCallbacksScope {
 public:
  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    heap_->gc_callbacks_depth_++;
  }
  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }

  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }

 private:
  Heap* heap_;
};


void Heap::HandleGCRequest() {
  if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
    CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
    stress_scavenge_observer_->RequestedGCDone();
  } else if (HighMemoryPressure()) {
    incremental_marking()->reset_request_type();
    CheckMemoryPressure();
  } else if (incremental_marking()->request_type() ==
             IncrementalMarking::COMPLETE_MARKING) {
    incremental_marking()->reset_request_type();
    CollectAllGarbage(current_gc_flags_,
                      GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
                      current_gc_callback_flags_);
  } else if (incremental_marking()->request_type() ==
                 IncrementalMarking::FINALIZATION &&
             incremental_marking()->IsMarking() &&
             !incremental_marking()->finalize_marking_completed()) {
    incremental_marking()->reset_request_type();
    FinalizeIncrementalMarking(
        GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
  }
}


void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
  scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}

void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] (%s).\n",
        Heap::GarbageCollectionReasonToString(gc_reason));
  }

  HistogramTimerScope incremental_marking_scope(
      isolate()->counters()->gc_incremental_marking_finalize());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
  incremental_marking()->FinalizeIncrementally();
  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
}

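// Selects the histogram timer that matches both the collector type and the
// current foreground/background state of the isolate.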
HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
  if (IsYoungGenerationCollector(collector)) {
    if (isolate_->IsIsolateInBackground()) {
      return isolate_->counters()->gc_scavenger_background();
    }
    return isolate_->counters()->gc_scavenger_foreground();
  } else {
    if (!incremental_marking()->IsStopped()) {
      if (ShouldReduceMemory()) {
        if (isolate_->IsIsolateInBackground()) {
          return isolate_->counters()->gc_finalize_reduce_memory_background();
        }
        return isolate_->counters()->gc_finalize_reduce_memory_foreground();
      } else {
        if (isolate_->IsIsolateInBackground()) {
          return isolate_->counters()->gc_finalize_background();
        }
        return isolate_->counters()->gc_finalize_foreground();
      }
    } else {
      if (isolate_->IsIsolateInBackground()) {
        return isolate_->counters()->gc_compactor_background();
      }
      return isolate_->counters()->gc_compactor_foreground();
    }
  }
}

HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
  if (IsYoungGenerationCollector(collector)) {
    return isolate_->counters()->gc_scavenger();
  } else {
    if (!incremental_marking()->IsStopped()) {
      if (ShouldReduceMemory()) {
        return isolate_->counters()->gc_finalize_reduce_memory();
      } else {
        return isolate_->counters()->gc_finalize();
      }
    } else {
      return isolate_->counters()->gc_compactor();
    }
  }
}

void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
                             const v8::GCCallbackFlags gc_callback_flags) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  set_current_gc_flags(flags);
  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
  set_current_gc_flags(kNoGCFlags);
}

namespace {

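// Compares the raw words of two heap objects of the same size and returns the
// difference at the first mismatching word (0 if the objects are bit-identical).
// Used below to group byte-wise identical objects when reporting duplicates.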
intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
  int words = size / kPointerSize;
  DCHECK_EQ(a->Size(), size);
  DCHECK_EQ(b->Size(), size);
  intptr_t* slot_a = reinterpret_cast<intptr_t*>(a->address());
  intptr_t* slot_b = reinterpret_cast<intptr_t*>(b->address());
  for (int i = 0; i < words; i++) {
    if (*slot_a != *slot_b) {
      return *slot_a - *slot_b;
    }
    slot_a++;
    slot_b++;
  }
  return 0;
}

void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
  if (objects.size() == 0) return;

  sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
    intptr_t c = CompareWords(size, a, b);
    if (c != 0) return c < 0;
    return a < b;
  });

  std::vector<std::pair<int, HeapObject*>> duplicates;
  HeapObject* current = objects[0];
  int count = 1;
  for (size_t i = 1; i < objects.size(); i++) {
    if (CompareWords(size, current, objects[i]) == 0) {
      count++;
    } else {
      if (count > 1) {
        duplicates.push_back(std::make_pair(count - 1, current));
      }
      count = 1;
      current = objects[i];
    }
  }
  if (count > 1) {
    duplicates.push_back(std::make_pair(count - 1, current));
  }

  int threshold = FLAG_trace_duplicate_threshold_kb * KB;

  sort(duplicates.begin(), duplicates.end());
  for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
    int duplicate_bytes = it->first * size;
    if (duplicate_bytes < threshold) break;
    PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
           duplicate_bytes / KB);
    PrintF("Sample object: ");
    it->second->Print();
    PrintF("============================\n");
  }
}
}  // anonymous namespace

void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC.  Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  if (gc_reason == GarbageCollectionReason::kLastResort) {
    InvokeNearHeapLimitCallback();
  }
  RuntimeCallTimerScope runtime_timer(
      isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);

  // The optimizing compiler may be unnecessarily holding on to memory.
  isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
  isolate()->ClearSerializerData();
  set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_SPACE, gc_reason,
                        v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }

  set_current_gc_flags(kNoGCFlags);
  new_space_->Shrink();
  UncommitFromSpace();
  memory_allocator()->unmapper()->EnsureUnmappingCompleted();

  if (FLAG_trace_duplicate_threshold_kb) {
    std::map<int, std::vector<HeapObject*>> objects_by_size;
    PagedSpaces spaces(this);
    for (PagedSpace* space = spaces.next(); space != nullptr;
         space = spaces.next()) {
      HeapObjectIterator it(space);
      for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
        objects_by_size[obj->Size()].push_back(obj);
      }
    }
    {
      LargeObjectIterator it(lo_space());
      for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
        objects_by_size[obj->Size()].push_back(obj);
      }
    }
    for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
         ++it) {
      ReportDuplicates(it->first, it->second);
    }
  }
}

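// Called when embedder-reported external memory grows: triggers a full GC once
// the hard limit past the last mark-compact is exceeded, and otherwise starts
// or advances incremental marking with external-memory callback flags attached.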
void Heap::ReportExternalMemoryPressure() {
  const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
      static_cast<GCCallbackFlags>(
          kGCCallbackFlagSynchronousPhantomCallbackProcessing |
          kGCCallbackFlagCollectAllExternalMemory);
  if (external_memory_ >
      (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
    CollectAllGarbage(
        kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
        GarbageCollectionReason::kExternalMemoryPressure,
        static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
                                     kGCCallbackFlagsForExternalMemory));
    return;
  }
  if (incremental_marking()->IsStopped()) {
    if (incremental_marking()->CanBeActivated()) {
      StartIncrementalMarking(GCFlagsForIncrementalMarking(),
                              GarbageCollectionReason::kExternalMemoryPressure,
                              kGCCallbackFlagsForExternalMemory);
    } else {
      CollectAllGarbage(i::Heap::kNoGCFlags,
                        GarbageCollectionReason::kExternalMemoryPressure,
                        kGCCallbackFlagsForExternalMemory);
    }
  } else {
    // Incremental marking is turned on and has already been started.
    const double kMinStepSize = 5;
    const double kMaxStepSize = 10;
    const double ms_step =
        Min(kMaxStepSize,
            Max(kMinStepSize, static_cast<double>(external_memory_) /
                                  external_memory_limit_ * kMinStepSize));
    const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
    // Extend the gc callback flags with external memory flags.
    current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
        current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
    incremental_marking()->AdvanceIncrementalMarking(
        deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
  }
}

void Heap::EnsureFillerObjectAtTop() {
  // There may be an allocation memento behind objects in new space. Upon
  // evacuation of a non-full new space (or if we are on the last page) there
  // may be uninitialized memory behind top. We fill the remainder of the page
  // with a filler.
  Address to_top = new_space_->top();
  Page* page = Page::FromAddress(to_top - kPointerSize);
  if (page->Contains(to_top)) {
    int remaining_in_page = static_cast<int>(page->area_end() - to_top);
    CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
  }
}

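// Main entry point for a garbage collection cycle: selects the collector,
// runs the prologue, the collection itself, and the epilogue, notifies the
// memory reducer after full GCs, and returns whether the next GC is likely
// to collect more memory.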
CollectGarbage(AllocationSpace space,GarbageCollectionReason gc_reason,const v8::GCCallbackFlags gc_callback_flags)1335 bool Heap::CollectGarbage(AllocationSpace space,
1336                           GarbageCollectionReason gc_reason,
1337                           const v8::GCCallbackFlags gc_callback_flags) {
1338   const char* collector_reason = nullptr;
1339   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
1340 
1341   if (!CanExpandOldGeneration(new_space()->Capacity())) {
1342     InvokeNearHeapLimitCallback();
1343   }
1344 
1345   // The VM is in the GC state until exiting this function.
1346   VMState<GC> state(isolate());
1347 
1348 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1349   // Reset the allocation timeout, but make sure to allow at least a few
1350   // allocations after a collection. The reason for this is that we have a lot
1351   // of allocation sequences and we assume that a garbage collection will allow
1352   // the subsequent allocation attempts to go through.
1353   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
1354     allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
1355   }
1356 #endif
1357 
1358   EnsureFillerObjectAtTop();
1359 
1360   if (IsYoungGenerationCollector(collector) &&
1361       !incremental_marking()->IsStopped()) {
1362     if (FLAG_trace_incremental_marking) {
1363       isolate()->PrintWithTimestamp(
1364           "[IncrementalMarking] Scavenge during marking.\n");
1365     }
1366   }
1367 
1368   bool next_gc_likely_to_collect_more = false;
1369   size_t committed_memory_before = 0;
1370 
1371   if (collector == MARK_COMPACTOR) {
1372     committed_memory_before = CommittedOldGenerationMemory();
1373   }
1374 
1375   {
1376     tracer()->Start(collector, gc_reason, collector_reason);
1377     DCHECK(AllowHeapAllocation::IsAllowed());
1378     DisallowHeapAllocation no_allocation_during_gc;
1379     GarbageCollectionPrologue();
1380 
1381     {
1382       HistogramTimer* gc_type_timer = GCTypeTimer(collector);
1383       HistogramTimerScope histogram_timer_scope(gc_type_timer);
1384       TRACE_EVENT0("v8", gc_type_timer->name());
1385 
1386       HistogramTimer* gc_type_priority_timer = GCTypePriorityTimer(collector);
1387       HistogramTimerScope histogram_timer_priority_scope(
1388           gc_type_priority_timer);
1389 
1390       next_gc_likely_to_collect_more =
1391           PerformGarbageCollection(collector, gc_callback_flags);
1392     }
1393 
1394     GarbageCollectionEpilogue();
1395     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
1396       isolate()->CheckDetachedContextsAfterGC();
1397     }
1398 
1399     if (collector == MARK_COMPACTOR) {
1400       size_t committed_memory_after = CommittedOldGenerationMemory();
1401       size_t used_memory_after = OldGenerationSizeOfObjects();
1402       MemoryReducer::Event event;
1403       event.type = MemoryReducer::kMarkCompact;
1404       event.time_ms = MonotonicallyIncreasingTimeInMs();
1405       // Trigger one more GC if
1406       // - this GC decreased committed memory,
1407       // - there is high fragmentation,
1408       // - there are live detached contexts.
1409       event.next_gc_likely_to_collect_more =
1410           (committed_memory_before > committed_memory_after + MB) ||
1411           HasHighFragmentation(used_memory_after, committed_memory_after) ||
1412           (detached_contexts()->length() > 0);
1413       event.committed_memory = committed_memory_after;
1414       if (deserialization_complete_) {
1415         memory_reducer_->NotifyMarkCompact(event);
1416       }
1417       memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
1418     }
1419 
1420     tracer()->Stop(collector);
1421   }
1422 
1423   if (collector == MARK_COMPACTOR &&
1424       (gc_callback_flags & (kGCCallbackFlagForced |
1425                             kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1426     isolate()->CountUsage(v8::Isolate::kForcedGC);
1427   }
1428 
1429   // Start incremental marking for the next cycle. The heap snapshot
1430   // generator needs incremental marking to stay off after it aborted.
1431   // We do this only for the scavenger to avoid a loop where mark-compact
1432   // causes another mark-compact.
1433   if (IsYoungGenerationCollector(collector) &&
1434       !ShouldAbortIncrementalMarking()) {
1435     StartIncrementalMarkingIfAllocationLimitIsReached(
1436         GCFlagsForIncrementalMarking(),
1437         kGCCallbackScheduleIdleGarbageCollection);
1438   }
1439 
1440   return next_gc_likely_to_collect_more;
1441 }
1442 
1443 
1444 int Heap::NotifyContextDisposed(bool dependant_context) {
1445   if (!dependant_context) {
1446     tracer()->ResetSurvivalEvents();
1447     old_generation_size_configured_ = false;
1448     MemoryReducer::Event event;
1449     event.type = MemoryReducer::kPossibleGarbage;
1450     event.time_ms = MonotonicallyIncreasingTimeInMs();
1451     memory_reducer_->NotifyPossibleGarbage(event);
1452   }
1453   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1454 
1455   number_of_disposed_maps_ = retained_maps()->length();
1456   tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
1457   return ++contexts_disposed_;
1458 }
1459 
1460 void Heap::StartIncrementalMarking(int gc_flags,
1461                                    GarbageCollectionReason gc_reason,
1462                                    GCCallbackFlags gc_callback_flags) {
1463   DCHECK(incremental_marking()->IsStopped());
1464   set_current_gc_flags(gc_flags);
1465   current_gc_callback_flags_ = gc_callback_flags;
1466   incremental_marking()->Start(gc_reason);
1467 }
1468 
1469 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
1470     int gc_flags, const GCCallbackFlags gc_callback_flags) {
1471   if (incremental_marking()->IsStopped()) {
1472     IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
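    // A soft limit only schedules a delayed marking task; the hard limit
    // starts incremental marking right away.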
1473     if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
1474       incremental_marking()->incremental_marking_job()->ScheduleTask(this);
1475     } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
1476       StartIncrementalMarking(gc_flags,
1477                               GarbageCollectionReason::kAllocationLimit,
1478                               gc_callback_flags);
1479     }
1480   }
1481 }
1482 
1483 void Heap::StartIdleIncrementalMarking(
1484     GarbageCollectionReason gc_reason,
1485     const GCCallbackFlags gc_callback_flags) {
1486   gc_idle_time_handler_->ResetNoProgressCounter();
1487   StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
1488                           gc_callback_flags);
1489 }
1490 
1491 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
1492                         int len, WriteBarrierMode mode) {
1493   if (len == 0) return;
1494 
1495   DCHECK(array->map() != fixed_cow_array_map());
1496   Object** dst = array->data_start() + dst_index;
1497   Object** src = array->data_start() + src_index;
1498   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
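    // Copy element by element with relaxed atomics so a concurrently running
    // marker never observes a torn pointer; pick the copy direction that is
    // safe for overlapping ranges.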
1499     if (dst < src) {
1500       for (int i = 0; i < len; i++) {
1501         base::AsAtomicPointer::Relaxed_Store(
1502             dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
1503       }
1504     } else {
1505       for (int i = len - 1; i >= 0; i--) {
1506         base::AsAtomicPointer::Relaxed_Store(
1507             dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
1508       }
1509     }
1510   } else {
1511     MemMove(dst, src, len * kPointerSize);
1512   }
1513   if (mode == SKIP_WRITE_BARRIER) return;
1514   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
1515 }
1516 
1517 
1518 #ifdef VERIFY_HEAP
1519 // Helper class for verifying the string table.
1520 class StringTableVerifier : public ObjectVisitor {
1521  public:
1522   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
1523     // Visit all HeapObject pointers in [start, end).
1524     for (Object** p = start; p < end; p++) {
1525       DCHECK(!HasWeakHeapObjectTag(*p));
1526       if ((*p)->IsHeapObject()) {
1527         HeapObject* object = HeapObject::cast(*p);
1528         Isolate* isolate = object->GetIsolate();
1529         // Check that the string is actually internalized.
1530         CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
1531               object->IsInternalizedString());
1532       }
1533     }
1534   }
1535   void VisitPointers(HeapObject* host, MaybeObject** start,
1536                      MaybeObject** end) override {
1537     UNREACHABLE();
1538   }
1539 };
1540 
1541 
1542 static void VerifyStringTable(Heap* heap) {
1543   StringTableVerifier verifier;
1544   heap->string_table()->IterateElements(&verifier);
1545 }
1546 #endif  // VERIFY_HEAP
1547 
1548 bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
1549   bool gc_performed = true;
1550   int counter = 0;
1551   static const int kThreshold = 20;
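  // Retry the reservations up to kThreshold times; every failed allocation
  // below triggers a GC followed by another pass over the spaces.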
1552   while (gc_performed && counter++ < kThreshold) {
1553     gc_performed = false;
1554     for (int space = FIRST_SPACE;
1555          space < SerializerDeserializer::kNumberOfSpaces; space++) {
1556       Reservation* reservation = &reservations[space];
1557       DCHECK_LE(1, reservation->size());
1558       if (reservation->at(0).size == 0) continue;
1559       bool perform_gc = false;
1560       if (space == MAP_SPACE) {
1561         // We allocate each map individually to avoid fragmentation.
1562         maps->clear();
1563         DCHECK_LE(reservation->size(), 2);
1564         int reserved_size = 0;
1565         for (const Chunk& c : *reservation) reserved_size += c.size;
1566         DCHECK_EQ(0, reserved_size % Map::kSize);
1567         int num_maps = reserved_size / Map::kSize;
1568         for (int i = 0; i < num_maps; i++) {
1569           // The deserializer will update the skip list.
1570           AllocationResult allocation = map_space()->AllocateRawUnaligned(
1571               Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
1572           HeapObject* free_space = nullptr;
1573           if (allocation.To(&free_space)) {
1574             // Mark with a free list node, in case we have a GC before
1575             // deserializing.
1576             Address free_space_address = free_space->address();
1577             CreateFillerObjectAt(free_space_address, Map::kSize,
1578                                  ClearRecordedSlots::kNo);
1579             maps->push_back(free_space_address);
1580           } else {
1581             perform_gc = true;
1582             break;
1583           }
1584         }
1585       } else if (space == LO_SPACE) {
1586         // Just check that we can allocate during deserialization.
1587         DCHECK_LE(reservation->size(), 2);
1588         int reserved_size = 0;
1589         for (const Chunk& c : *reservation) reserved_size += c.size;
1590         perform_gc = !CanExpandOldGeneration(reserved_size);
1591       } else {
1592         for (auto& chunk : *reservation) {
1593           AllocationResult allocation;
1594           int size = chunk.size;
1595           DCHECK_LE(static_cast<size_t>(size),
1596                     MemoryAllocator::PageAreaSize(
1597                         static_cast<AllocationSpace>(space)));
1598           if (space == NEW_SPACE) {
1599             allocation = new_space()->AllocateRawUnaligned(size);
1600           } else {
1601             // The deserializer will update the skip list.
1602             allocation = paged_space(space)->AllocateRawUnaligned(
1603                 size, PagedSpace::IGNORE_SKIP_LIST);
1604           }
1605           HeapObject* free_space = nullptr;
1606           if (allocation.To(&free_space)) {
1607             // Mark with a free list node, in case we have a GC before
1608             // deserializing.
1609             Address free_space_address = free_space->address();
1610             CreateFillerObjectAt(free_space_address, size,
1611                                  ClearRecordedSlots::kNo);
1612             DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
1613                       space);
1614             chunk.start = free_space_address;
1615             chunk.end = free_space_address + size;
1616           } else {
1617             perform_gc = true;
1618             break;
1619           }
1620         }
1621       }
1622       if (perform_gc) {
1623         // We cannot perform a GC with an uninitialized isolate. This check
1624         // fails for example if the max old space size is chosen unwisely,
1625         // so that we cannot allocate space to deserialize the initial heap.
1626         if (!deserialization_complete_) {
1627           V8::FatalProcessOutOfMemory(
1628               isolate(), "insufficient memory to create an Isolate");
1629         }
1630         if (space == NEW_SPACE) {
1631           CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
1632         } else {
1633           if (counter > 1) {
1634             CollectAllGarbage(
1635                 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
1636                 GarbageCollectionReason::kDeserializer);
1637           } else {
1638             CollectAllGarbage(kAbortIncrementalMarkingMask,
1639                               GarbageCollectionReason::kDeserializer);
1640           }
1641         }
1642         gc_performed = true;
1643         break;  // Abort for-loop over spaces and retry.
1644       }
1645     }
1646   }
1647 
1648   return !gc_performed;
1649 }
1650 
1651 
1652 void Heap::EnsureFromSpaceIsCommitted() {
1653   if (new_space_->CommitFromSpaceIfNeeded()) return;
1654 
1655   // Committing memory to from space failed.
1656   // Memory is exhausted and we will die.
1657   FatalProcessOutOfMemory("Committing semi space failed.");
1658 }
1659 
1660 
1661 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1662   if (start_new_space_size == 0) return;
1663 
1664   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1665                       static_cast<double>(start_new_space_size) * 100);
1666 
1667   if (previous_semi_space_copied_object_size_ > 0) {
1668     promotion_rate_ =
1669         (static_cast<double>(promoted_objects_size_) /
1670          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1671   } else {
1672     promotion_rate_ = 0;
1673   }
1674 
1675   semi_space_copied_rate_ =
1676       (static_cast<double>(semi_space_copied_object_size_) /
1677        static_cast<double>(start_new_space_size) * 100);
1678 
1679   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1680   tracer()->AddSurvivalRatio(survival_rate);
1681 }
1682 
1683 bool Heap::PerformGarbageCollection(
1684     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1685   int freed_global_handles = 0;
1686 
1687   if (!IsYoungGenerationCollector(collector)) {
1688     PROFILE(isolate_, CodeMovingGCEvent());
1689   }
1690 
1691 #ifdef VERIFY_HEAP
1692   if (FLAG_verify_heap) {
1693     VerifyStringTable(this);
1694   }
1695 #endif
1696 
1697   GCType gc_type =
1698       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1699 
1700   {
1701     GCCallbacksScope scope(this);
1702     if (scope.CheckReenter()) {
1703       AllowHeapAllocation allow_allocation;
1704       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
1705       VMState<EXTERNAL> state(isolate_);
1706       HandleScope handle_scope(isolate_);
1707       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1708     }
1709   }
1710 
1711   EnsureFromSpaceIsCommitted();
1712 
1713   size_t start_new_space_size = Heap::new_space()->Size();
1714 
1715   {
1716     Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
1717 
1718     switch (collector) {
1719       case MARK_COMPACTOR:
1720         UpdateOldGenerationAllocationCounter();
1721         // Perform mark-sweep with optional compaction.
1722         MarkCompact();
1723         old_generation_size_configured_ = true;
1724         // This should be updated before PostGarbageCollectionProcessing, which
1725         // can cause another GC. Take into account the objects promoted during
1726         // GC.
1727         old_generation_allocation_counter_at_last_gc_ +=
1728             static_cast<size_t>(promoted_objects_size_);
1729         old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
1730         break;
1731       case MINOR_MARK_COMPACTOR:
1732         MinorMarkCompact();
1733         break;
1734       case SCAVENGER:
1735         if ((fast_promotion_mode_ &&
1736              CanExpandOldGeneration(new_space()->Size()))) {
1737           tracer()->NotifyYoungGenerationHandling(
1738               YoungGenerationHandling::kFastPromotionDuringScavenge);
1739           EvacuateYoungGeneration();
1740         } else {
1741           tracer()->NotifyYoungGenerationHandling(
1742               YoungGenerationHandling::kRegularScavenge);
1743 
1744           Scavenge();
1745         }
1746         break;
1747     }
1748 
1749     ProcessPretenuringFeedback();
1750   }
1751 
1752   UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
1753   ConfigureInitialOldGenerationSize();
1754 
1755   if (collector != MARK_COMPACTOR) {
1756     // Objects that died in the new space might have been accounted
1757     // as bytes marked ahead of schedule by the incremental marker.
1758     incremental_marking()->UpdateMarkedBytesAfterScavenge(
1759         start_new_space_size - SurvivedNewSpaceObjectSize());
1760   }
1761 
1762   if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
1763     ComputeFastPromotionMode();
1764   }
1765 
1766   isolate_->counters()->objs_since_last_young()->Set(0);
1767 
1768   gc_post_processing_depth_++;
1769   {
1770     AllowHeapAllocation allow_allocation;
1771     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
1772     freed_global_handles =
1773         isolate_->global_handles()->PostGarbageCollectionProcessing(
1774             collector, gc_callback_flags);
1775   }
1776   gc_post_processing_depth_--;
1777 
1778   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1779 
1780   // Update relocatables.
1781   Relocatable::PostGarbageCollectionProcessing(isolate_);
1782 
1783   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1784   double mutator_speed =
1785       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
1786   size_t old_gen_size = OldGenerationSizeOfObjects();
1787   if (collector == MARK_COMPACTOR) {
1788     // Register the amount of external allocated memory.
1789     external_memory_at_last_mark_compact_ = external_memory_;
1790     external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
1791     SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1792     CheckIneffectiveMarkCompact(
1793         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
1794   } else if (HasLowYoungGenerationAllocationRate() &&
1795              old_generation_size_configured_) {
1796     DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1797   }
1798 
1799   {
1800     GCCallbacksScope scope(this);
1801     if (scope.CheckReenter()) {
1802       AllowHeapAllocation allow_allocation;
1803       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
1804       VMState<EXTERNAL> state(isolate_);
1805       HandleScope handle_scope(isolate_);
1806       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1807     }
1808   }
1809 
1810 #ifdef VERIFY_HEAP
1811   if (FLAG_verify_heap) {
1812     VerifyStringTable(this);
1813   }
1814 #endif
1815 
1816   return freed_global_handles > 0;
1817 }
1818 
1819 
1820 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1821   RuntimeCallTimerScope runtime_timer(
1822       isolate(), RuntimeCallCounterId::kGCPrologueCallback);
1823   for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
1824     if (gc_type & info.gc_type) {
1825       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1826       info.callback(isolate, gc_type, flags, info.data);
1827     }
1828   }
1829 }
1830 
1831 void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1832   RuntimeCallTimerScope runtime_timer(
1833       isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
1834   for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
1835     if (gc_type & info.gc_type) {
1836       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1837       info.callback(isolate, gc_type, flags, info.data);
1838     }
1839   }
1840 }
1841 
1842 
1843 void Heap::MarkCompact() {
1844   PauseAllocationObserversScope pause_observers(this);
1845 
1846   SetGCState(MARK_COMPACT);
1847 
1848   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1849 
1850   uint64_t size_of_objects_before_gc = SizeOfObjects();
1851 
1852   CodeSpaceMemoryModificationScope code_modification(this);
1853 
1854   mark_compact_collector()->Prepare();
1855 
1856   ms_count_++;
1857 
1858   MarkCompactPrologue();
1859 
1860   mark_compact_collector()->CollectGarbage();
1861 
1862   LOG(isolate_, ResourceEvent("markcompact", "end"));
1863 
1864   MarkCompactEpilogue();
1865 
1866   if (FLAG_allocation_site_pretenuring) {
1867     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1868   }
1869 }
1870 
1871 void Heap::MinorMarkCompact() {
1872 #ifdef ENABLE_MINOR_MC
1873   DCHECK(FLAG_minor_mc);
1874 
1875   PauseAllocationObserversScope pause_observers(this);
1876   SetGCState(MINOR_MARK_COMPACT);
1877   LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
1878 
1879   TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
1880   AlwaysAllocateScope always_allocate(isolate());
1881   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
1882       incremental_marking());
1883   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
1884 
1885   minor_mark_compact_collector()->CollectGarbage();
1886 
1887   LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
1888   SetGCState(NOT_IN_GC);
1889 #else
1890   UNREACHABLE();
1891 #endif  // ENABLE_MINOR_MC
1892 }
1893 
1894 void Heap::MarkCompactEpilogue() {
1895   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
1896   SetGCState(NOT_IN_GC);
1897 
1898   isolate_->counters()->objs_since_last_full()->Set(0);
1899 
1900   incremental_marking()->Epilogue();
1901 
1902   PreprocessStackTraces();
1903   DCHECK(incremental_marking()->IsStopped());
1904 }
1905 
1906 
1907 void Heap::MarkCompactPrologue() {
1908   TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
1909   isolate_->context_slot_cache()->Clear();
1910   isolate_->descriptor_lookup_cache()->Clear();
1911   RegExpResultsCache::Clear(string_split_cache());
1912   RegExpResultsCache::Clear(regexp_multiple_cache());
1913 
1914   isolate_->compilation_cache()->MarkCompactPrologue();
1915 
1916   FlushNumberStringCache();
1917 }
1918 
1919 
1920 void Heap::CheckNewSpaceExpansionCriteria() {
1921   if (FLAG_experimental_new_space_growth_heuristic) {
1922     if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1923         survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
1924       // Grow the size of new space if there is room to grow, and more than 10%
1925       // have survived the last scavenge.
1926       new_space_->Grow();
1927       survived_since_last_expansion_ = 0;
1928     }
1929   } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1930              survived_since_last_expansion_ > new_space_->TotalCapacity()) {
1931     // Grow the size of new space if there is room to grow, and enough data
1932     // has survived scavenge since the last expansion.
1933     new_space_->Grow();
1934     survived_since_last_expansion_ = 0;
1935   }
1936 }
1937 
1938 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1939   return heap->InFromSpace(*p) &&
1940          !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1941 }
1942 
1943 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1944  public:
1945   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1946 
1947   virtual Object* RetainAs(Object* object) {
1948     if (!heap_->InFromSpace(object)) {
1949       return object;
1950     }
1951 
1952     MapWord map_word = HeapObject::cast(object)->map_word();
1953     if (map_word.IsForwardingAddress()) {
1954       return map_word.ToForwardingAddress();
1955     }
1956     return nullptr;
1957   }
1958 
1959  private:
1960   Heap* heap_;
1961 };
1962 
1963 void Heap::EvacuateYoungGeneration() {
1964   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
1965   base::LockGuard<base::Mutex> guard(relocation_mutex());
1966   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
1967   if (!FLAG_concurrent_marking) {
1968     DCHECK(fast_promotion_mode_);
1969     DCHECK(CanExpandOldGeneration(new_space()->Size()));
1970   }
1971 
1972   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
1973 
1974   SetGCState(SCAVENGE);
1975   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1976 
1977   // Move pages from new->old generation.
1978   PageRange range(new_space()->bottom(), new_space()->top());
1979   for (auto it = range.begin(); it != range.end();) {
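    // Advance the iterator past the page before unlinking it, so removing the
    // page from the new space list does not invalidate the iterator.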
1980     Page* p = (*++it)->prev_page();
1981     p->Unlink();
1982     Page::ConvertNewToOld(p);
1983     if (incremental_marking()->IsMarking())
1984       mark_compact_collector()->RecordLiveSlotsOnPage(p);
1985   }
1986 
1987   // Reset new space.
1988   if (!new_space()->Rebalance()) {
1989     FatalProcessOutOfMemory("NewSpace::Rebalance");
1990   }
1991   new_space()->ResetLinearAllocationArea();
1992   new_space()->set_age_mark(new_space()->top());
1993 
1994   // Fix up special trackers.
1995   external_string_table_.PromoteAllNewSpaceStrings();
1996   // GlobalHandles are updated in PostGarbageCollectionProcessing.
1997 
1998   IncrementYoungSurvivorsCounter(new_space()->Size());
1999   IncrementPromotedObjectsSize(new_space()->Size());
2000   IncrementSemiSpaceCopiedObjectSize(0);
2001 
2002   LOG(isolate_, ResourceEvent("scavenge", "end"));
2003   SetGCState(NOT_IN_GC);
2004 }
2005 
2006 static bool IsLogging(Isolate* isolate) {
2007   return FLAG_verify_predictable || isolate->logger()->is_logging() ||
2008          isolate->is_profiling() ||
2009          (isolate->heap_profiler() != nullptr &&
2010           isolate->heap_profiler()->is_tracking_object_moves()) ||
2011          isolate->heap()->has_heap_object_allocation_tracker();
2012 }
2013 
2014 class PageScavengingItem final : public ItemParallelJob::Item {
2015  public:
2016   explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
2017   virtual ~PageScavengingItem() {}
2018 
2019   void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
2020 
2021  private:
2022   MemoryChunk* const chunk_;
2023 };
2024 
2025 class ScavengingTask final : public ItemParallelJob::Task {
2026  public:
2027   ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
2028       : ItemParallelJob::Task(heap->isolate()),
2029         heap_(heap),
2030         scavenger_(scavenger),
2031         barrier_(barrier) {}
2032 
2033   void RunInParallel() final {
2034     TRACE_BACKGROUND_GC(
2035         heap_->tracer(),
2036         GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
2037     double scavenging_time = 0.0;
2038     {
2039       barrier_->Start();
2040       TimedScope scope(&scavenging_time);
2041       PageScavengingItem* item = nullptr;
2042       while ((item = GetItem<PageScavengingItem>()) != nullptr) {
2043         item->Process(scavenger_);
2044         item->MarkFinished();
2045       }
2046       do {
2047         scavenger_->Process(barrier_);
2048       } while (!barrier_->Wait());
2049       scavenger_->Process();
2050     }
2051     if (FLAG_trace_parallel_scavenge) {
2052       PrintIsolate(heap_->isolate(),
2053                    "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
2054                    static_cast<void*>(this), scavenging_time,
2055                    scavenger_->bytes_copied(), scavenger_->bytes_promoted());
2056     }
2057   };
2058 
2059  private:
2060   Heap* const heap_;
2061   Scavenger* const scavenger_;
2062   OneshotBarrier* const barrier_;
2063 };
2064 
2065 int Heap::NumberOfScavengeTasks() {
2066   if (!FLAG_parallel_scavenge) return 1;
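  // Heuristic: roughly one task per MB of new space capacity, capped by
  // kMaxScavengerTasks and the number of available cores.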
2067   const int num_scavenge_tasks =
2068       static_cast<int>(new_space()->TotalCapacity()) / MB;
2069   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
2070   int tasks =
2071       Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
2072   if (!CanExpandOldGeneration(static_cast<size_t>(tasks * Page::kPageSize))) {
2073     // Optimize for memory usage near the heap limit.
2074     tasks = 1;
2075   }
2076   return tasks;
2077 }
2078 
2079 void Heap::Scavenge() {
2080   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
2081   base::LockGuard<base::Mutex> guard(relocation_mutex());
2082   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
2083   // There are soft limits in the allocation code, designed to trigger a mark
2084   // sweep collection by failing allocations. There is no sense in trying to
2085   // trigger one during scavenge: scavenge allocations should always succeed.
2086   AlwaysAllocateScope scope(isolate());
2087 
2088   // Bump-pointer allocations done during scavenge are not real allocations.
2089   // Pause the inline allocation steps.
2090   PauseAllocationObserversScope pause_observers(this);
2091 
2092   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
2093       incremental_marking());
2094 
2095 
2096   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
2097 
2098   SetGCState(SCAVENGE);
2099 
2100   // Implements Cheney's copying algorithm
2101   LOG(isolate_, ResourceEvent("scavenge", "begin"));
2102 
2103   // Flip the semispaces.  After flipping, to space is empty, from space has
2104   // live objects.
2105   new_space_->Flip();
2106   new_space_->ResetLinearAllocationArea();
2107 
2108   ItemParallelJob job(isolate()->cancelable_task_manager(),
2109                       &parallel_scavenge_semaphore_);
2110   const int kMainThreadId = 0;
2111   Scavenger* scavengers[kMaxScavengerTasks];
2112   const bool is_logging = IsLogging(isolate());
2113   const int num_scavenge_tasks = NumberOfScavengeTasks();
2114   OneshotBarrier barrier;
2115   Scavenger::CopiedList copied_list(num_scavenge_tasks);
2116   Scavenger::PromotionList promotion_list(num_scavenge_tasks);
2117   for (int i = 0; i < num_scavenge_tasks; i++) {
2118     scavengers[i] =
2119         new Scavenger(this, is_logging, &copied_list, &promotion_list, i);
2120     job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
2121   }
2122 
2123   {
2124     Sweeper* sweeper = mark_compact_collector()->sweeper();
2125     // Pause the concurrent sweeper.
2126     Sweeper::PauseOrCompleteScope pause_scope(sweeper);
2127     // Filter out pages from the sweeper that need to be processed for old to
2128     // new slots by the Scavenger. After processing, the Scavenger adds back
2129     // pages that are still unswept. This way the Scavenger has exclusive
2130     // access to the slots of a page and can completely avoid any locks on
2131     // the page itself.
2132     Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
2133     filter_scope.FilterOldSpaceSweepingPages(
2134         [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
2135     RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
2136         this, [&job](MemoryChunk* chunk) {
2137           job.AddItem(new PageScavengingItem(chunk));
2138         });
2139 
2140     RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]);
2141 
2142     {
2143       // Identify weak unmodified handles. Requires an unmodified graph.
2144       TRACE_GC(
2145           tracer(),
2146           GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
2147       isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
2148           &JSObject::IsUnmodifiedApiObject);
2149     }
2150     {
2151       // Copy roots.
2152       TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
2153       IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
2154     }
2155     {
2156       // Weak collections are held strongly by the Scavenger.
2157       TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK);
2158       IterateEncounteredWeakCollections(&root_scavenge_visitor);
2159     }
2160     {
2161       // Parallel phase scavenging all copied and promoted objects.
2162       TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
2163       job.Run(isolate()->async_counters());
2164       DCHECK(copied_list.IsGlobalEmpty());
2165       DCHECK(promotion_list.IsGlobalEmpty());
2166     }
2167     {
2168       // Scavenge weak global handles.
2169       TRACE_GC(tracer(),
2170                GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
2171       isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
2172           &IsUnscavengedHeapObject);
2173       isolate()
2174           ->global_handles()
2175           ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
2176               &root_scavenge_visitor);
2177       scavengers[kMainThreadId]->Process();
2178 
2179       DCHECK(copied_list.IsGlobalEmpty());
2180       DCHECK(promotion_list.IsGlobalEmpty());
2181       isolate()
2182           ->global_handles()
2183           ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
2184               &root_scavenge_visitor, &IsUnscavengedHeapObject);
2185     }
2186 
2187     for (int i = 0; i < num_scavenge_tasks; i++) {
2188       scavengers[i]->Finalize();
2189       delete scavengers[i];
2190     }
2191   }
2192 
2193   UpdateNewSpaceReferencesInExternalStringTable(
2194       &UpdateNewSpaceReferenceInExternalStringTableEntry);
2195 
2196   incremental_marking()->UpdateMarkingWorklistAfterScavenge();
2197 
2198   if (FLAG_concurrent_marking) {
2199     // Ensure that the concurrent marker does not track pages that are
2200     // going to be unmapped.
2201     for (Page* p : PageRange(new_space()->FromSpaceStart(),
2202                              new_space()->FromSpaceEnd())) {
2203       concurrent_marking()->ClearLiveness(p);
2204     }
2205   }
2206 
2207   ScavengeWeakObjectRetainer weak_object_retainer(this);
2208   ProcessYoungWeakReferences(&weak_object_retainer);
2209 
2210   // Set age mark.
2211   new_space_->set_age_mark(new_space_->top());
2212 
2213   {
2214     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
2215     ArrayBufferTracker::PrepareToFreeDeadInNewSpace(this);
2216   }
2217   array_buffer_collector()->FreeAllocationsOnBackgroundThread();
2218 
2219   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
2220     if (chunk->SweepingDone()) {
2221       RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
2222     } else {
2223       RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
2224     }
2225   });
2226 
2227   // Update how much has survived scavenge.
2228   IncrementYoungSurvivorsCounter(SurvivedNewSpaceObjectSize());
2229 
2230   // Scavenger may find new wrappers by iterating objects promoted onto a black
2231   // page.
2232   local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
2233 
2234   LOG(isolate_, ResourceEvent("scavenge", "end"));
2235 
2236   SetGCState(NOT_IN_GC);
2237 }
2238 
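// Decides whether the next scavenge may promote the entire new space at once
// (see EvacuateYoungGeneration) instead of copying individual survivors,
// based on how much of the last scavenge survived.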
2239 void Heap::ComputeFastPromotionMode() {
2240   const size_t survived_in_new_space =
2241       survived_last_scavenge_ * 100 / new_space_->Capacity();
2242   fast_promotion_mode_ =
2243       !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
2244       !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
2245       survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
2246   if (FLAG_trace_gc_verbose) {
2247     PrintIsolate(
2248         isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
2249         fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
2250   }
2251 }
2252 
2253 void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
2254   if (unprotected_memory_chunks_registry_enabled_) {
2255     base::LockGuard<base::Mutex> guard(&unprotected_memory_chunks_mutex_);
2256     if (unprotected_memory_chunks_.insert(chunk).second) {
2257       chunk->SetReadAndWritable();
2258     }
2259   }
2260 }
2261 
2262 void Heap::UnprotectAndRegisterMemoryChunk(HeapObject* object) {
2263   UnprotectAndRegisterMemoryChunk(MemoryChunk::FromAddress(object->address()));
2264 }
2265 
2266 void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
2267   unprotected_memory_chunks_.erase(chunk);
2268 }
2269 
2270 void Heap::ProtectUnprotectedMemoryChunks() {
2271   DCHECK(unprotected_memory_chunks_registry_enabled_);
2272   for (auto chunk = unprotected_memory_chunks_.begin();
2273        chunk != unprotected_memory_chunks_.end(); chunk++) {
2274     CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
2275     (*chunk)->SetReadAndExecutable();
2276   }
2277   unprotected_memory_chunks_.clear();
2278 }
2279 
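// Returns the new location of the string if it survived and is still
// external, or nullptr if the table entry should be dropped.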
2280 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
2281                                                                 Object** p) {
2282   MapWord first_word = HeapObject::cast(*p)->map_word();
2283 
2284   if (!first_word.IsForwardingAddress()) {
2285     // Unreachable external string can be finalized.
2286     String* string = String::cast(*p);
2287     if (!string->IsExternalString()) {
2288       // Original external string has been internalized.
2289       DCHECK(string->IsThinString());
2290       return nullptr;
2291     }
2292     heap->FinalizeExternalString(string);
2293     return nullptr;
2294   }
2295 
2296   // String is still reachable.
2297   String* string = String::cast(first_word.ToForwardingAddress());
2298   if (string->IsThinString()) string = ThinString::cast(string)->actual();
2299   // Internalization can replace external strings with non-external strings.
2300   return string->IsExternalString() ? string : nullptr;
2301 }
2302 
2303 void Heap::ExternalStringTable::Verify() {
2304 #ifdef DEBUG
2305   for (size_t i = 0; i < new_space_strings_.size(); ++i) {
2306     Object* obj = Object::cast(new_space_strings_[i]);
2307     DCHECK(heap_->InNewSpace(obj));
2308     DCHECK(!obj->IsTheHole(heap_->isolate()));
2309   }
2310   for (size_t i = 0; i < old_space_strings_.size(); ++i) {
2311     Object* obj = Object::cast(old_space_strings_[i]);
2312     DCHECK(!heap_->InNewSpace(obj));
2313     DCHECK(!obj->IsTheHole(heap_->isolate()));
2314   }
2315 #endif
2316 }
2317 
2318 void Heap::ExternalStringTable::UpdateNewSpaceReferences(
2319     Heap::ExternalStringTableUpdaterCallback updater_func) {
2320   if (new_space_strings_.empty()) return;
2321 
2322   Object** start = new_space_strings_.data();
2323   Object** end = start + new_space_strings_.size();
2324   Object** last = start;
2325 
2326   for (Object** p = start; p < end; ++p) {
2327     String* target = updater_func(heap_, p);
2328 
2329     if (target == nullptr) continue;
2330 
2331     DCHECK(target->IsExternalString());
2332 
2333     if (heap_->InNewSpace(target)) {
2334       // String is still in new space. Update the table entry.
2335       *last = target;
2336       ++last;
2337     } else {
2338       // String got promoted. Move it to the old string list.
2339       old_space_strings_.push_back(target);
2340     }
2341   }
2342 
2343   DCHECK_LE(last, end);
2344   new_space_strings_.resize(static_cast<size_t>(last - start));
2345 #ifdef VERIFY_HEAP
2346   if (FLAG_verify_heap) {
2347     Verify();
2348   }
2349 #endif
2350 }
2351 
2352 void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
2353   old_space_strings_.reserve(old_space_strings_.size() +
2354                              new_space_strings_.size());
2355   std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
2356             std::back_inserter(old_space_strings_));
2357   new_space_strings_.clear();
2358 }
2359 
2360 void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
2361   if (!new_space_strings_.empty()) {
2362     v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
2363                          new_space_strings_.data(),
2364                          new_space_strings_.data() + new_space_strings_.size());
2365   }
2366 }
2367 
2368 void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
2369   IterateNewSpaceStrings(v);
2370   if (!old_space_strings_.empty()) {
2371     v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
2372                          old_space_strings_.data(),
2373                          old_space_strings_.data() + old_space_strings_.size());
2374   }
2375 }
2376 
2377 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
2378     ExternalStringTableUpdaterCallback updater_func) {
2379   external_string_table_.UpdateNewSpaceReferences(updater_func);
2380 }
2381 
2382 void Heap::ExternalStringTable::UpdateReferences(
2383     Heap::ExternalStringTableUpdaterCallback updater_func) {
2384   if (old_space_strings_.size() > 0) {
2385     Object** start = old_space_strings_.data();
2386     Object** end = start + old_space_strings_.size();
2387     for (Object** p = start; p < end; ++p) *p = updater_func(heap_, p);
2388   }
2389 
2390   UpdateNewSpaceReferences(updater_func);
2391 }
2392 
2393 void Heap::UpdateReferencesInExternalStringTable(
2394     ExternalStringTableUpdaterCallback updater_func) {
2395   external_string_table_.UpdateReferences(updater_func);
2396 }
2397 
2398 
2399 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
2400   ProcessNativeContexts(retainer);
2401   ProcessAllocationSites(retainer);
2402 }
2403 
2404 
2405 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
2406   ProcessNativeContexts(retainer);
2407 }
2408 
2409 
2410 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
2411   Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
2412   // Update the head of the list of contexts.
2413   set_native_contexts_list(head);
2414 }
2415 
2416 
2417 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
2418   Object* allocation_site_obj =
2419       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
2420   set_allocation_sites_list(allocation_site_obj);
2421 }
2422 
2423 void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
2424   set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
2425   set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
2426 }
2427 
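// Walks the allocation site list, resets every site whose pretenure decision
// matches |flag|, and requests deoptimization of the dependent code.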
2428 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
2429   DisallowHeapAllocation no_allocation_scope;
2430   Object* cur = allocation_sites_list();
2431   bool marked = false;
2432   while (cur->IsAllocationSite()) {
2433     AllocationSite* casted = AllocationSite::cast(cur);
2434     if (casted->GetPretenureMode() == flag) {
2435       casted->ResetPretenureDecision();
2436       casted->set_deopt_dependent_code(true);
2437       marked = true;
2438       RemoveAllocationSitePretenuringFeedback(casted);
2439     }
2440     cur = casted->weak_next();
2441   }
2442   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
2443 }
2444 
2445 
2446 void Heap::EvaluateOldSpaceLocalPretenuring(
2447     uint64_t size_of_objects_before_gc) {
2448   uint64_t size_of_objects_after_gc = SizeOfObjects();
2449   double old_generation_survival_rate =
2450       (static_cast<double>(size_of_objects_after_gc) * 100) /
2451       static_cast<double>(size_of_objects_before_gc);
2452 
2453   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2454     // Too many objects died in the old generation, pretenuring of wrong
2455     // allocation sites may be the cause for that. We have to deopt all
2456     // dependent code registered in the allocation sites to re-evaluate
2457     // our pretenuring decisions.
2458     ResetAllAllocationSitesDependentCode(TENURED);
2459     if (FLAG_trace_pretenuring) {
2460       PrintF(
2461           "Deopt all allocation sites dependent code due to low survival "
2462           "rate in the old generation %f\n",
2463           old_generation_survival_rate);
2464     }
2465   }
2466 }
2467 
2468 
2469 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
2470   DisallowHeapAllocation no_allocation;
2471   // All external strings are listed in the external string table.
2472 
2473   class ExternalStringTableVisitorAdapter : public RootVisitor {
2474    public:
2475     explicit ExternalStringTableVisitorAdapter(
2476         v8::ExternalResourceVisitor* visitor)
2477         : visitor_(visitor) {}
2478     virtual void VisitRootPointers(Root root, const char* description,
2479                                    Object** start, Object** end) {
2480       for (Object** p = start; p < end; p++) {
2481         DCHECK((*p)->IsExternalString());
2482         visitor_->VisitExternalString(
2483             Utils::ToLocal(Handle<String>(String::cast(*p))));
2484       }
2485     }
2486 
2487    private:
2488     v8::ExternalResourceVisitor* visitor_;
2489   } external_string_table_visitor(visitor);
2490 
2491   external_string_table_.IterateAll(&external_string_table_visitor);
2492 }
2493 
2494 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
2495               0);  // NOLINT
2496 STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
2497               0);  // NOLINT
2498 #ifdef V8_HOST_ARCH_32_BIT
2499 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
2500               0);  // NOLINT
2501 #endif
2502 
2503 
2504 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
2505   switch (alignment) {
2506     case kWordAligned:
2507       return 0;
2508     case kDoubleAligned:
2509     case kDoubleUnaligned:
2510       return kDoubleSize - kPointerSize;
2511     default:
2512       UNREACHABLE();
2513   }
2514   return 0;
2515 }
2516 
2517 
2518 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
2519   intptr_t offset = OffsetFrom(address);
2520   if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
2521     return kPointerSize;
2522   if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
2523     return kDoubleSize - kPointerSize;  // No fill if double is always aligned.
2524   return 0;
2525 }
2526 
2527 
2528 HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
2529   CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
2530   return HeapObject::FromAddress(object->address() + filler_size);
2531 }
2532 
2533 
2534 HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
2535                                   int allocation_size,
2536                                   AllocationAlignment alignment) {
2537   int filler_size = allocation_size - object_size;
2538   DCHECK_LT(0, filler_size);
2539   int pre_filler = GetFillToAlign(object->address(), alignment);
2540   if (pre_filler) {
2541     object = PrecedeWithFiller(object, pre_filler);
2542     filler_size -= pre_filler;
2543   }
2544   if (filler_size)
2545     CreateFillerObjectAt(object->address() + object_size, filler_size,
2546                          ClearRecordedSlots::kNo);
2547   return object;
2548 }
2549 
2550 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
2551   ArrayBufferTracker::RegisterNew(this, buffer);
2552 }
2553 
2554 
2555 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
2556   ArrayBufferTracker::Unregister(this, buffer);
2557 }
2558 
2559 void Heap::ConfigureInitialOldGenerationSize() {
2560   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2561     old_generation_allocation_limit_ =
2562         Max(MinimumAllocationLimitGrowingStep(),
2563             static_cast<size_t>(
2564                 static_cast<double>(old_generation_allocation_limit_) *
2565                 (tracer()->AverageSurvivalRatio() / 100)));
2566   }
2567 }
2568 
2569 void Heap::CreateJSEntryStub() {
2570   JSEntryStub stub(isolate(), StackFrame::ENTRY);
2571   set_js_entry_code(*stub.GetCode());
2572 }
2573 
2574 
2575 void Heap::CreateJSConstructEntryStub() {
2576   JSEntryStub stub(isolate(), StackFrame::CONSTRUCT_ENTRY);
2577   set_js_construct_entry_code(*stub.GetCode());
2578 }
2579 
2580 void Heap::CreateJSRunMicrotasksEntryStub() {
2581   JSEntryStub stub(isolate(), JSEntryStub::SpecialTarget::kRunMicrotasks);
2582   set_js_run_microtasks_entry_code(*stub.GetCode());
2583 }
2584 
2585 void Heap::CreateFixedStubs() {
2586   // Here we create roots for fixed stubs. They are needed at GC
2587   // for cooking and uncooking (check out frames.cc).
2588   // This eliminates the need for doing a dictionary lookup in the
2589   // stub cache for these stubs.
2590   HandleScope scope(isolate());
2591   // Canonicalize handles, so that we can share constant pool entries pointing
2592   // to code targets without dereferencing their handles.
2593   CanonicalHandleScope canonical(isolate());
2594 
2595   // Create stubs that should be there, so we don't unexpectedly have to
2596   // create them if we need them during the creation of another stub.
2597   // Stub creation mixes raw pointers and handles in an unsafe manner so
2598   // we cannot create stubs while we are creating stubs.
2599   CodeStub::GenerateStubsAheadOfTime(isolate());
2600 
2601   // gcc-4.4 has problem generating correct code of following snippet:
2602   // {  JSEntryStub stub;
2603   //    js_entry_code_ = *stub.GetCode();
2604   // }
2605   // {  JSConstructEntryStub stub;
2606   //    js_construct_entry_code_ = *stub.GetCode();
2607   // }
2608   // To workaround the problem, make separate functions without inlining.
2609   Heap::CreateJSEntryStub();
2610   Heap::CreateJSConstructEntryStub();
2611   Heap::CreateJSRunMicrotasksEntryStub();
2612 }
2613 
2614 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2615   switch (root_index) {
2616     case kNumberStringCacheRootIndex:
2617     case kCodeStubsRootIndex:
2618     case kScriptListRootIndex:
2619     case kMaterializedObjectsRootIndex:
2620     case kMicrotaskQueueRootIndex:
2621     case kDetachedContextsRootIndex:
2622     case kRetainedMapsRootIndex:
2623     case kRetainingPathTargetsRootIndex:
2624     case kFeedbackVectorsForProfilingToolsRootIndex:
2625     case kNoScriptSharedFunctionInfosRootIndex:
2626     case kWeakStackTraceListRootIndex:
2627     case kSerializedObjectsRootIndex:
2628     case kSerializedGlobalProxySizesRootIndex:
2629     case kPublicSymbolTableRootIndex:
2630     case kApiSymbolTableRootIndex:
2631     case kApiPrivateSymbolTableRootIndex:
2632     case kMessageListenersRootIndex:
2633     case kDeserializeLazyHandlerRootIndex:
2634     case kDeserializeLazyHandlerWideRootIndex:
2635     case kDeserializeLazyHandlerExtraWideRootIndex:
2636 // Smi values
2637 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
2638       SMI_ROOT_LIST(SMI_ENTRY)
2639 #undef SMI_ENTRY
2640     // String table
2641     case kStringTableRootIndex:
2642       return true;
2643 
2644     default:
2645       return false;
2646   }
2647 }
2648 
2649 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2650   bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
2651                 !InNewSpace(root(root_index));
2652   DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
2653   return can_be;
2654 }
2655 
2656 int Heap::FullSizeNumberStringCacheLength() {
2657   // Compute the size of the number string cache based on the max new space size.
2658   // The number string cache has a minimum size based on twice the initial cache
2659   // size to ensure that it is bigger after being made 'full size'.
2660   size_t number_string_cache_size = max_semi_space_size_ / 512;
2661   number_string_cache_size =
2662       Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
2663           Min<size_t>(0x4000u, number_string_cache_size));
2664   // There is a string and a number per entry so the length is twice the number
2665   // of entries.
2666   return static_cast<int>(number_string_cache_size * 2);
2667 }
2668 
2669 
2670 void Heap::FlushNumberStringCache() {
2671   // Flush the number to string cache.
2672   int len = number_string_cache()->length();
2673   for (int i = 0; i < len; i++) {
2674     number_string_cache()->set_undefined(i);
2675   }
2676 }
2677 
2678 namespace {
2679 
2680 Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
2681   switch (array_type) {
2682 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2683   case kExternal##Type##Array:                                  \
2684     return Heap::kFixed##Type##ArrayMapRootIndex;
2685 
2686     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
2687 #undef ARRAY_TYPE_TO_ROOT_INDEX
2688   }
2689   UNREACHABLE();
2690 }
2691 
2692 Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
2693   switch (elements_kind) {
2694 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
2695   case TYPE##_ELEMENTS:                                 \
2696     return Heap::kFixed##Type##ArrayMapRootIndex;
2697     TYPED_ARRAYS(TYPED_ARRAY_CASE)
2698     default:
2699       UNREACHABLE();
2700 #undef TYPED_ARRAY_CASE
2701   }
2702 }
2703 
2704 Heap::RootListIndex RootIndexForEmptyFixedTypedArray(
2705     ElementsKind elements_kind) {
2706   switch (elements_kind) {
2707 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2708   case TYPE##_ELEMENTS:                                           \
2709     return Heap::kEmptyFixed##Type##ArrayRootIndex;
2710 
2711     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
2712 #undef ELEMENT_KIND_TO_ROOT_INDEX
2713     default:
2714       UNREACHABLE();
2715   }
2716 }
2717 
2718 }  // namespace
2719 
MapForFixedTypedArray(ExternalArrayType array_type)2720 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
2721   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
2722 }
2723 
MapForFixedTypedArray(ElementsKind elements_kind)2724 Map* Heap::MapForFixedTypedArray(ElementsKind elements_kind) {
2725   return Map::cast(roots_[RootIndexForFixedTypedArray(elements_kind)]);
2726 }
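// Example (illustrative): MapForFixedTypedArray(kExternalUint8Array) and
// MapForFixedTypedArray(UINT8_ELEMENTS) both resolve to the map stored at
// kFixedUint8ArrayMapRootIndex, i.e. the map used for FixedUint8Array backing
// stores.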
2727 
EmptyFixedTypedArrayForMap(const Map * map)2728 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
2729   return FixedTypedArrayBase::cast(
2730       roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
2731 }
2732 
CreateFillerObjectAt(Address addr,int size,ClearRecordedSlots clear_slots_mode,ClearFreedMemoryMode clear_memory_mode)2733 HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
2734                                        ClearRecordedSlots clear_slots_mode,
2735                                        ClearFreedMemoryMode clear_memory_mode) {
2736   if (size == 0) return nullptr;
2737   HeapObject* filler = HeapObject::FromAddress(addr);
2738   if (size == kPointerSize) {
2739     filler->set_map_after_allocation(
2740         reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)),
2741         SKIP_WRITE_BARRIER);
2742   } else if (size == 2 * kPointerSize) {
2743     filler->set_map_after_allocation(
2744         reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
2745         SKIP_WRITE_BARRIER);
2746     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
2747       Memory::Address_at(addr + kPointerSize) =
2748           static_cast<Address>(kClearedFreeMemoryValue);
2749     }
2750   } else {
2751     DCHECK_GT(size, 2 * kPointerSize);
2752     filler->set_map_after_allocation(
2753         reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
2754         SKIP_WRITE_BARRIER);
2755     FreeSpace::cast(filler)->relaxed_write_size(size);
2756     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
2757       memset(reinterpret_cast<void*>(addr + 2 * kPointerSize),
2758              kClearedFreeMemoryValue, size - 2 * kPointerSize);
2759     }
2760   }
2761   if (clear_slots_mode == ClearRecordedSlots::kYes) {
2762     ClearRecordedSlotRange(addr, addr + size);
2763   }
2764 
2765   // At this point, we may be deserializing the heap from a snapshot, and
2766   // none of the maps have been created yet; they are still nullptr.
2767   DCHECK((filler->map() == nullptr && !deserialization_complete_) ||
2768          filler->map()->IsMap());
2769   return filler;
2770 }
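// Usage sketch (hypothetical caller; 'heap', 'addr' and 'size' are assumed to
// describe memory that was just freed inside an already-swept page):
//   heap->CreateFillerObjectAt(addr, size, ClearRecordedSlots::kYes,
//                              ClearFreedMemoryMode::kClearFreedMemory);
// This keeps the heap iterable: depending on the size the region becomes a
// one-pointer filler, a two-pointer filler, or a FreeSpace object.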
2771 
2772 
CanMoveObjectStart(HeapObject * object)2773 bool Heap::CanMoveObjectStart(HeapObject* object) {
2774   if (!FLAG_move_object_start) return false;
2775 
2776   // Sampling heap profiler may have a reference to the object.
2777   if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
2778 
2779   Address address = object->address();
2780 
2781   if (lo_space()->Contains(object)) return false;
2782 
2783   // We can move the object start if the page was already swept.
2784   return Page::FromAddress(address)->SweepingDone();
2785 }
2786 
IsImmovable(HeapObject * object)2787 bool Heap::IsImmovable(HeapObject* object) {
2788   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
2789   return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
2790 }
2791 
LeftTrimFixedArray(FixedArrayBase * object,int elements_to_trim)2792 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
2793                                          int elements_to_trim) {
2794   CHECK_NOT_NULL(object);
2795   DCHECK(CanMoveObjectStart(object));
2796   // Add custom visitor to concurrent marker if new left-trimmable type
2797   // is added.
2798   DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
2799   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
2800   const int bytes_to_trim = elements_to_trim * element_size;
2801   Map* map = object->map();
2802 
2803   // For now this trick is only applied to objects in new and paged space.
2804   // In large object space the object's start must coincide with the chunk
2805   // start, and thus the trick is just not applicable.
2806   DCHECK(!lo_space()->Contains(object));
2807   DCHECK(object->map() != fixed_cow_array_map());
2808 
2809   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
2810   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
2811   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
2812 
2813   const int len = object->length();
2814   DCHECK(elements_to_trim <= len);
2815 
2816   // Calculate location of new array start.
2817   Address old_start = object->address();
2818   Address new_start = old_start + bytes_to_trim;
2819 
2820   if (incremental_marking()->IsMarking()) {
2821     incremental_marking()->NotifyLeftTrimming(
2822         object, HeapObject::FromAddress(new_start));
2823   }
2824 
2825   // Technically in new space this write might be omitted (except for
2826   // debug mode which iterates through the heap), but to play it safe
2827   // we still do it.
2828   CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
2829 
2830   // Initialize header of the trimmed array. Since left trimming is only
2831   // performed on pages which are not concurrently swept, creating a filler
2832   // object does not require synchronization.
2833   RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
2834   RELAXED_WRITE_FIELD(object, bytes_to_trim + kPointerSize,
2835                       Smi::FromInt(len - elements_to_trim));
2836 
2837   FixedArrayBase* new_object =
2838       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
2839 
2840   // Remove recorded slots for the new map and length offset.
2841   ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
2842   ClearRecordedSlot(new_object, HeapObject::RawField(
2843                                     new_object, FixedArrayBase::kLengthOffset));
2844 
2845   // Notify the heap profiler of change in object layout.
2846   OnMoveEvent(new_object, object, new_object->Size());
2847   return new_object;
2848 }
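// Usage sketch (hypothetical caller; 'array' is a FixedArrayBase* in new or
// paged space):
//   if (heap->CanMoveObjectStart(array)) {
//     // Drops the first element; the returned pointer must replace 'array'.
//     array = heap->LeftTrimFixedArray(array, 1);
//   }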
2849 
RightTrimFixedArray(FixedArrayBase * object,int elements_to_trim)2850 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
2851   const int len = object->length();
2852   DCHECK_LE(elements_to_trim, len);
2853   DCHECK_GE(elements_to_trim, 0);
2854 
2855   int bytes_to_trim;
2856   DCHECK(!object->IsFixedTypedArrayBase());
2857   if (object->IsByteArray()) {
2858     int new_size = ByteArray::SizeFor(len - elements_to_trim);
2859     bytes_to_trim = ByteArray::SizeFor(len) - new_size;
2860     DCHECK_GE(bytes_to_trim, 0);
2861   } else if (object->IsFixedArray()) {
2862     bytes_to_trim = elements_to_trim * kPointerSize;
2863   } else {
2864     DCHECK(object->IsFixedDoubleArray());
2865     bytes_to_trim = elements_to_trim * kDoubleSize;
2866   }
2867 
2868   CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
2869 }
2870 
RightTrimWeakFixedArray(WeakFixedArray * object,int elements_to_trim)2871 void Heap::RightTrimWeakFixedArray(WeakFixedArray* object,
2872                                    int elements_to_trim) {
2873   CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
2874                                        elements_to_trim * kPointerSize);
2875 }
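// Usage sketch (hypothetical caller): right trimming shrinks the array in
// place, so no new pointer is returned:
//   heap->RightTrimFixedArray(array, 2);            // drop the last 2 elements
//   heap->RightTrimWeakFixedArray(weak_array, 1);   // drop the last element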
2876 
2877 template <typename T>
CreateFillerForArray(T * object,int elements_to_trim,int bytes_to_trim)2878 void Heap::CreateFillerForArray(T* object, int elements_to_trim,
2879                                 int bytes_to_trim) {
2880   DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
2881          object->IsWeakFixedArray());
2882 
2883   // For now this trick is only applied to objects in new and paged space.
2884   DCHECK(object->map() != fixed_cow_array_map());
2885 
2886   if (bytes_to_trim == 0) {
2887     DCHECK_EQ(elements_to_trim, 0);
2888     // No need to create filler and update live bytes counters.
2889     return;
2890   }
2891 
2892   // Calculate location of new array end.
2893   Address old_end = object->address() + object->Size();
2894   Address new_end = old_end - bytes_to_trim;
2895 
2896   // Technically in new space this write might be omitted (except for
2897   // debug mode which iterates through the heap), but to play it safe
2898   // we still do it.
2899   // We do not create a filler for objects in large object space.
2900   // TODO(hpayer): We should shrink the large object page if the size
2901   // of the object changed significantly.
2902   if (!lo_space()->Contains(object)) {
2903     HeapObject* filler =
2904         CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
2905     DCHECK_NOT_NULL(filler);
2906     // Clear the mark bits of the black area that now belongs to the filler.
2907     // This is an optimization. The sweeper will release black fillers anyway.
2908     if (incremental_marking()->black_allocation() &&
2909         incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
2910       Page* page = Page::FromAddress(new_end);
2911       incremental_marking()->marking_state()->bitmap(page)->ClearRange(
2912           page->AddressToMarkbitIndex(new_end),
2913           page->AddressToMarkbitIndex(new_end + bytes_to_trim));
2914     }
2915   }
2916 
2917   // Initialize header of the trimmed array. We are storing the new length
2918   // using release store after creating a filler for the left-over space to
2919   // avoid races with the sweeper thread.
2920   object->synchronized_set_length(object->length() - elements_to_trim);
2921 
2922   // Notify the heap object allocation trackers of the change in object layout.
2923   // The array may not move during GC, but its size must be adjusted anyway.
2924   for (auto& tracker : allocation_trackers_) {
2925     tracker->UpdateObjectSizeEvent(object->address(), object->Size());
2926   }
2927 }
2928 
MakeHeapIterable()2929 void Heap::MakeHeapIterable() {
2930   mark_compact_collector()->EnsureSweepingCompleted();
2931 }
2932 
2933 
ComputeMutatorUtilization(double mutator_speed,double gc_speed)2934 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
2935   const double kMinMutatorUtilization = 0.0;
2936   const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
2937   if (mutator_speed == 0) return kMinMutatorUtilization;
2938   if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
2939   // Derivation:
2940   // mutator_utilization = mutator_time / (mutator_time + gc_time)
2941   // mutator_time = 1 / mutator_speed
2942   // gc_time = 1 / gc_speed
2943   // mutator_utilization = (1 / mutator_speed) /
2944   //                       (1 / mutator_speed + 1 / gc_speed)
2945   // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
2946   return gc_speed / (mutator_speed + gc_speed);
2947 }
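// Worked example (numbers are illustrative): with mutator_speed = 300000
// bytes/ms and gc_speed = 100000 bytes/ms the utilization is
// 100000 / (300000 + 100000) = 0.25, i.e. only a quarter of the time goes to
// the mutator. A gc_speed of 0 falls back to the conservative estimate of
// 200000 bytes/ms.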
2948 
2949 
YoungGenerationMutatorUtilization()2950 double Heap::YoungGenerationMutatorUtilization() {
2951   double mutator_speed = static_cast<double>(
2952       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
2953   double gc_speed =
2954       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
2955   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
2956   if (FLAG_trace_mutator_utilization) {
2957     isolate()->PrintWithTimestamp(
2958         "Young generation mutator utilization = %.3f ("
2959         "mutator_speed=%.f, gc_speed=%.f)\n",
2960         result, mutator_speed, gc_speed);
2961   }
2962   return result;
2963 }
2964 
2965 
OldGenerationMutatorUtilization()2966 double Heap::OldGenerationMutatorUtilization() {
2967   double mutator_speed = static_cast<double>(
2968       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
2969   double gc_speed = static_cast<double>(
2970       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
2971   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
2972   if (FLAG_trace_mutator_utilization) {
2973     isolate()->PrintWithTimestamp(
2974         "Old generation mutator utilization = %.3f ("
2975         "mutator_speed=%.f, gc_speed=%.f)\n",
2976         result, mutator_speed, gc_speed);
2977   }
2978   return result;
2979 }
2980 
2981 
HasLowYoungGenerationAllocationRate()2982 bool Heap::HasLowYoungGenerationAllocationRate() {
2983   const double high_mutator_utilization = 0.993;
2984   return YoungGenerationMutatorUtilization() > high_mutator_utilization;
2985 }
2986 
2987 
HasLowOldGenerationAllocationRate()2988 bool Heap::HasLowOldGenerationAllocationRate() {
2989   const double high_mutator_utilization = 0.993;
2990   return OldGenerationMutatorUtilization() > high_mutator_utilization;
2991 }
2992 
2993 
HasLowAllocationRate()2994 bool Heap::HasLowAllocationRate() {
2995   return HasLowYoungGenerationAllocationRate() &&
2996          HasLowOldGenerationAllocationRate();
2997 }
2998 
IsIneffectiveMarkCompact(size_t old_generation_size,double mutator_utilization)2999 bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
3000                                     double mutator_utilization) {
3001   const double kHighHeapPercentage = 0.8;
3002   const double kLowMutatorUtilization = 0.4;
3003   return old_generation_size >=
3004              kHighHeapPercentage * max_old_generation_size_ &&
3005          mutator_utilization < kLowMutatorUtilization;
3006 }
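// Example (illustrative): with max_old_generation_size_ = 256 MB, a
// mark-compact counts as ineffective when the old generation still holds at
// least 0.8 * 256 MB = ~205 MB afterwards and the mutator utilization is
// below 0.4.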
3007 
CheckIneffectiveMarkCompact(size_t old_generation_size,double mutator_utilization)3008 void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
3009                                        double mutator_utilization) {
3010   const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
3011   if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
3012   if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
3013     consecutive_ineffective_mark_compacts_ = 0;
3014     return;
3015   }
3016   ++consecutive_ineffective_mark_compacts_;
3017   if (consecutive_ineffective_mark_compacts_ ==
3018       kMaxConsecutiveIneffectiveMarkCompacts) {
3019     if (InvokeNearHeapLimitCallback()) {
3020       // The callback increased the heap limit.
3021       consecutive_ineffective_mark_compacts_ = 0;
3022       return;
3023     }
3024     FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
3025   }
3026 }
3027 
HasHighFragmentation()3028 bool Heap::HasHighFragmentation() {
3029   size_t used = OldGenerationSizeOfObjects();
3030   size_t committed = CommittedOldGenerationMemory();
3031   return HasHighFragmentation(used, committed);
3032 }
3033 
HasHighFragmentation(size_t used,size_t committed)3034 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
3035   const size_t kSlack = 16 * MB;
3036   // Fragmentation is high if committed > 2 * used + kSlack.
3037   // Rewrite the expression to avoid overflow.
3038   DCHECK_GE(committed, used);
3039   return committed - used > used + kSlack;
3040 }
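// Worked example (illustrative): with used = 100 MB, fragmentation counts as
// high once committed memory exceeds 2 * 100 MB + 16 MB = 216 MB. The check
// is written as committed - used > used + kSlack to avoid overflow in
// 2 * used + kSlack.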
3041 
ShouldOptimizeForMemoryUsage()3042 bool Heap::ShouldOptimizeForMemoryUsage() {
3043   const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
3044   return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
3045          HighMemoryPressure() || !CanExpandOldGeneration(kOldGenerationSlack);
3046 }
3047 
ActivateMemoryReducerIfNeeded()3048 void Heap::ActivateMemoryReducerIfNeeded() {
3049   // Activate memory reducer when switching to background if
3050   // - there was no mark compact since the start.
3051   // - the committed memory can be potentially reduced.
3052   // 2 pages for the old, code, and map space + 1 page for new space.
3053   const int kMinCommittedMemory = 7 * Page::kPageSize;
3054   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
3055       isolate()->IsIsolateInBackground()) {
3056     MemoryReducer::Event event;
3057     event.type = MemoryReducer::kPossibleGarbage;
3058     event.time_ms = MonotonicallyIncreasingTimeInMs();
3059     memory_reducer_->NotifyPossibleGarbage(event);
3060   }
3061 }
3062 
ReduceNewSpaceSize()3063 void Heap::ReduceNewSpaceSize() {
3064   // TODO(ulan): Unify this constant with the similar constant in
3065   // GCIdleTimeHandler once the change is merged to 4.5.
3066   static const size_t kLowAllocationThroughput = 1000;
3067   const double allocation_throughput =
3068       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
3069 
3070   if (FLAG_predictable) return;
3071 
3072   if (ShouldReduceMemory() ||
3073       ((allocation_throughput != 0) &&
3074        (allocation_throughput < kLowAllocationThroughput))) {
3075     new_space_->Shrink();
3076     UncommitFromSpace();
3077   }
3078 }
3079 
FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason)3080 void Heap::FinalizeIncrementalMarkingIfComplete(
3081     GarbageCollectionReason gc_reason) {
3082   if (incremental_marking()->IsMarking() &&
3083       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
3084        (!incremental_marking()->finalize_marking_completed() &&
3085         mark_compact_collector()->marking_worklist()->IsEmpty() &&
3086         local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
3087     FinalizeIncrementalMarking(gc_reason);
3088   } else if (incremental_marking()->IsComplete() ||
3089              (mark_compact_collector()->marking_worklist()->IsEmpty() &&
3090               local_embedder_heap_tracer()
3091                   ->ShouldFinalizeIncrementalMarking())) {
3092     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3093   }
3094 }
3095 
RegisterDeserializedObjectsForBlackAllocation(Reservation * reservations,const std::vector<HeapObject * > & large_objects,const std::vector<Address> & maps)3096 void Heap::RegisterDeserializedObjectsForBlackAllocation(
3097     Reservation* reservations, const std::vector<HeapObject*>& large_objects,
3098     const std::vector<Address>& maps) {
3099   // TODO(ulan): pause black allocation during deserialization to avoid
3100   // iterating all these objects in one go.
3101 
3102   if (!incremental_marking()->black_allocation()) return;
3103 
3104   // Iterate black objects in old space, code space, map space, and large
3105   // object space for side effects.
3106   IncrementalMarking::MarkingState* marking_state =
3107       incremental_marking()->marking_state();
3108   for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
3109     const Heap::Reservation& res = reservations[i];
3110     for (auto& chunk : res) {
3111       Address addr = chunk.start;
3112       while (addr < chunk.end) {
3113         HeapObject* obj = HeapObject::FromAddress(addr);
3114         // Objects can have any color because incremental marking can
3115         // start in the middle of Heap::ReserveSpace().
3116         if (marking_state->IsBlack(obj)) {
3117           incremental_marking()->ProcessBlackAllocatedObject(obj);
3118         }
3119         addr += obj->Size();
3120       }
3121     }
3122   }
3123   // We potentially deserialized wrappers which require registering with the
3124   // embedder as the marker will not find them.
3125   local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
3126 
3127   // Large object space doesn't use reservations, so it needs custom handling.
3128   for (HeapObject* object : large_objects) {
3129     incremental_marking()->ProcessBlackAllocatedObject(object);
3130   }
3131 
3132   // Map space doesn't use reservations, so it needs custom handling.
3133   for (Address addr : maps) {
3134     incremental_marking()->ProcessBlackAllocatedObject(
3135         HeapObject::FromAddress(addr));
3136   }
3137 }
3138 
NotifyObjectLayoutChange(HeapObject * object,int size,const DisallowHeapAllocation &)3139 void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
3140                                     const DisallowHeapAllocation&) {
3141   DCHECK(InOldSpace(object) || InNewSpace(object) ||
3142          (lo_space()->Contains(object) && object->IsString()));
3143   if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
3144     incremental_marking()->MarkBlackAndPush(object);
3145     if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
3146       // The concurrent marker might have recorded slots for the object.
3147       // Register this object as invalidated to filter out the slots.
3148       MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
3149       chunk->RegisterObjectWithInvalidatedSlots(object, size);
3150     }
3151   }
3152 #ifdef VERIFY_HEAP
3153   if (FLAG_verify_heap) {
3154     DCHECK_NULL(pending_layout_change_object_);
3155     pending_layout_change_object_ = object;
3156   }
3157 #endif
3158 }
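// Typical call pattern (sketch, not taken from a specific caller): layout
// changes are announced before the object is actually mutated, under a
// DisallowHeapAllocation scope, e.g.
//   DisallowHeapAllocation no_gc;
//   heap->NotifyObjectLayoutChange(object, object->Size(), no_gc);
//   object->synchronized_set_map(new_map);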
3159 
3160 #ifdef VERIFY_HEAP
3161 // Helper class for collecting slot addresses.
3162 class SlotCollectingVisitor final : public ObjectVisitor {
3163  public:
VisitPointers(HeapObject * host,Object ** start,Object ** end)3164   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
3165     VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
3166                   reinterpret_cast<MaybeObject**>(end));
3167   }
VisitPointers(HeapObject * host,MaybeObject ** start,MaybeObject ** end)3168   void VisitPointers(HeapObject* host, MaybeObject** start,
3169                      MaybeObject** end) final {
3170     for (MaybeObject** p = start; p < end; p++) {
3171       slots_.push_back(p);
3172     }
3173   }
3174 
number_of_slots()3175   int number_of_slots() { return static_cast<int>(slots_.size()); }
3176 
slot(int i)3177   MaybeObject** slot(int i) { return slots_[i]; }
3178 
3179  private:
3180   std::vector<MaybeObject**> slots_;
3181 };
3182 
VerifyObjectLayoutChange(HeapObject * object,Map * new_map)3183 void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
3184   if (!FLAG_verify_heap) return;
3185 
3186   // Check that Heap::NotifyObjectLayoutChange was called for object transitions
3187   // that are not safe for concurrent marking.
3188   // If you see this check triggering for a freshly allocated object,
3189   // use object->set_map_after_allocation() to initialize its map.
3190   if (pending_layout_change_object_ == nullptr) {
3191     if (object->IsJSObject()) {
3192       DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
3193     } else {
3194       // Check that the set of slots before and after the transition match.
3195       SlotCollectingVisitor old_visitor;
3196       object->IterateFast(&old_visitor);
3197       MapWord old_map_word = object->map_word();
3198       // Temporarily set the new map to iterate new slots.
3199       object->set_map_word(MapWord::FromMap(new_map));
3200       SlotCollectingVisitor new_visitor;
3201       object->IterateFast(&new_visitor);
3202       // Restore the old map.
3203       object->set_map_word(old_map_word);
3204       DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
3205       for (int i = 0; i < new_visitor.number_of_slots(); i++) {
3206         DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
3207       }
3208     }
3209   } else {
3210     DCHECK_EQ(pending_layout_change_object_, object);
3211     pending_layout_change_object_ = nullptr;
3212   }
3213 }
3214 #endif
3215 
ComputeHeapState()3216 GCIdleTimeHeapState Heap::ComputeHeapState() {
3217   GCIdleTimeHeapState heap_state;
3218   heap_state.contexts_disposed = contexts_disposed_;
3219   heap_state.contexts_disposal_rate =
3220       tracer()->ContextDisposalRateInMilliseconds();
3221   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
3222   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
3223   return heap_state;
3224 }
3225 
3226 
PerformIdleTimeAction(GCIdleTimeAction action,GCIdleTimeHeapState heap_state,double deadline_in_ms)3227 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
3228                                  GCIdleTimeHeapState heap_state,
3229                                  double deadline_in_ms) {
3230   bool result = false;
3231   switch (action.type) {
3232     case DONE:
3233       result = true;
3234       break;
3235     case DO_INCREMENTAL_STEP: {
3236       const double remaining_idle_time_in_ms =
3237           incremental_marking()->AdvanceIncrementalMarking(
3238               deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
3239               StepOrigin::kTask);
3240       if (remaining_idle_time_in_ms > 0.0) {
3241         FinalizeIncrementalMarkingIfComplete(
3242             GarbageCollectionReason::kFinalizeMarkingViaTask);
3243       }
3244       result = incremental_marking()->IsStopped();
3245       break;
3246     }
3247     case DO_FULL_GC: {
3248       DCHECK_LT(0, contexts_disposed_);
3249       HistogramTimerScope scope(isolate_->counters()->gc_context());
3250       TRACE_EVENT0("v8", "V8.GCContext");
3251       CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
3252       break;
3253     }
3254     case DO_NOTHING:
3255       break;
3256   }
3257 
3258   return result;
3259 }
3260 
IdleNotificationEpilogue(GCIdleTimeAction action,GCIdleTimeHeapState heap_state,double start_ms,double deadline_in_ms)3261 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
3262                                     GCIdleTimeHeapState heap_state,
3263                                     double start_ms, double deadline_in_ms) {
3264   double idle_time_in_ms = deadline_in_ms - start_ms;
3265   double current_time = MonotonicallyIncreasingTimeInMs();
3266   last_idle_notification_time_ = current_time;
3267   double deadline_difference = deadline_in_ms - current_time;
3268 
3269   contexts_disposed_ = 0;
3270 
3271   if (deadline_in_ms - start_ms >
3272       GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
3273     int committed_memory = static_cast<int>(CommittedMemory() / KB);
3274     int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
3275     isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
3276         start_ms, committed_memory);
3277     isolate()->counters()->aggregated_memory_heap_used()->AddSample(
3278         start_ms, used_memory);
3279   }
3280 
3281   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
3282       FLAG_trace_idle_notification_verbose) {
3283     isolate_->PrintWithTimestamp(
3284         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
3285         "ms, deadline usage %.2f ms [",
3286         idle_time_in_ms, idle_time_in_ms - deadline_difference,
3287         deadline_difference);
3288     action.Print();
3289     PrintF("]");
3290     if (FLAG_trace_idle_notification_verbose) {
3291       PrintF("[");
3292       heap_state.Print();
3293       PrintF("]");
3294     }
3295     PrintF("\n");
3296   }
3297 }
3298 
3299 
MonotonicallyIncreasingTimeInMs()3300 double Heap::MonotonicallyIncreasingTimeInMs() {
3301   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
3302          static_cast<double>(base::Time::kMillisecondsPerSecond);
3303 }
3304 
3305 
IdleNotification(int idle_time_in_ms)3306 bool Heap::IdleNotification(int idle_time_in_ms) {
3307   return IdleNotification(
3308       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
3309       (static_cast<double>(idle_time_in_ms) /
3310        static_cast<double>(base::Time::kMillisecondsPerSecond)));
3311 }
3312 
3313 
IdleNotification(double deadline_in_seconds)3314 bool Heap::IdleNotification(double deadline_in_seconds) {
3315   CHECK(HasBeenSetUp());
3316   double deadline_in_ms =
3317       deadline_in_seconds *
3318       static_cast<double>(base::Time::kMillisecondsPerSecond);
3319   HistogramTimerScope idle_notification_scope(
3320       isolate_->counters()->gc_idle_notification());
3321   TRACE_EVENT0("v8", "V8.GCIdleNotification");
3322   double start_ms = MonotonicallyIncreasingTimeInMs();
3323   double idle_time_in_ms = deadline_in_ms - start_ms;
3324 
3325   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
3326                              OldGenerationAllocationCounter());
3327 
3328   GCIdleTimeHeapState heap_state = ComputeHeapState();
3329 
3330   GCIdleTimeAction action =
3331       gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
3332 
3333   bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
3334 
3335   IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
3336   return result;
3337 }
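// Embedder usage (sketch): the public v8::Isolate::IdleNotificationDeadline
// API is the usual way into this method, e.g.
//   isolate->IdleNotificationDeadline(
//       platform->MonotonicallyIncreasingTime() + 0.01);
// grants roughly 10 ms of idle work (deadlines are passed in seconds).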
3338 
3339 
RecentIdleNotificationHappened()3340 bool Heap::RecentIdleNotificationHappened() {
3341   return (last_idle_notification_time_ +
3342           GCIdleTimeHandler::kMaxScheduledIdleTime) >
3343          MonotonicallyIncreasingTimeInMs();
3344 }
3345 
3346 class MemoryPressureInterruptTask : public CancelableTask {
3347  public:
MemoryPressureInterruptTask(Heap * heap)3348   explicit MemoryPressureInterruptTask(Heap* heap)
3349       : CancelableTask(heap->isolate()), heap_(heap) {}
3350 
~MemoryPressureInterruptTask()3351   virtual ~MemoryPressureInterruptTask() {}
3352 
3353  private:
3354   // v8::internal::CancelableTask overrides.
RunInternal()3355   void RunInternal() override { heap_->CheckMemoryPressure(); }
3356 
3357   Heap* heap_;
3358   DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
3359 };
3360 
CheckMemoryPressure()3361 void Heap::CheckMemoryPressure() {
3362   if (HighMemoryPressure()) {
3363     // The optimizing compiler may be unnecessarily holding on to memory.
3364     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
3365   }
3366   if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
3367     CollectGarbageOnMemoryPressure();
3368   } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
3369     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
3370       StartIncrementalMarking(kReduceMemoryFootprintMask,
3371                               GarbageCollectionReason::kMemoryPressure);
3372     }
3373   }
3374   if (memory_reducer_) {
3375     MemoryReducer::Event event;
3376     event.type = MemoryReducer::kPossibleGarbage;
3377     event.time_ms = MonotonicallyIncreasingTimeInMs();
3378     memory_reducer_->NotifyPossibleGarbage(event);
3379   }
3380 }
3381 
CollectGarbageOnMemoryPressure()3382 void Heap::CollectGarbageOnMemoryPressure() {
3383   const int kGarbageThresholdInBytes = 8 * MB;
3384   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
3385   // This constant is the maximum response time in the RAIL performance model.
3386   const double kMaxMemoryPressurePauseMs = 100;
3387 
3388   double start = MonotonicallyIncreasingTimeInMs();
3389   CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
3390                     GarbageCollectionReason::kMemoryPressure,
3391                     kGCCallbackFlagCollectAllAvailableGarbage);
3392   double end = MonotonicallyIncreasingTimeInMs();
3393 
3394   // Estimate how much memory we can free.
3395   int64_t potential_garbage =
3396       (CommittedMemory() - SizeOfObjects()) + external_memory_;
3397   // If we can potentially free a large amount of memory, then start a GC
3398   // right away instead of waiting for the memory reducer.
3399   if (potential_garbage >= kGarbageThresholdInBytes &&
3400       potential_garbage >=
3401           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
3402     // If we spent less than half of the time budget, then perform a full GC.
3403     // Otherwise, start incremental marking.
3404     if (end - start < kMaxMemoryPressurePauseMs / 2) {
3405       CollectAllGarbage(
3406           kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
3407           GarbageCollectionReason::kMemoryPressure,
3408           kGCCallbackFlagCollectAllAvailableGarbage);
3409     } else {
3410       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
3411         StartIncrementalMarking(kReduceMemoryFootprintMask,
3412                                 GarbageCollectionReason::kMemoryPressure);
3413       }
3414     }
3415   }
3416 }
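// Worked example (numbers are illustrative): with 200 MB committed, 150 MB of
// live objects and no external memory, potential_garbage is 50 MB. That
// exceeds both the 8 MB threshold and 10% of committed memory (20 MB), so if
// the collection above finished in under 50 ms a second full GC runs right
// away; otherwise incremental marking is started instead.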
3417 
MemoryPressureNotification(MemoryPressureLevel level,bool is_isolate_locked)3418 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
3419                                       bool is_isolate_locked) {
3420   MemoryPressureLevel previous = memory_pressure_level_.Value();
3421   memory_pressure_level_.SetValue(level);
3422   if ((previous != MemoryPressureLevel::kCritical &&
3423        level == MemoryPressureLevel::kCritical) ||
3424       (previous == MemoryPressureLevel::kNone &&
3425        level == MemoryPressureLevel::kModerate)) {
3426     if (is_isolate_locked) {
3427       CheckMemoryPressure();
3428     } else {
3429       ExecutionAccess access(isolate());
3430       isolate()->stack_guard()->RequestGC();
3431       V8::GetCurrentPlatform()->CallOnForegroundThread(
3432           reinterpret_cast<v8::Isolate*>(isolate()),
3433           new MemoryPressureInterruptTask(this));
3434     }
3435   }
3436 }
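// Embedder usage (sketch): the public v8::Isolate::MemoryPressureNotification
// API is expected to end up here, e.g.
//   isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
// When the level rises to kCritical (or from kNone to kModerate), memory
// pressure is handled directly on a locked isolate, or via a stack-guard GC
// request plus a foreground MemoryPressureInterruptTask otherwise.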
3437 
AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,void * data)3438 void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
3439                                     void* data) {
3440   const size_t kMaxCallbacks = 100;
3441   CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
3442   for (auto callback_data : near_heap_limit_callbacks_) {
3443     CHECK_NE(callback_data.first, callback);
3444   }
3445   near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
3446 }
3447 
RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,size_t heap_limit)3448 void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
3449                                        size_t heap_limit) {
3450   for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
3451     if (near_heap_limit_callbacks_[i].first == callback) {
3452       near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
3453       if (heap_limit) {
3454         RestoreHeapLimit(heap_limit);
3455       }
3456       return;
3457     }
3458   }
3459   UNREACHABLE();
3460 }
3461 
InvokeNearHeapLimitCallback()3462 bool Heap::InvokeNearHeapLimitCallback() {
3463   if (near_heap_limit_callbacks_.size() > 0) {
3464     HandleScope scope(isolate());
3465     v8::NearHeapLimitCallback callback =
3466         near_heap_limit_callbacks_.back().first;
3467     void* data = near_heap_limit_callbacks_.back().second;
3468     size_t heap_limit = callback(data, max_old_generation_size_,
3469                                  initial_max_old_generation_size_);
3470     if (heap_limit > max_old_generation_size_) {
3471       max_old_generation_size_ = heap_limit;
3472       return true;
3473     }
3474   }
3475   return false;
3476 }
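// Example callback (hypothetical, for illustration only): an embedder can
// extend the limit to postpone an OOM crash, e.g.
//   size_t NearHeapLimit(void* data, size_t current_limit,
//                        size_t initial_limit) {
//     return current_limit + 16u * 1024 * 1024;  // grant some extra headroom
//   }
//   isolate->AddNearHeapLimitCallback(NearHeapLimit, nullptr);
// Returning a value that is not larger than the current limit leaves
// max_old_generation_size_ unchanged and this function reports failure.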
3477 
CollectCodeStatistics()3478 void Heap::CollectCodeStatistics() {
3479   TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
3480   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
3481   // We do not look for code in new space or map space.  If code
3482   // somehow ends up in those spaces, we would miss it here.
3483   CodeStatistics::CollectCodeStatistics(code_space_, isolate());
3484   CodeStatistics::CollectCodeStatistics(old_space_, isolate());
3485   CodeStatistics::CollectCodeStatistics(lo_space_, isolate());
3486 }
3487 
3488 #ifdef DEBUG
3489 
Print()3490 void Heap::Print() {
3491   if (!HasBeenSetUp()) return;
3492   isolate()->PrintStack(stdout);
3493 
3494   for (SpaceIterator it(this); it.has_next();) {
3495     it.next()->Print();
3496   }
3497 }
3498 
3499 
ReportCodeStatistics(const char * title)3500 void Heap::ReportCodeStatistics(const char* title) {
3501   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3502   CollectCodeStatistics();
3503   CodeStatistics::ReportCodeStatistics(isolate());
3504 }
3505 
3506 #endif  // DEBUG
3507 
GarbageCollectionReasonToString(GarbageCollectionReason gc_reason)3508 const char* Heap::GarbageCollectionReasonToString(
3509     GarbageCollectionReason gc_reason) {
3510   switch (gc_reason) {
3511     case GarbageCollectionReason::kAllocationFailure:
3512       return "allocation failure";
3513     case GarbageCollectionReason::kAllocationLimit:
3514       return "allocation limit";
3515     case GarbageCollectionReason::kContextDisposal:
3516       return "context disposal";
3517     case GarbageCollectionReason::kCountersExtension:
3518       return "counters extension";
3519     case GarbageCollectionReason::kDebugger:
3520       return "debugger";
3521     case GarbageCollectionReason::kDeserializer:
3522       return "deserialize";
3523     case GarbageCollectionReason::kExternalMemoryPressure:
3524       return "external memory pressure";
3525     case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
3526       return "finalize incremental marking via stack guard";
3527     case GarbageCollectionReason::kFinalizeMarkingViaTask:
3528       return "finalize incremental marking via task";
3529     case GarbageCollectionReason::kFullHashtable:
3530       return "full hash-table";
3531     case GarbageCollectionReason::kHeapProfiler:
3532       return "heap profiler";
3533     case GarbageCollectionReason::kIdleTask:
3534       return "idle task";
3535     case GarbageCollectionReason::kLastResort:
3536       return "last resort";
3537     case GarbageCollectionReason::kLowMemoryNotification:
3538       return "low memory notification";
3539     case GarbageCollectionReason::kMakeHeapIterable:
3540       return "make heap iterable";
3541     case GarbageCollectionReason::kMemoryPressure:
3542       return "memory pressure";
3543     case GarbageCollectionReason::kMemoryReducer:
3544       return "memory reducer";
3545     case GarbageCollectionReason::kRuntime:
3546       return "runtime";
3547     case GarbageCollectionReason::kSamplingProfiler:
3548       return "sampling profiler";
3549     case GarbageCollectionReason::kSnapshotCreator:
3550       return "snapshot creator";
3551     case GarbageCollectionReason::kTesting:
3552       return "testing";
3553     case GarbageCollectionReason::kUnknown:
3554       return "unknown";
3555   }
3556   UNREACHABLE();
3557 }
3558 
Contains(HeapObject * value)3559 bool Heap::Contains(HeapObject* value) {
3560   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
3561     return false;
3562   }
3563   return HasBeenSetUp() &&
3564          (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
3565           code_space_->Contains(value) || map_space_->Contains(value) ||
3566           lo_space_->Contains(value) || read_only_space_->Contains(value));
3567 }
3568 
ContainsSlow(Address addr)3569 bool Heap::ContainsSlow(Address addr) {
3570   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
3571     return false;
3572   }
3573   return HasBeenSetUp() &&
3574          (new_space_->ToSpaceContainsSlow(addr) ||
3575           old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
3576           map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr) ||
3577           read_only_space_->Contains(addr));
3578 }
3579 
InSpace(HeapObject * value,AllocationSpace space)3580 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3581   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
3582     return false;
3583   }
3584   if (!HasBeenSetUp()) return false;
3585 
3586   switch (space) {
3587     case NEW_SPACE:
3588       return new_space_->ToSpaceContains(value);
3589     case OLD_SPACE:
3590       return old_space_->Contains(value);
3591     case CODE_SPACE:
3592       return code_space_->Contains(value);
3593     case MAP_SPACE:
3594       return map_space_->Contains(value);
3595     case LO_SPACE:
3596       return lo_space_->Contains(value);
3597     case RO_SPACE:
3598       return read_only_space_->Contains(value);
3599   }
3600   UNREACHABLE();
3601 }
3602 
InSpaceSlow(Address addr,AllocationSpace space)3603 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
3604   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
3605     return false;
3606   }
3607   if (!HasBeenSetUp()) return false;
3608 
3609   switch (space) {
3610     case NEW_SPACE:
3611       return new_space_->ToSpaceContainsSlow(addr);
3612     case OLD_SPACE:
3613       return old_space_->ContainsSlow(addr);
3614     case CODE_SPACE:
3615       return code_space_->ContainsSlow(addr);
3616     case MAP_SPACE:
3617       return map_space_->ContainsSlow(addr);
3618     case LO_SPACE:
3619       return lo_space_->ContainsSlow(addr);
3620     case RO_SPACE:
3621       return read_only_space_->ContainsSlow(addr);
3622   }
3623   UNREACHABLE();
3624 }
3625 
3626 
IsValidAllocationSpace(AllocationSpace space)3627 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
3628   switch (space) {
3629     case NEW_SPACE:
3630     case OLD_SPACE:
3631     case CODE_SPACE:
3632     case MAP_SPACE:
3633     case LO_SPACE:
3634     case RO_SPACE:
3635       return true;
3636     default:
3637       return false;
3638   }
3639 }
3640 
3641 
RootIsImmortalImmovable(int root_index)3642 bool Heap::RootIsImmortalImmovable(int root_index) {
3643   switch (root_index) {
3644 #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
3645     IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
3646 #undef IMMORTAL_IMMOVABLE_ROOT
3647 #define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
3648     INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
3649 #undef INTERNALIZED_STRING
3650 #define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
3651     STRING_TYPE_LIST(STRING_TYPE)
3652 #undef STRING_TYPE
3653     return true;
3654     default:
3655       return false;
3656   }
3657 }
3658 
3659 #ifdef VERIFY_HEAP
3660 class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
3661  protected:
VerifyPointers(HeapObject * host,MaybeObject ** start,MaybeObject ** end)3662   void VerifyPointers(HeapObject* host, MaybeObject** start,
3663                       MaybeObject** end) override {
3664     Heap* heap = host->GetIsolate()->heap();
3665     if (host != nullptr) {
3666       CHECK(heap->InReadOnlySpace(host->map()));
3667     }
3668     VerifyPointersVisitor::VerifyPointers(host, start, end);
3669 
3670     for (MaybeObject** current = start; current < end; current++) {
3671       HeapObject* object;
3672       if ((*current)->ToStrongOrWeakHeapObject(&object)) {
3673         CHECK(heap->InReadOnlySpace(object));
3674       }
3675     }
3676   }
3677 };
3678 
Verify()3679 void Heap::Verify() {
3680   CHECK(HasBeenSetUp());
3681   HandleScope scope(isolate());
3682 
3683   // We have to wait here for the sweeper threads to have an iterable heap.
3684   mark_compact_collector()->EnsureSweepingCompleted();
3685 
3686   VerifyPointersVisitor visitor;
3687   IterateRoots(&visitor, VISIT_ONLY_STRONG);
3688 
3689   VerifySmisVisitor smis_visitor;
3690   IterateSmiRoots(&smis_visitor);
3691 
3692   new_space_->Verify();
3693 
3694   old_space_->Verify(&visitor);
3695   map_space_->Verify(&visitor);
3696 
3697   VerifyPointersVisitor no_dirty_regions_visitor;
3698   code_space_->Verify(&no_dirty_regions_visitor);
3699 
3700   lo_space_->Verify();
3701 
3702   VerifyReadOnlyPointersVisitor read_only_visitor;
3703   read_only_space_->Verify(&read_only_visitor);
3704 }
3705 
3706 class SlotVerifyingVisitor : public ObjectVisitor {
3707  public:
SlotVerifyingVisitor(std::set<Address> * untyped,std::set<std::pair<SlotType,Address>> * typed)3708   SlotVerifyingVisitor(std::set<Address>* untyped,
3709                        std::set<std::pair<SlotType, Address> >* typed)
3710       : untyped_(untyped), typed_(typed) {}
3711 
3712   virtual bool ShouldHaveBeenRecorded(HeapObject* host,
3713                                       MaybeObject* target) = 0;
3714 
VisitPointers(HeapObject * host,Object ** start,Object ** end)3715   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
3716 #ifdef DEBUG
3717     for (Object** slot = start; slot < end; slot++) {
3718       DCHECK(!HasWeakHeapObjectTag(*slot));
3719     }
3720 #endif  // DEBUG
3721     VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
3722                   reinterpret_cast<MaybeObject**>(end));
3723   }
3724 
VisitPointers(HeapObject * host,MaybeObject ** start,MaybeObject ** end)3725   void VisitPointers(HeapObject* host, MaybeObject** start,
3726                      MaybeObject** end) final {
3727     for (MaybeObject** slot = start; slot < end; slot++) {
3728       if (ShouldHaveBeenRecorded(host, *slot)) {
3729         CHECK_GT(untyped_->count(reinterpret_cast<Address>(slot)), 0);
3730       }
3731     }
3732   }
3733 
VisitCodeTarget(Code * host,RelocInfo * rinfo)3734   void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
3735     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
3736     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
3737       CHECK(
3738           InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
3739           (rinfo->IsInConstantPool() &&
3740            InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
3741     }
3742   }
3743 
VisitEmbeddedPointer(Code * host,RelocInfo * rinfo)3744   void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
3745     Object* target = rinfo->target_object();
3746     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
3747       CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
3748             (rinfo->IsInConstantPool() &&
3749              InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
3750     }
3751   }
3752 
3753  private:
InTypedSet(SlotType type,Address slot)3754   bool InTypedSet(SlotType type, Address slot) {
3755     return typed_->count(std::make_pair(type, slot)) > 0;
3756   }
3757   std::set<Address>* untyped_;
3758   std::set<std::pair<SlotType, Address> >* typed_;
3759 };
3760 
3761 class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
3762  public:
OldToNewSlotVerifyingVisitor(Heap * heap,std::set<Address> * untyped,std::set<std::pair<SlotType,Address>> * typed)3763   OldToNewSlotVerifyingVisitor(Heap* heap, std::set<Address>* untyped,
3764                                std::set<std::pair<SlotType, Address> >* typed)
3765       : SlotVerifyingVisitor(untyped, typed), heap_(heap) {}
3766 
ShouldHaveBeenRecorded(HeapObject * host,MaybeObject * target)3767   bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
3768     DCHECK_IMPLIES(
3769         target->IsStrongOrWeakHeapObject() && heap_->InNewSpace(target),
3770         heap_->InToSpace(target));
3771     return target->IsStrongOrWeakHeapObject() && heap_->InNewSpace(target) &&
3772            !heap_->InNewSpace(host);
3773   }
3774 
3775  private:
3776   Heap* heap_;
3777 };
3778 
3779 template <RememberedSetType direction>
CollectSlots(MemoryChunk * chunk,Address start,Address end,std::set<Address> * untyped,std::set<std::pair<SlotType,Address>> * typed)3780 void CollectSlots(MemoryChunk* chunk, Address start, Address end,
3781                   std::set<Address>* untyped,
3782                   std::set<std::pair<SlotType, Address> >* typed) {
3783   RememberedSet<direction>::Iterate(chunk,
3784                                     [start, end, untyped](Address slot) {
3785                                       if (start <= slot && slot < end) {
3786                                         untyped->insert(slot);
3787                                       }
3788                                       return KEEP_SLOT;
3789                                     },
3790                                     SlotSet::PREFREE_EMPTY_BUCKETS);
3791   RememberedSet<direction>::IterateTyped(
3792       chunk, [start, end, typed](SlotType type, Address host, Address slot) {
3793         if (start <= slot && slot < end) {
3794           typed->insert(std::make_pair(type, slot));
3795         }
3796         return KEEP_SLOT;
3797       });
3798 }
3799 
VerifyRememberedSetFor(HeapObject * object)3800 void Heap::VerifyRememberedSetFor(HeapObject* object) {
3801   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
3802   DCHECK_IMPLIES(chunk->mutex() == nullptr, InReadOnlySpace(object));
3803   // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
3804   base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
3805       chunk->mutex());
3806   Address start = object->address();
3807   Address end = start + object->Size();
3808   std::set<Address> old_to_new;
3809   std::set<std::pair<SlotType, Address> > typed_old_to_new;
3810   if (!InNewSpace(object)) {
3811     store_buffer()->MoveAllEntriesToRememberedSet();
3812     CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
3813     OldToNewSlotVerifyingVisitor visitor(this, &old_to_new, &typed_old_to_new);
3814     object->IterateBody(&visitor);
3815   }
3816   // TODO(ulan): Add old to old slot set verification once all weak objects
3817   // have their own instance types and slots are recorded for all weak fields.
3818 }
3819 #endif
3820 
3821 #ifdef DEBUG
VerifyCountersAfterSweeping()3822 void Heap::VerifyCountersAfterSweeping() {
3823   PagedSpaces spaces(this);
3824   for (PagedSpace* space = spaces.next(); space != nullptr;
3825        space = spaces.next()) {
3826     space->VerifyCountersAfterSweeping();
3827   }
3828 }
3829 
VerifyCountersBeforeConcurrentSweeping()3830 void Heap::VerifyCountersBeforeConcurrentSweeping() {
3831   PagedSpaces spaces(this);
3832   for (PagedSpace* space = spaces.next(); space != nullptr;
3833        space = spaces.next()) {
3834     space->VerifyCountersBeforeConcurrentSweeping();
3835   }
3836 }
3837 #endif
3838 
ZapFromSpace()3839 void Heap::ZapFromSpace() {
3840   if (!new_space_->IsFromSpaceCommitted()) return;
3841   for (Page* page :
3842        PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
3843     for (Address cursor = page->area_start(), limit = page->area_end();
3844          cursor < limit; cursor += kPointerSize) {
3845       Memory::Address_at(cursor) = static_cast<Address>(kFromSpaceZapValue);
3846     }
3847   }
3848 }
3849 
ZapCodeObject(Address start_address,int size_in_bytes)3850 void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
3851 #ifdef DEBUG
3852   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
3853     reinterpret_cast<Object**>(start_address)[i] = Smi::FromInt(kCodeZapValue);
3854   }
3855 #endif
3856 }
3857 
IterateRoots(RootVisitor * v,VisitMode mode)3858 void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
3859   IterateStrongRoots(v, mode);
3860   IterateWeakRoots(v, mode);
3861 }
3862 
IterateWeakRoots(RootVisitor * v,VisitMode mode)3863 void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
3864   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
3865                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
3866                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
3867   v->VisitRootPointer(
3868       Root::kStringTable, nullptr,
3869       reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
3870   v->Synchronize(VisitorSynchronization::kStringTable);
3871   if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
3872       mode != VISIT_FOR_SERIALIZATION) {
3873     // Scavenge collections have special processing for this.
3874     // Do not visit for serialization, since the external string table will
3875     // be populated from scratch upon deserialization.
3876     external_string_table_.IterateAll(v);
3877   }
3878   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
3879 }
3880 
IterateSmiRoots(RootVisitor * v)3881 void Heap::IterateSmiRoots(RootVisitor* v) {
3882   // Acquire execution access since we are going to read stack limit values.
3883   ExecutionAccess access(isolate());
3884   v->VisitRootPointers(Root::kSmiRootList, nullptr, &roots_[kSmiRootsStart],
3885                        &roots_[kRootListLength]);
3886   v->Synchronize(VisitorSynchronization::kSmiRootList);
3887 }
3888 
IterateEncounteredWeakCollections(RootVisitor * visitor)3889 void Heap::IterateEncounteredWeakCollections(RootVisitor* visitor) {
3890   visitor->VisitRootPointer(Root::kWeakCollections, nullptr,
3891                             &encountered_weak_collections_);
3892 }
3893 
3894 // We cannot avoid stale handles to left-trimmed objects, but can only make
3895 // sure all handles still needed are updated. Filter out a stale pointer
3896 // and clear the slot to allow post processing of handles (needed because
3897 // the sweeper might actually free the underlying page).
3898 class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
3899  public:
FixStaleLeftTrimmedHandlesVisitor(Heap * heap)3900   explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
3901     USE(heap_);
3902   }
3903 
VisitRootPointer(Root root,const char * description,Object ** p)3904   void VisitRootPointer(Root root, const char* description,
3905                         Object** p) override {
3906     FixHandle(p);
3907   }
3908 
VisitRootPointers(Root root,const char * description,Object ** start,Object ** end)3909   void VisitRootPointers(Root root, const char* description, Object** start,
3910                          Object** end) override {
3911     for (Object** p = start; p < end; p++) FixHandle(p);
3912   }
3913 
3914  private:
FixHandle(Object ** p)3915   inline void FixHandle(Object** p) {
3916     if (!(*p)->IsHeapObject()) return;
3917     HeapObject* current = reinterpret_cast<HeapObject*>(*p);
3918     const MapWord map_word = current->map_word();
3919     if (!map_word.IsForwardingAddress() && current->IsFiller()) {
3920 #ifdef DEBUG
3921       // We need to find a FixedArrayBase map after walking the fillers.
3922       while (current->IsFiller()) {
3923         Address next = reinterpret_cast<Address>(current);
3924         if (current->map() == heap_->one_pointer_filler_map()) {
3925           next += kPointerSize;
3926         } else if (current->map() == heap_->two_pointer_filler_map()) {
3927           next += 2 * kPointerSize;
3928         } else {
3929           next += current->Size();
3930         }
3931         current = reinterpret_cast<HeapObject*>(next);
3932       }
3933       DCHECK(current->IsFixedArrayBase());
3934 #endif  // DEBUG
3935       *p = nullptr;
3936     }
3937   }
3938 
3939   Heap* heap_;
3940 };
3941 
IterateStrongRoots(RootVisitor * v,VisitMode mode)3942 void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
3943   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
3944                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
3945                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
3946   v->VisitRootPointers(Root::kStrongRootList, nullptr, &roots_[0],
3947                        &roots_[kStrongRootListLength]);
3948   v->Synchronize(VisitorSynchronization::kStrongRootList);
3949 
3950   isolate_->bootstrapper()->Iterate(v);
3951   v->Synchronize(VisitorSynchronization::kBootstrapper);
3952   isolate_->Iterate(v);
3953   v->Synchronize(VisitorSynchronization::kTop);
3954   Relocatable::Iterate(isolate_, v);
3955   v->Synchronize(VisitorSynchronization::kRelocatable);
3956   isolate_->debug()->Iterate(v);
3957   v->Synchronize(VisitorSynchronization::kDebug);
3958 
3959   isolate_->compilation_cache()->Iterate(v);
3960   v->Synchronize(VisitorSynchronization::kCompilationCache);
3961 
3962   // Iterate over local handles in handle scopes.
3963   FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
3964   isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
3965   isolate_->handle_scope_implementer()->Iterate(v);
3966   isolate_->IterateDeferredHandles(v);
3967   v->Synchronize(VisitorSynchronization::kHandleScope);
3968 
3969   // Iterate over the builtin code objects and code stubs in the
3970   // heap. Note that it is not necessary to iterate over code objects
3971   // on scavenge collections.
3972   if (!isMinorGC) {
3973     isolate_->builtins()->IterateBuiltins(v);
3974     v->Synchronize(VisitorSynchronization::kBuiltins);
3975     isolate_->interpreter()->IterateDispatchTable(v);
3976     v->Synchronize(VisitorSynchronization::kDispatchTable);
3977   }
3978 
3979   // Iterate over global handles.
3980   switch (mode) {
3981     case VISIT_FOR_SERIALIZATION:
3982       // Global handles are not iterated by the serializer. Values referenced by
3983       // global handles need to be added manually.
3984       break;
3985     case VISIT_ONLY_STRONG:
3986       isolate_->global_handles()->IterateStrongRoots(v);
3987       break;
3988     case VISIT_ALL_IN_SCAVENGE:
3989       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
3990       break;
3991     case VISIT_ALL_IN_MINOR_MC_MARK:
3992       // Global handles are processed manually by the minor MC.
3993       break;
3994     case VISIT_ALL_IN_MINOR_MC_UPDATE:
3995       // Global handles are processed manually by the minor MC.
3996       break;
3997     case VISIT_ALL_IN_SWEEP_NEWSPACE:
3998     case VISIT_ALL:
3999       isolate_->global_handles()->IterateAllRoots(v);
4000       break;
4001   }
4002   v->Synchronize(VisitorSynchronization::kGlobalHandles);
4003 
4004   // Iterate over eternal handles. Eternal handles are not iterated by the
4005   // serializer. Values referenced by eternal handles need to be added manually.
4006   if (mode != VISIT_FOR_SERIALIZATION) {
4007     if (isMinorGC) {
4008       isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4009     } else {
4010       isolate_->eternal_handles()->IterateAllRoots(v);
4011     }
4012   }
4013   v->Synchronize(VisitorSynchronization::kEternalHandles);
4014 
4015   // Iterate over pointers being held by inactive threads.
4016   isolate_->thread_manager()->Iterate(v);
4017   v->Synchronize(VisitorSynchronization::kThreadManager);
4018 
4019   // Iterate over other strong roots (currently only identity maps).
4020   for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
4021     v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
4022   }
4023   v->Synchronize(VisitorSynchronization::kStrongRoots);
4024 
4025   // Iterate over the partial snapshot cache unless serializing.
4026   if (mode != VISIT_FOR_SERIALIZATION) {
4027     SerializerDeserializer::Iterate(isolate_, v);
4028     // We don't do a v->Synchronize call here because the serializer and the
4029     // deserializer are deliberately out of sync here.
4030   }
4031 }
4032 
4033 void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
4034   isolate_->global_handles()->IterateWeakRoots(v);
4035 }
4036 
4037 // TODO(1236194): Since the heap size is configurable on the command line
4038 // and through the API, we should gracefully handle the case that the heap
4039 // size is not big enough to fit all the initial objects.
4040 bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
4041                          size_t max_old_generation_size_in_mb,
4042                          size_t code_range_size_in_mb) {
4043   if (HasBeenSetUp()) return false;
4044 
4045   // Overwrite default configuration.
4046   if (max_semi_space_size_in_kb != 0) {
4047     max_semi_space_size_ =
4048         RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
4049   }
4050   if (max_old_generation_size_in_mb != 0) {
4051     max_old_generation_size_ = max_old_generation_size_in_mb * MB;
4052   }
4053 
4054   // If max space size flags are specified, overwrite the configuration.
4055   if (FLAG_max_semi_space_size > 0) {
4056     max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
4057   }
4058   if (FLAG_max_old_space_size > 0) {
4059     max_old_generation_size_ =
4060         static_cast<size_t>(FLAG_max_old_space_size) * MB;
4061   }
4062 
4063   if (Page::kPageSize > MB) {
4064     max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
4065     max_old_generation_size_ =
4066         RoundUp<Page::kPageSize>(max_old_generation_size_);
4067   }
4068 
4069   if (FLAG_stress_compaction) {
4070     // This will cause more frequent GCs when stressing.
4071     max_semi_space_size_ = MB;
4072   }
4073 
4074   // The new space size must be a power of two to support single-bit testing
4075   // for containment.
4076   max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
4077       static_cast<uint64_t>(max_semi_space_size_)));
4078 
4079   if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
4080     // Start with at least 1*MB semi-space on machines with a lot of memory.
4081     initial_semispace_size_ =
4082         Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
4083   }
4084 
4085   if (FLAG_min_semi_space_size > 0) {
4086     size_t initial_semispace_size =
4087         static_cast<size_t>(FLAG_min_semi_space_size) * MB;
4088     if (initial_semispace_size > max_semi_space_size_) {
4089       initial_semispace_size_ = max_semi_space_size_;
4090       if (FLAG_trace_gc) {
4091         PrintIsolate(isolate_,
4092                      "Min semi-space size cannot be more than the maximum "
4093                      "semi-space size of %" PRIuS " MB\n",
4094                      max_semi_space_size_ / MB);
4095       }
4096     } else {
4097       initial_semispace_size_ =
4098           RoundUp<Page::kPageSize>(initial_semispace_size);
4099     }
4100   }
4101 
4102   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4103 
4104   if (FLAG_semi_space_growth_factor < 2) {
4105     FLAG_semi_space_growth_factor = 2;
4106   }
4107 
4108   // The old generation is paged and needs at least one page for each space.
4109   int paged_space_count =
4110       LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
4111   initial_max_old_generation_size_ = max_old_generation_size_ =
4112       Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
4113           max_old_generation_size_);
4114 
4115   if (FLAG_initial_old_space_size > 0) {
4116     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
4117   } else {
4118     initial_old_generation_size_ =
4119         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
4120   }
4121   old_generation_allocation_limit_ = initial_old_generation_size_;
4122 
4123   // We rely on being able to allocate new arrays in paged spaces.
4124   DCHECK(kMaxRegularHeapObjectSize >=
4125          (JSArray::kSize +
4126           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
4127           AllocationMemento::kSize));
4128 
4129   code_range_size_ = code_range_size_in_mb * MB;
4130 
4131   configured_ = true;
4132   return true;
4133 }
4134 
4135 
4136 void Heap::AddToRingBuffer(const char* string) {
4137   size_t first_part =
4138       Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
4139   memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
4140   ring_buffer_end_ += first_part;
4141   if (first_part < strlen(string)) {
4142     ring_buffer_full_ = true;
4143     size_t second_part = strlen(string) - first_part;
4144     memcpy(trace_ring_buffer_, string + first_part, second_part);
4145     ring_buffer_end_ = second_part;
4146   }
4147 }
4148 
4149 
4150 void Heap::GetFromRingBuffer(char* buffer) {
4151   size_t copied = 0;
4152   if (ring_buffer_full_) {
4153     copied = kTraceRingBufferSize - ring_buffer_end_;
4154     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
4155   }
4156   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
4157 }
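
// Sketch of the ring buffer behaviour above, assuming (for illustration only)
// kTraceRingBufferSize == 8: starting empty, AddToRingBuffer("abcdef") copies
// six bytes and leaves ring_buffer_end_ == 6; AddToRingBuffer("ghij") writes
// "gh" at positions 6..7, sets ring_buffer_full_, wraps "ij" to positions
// 0..1, and leaves ring_buffer_end_ == 2. The buffer then holds "ijcdefgh",
// and GetFromRingBuffer reassembles the most recent bytes as "cdefghij".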
4158 
4159 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0); }
4160 
4161 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4162   *stats->start_marker = HeapStats::kStartMarker;
4163   *stats->end_marker = HeapStats::kEndMarker;
4164   *stats->ro_space_size = read_only_space_->Size();
4165   *stats->ro_space_capacity = read_only_space_->Capacity();
4166   *stats->new_space_size = new_space_->Size();
4167   *stats->new_space_capacity = new_space_->Capacity();
4168   *stats->old_space_size = old_space_->SizeOfObjects();
4169   *stats->old_space_capacity = old_space_->Capacity();
4170   *stats->code_space_size = code_space_->SizeOfObjects();
4171   *stats->code_space_capacity = code_space_->Capacity();
4172   *stats->map_space_size = map_space_->SizeOfObjects();
4173   *stats->map_space_capacity = map_space_->Capacity();
4174   *stats->lo_space_size = lo_space_->Size();
4175   isolate_->global_handles()->RecordStats(stats);
4176   *stats->memory_allocator_size = memory_allocator()->Size();
4177   *stats->memory_allocator_capacity =
4178       memory_allocator()->Size() + memory_allocator()->Available();
4179   *stats->os_error = base::OS::GetLastError();
4180   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
4181   *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
4182   if (take_snapshot) {
4183     HeapIterator iterator(this);
4184     for (HeapObject* obj = iterator.next(); obj != nullptr;
4185          obj = iterator.next()) {
4186       InstanceType type = obj->map()->instance_type();
4187       DCHECK(0 <= type && type <= LAST_TYPE);
4188       stats->objects_per_type[type]++;
4189       stats->size_per_type[type] += obj->Size();
4190     }
4191   }
4192   if (stats->last_few_messages != nullptr)
4193     GetFromRingBuffer(stats->last_few_messages);
4194   if (stats->js_stacktrace != nullptr) {
4195     FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
4196     StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
4197     if (gc_state() == Heap::NOT_IN_GC) {
4198       isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
4199     } else {
4200       accumulator.Add("Cannot get stack trace in GC.");
4201     }
4202   }
4203 }
4204 
4205 size_t Heap::OldGenerationSizeOfObjects() {
4206   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
4207   size_t total = 0;
4208   for (PagedSpace* space = spaces.next(); space != nullptr;
4209        space = spaces.next()) {
4210     total += space->SizeOfObjects();
4211   }
4212   return total + lo_space_->SizeOfObjects();
4213 }
4214 
4215 uint64_t Heap::PromotedExternalMemorySize() {
4216   if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
4217   return static_cast<uint64_t>(external_memory_ -
4218                                external_memory_at_last_mark_compact_);
4219 }
4220 
4221 
4222 const double Heap::kMinHeapGrowingFactor = 1.1;
4223 const double Heap::kMaxHeapGrowingFactor = 4.0;
4224 const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
4225 const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
4226 const double Heap::kConservativeHeapGrowingFactor = 1.3;
4227 const double Heap::kTargetMutatorUtilization = 0.97;
4228 
4229 // Given the GC speed in bytes per ms and the allocation throughput (mutator
4230 // speed) in bytes per ms, this function returns the heap growing factor that
4231 // will achieve kTargetMutatorUtilization if the GC speed and the mutator
4232 // speed remain the same until the next GC.
4233 //
4234 // For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
4235 // TM / (TM + TG), where TM is the time spent in the mutator and TG is the
4236 // time spent in the garbage collector.
4237 //
4238 // Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
4239 // time-frame from the end of the current GC to the end of the next GC. Based
4240 // on the MU we can compute the heap growing factor F as
4241 //
4242 // F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
4243 //
4244 // This formula can be derived as follows.
4245 //
4246 // F = Limit / Live by definition, where the Limit is the allocation limit,
4247 // and the Live is size of live objects.
4248 // Let's assume that we already know the Limit. Then:
4249 //   TG = Limit / gc_speed
4250 //   TM = (TM + TG) * MU, by definition of MU.
4251 //   TM = TG * MU / (1 - MU)
4252 //   TM = Limit *  MU / (gc_speed * (1 - MU))
4253 // On the other hand, if the allocation throughput remains constant:
4254 //   Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
4255 // Solving it for TM, we get
4256 //   TM = (Limit - Live) / mutator_speed
4257 // Combining the two equations for TM:
4258 //   (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
4259 //   (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
4260 // substitute R = gc_speed / mutator_speed
4261 //   (Limit - Live) = Limit * MU  / (R * (1 - MU))
4262 // substitute F = Limit / Live
4263 //   F - 1 = F * MU  / (R * (1 - MU))
4264 //   F - F * MU / (R * (1 - MU)) = 1
4265 //   F * (1 - MU / (R * (1 - MU))) = 1
4266 //   F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
4267 //   F = R * (1 - MU) / (R * (1 - MU) - MU)
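//
// A worked example with illustrative (assumed) numbers: for gc_speed =
// 100 MB/ms and mutator_speed = 1 MB/ms we get R = 100, and with
// MU = kTargetMutatorUtilization = 0.97:
//   F = 100 * (1 - 0.97) / (100 * (1 - 0.97) - 0.97) = 3 / 2.03 ~ 1.48
// i.e. the heap would be allowed to grow to roughly 1.48x the live size,
// subject to the clamping against max_factor and kMinHeapGrowingFactor below.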
4268 double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
4269                                double max_factor) {
4270   DCHECK_LE(kMinHeapGrowingFactor, max_factor);
4271   DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
4272   if (gc_speed == 0 || mutator_speed == 0) return max_factor;
4273 
4274   const double speed_ratio = gc_speed / mutator_speed;
4275   const double mu = kTargetMutatorUtilization;
4276 
4277   const double a = speed_ratio * (1 - mu);
4278   const double b = speed_ratio * (1 - mu) - mu;
4279 
4280   // The factor is a / b, but we need to check for small b first.
4281   double factor = (a < b * max_factor) ? a / b : max_factor;
4282   factor = Min(factor, max_factor);
4283   factor = Max(factor, kMinHeapGrowingFactor);
4284   return factor;
4285 }
4286 
4287 double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
4288   const double min_small_factor = 1.3;
4289   const double max_small_factor = 2.0;
4290   const double high_factor = 4.0;
4291 
4292   size_t max_old_generation_size_in_mb = max_old_generation_size / MB;
4293   max_old_generation_size_in_mb =
4294       Max(max_old_generation_size_in_mb,
4295           static_cast<size_t>(kMinOldGenerationSize));
4296 
4297   // If we are on a device with lots of memory, we allow a high heap
4298   // growing factor.
4299   if (max_old_generation_size_in_mb >= kMaxOldGenerationSize) {
4300     return high_factor;
4301   }
4302 
4303   DCHECK_GE(max_old_generation_size_in_mb, kMinOldGenerationSize);
4304   DCHECK_LT(max_old_generation_size_in_mb, kMaxOldGenerationSize);
4305 
4306   // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
4307   double factor = (max_old_generation_size_in_mb - kMinOldGenerationSize) *
4308                       (max_small_factor - min_small_factor) /
4309                       (kMaxOldGenerationSize - kMinOldGenerationSize) +
4310                   min_small_factor;
4311   return factor;
4312 }
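
// A worked instance of the linear scaling above, using assumed threshold
// values purely for illustration (128 MB standing in for kMinOldGenerationSize
// and 1024 MB for kMaxOldGenerationSize): a 512 MB max old generation would
// get
//   (512 - 128) * (2.0 - 1.3) / (1024 - 128) + 1.3 = 0.3 + 1.3 = 1.6.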
4313 
4314 size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
4315                                                    size_t old_gen_size) {
4316   CHECK_LT(1.0, factor);
4317   CHECK_LT(0, old_gen_size);
4318   uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
4319   limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
4320                          MinimumAllocationLimitGrowingStep());
4321   limit += new_space_->Capacity();
4322   uint64_t halfway_to_the_max =
4323       (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
4324   return static_cast<size_t>(Min(limit, halfway_to_the_max));
4325 }
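
// A worked example with assumed inputs (none of these are real defaults):
// old_gen_size = 100 MB, factor = 1.5, MinimumAllocationLimitGrowingStep() =
// 8 MB, new space capacity = 16 MB, max_old_generation_size_ = 700 MB:
//   limit              = Max(100 * 1.5, 100 + 8) + 16 = 150 + 16 = 166 MB
//   halfway_to_the_max = (100 + 700) / 2 = 400 MB
//   result             = Min(166, 400) = 166 MB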
4326 
4327 size_t Heap::MinimumAllocationLimitGrowingStep() {
4328   const size_t kRegularAllocationLimitGrowingStep = 8;
4329   const size_t kLowMemoryAllocationLimitGrowingStep = 2;
4330   size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
4331   return limit * (ShouldOptimizeForMemoryUsage()
4332                       ? kLowMemoryAllocationLimitGrowingStep
4333                       : kRegularAllocationLimitGrowingStep);
4334 }
4335 
4336 void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
4337                                            double mutator_speed) {
4338   double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
4339   double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
4340 
4341   if (FLAG_trace_gc_verbose) {
4342     isolate_->PrintWithTimestamp(
4343         "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
4344         "(gc=%.f, mutator=%.f)\n",
4345         factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
4346         mutator_speed);
4347   }
4348 
4349   if (memory_reducer_->ShouldGrowHeapSlowly() ||
4350       ShouldOptimizeForMemoryUsage()) {
4351     factor = Min(factor, kConservativeHeapGrowingFactor);
4352   }
4353 
4354   if (FLAG_stress_compaction || ShouldReduceMemory()) {
4355     factor = kMinHeapGrowingFactor;
4356   }
4357 
4358   if (FLAG_heap_growing_percent > 0) {
4359     factor = 1.0 + FLAG_heap_growing_percent / 100.0;
4360   }
4361 
4362   old_generation_allocation_limit_ =
4363       CalculateOldGenerationAllocationLimit(factor, old_gen_size);
4364 
4365   if (FLAG_trace_gc_verbose) {
4366     isolate_->PrintWithTimestamp(
4367         "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
4368         old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
4369   }
4370 }
4371 
4372 void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
4373                                               double gc_speed,
4374                                               double mutator_speed) {
4375   double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
4376   double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
4377   size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
4378   if (limit < old_generation_allocation_limit_) {
4379     if (FLAG_trace_gc_verbose) {
4380       isolate_->PrintWithTimestamp(
4381           "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
4382           " KB, "
4383           "new limit: %" PRIuS " KB (%.1f)\n",
4384           old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
4385           factor);
4386     }
4387     old_generation_allocation_limit_ = limit;
4388   }
4389 }
4390 
4391 bool Heap::ShouldOptimizeForLoadTime() {
4392   return isolate()->rail_mode() == PERFORMANCE_LOAD &&
4393          !AllocationLimitOvershotByLargeMargin() &&
4394          MonotonicallyIncreasingTimeInMs() <
4395              isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
4396 }
4397 
4398 // This predicate is called when an old generation space cannot allocate from
4399 // the free list and is about to add a new page. Returning false will cause a
4400 // major GC. It happens when the old generation allocation limit is reached and
4401 // - either we need to optimize for memory usage,
4402 // - or the incremental marking is not in progress and we cannot start it.
4403 bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
4404   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
4405   // We reached the old generation allocation limit.
4406 
4407   if (ShouldOptimizeForMemoryUsage()) return false;
4408 
4409   if (ShouldOptimizeForLoadTime()) return true;
4410 
4411   if (incremental_marking()->NeedsFinalization()) {
4412     return !AllocationLimitOvershotByLargeMargin();
4413   }
4414 
4415   if (incremental_marking()->IsStopped() &&
4416       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
4417     // We cannot start incremental marking.
4418     return false;
4419   }
4420   return true;
4421 }
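
// Summary of the decision above, derived from the code for orientation only:
//   - always_allocate() or old generation space available -> expand (true)
//   - optimizing for memory usage                          -> major GC (false)
//   - optimizing for load time                             -> expand (true)
//   - incremental marking needs finalization               -> expand unless the
//                                                             limit was overshot
//                                                             by a large margin
//   - marking stopped and cannot be started                -> major GC (false)
//   - otherwise                                            -> expand (true)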
4422 
4423 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
4424 // The kNoLimit means that either incremental marking is disabled or it is too
4425 // early to start incremental marking.
4426 // The kSoftLimit means that incremental marking should be started soon.
4427 // The kHardLimit means that incremental marking should be started immediately.
4428 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
4429   // Code using an AlwaysAllocateScope assumes that the GC state does not
4430   // change; that implies that marking steps must not be performed.
4431   if (!incremental_marking()->CanBeActivated() || always_allocate()) {
4432     // Incremental marking is disabled or it is too early to start.
4433     return IncrementalMarkingLimit::kNoLimit;
4434   }
4435   if (FLAG_stress_incremental_marking) {
4436     return IncrementalMarkingLimit::kHardLimit;
4437   }
4438   if (OldGenerationSizeOfObjects() <=
4439       IncrementalMarking::kActivationThreshold) {
4440     // The old generation is still too small; it is too early to start.
4441     return IncrementalMarkingLimit::kNoLimit;
4442   }
4443   if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
4444       HighMemoryPressure()) {
4445     // If there is high memory pressure or stress testing is enabled, then
4446     // start marking immediately.
4447     return IncrementalMarkingLimit::kHardLimit;
4448   }
4449 
4450   if (FLAG_stress_marking > 0) {
4451     double gained_since_last_gc =
4452         PromotedSinceLastGC() +
4453         (external_memory_ - external_memory_at_last_mark_compact_);
4454     double size_before_gc =
4455         OldGenerationObjectsAndPromotedExternalMemorySize() -
4456         gained_since_last_gc;
4457     double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
4458     if (bytes_to_limit > 0) {
4459       double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
4460 
4461       if (FLAG_trace_stress_marking) {
4462         isolate()->PrintWithTimestamp(
4463             "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
4464             current_percent);
4465       }
4466 
4467       if (FLAG_fuzzer_gc_analysis) {
4468         // Skips values >=100% since they already trigger marking.
4469         if (current_percent < 100.0) {
4470           max_marking_limit_reached_ =
4471               std::max(max_marking_limit_reached_, current_percent);
4472         }
4473       } else if (static_cast<int>(current_percent) >=
4474                  stress_marking_percentage_) {
4475         stress_marking_percentage_ = NextStressMarkingLimit();
4476         return IncrementalMarkingLimit::kHardLimit;
4477       }
4478     }
4479   }
4480 
4481   size_t old_generation_space_available = OldGenerationSpaceAvailable();
4482 
4483   if (old_generation_space_available > new_space_->Capacity()) {
4484     return IncrementalMarkingLimit::kNoLimit;
4485   }
4486   if (ShouldOptimizeForMemoryUsage()) {
4487     return IncrementalMarkingLimit::kHardLimit;
4488   }
4489   if (ShouldOptimizeForLoadTime()) {
4490     return IncrementalMarkingLimit::kNoLimit;
4491   }
4492   if (old_generation_space_available == 0) {
4493     return IncrementalMarkingLimit::kHardLimit;
4494   }
4495   return IncrementalMarkingLimit::kSoftLimit;
4496 }
4497 
4498 void Heap::EnableInlineAllocation() {
4499   if (!inline_allocation_disabled_) return;
4500   inline_allocation_disabled_ = false;
4501 
4502   // Update inline allocation limit for new space.
4503   new_space()->UpdateInlineAllocationLimit(0);
4504 }
4505 
4506 
4507 void Heap::DisableInlineAllocation() {
4508   if (inline_allocation_disabled_) return;
4509   inline_allocation_disabled_ = true;
4510 
4511   // Update inline allocation limit for new space.
4512   new_space()->UpdateInlineAllocationLimit(0);
4513 
4514   // Update inline allocation limit for old spaces.
4515   PagedSpaces spaces(this);
4516   CodeSpaceMemoryModificationScope modification_scope(this);
4517   for (PagedSpace* space = spaces.next(); space != nullptr;
4518        space = spaces.next()) {
4519     space->FreeLinearAllocationArea();
4520   }
4521 }
4522 
4523 HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
4524                                       int object_size) {
4525   // Code objects which should stay at a fixed address are allocated either
4526   // in the first page of code space or in large object space; during
4527   // snapshot creation the containing page is instead marked as immovable.
4528   DCHECK(heap_object);
4529   DCHECK(code_space_->Contains(heap_object));
4530   DCHECK_GE(object_size, 0);
4531   if (!Heap::IsImmovable(heap_object)) {
4532     if (isolate()->serializer_enabled() ||
4533         code_space_->FirstPage()->Contains(heap_object->address())) {
4534       MemoryChunk::FromAddress(heap_object->address())->MarkNeverEvacuate();
4535     } else {
4536       // Discard the first code allocation, which was on a page where it could
4537       // be moved.
4538       CreateFillerObjectAt(heap_object->address(), object_size,
4539                            ClearRecordedSlots::kNo);
4540       heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
4541       UnprotectAndRegisterMemoryChunk(heap_object);
4542       ZapCodeObject(heap_object->address(), object_size);
4543       OnAllocationEvent(heap_object, object_size);
4544     }
4545   }
4546   return heap_object;
4547 }
4548 
4549 HeapObject* Heap::AllocateRawWithLigthRetry(int size, AllocationSpace space,
4550                                             AllocationAlignment alignment) {
4551   HeapObject* result;
4552   AllocationResult alloc = AllocateRaw(size, space, alignment);
4553   if (alloc.To(&result)) {
4554     DCHECK(result != exception());
4555     return result;
4556   }
4557   // Two GCs before panicking. Allocation in new space will almost always succeed.
4558   for (int i = 0; i < 2; i++) {
4559     CollectGarbage(alloc.RetrySpace(),
4560                    GarbageCollectionReason::kAllocationFailure);
4561     alloc = AllocateRaw(size, space, alignment);
4562     if (alloc.To(&result)) {
4563       DCHECK(result != exception());
4564       return result;
4565     }
4566   }
4567   return nullptr;
4568 }
4569 
4570 HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
4571                                              AllocationAlignment alignment) {
4572   AllocationResult alloc;
4573   HeapObject* result = AllocateRawWithLigthRetry(size, space, alignment);
4574   if (result) return result;
4575 
4576   isolate()->counters()->gc_last_resort_from_handles()->Increment();
4577   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
4578   {
4579     AlwaysAllocateScope scope(isolate());
4580     alloc = AllocateRaw(size, space, alignment);
4581   }
4582   if (alloc.To(&result)) {
4583     DCHECK(result != exception());
4584     return result;
4585   }
4586   // TODO(1181417): Fix this.
4587   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
4588   return nullptr;
4589 }
4590 
4591 // TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
4592 // parameter and just do what's necessary.
4593 HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
4594   AllocationResult alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
4595   HeapObject* result;
4596   if (alloc.To(&result)) {
4597     DCHECK(result != exception());
4598     return result;
4599   }
4600   // Two GCs before panicking.
4601   for (int i = 0; i < 2; i++) {
4602     CollectGarbage(alloc.RetrySpace(),
4603                    GarbageCollectionReason::kAllocationFailure);
4604     alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
4605     if (alloc.To(&result)) {
4606       DCHECK(result != exception());
4607       return result;
4608     }
4609   }
4610   isolate()->counters()->gc_last_resort_from_handles()->Increment();
4611   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
4612   {
4613     AlwaysAllocateScope scope(isolate());
4614     alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
4615   }
4616   if (alloc.To(&result)) {
4617     DCHECK(result != exception());
4618     return result;
4619   }
4620   // TODO(1181417): Fix this.
4621   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
4622   return nullptr;
4623 }
4624 
4625 bool Heap::SetUp() {
4626 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
4627   allocation_timeout_ = NextAllocationTimeout();
4628 #endif
4629 
4630   // Initialize heap spaces and initial maps and objects. Whenever something
4631   // goes wrong, just return false. The caller should check the results and
4632   // call Heap::TearDown() to release allocated memory.
4633   //
4634   // If the heap is not yet configured (e.g. through the API), configure it.
4635   // Configuration is based on the flags new-space-size (really the semispace
4636   // size) and old-space-size if set or the initial values of semispace_size_
4637   // and old_generation_size_ otherwise.
4638   if (!configured_) {
4639     if (!ConfigureHeapDefault()) return false;
4640   }
4641 
4642   mmap_region_base_ =
4643       reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
4644       ~kMmapRegionMask;
4645 
4646   // Set up memory allocator.
4647   memory_allocator_ = new MemoryAllocator(isolate_);
4648   if (!memory_allocator_->SetUp(MaxReserved(), code_range_size_)) return false;
4649 
4650   store_buffer_ = new StoreBuffer(this);
4651 
4652   mark_compact_collector_ = new MarkCompactCollector(this);
4653   incremental_marking_ =
4654       new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
4655                              mark_compact_collector_->weak_objects());
4656 
4657   if (FLAG_concurrent_marking) {
4658     MarkCompactCollector::MarkingWorklist* marking_worklist =
4659         mark_compact_collector_->marking_worklist();
4660     concurrent_marking_ = new ConcurrentMarking(
4661         this, marking_worklist->shared(), marking_worklist->bailout(),
4662         marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
4663   } else {
4664     concurrent_marking_ =
4665         new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
4666   }
4667 
4668   for (int i = 0; i <= LAST_SPACE; i++) {
4669     space_[i] = nullptr;
4670   }
4671 
4672   space_[NEW_SPACE] = new_space_ = new NewSpace(this);
4673   if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
4674     return false;
4675   }
4676 
4677   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
4678   if (!old_space_->SetUp()) return false;
4679 
4680   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
4681   if (!code_space_->SetUp()) return false;
4682 
4683   space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
4684   if (!map_space_->SetUp()) return false;
4685 
4686   // The large object code space may contain code or data.  We set the memory
4687   // to be non-executable here for safety, but this means we need to enable it
4688   // explicitly when allocating large code objects.
4689   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
4690   if (!lo_space_->SetUp()) return false;
4691 
4692   space_[RO_SPACE] = read_only_space_ =
4693       new ReadOnlySpace(this, RO_SPACE, NOT_EXECUTABLE);
4694   if (!read_only_space_->SetUp()) return false;
4695 
4696   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
4697        i++) {
4698     deferred_counters_[i] = 0;
4699   }
4700 
4701   tracer_ = new GCTracer(this);
4702 #ifdef ENABLE_MINOR_MC
4703   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
4704 #else
4705   minor_mark_compact_collector_ = nullptr;
4706 #endif  // ENABLE_MINOR_MC
4707   array_buffer_collector_ = new ArrayBufferCollector(this);
4708   gc_idle_time_handler_ = new GCIdleTimeHandler();
4709   memory_reducer_ = new MemoryReducer(this);
4710   if (V8_UNLIKELY(FLAG_gc_stats)) {
4711     live_object_stats_ = new ObjectStats(this);
4712     dead_object_stats_ = new ObjectStats(this);
4713   }
4714   scavenge_job_ = new ScavengeJob();
4715   local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer();
4716 
4717   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
4718   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
4719 
4720   store_buffer()->SetUp();
4721 
4722   mark_compact_collector()->SetUp();
4723 #ifdef ENABLE_MINOR_MC
4724   if (minor_mark_compact_collector() != nullptr) {
4725     minor_mark_compact_collector()->SetUp();
4726   }
4727 #endif  // ENABLE_MINOR_MC
4728 
4729   idle_scavenge_observer_ = new IdleScavengeObserver(
4730       *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
4731   new_space()->AddAllocationObserver(idle_scavenge_observer_);
4732 
4733   SetGetExternallyAllocatedMemoryInBytesCallback(
4734       DefaultGetExternallyAllocatedMemoryInBytesCallback);
4735 
4736   if (FLAG_stress_marking > 0) {
4737     stress_marking_percentage_ = NextStressMarkingLimit();
4738     stress_marking_observer_ = new StressMarkingObserver(*this);
4739     AddAllocationObserversToAllSpaces(stress_marking_observer_,
4740                                       stress_marking_observer_);
4741   }
4742   if (FLAG_stress_scavenge > 0) {
4743     stress_scavenge_observer_ = new StressScavengeObserver(*this);
4744     new_space()->AddAllocationObserver(stress_scavenge_observer_);
4745   }
4746 
4747   write_protect_code_memory_ = FLAG_write_protect_code_memory;
4748 
4749   external_reference_table_.Init(isolate_);
4750 
4751   return true;
4752 }
4753 
4754 void Heap::InitializeHashSeed() {
4755   uint64_t new_hash_seed;
4756   if (FLAG_hash_seed == 0) {
4757     int64_t rnd = isolate()->random_number_generator()->NextInt64();
4758     new_hash_seed = static_cast<uint64_t>(rnd);
4759   } else {
4760     new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
4761   }
4762   hash_seed()->copy_in(0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
4763 }
4764 
4765 void Heap::SetStackLimits() {
4766   DCHECK_NOT_NULL(isolate_);
4767   DCHECK(isolate_ == isolate());
4768   // On 64 bit machines, pointers are generally out of range of Smis.  We write
4769   // something that looks like an out of range Smi to the GC.
4770 
4771   // Set up the special root array entries containing the stack limits.
4772   // These are actually addresses, but the tag makes the GC ignore it.
4773   roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
4774       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
4775   roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
4776       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
4777 }
4778 
4779 void Heap::ClearStackLimits() {
4780   roots_[kStackLimitRootIndex] = Smi::kZero;
4781   roots_[kRealStackLimitRootIndex] = Smi::kZero;
4782 }
4783 
4784 int Heap::NextAllocationTimeout(int current_timeout) {
4785   if (FLAG_random_gc_interval > 0) {
4786     // If the current timeout hasn't reached 0, the GC was caused by something
4787     // other than the --stress-atomic-gc flag and we don't update the timeout.
4788     if (current_timeout <= 0) {
4789       return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
4790     } else {
4791       return current_timeout;
4792     }
4793   }
4794   return FLAG_gc_interval;
4795 }
4796 
4797 void Heap::PrintAllocationsHash() {
4798   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
4799   PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
4800 }
4801 
4802 void Heap::PrintMaxMarkingLimitReached() {
4803   PrintF("\n### Maximum marking limit reached = %.02lf\n",
4804          max_marking_limit_reached_);
4805 }
4806 
4807 void Heap::PrintMaxNewSpaceSizeReached() {
4808   PrintF("\n### Maximum new space size reached = %.02lf\n",
4809          stress_scavenge_observer_->MaxNewSpaceSizeReached());
4810 }
4811 
4812 int Heap::NextStressMarkingLimit() {
4813   return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
4814 }
4815 
4816 void Heap::NotifyDeserializationComplete() {
4817   PagedSpaces spaces(this);
4818   for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
4819     if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
4820 #ifdef DEBUG
4821     // All pages right after bootstrapping must be marked as never-evacuate.
4822     for (Page* p : *s) {
4823       DCHECK(p->NeverEvacuate());
4824     }
4825 #endif  // DEBUG
4826   }
4827 
4828   read_only_space()->MarkAsReadOnly();
4829   deserialization_complete_ = true;
4830 }
4831 
4832 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
4833   DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
4834   local_embedder_heap_tracer()->SetRemoteTracer(tracer);
4835 }
4836 
4837 void Heap::TracePossibleWrapper(JSObject* js_object) {
4838   DCHECK(js_object->WasConstructedFromApiFunction());
4839   if (js_object->GetEmbedderFieldCount() >= 2 &&
4840       js_object->GetEmbedderField(0) &&
4841       js_object->GetEmbedderField(0) != undefined_value() &&
4842       js_object->GetEmbedderField(1) != undefined_value()) {
4843     DCHECK_EQ(0,
4844               reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
4845     local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
4846         reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
4847         reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
4848   }
4849 }
4850 
4851 void Heap::RegisterExternallyReferencedObject(Object** object) {
4852   // The embedder is not aware of whether numbers are materialized as heap
4853   // objects or just passed around as Smis.
4854   if (!(*object)->IsHeapObject()) return;
4855   HeapObject* heap_object = HeapObject::cast(*object);
4856   DCHECK(Contains(heap_object));
4857   if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
4858     incremental_marking()->WhiteToGreyAndPush(heap_object);
4859   } else {
4860     DCHECK(mark_compact_collector()->in_use());
4861     mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
4862   }
4863 }
4864 
4865 void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
4866 
4867 void Heap::TearDown() {
4868   DCHECK_EQ(gc_state_, TEAR_DOWN);
4869 #ifdef VERIFY_HEAP
4870   if (FLAG_verify_heap) {
4871     Verify();
4872   }
4873 #endif
4874 
4875   UpdateMaximumCommitted();
4876 
4877   if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
4878     PrintAllocationsHash();
4879   }
4880 
4881   if (FLAG_fuzzer_gc_analysis) {
4882     if (FLAG_stress_marking > 0) {
4883       PrintMaxMarkingLimitReached();
4884     }
4885     if (FLAG_stress_scavenge > 0) {
4886       PrintMaxNewSpaceSizeReached();
4887     }
4888   }
4889 
4890   new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
4891   delete idle_scavenge_observer_;
4892   idle_scavenge_observer_ = nullptr;
4893 
4894   if (FLAG_stress_marking > 0) {
4895     RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
4896                                            stress_marking_observer_);
4897     delete stress_marking_observer_;
4898     stress_marking_observer_ = nullptr;
4899   }
4900   if (FLAG_stress_scavenge > 0) {
4901     new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
4902     delete stress_scavenge_observer_;
4903     stress_scavenge_observer_ = nullptr;
4904   }
4905 
4906   if (mark_compact_collector_ != nullptr) {
4907     mark_compact_collector_->TearDown();
4908     delete mark_compact_collector_;
4909     mark_compact_collector_ = nullptr;
4910   }
4911 
4912 #ifdef ENABLE_MINOR_MC
4913   if (minor_mark_compact_collector_ != nullptr) {
4914     minor_mark_compact_collector_->TearDown();
4915     delete minor_mark_compact_collector_;
4916     minor_mark_compact_collector_ = nullptr;
4917   }
4918 #endif  // ENABLE_MINOR_MC
4919 
4920   if (array_buffer_collector_ != nullptr) {
4921     delete array_buffer_collector_;
4922     array_buffer_collector_ = nullptr;
4923   }
4924 
4925   delete incremental_marking_;
4926   incremental_marking_ = nullptr;
4927 
4928   delete concurrent_marking_;
4929   concurrent_marking_ = nullptr;
4930 
4931   delete gc_idle_time_handler_;
4932   gc_idle_time_handler_ = nullptr;
4933 
4934   if (memory_reducer_ != nullptr) {
4935     memory_reducer_->TearDown();
4936     delete memory_reducer_;
4937     memory_reducer_ = nullptr;
4938   }
4939 
4940   if (live_object_stats_ != nullptr) {
4941     delete live_object_stats_;
4942     live_object_stats_ = nullptr;
4943   }
4944 
4945   if (dead_object_stats_ != nullptr) {
4946     delete dead_object_stats_;
4947     dead_object_stats_ = nullptr;
4948   }
4949 
4950   delete local_embedder_heap_tracer_;
4951   local_embedder_heap_tracer_ = nullptr;
4952 
4953   delete scavenge_job_;
4954   scavenge_job_ = nullptr;
4955 
4956   isolate_->global_handles()->TearDown();
4957 
4958   external_string_table_.TearDown();
4959 
4960   // Tear down all ArrayBuffers before tearing down the heap since their
4961   // byte_length may be a HeapNumber which is required for freeing the backing
4962   // store.
4963   ArrayBufferTracker::TearDown(this);
4964 
4965   delete tracer_;
4966   tracer_ = nullptr;
4967 
4968   new_space_->TearDown();
4969   delete new_space_;
4970   new_space_ = nullptr;
4971 
4972   if (old_space_ != nullptr) {
4973     delete old_space_;
4974     old_space_ = nullptr;
4975   }
4976 
4977   if (code_space_ != nullptr) {
4978     delete code_space_;
4979     code_space_ = nullptr;
4980   }
4981 
4982   if (map_space_ != nullptr) {
4983     delete map_space_;
4984     map_space_ = nullptr;
4985   }
4986 
4987   if (lo_space_ != nullptr) {
4988     lo_space_->TearDown();
4989     delete lo_space_;
4990     lo_space_ = nullptr;
4991   }
4992 
4993   if (read_only_space_ != nullptr) {
4994     delete read_only_space_;
4995     read_only_space_ = nullptr;
4996   }
4997 
4998   store_buffer()->TearDown();
4999 
5000   memory_allocator()->TearDown();
5001 
5002   StrongRootsList* next = nullptr;
5003   for (StrongRootsList* list = strong_roots_list_; list; list = next) {
5004     next = list->next;
5005     delete list;
5006   }
5007   strong_roots_list_ = nullptr;
5008 
5009   delete store_buffer_;
5010   store_buffer_ = nullptr;
5011 
5012   delete memory_allocator_;
5013   memory_allocator_ = nullptr;
5014 }
5015 
5016 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
5017                                  GCType gc_type, void* data) {
5018   DCHECK_NOT_NULL(callback);
5019   DCHECK(gc_prologue_callbacks_.end() ==
5020          std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
5021                    GCCallbackTuple(callback, gc_type, data)));
5022   gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
5023 }
5024 
5025 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
5026                                     void* data) {
5027   DCHECK_NOT_NULL(callback);
5028   for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
5029     if (gc_prologue_callbacks_[i].callback == callback &&
5030         gc_prologue_callbacks_[i].data == data) {
5031       gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
5032       gc_prologue_callbacks_.pop_back();
5033       return;
5034     }
5035   }
5036   UNREACHABLE();
5037 }
5038 
5039 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
5040                                  GCType gc_type, void* data) {
5041   DCHECK_NOT_NULL(callback);
5042   DCHECK(gc_epilogue_callbacks_.end() ==
5043          std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
5044                    GCCallbackTuple(callback, gc_type, data)));
5045   gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
5046 }
5047 
5048 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
5049                                     void* data) {
5050   DCHECK_NOT_NULL(callback);
5051   for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
5052     if (gc_epilogue_callbacks_[i].callback == callback &&
5053         gc_epilogue_callbacks_[i].data == data) {
5054       gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
5055       gc_epilogue_callbacks_.pop_back();
5056       return;
5057     }
5058   }
5059   UNREACHABLE();
5060 }
5061 
5062 namespace {
5063 void CompactFixedArrayOfWeakCells(Object* object) {
5064   if (object->IsFixedArrayOfWeakCells()) {
5065     FixedArrayOfWeakCells* array = FixedArrayOfWeakCells::cast(object);
5066     array->Compact<FixedArrayOfWeakCells::NullCallback>();
5067   }
5068 }
5069 }  // anonymous namespace
5070 
5071 void Heap::CompactFixedArraysOfWeakCells() {
5072   // Find known FixedArrayOfWeakCells and compact them.
5073   HeapIterator iterator(this);
5074   for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
5075     if (o->IsPrototypeInfo()) {
5076       Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
5077       if (prototype_users->IsFixedArrayOfWeakCells()) {
5078         FixedArrayOfWeakCells* array =
5079             FixedArrayOfWeakCells::cast(prototype_users);
5080         array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
5081       }
5082     }
5083   }
5084   CompactFixedArrayOfWeakCells(noscript_shared_function_infos());
5085   CompactFixedArrayOfWeakCells(script_list());
5086   CompactFixedArrayOfWeakCells(weak_stack_trace_list());
5087 }
5088 
5089 void Heap::AddRetainedMap(Handle<Map> map) {
5090   if (map->is_in_retained_map_list()) {
5091     return;
5092   }
5093   Handle<WeakArrayList> array(retained_maps(), isolate());
5094   if (array->IsFull()) {
5095     CompactRetainedMaps(*array);
5096   }
5097   array =
5098       WeakArrayList::Add(array, map, Smi::FromInt(FLAG_retain_maps_for_n_gc));
5099   if (*array != retained_maps()) {
5100     set_retained_maps(*array);
5101   }
5102   map->set_is_in_retained_map_list(true);
5103 }
5104 
5105 void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
5106   DCHECK_EQ(retained_maps, this->retained_maps());
5107   int length = retained_maps->length();
5108   int new_length = 0;
5109   int new_number_of_disposed_maps = 0;
5110   // This loop compacts the array by removing cleared weak references.
5111   for (int i = 0; i < length; i += 2) {
5112     MaybeObject* maybe_object = retained_maps->Get(i);
5113     if (maybe_object->IsClearedWeakHeapObject()) {
5114       continue;
5115     }
5116 
5117     DCHECK(maybe_object->IsWeakHeapObject());
5118 
5119     MaybeObject* age = retained_maps->Get(i + 1);
5120     DCHECK(age->IsSmi());
5121     if (i != new_length) {
5122       retained_maps->Set(new_length, maybe_object);
5123       retained_maps->Set(new_length + 1, age);
5124     }
5125     if (i < number_of_disposed_maps_) {
5126       new_number_of_disposed_maps += 2;
5127     }
5128     new_length += 2;
5129   }
5130   number_of_disposed_maps_ = new_number_of_disposed_maps;
5131   HeapObject* undefined = undefined_value();
5132   for (int i = new_length; i < length; i++) {
5133     retained_maps->Set(i, HeapObjectReference::Strong(undefined));
5134   }
5135   if (new_length != length) retained_maps->set_length(new_length);
5136 }
5137 
5138 void Heap::FatalProcessOutOfMemory(const char* location) {
5139   v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
5140 }
5141 
5142 #ifdef DEBUG
5143 
5144 class PrintHandleVisitor : public RootVisitor {
5145  public:
5146   void VisitRootPointers(Root root, const char* description, Object** start,
5147                          Object** end) override {
5148     for (Object** p = start; p < end; p++)
5149       PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
5150              reinterpret_cast<void*>(*p));
5151   }
5152 };
5153 
5154 
5155 void Heap::PrintHandles() {
5156   PrintF("Handles:\n");
5157   PrintHandleVisitor v;
5158   isolate_->handle_scope_implementer()->Iterate(&v);
5159 }
5160 
5161 #endif
5162 
5163 class CheckHandleCountVisitor : public RootVisitor {
5164  public:
5165   CheckHandleCountVisitor() : handle_count_(0) {}
5166   ~CheckHandleCountVisitor() override {
5167     CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
5168   }
5169   void VisitRootPointers(Root root, const char* description, Object** start,
5170                          Object** end) override {
5171     handle_count_ += end - start;
5172   }
5173 
5174  private:
5175   ptrdiff_t handle_count_;
5176 };
5177 
5178 
5179 void Heap::CheckHandleCount() {
5180   CheckHandleCountVisitor v;
5181   isolate_->handle_scope_implementer()->Iterate(&v);
5182 }
5183 
5184 void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
5185   Address slot_addr = reinterpret_cast<Address>(slot);
5186   Page* page = Page::FromAddress(slot_addr);
5187   if (!page->InNewSpace()) {
5188     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5189     store_buffer()->DeleteEntry(slot_addr);
5190   }
5191 }
5192 
5193 bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
5194   if (InNewSpace(object)) {
5195     return false;
5196   }
5197   Address slot_addr = reinterpret_cast<Address>(slot);
5198   Page* page = Page::FromAddress(slot_addr);
5199   DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5200   store_buffer()->MoveAllEntriesToRememberedSet();
5201   return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
5202          RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
5203 }
5204 
5205 void Heap::ClearRecordedSlotRange(Address start, Address end) {
5206   Page* page = Page::FromAddress(start);
5207   if (!page->InNewSpace()) {
5208     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5209     store_buffer()->DeleteEntry(start, end);
5210   }
5211 }
5212 
5213 void Heap::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
5214                                    Object* value) {
5215   DCHECK(InNewSpace(value));
5216   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
5217   RelocInfo::Mode rmode = rinfo->rmode();
5218   Address addr = rinfo->pc();
5219   SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
5220   if (rinfo->IsInConstantPool()) {
5221     addr = rinfo->constant_pool_entry_address();
5222     if (RelocInfo::IsCodeTarget(rmode)) {
5223       slot_type = CODE_ENTRY_SLOT;
5224     } else {
5225       DCHECK(RelocInfo::IsEmbeddedObject(rmode));
5226       slot_type = OBJECT_SLOT;
5227     }
5228   }
5229   RememberedSet<OLD_TO_NEW>::InsertTyped(
5230       source_page, reinterpret_cast<Address>(host), slot_type, addr);
5231 }
5232 
5233 void Heap::RecordWritesIntoCode(Code* code) {
5234   for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
5235        !it.done(); it.next()) {
5236     RecordWriteIntoCode(code, it.rinfo(), it.rinfo()->target_object());
5237   }
5238 }
5239 
5240 
5241 PagedSpace* PagedSpaces::next() {
5242   switch (counter_++) {
5243     case RO_SPACE:
5244       // skip NEW_SPACE
5245       counter_++;
5246       return heap_->read_only_space();
5247     case OLD_SPACE:
5248       return heap_->old_space();
5249     case CODE_SPACE:
5250       return heap_->code_space();
5251     case MAP_SPACE:
5252       return heap_->map_space();
5253     default:
5254       return nullptr;
5255   }
5256 }
5257 
5258 SpaceIterator::SpaceIterator(Heap* heap)
5259     : heap_(heap), current_space_(FIRST_SPACE - 1) {}
5260 
5261 SpaceIterator::~SpaceIterator() {
5262 }
5263 
5264 
5265 bool SpaceIterator::has_next() {
5266   // Iterate until no more spaces.
5267   return current_space_ != LAST_SPACE;
5268 }
5269 
5270 Space* SpaceIterator::next() {
5271   DCHECK(has_next());
5272   return heap_->space(++current_space_);
5273 }
5274 
5275 
5276 class HeapObjectsFilter {
5277  public:
5278   virtual ~HeapObjectsFilter() {}
5279   virtual bool SkipObject(HeapObject* object) = 0;
5280 };
5281 
5282 
5283 class UnreachableObjectsFilter : public HeapObjectsFilter {
5284  public:
5285   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5286     MarkReachableObjects();
5287   }
5288 
5289   ~UnreachableObjectsFilter() {
5290     for (auto it : reachable_) {
5291       delete it.second;
5292       it.second = nullptr;
5293     }
5294   }
5295 
5296   bool SkipObject(HeapObject* object) {
5297     if (object->IsFiller()) return true;
5298     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
5299     if (reachable_.count(chunk) == 0) return true;
5300     return reachable_[chunk]->count(object) == 0;
5301   }
5302 
5303  private:
5304   bool MarkAsReachable(HeapObject* object) {
5305     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
5306     if (reachable_.count(chunk) == 0) {
5307       reachable_[chunk] = new std::unordered_set<HeapObject*>();
5308     }
5309     if (reachable_[chunk]->count(object)) return false;
5310     reachable_[chunk]->insert(object);
5311     return true;
5312   }
5313 
5314   class MarkingVisitor : public ObjectVisitor, public RootVisitor {
5315    public:
5316     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
5317         : filter_(filter) {}
5318 
5319     void VisitPointers(HeapObject* host, Object** start,
5320                        Object** end) override {
5321       MarkPointers(reinterpret_cast<MaybeObject**>(start),
5322                    reinterpret_cast<MaybeObject**>(end));
5323     }
5324 
5325     void VisitPointers(HeapObject* host, MaybeObject** start,
5326                        MaybeObject** end) final {
5327       MarkPointers(start, end);
5328     }
5329 
5330     void VisitRootPointers(Root root, const char* description, Object** start,
5331                            Object** end) override {
5332       MarkPointers(reinterpret_cast<MaybeObject**>(start),
5333                    reinterpret_cast<MaybeObject**>(end));
5334     }
5335 
TransitiveClosure()5336     void TransitiveClosure() {
5337       while (!marking_stack_.empty()) {
5338         HeapObject* obj = marking_stack_.back();
5339         marking_stack_.pop_back();
5340         obj->Iterate(this);
5341       }
5342     }
5343 
5344    private:
MarkPointers(MaybeObject ** start,MaybeObject ** end)5345     void MarkPointers(MaybeObject** start, MaybeObject** end) {
5346       // Treat weak references as strong.
5347       for (MaybeObject** p = start; p < end; p++) {
5348         HeapObject* heap_object;
5349         if ((*p)->ToStrongOrWeakHeapObject(&heap_object)) {
5350           if (filter_->MarkAsReachable(heap_object)) {
5351             marking_stack_.push_back(heap_object);
5352           }
5353         }
5354       }
5355     }
5356     UnreachableObjectsFilter* filter_;
5357     std::vector<HeapObject*> marking_stack_;
5358   };
5359 
5360   friend class MarkingVisitor;
5361 
MarkReachableObjects()5362   void MarkReachableObjects() {
5363     MarkingVisitor visitor(this);
5364     heap_->IterateRoots(&visitor, VISIT_ALL);
5365     visitor.TransitiveClosure();
5366   }
5367 
5368   Heap* heap_;
5369   DisallowHeapAllocation no_allocation_;
5370   std::unordered_map<MemoryChunk*, std::unordered_set<HeapObject*>*> reachable_;
5371 };
5372 
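// HeapIterator walks every object in every space of the heap. With
// kFilterUnreachable an UnreachableObjectsFilter is installed so that only
// objects reachable from the roots are returned.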
HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : no_heap_allocation_(),
      heap_(heap),
      filtering_(filtering),
      filter_(nullptr),
      space_iterator_(nullptr),
      object_iterator_(nullptr) {
  heap_->MakeHeapIterable();
  heap_->heap_iterator_start();
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter(heap_);
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next()->GetObjectIterator();
}


HeapIterator::~HeapIterator() {
  heap_->heap_iterator_end();
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    DCHECK_NULL(object_iterator_);
  }
#endif
  delete space_iterator_;
  delete filter_;
}


HeapObject* HeapIterator::next() {
  if (filter_ == nullptr) return NextObject();

  HeapObject* obj = NextObject();
  while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
  return obj;
}

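// Returns the next object from the current space's object iterator and
// advances to the next space whenever the current one is exhausted. Once all
// spaces are drained, the iterator is reset and nullptr is returned.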
HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_.get() == nullptr) return nullptr;

  if (HeapObject* obj = object_iterator_.get()->Next()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next()->GetObjectIterator();
      if (HeapObject* obj = object_iterator_.get()->Next()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_.reset(nullptr);
  return nullptr;
}


void Heap::UpdateTotalGCTime(double duration) {
  if (FLAG_trace_gc_verbose) {
    total_gc_time_ms_ += duration;
  }
}

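// Compacts new_space_strings_ in place: holes are dropped, ThinStrings are
// unwrapped to their actual external string, and strings that are no longer
// in new space are promoted to old_space_strings_.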
void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
  int last = 0;
  Isolate* isolate = heap_->isolate();
  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    Object* o = new_space_strings_[i];
    if (o->IsTheHole(isolate)) {
      continue;
    }
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    DCHECK(o->IsExternalString());
    if (heap_->InNewSpace(o)) {
      new_space_strings_[last++] = o;
    } else {
      old_space_strings_.push_back(o);
    }
  }
  new_space_strings_.resize(last);
}

void Heap::ExternalStringTable::CleanUpAll() {
  CleanUpNewSpaceStrings();
  int last = 0;
  Isolate* isolate = heap_->isolate();
  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    Object* o = old_space_strings_[i];
    if (o->IsTheHole(isolate)) {
      continue;
    }
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    DCHECK(o->IsExternalString());
    DCHECK(!heap_->InNewSpace(o));
    old_space_strings_[last++] = o;
  }
  old_space_strings_.resize(last);
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}

void Heap::ExternalStringTable::TearDown() {
  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    Object* o = new_space_strings_[i];
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    heap_->FinalizeExternalString(ExternalString::cast(o));
  }
  new_space_strings_.clear();
  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    Object* o = old_space_strings_[i];
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    heap_->FinalizeExternalString(ExternalString::cast(o));
  }
  old_space_strings_.clear();
}

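// Keeps the addresses of recently unmapped pages in a small ring buffer so
// that they remain findable in a crash dump.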
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    page ^= 0xC1EAD & (Page::kPageSize - 1);  // Cleared.
  } else {
    page ^= 0x1D1ED & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}

void Heap::RegisterStrongRoots(Object** start, Object** end) {
  StrongRootsList* list = new StrongRootsList();
  list->next = strong_roots_list_;
  list->start = start;
  list->end = end;
  strong_roots_list_ = list;
}

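// Walks the singly linked strong_roots_list_ and unlinks (and deletes) every
// entry whose start pointer matches.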
void Heap::UnregisterStrongRoots(Object** start) {
  StrongRootsList* prev = nullptr;
  StrongRootsList* list = strong_roots_list_;
  while (list != nullptr) {
    StrongRootsList* next = list->next;
    if (list->start == start) {
      if (prev) {
        prev->next = next;
      } else {
        strong_roots_list_ = next;
      }
      delete list;
    } else {
      prev = list;
    }
    list = next;
  }
}

bool Heap::IsDeserializeLazyHandler(Code* code) {
  return (code == deserialize_lazy_handler() ||
          code == deserialize_lazy_handler_wide() ||
          code == deserialize_lazy_handler_extra_wide());
}

void Heap::SetDeserializeLazyHandler(Code* code) {
  set_deserialize_lazy_handler(code);
}

void Heap::SetDeserializeLazyHandlerWide(Code* code) {
  set_deserialize_lazy_handler_wide(code);
}

void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
  set_deserialize_lazy_handler_extra_wide(code);
}

void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
  set_builtins_constants_table(cache);
}

size_t Heap::NumberOfTrackedHeapObjectTypes() {
  return ObjectStats::OBJECT_STATS_COUNT;
}


size_t Heap::ObjectCountAtLastGC(size_t index) {
  if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    return 0;
  return live_object_stats_->object_count_last_gc(index);
}


size_t Heap::ObjectSizeAtLastGC(size_t index) {
  if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    return 0;
  return live_object_stats_->object_size_last_gc(index);
}

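// Translates an object-stats index into a printable type name: the first
// group of cases covers the regular InstanceType values, the second group the
// virtual categories that ObjectStats tracks on top of them. The sub-type
// string is always empty here.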
bool Heap::GetObjectTypeName(size_t index, const char** object_type,
                             const char** object_sub_type) {
  if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;

  switch (static_cast<int>(index)) {
#define COMPARE_AND_RETURN_NAME(name) \
  case name:                          \
    *object_type = #name;             \
    *object_sub_type = "";            \
    return true;
    INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME

#define COMPARE_AND_RETURN_NAME(name)                       \
  case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
    *object_type = #name;                                   \
    *object_sub_type = "";                                  \
    return true;
    VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
  }
  return false;
}

size_t Heap::NumberOfNativeContexts() {
  int result = 0;
  Object* context = native_contexts_list();
  while (!context->IsUndefined(isolate())) {
    ++result;
    Context* native_context = Context::cast(context);
    context = native_context->next_context_link();
  }
  return result;
}

size_t Heap::NumberOfDetachedContexts() {
  // The detached_contexts() array has two entries per detached context.
  return detached_contexts()->length() / 2;
}

const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
      return "NEW_SPACE";
    case OLD_SPACE:
      return "OLD_SPACE";
    case CODE_SPACE:
      return "CODE_SPACE";
    case MAP_SPACE:
      return "MAP_SPACE";
    case LO_SPACE:
      return "LO_SPACE";
    case RO_SPACE:
      return "RO_SPACE";
    default:
      UNREACHABLE();
  }
  return nullptr;
}

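// VerifyPointersVisitor checks that every strong or weak heap reference
// points at an object contained in the heap with a valid map; anything else
// must be a Smi or a cleared weak reference.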
void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
                                          Object** end) {
  VerifyPointers(host, reinterpret_cast<MaybeObject**>(start),
                 reinterpret_cast<MaybeObject**>(end));
}

void VerifyPointersVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
                                          MaybeObject** end) {
  VerifyPointers(host, start, end);
}

void VerifyPointersVisitor::VisitRootPointers(Root root,
                                              const char* description,
                                              Object** start, Object** end) {
  VerifyPointers(nullptr, reinterpret_cast<MaybeObject**>(start),
                 reinterpret_cast<MaybeObject**>(end));
}

void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
                                           MaybeObject** start,
                                           MaybeObject** end) {
  for (MaybeObject** current = start; current < end; current++) {
    HeapObject* object;
    if ((*current)->ToStrongOrWeakHeapObject(&object)) {
      CHECK(object->GetIsolate()->heap()->Contains(object));
      CHECK(object->map()->IsMap());
    } else {
      CHECK((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject());
    }
  }
}

void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
                                          Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    CHECK((*current)->IsSmi());
  }
}

bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
  // Object migration is governed by the following rules:
  //
  // 1) Objects in new-space can be migrated to the old space
  //    that matches their target space or they stay in new-space.
  // 2) Objects in old-space stay in the same space when migrating.
  // 3) Fillers (two or more words) can migrate due to left-trimming of
  //    fixed arrays in new-space or old space.
  // 4) Fillers (one word) can never migrate, they are skipped by
  //    incremental marking explicitly to prevent invalid pattern.
  //
  // Since this function is used for debugging only, we do not place
  // asserts here, but check everything explicitly.
  if (obj->map() == one_pointer_filler_map()) return false;
  InstanceType type = obj->map()->instance_type();
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  AllocationSpace src = chunk->owner()->identity();
  switch (src) {
    case NEW_SPACE:
      return dst == NEW_SPACE || dst == OLD_SPACE;
    case OLD_SPACE:
      return dst == OLD_SPACE;
    case CODE_SPACE:
      return dst == CODE_SPACE && type == CODE_TYPE;
    case MAP_SPACE:
    case LO_SPACE:
    case RO_SPACE:
      return false;
  }
  UNREACHABLE();
}

void Heap::CreateObjectStats() {
  if (V8_LIKELY(FLAG_gc_stats == 0)) return;
  if (!live_object_stats_) {
    live_object_stats_ = new ObjectStats(this);
  }
  if (!dead_object_stats_) {
    dead_object_stats_ = new ObjectStats(this);
  }
}

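// Counts the observer's allocation budget down; once it reaches or drops
// below zero, Step() is invoked with the number of bytes allocated since the
// last step and the budget is re-armed via GetNextStepSize().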
void AllocationObserver::AllocationStep(int bytes_allocated,
                                        Address soon_object, size_t size) {
  DCHECK_GE(bytes_allocated, 0);
  bytes_to_next_step_ -= bytes_allocated;
  if (bytes_to_next_step_ <= 0) {
    Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
    step_size_ = GetNextStepSize();
    bytes_to_next_step_ = step_size_;
  }
  DCHECK_GE(bytes_to_next_step_, 0);
}

namespace {

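// During garbage collection the map slot of an object may hold a forwarding
// pointer, so the map is read through the forwarding address when present.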
Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
  MapWord map_word = object->map_word();
  return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
                                        : map_word.ToMap();
}

int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
  return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
}

Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
  Code* code = reinterpret_cast<Code*>(object);
  DCHECK_NOT_NULL(code);
  DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
  return code;
}

}  // namespace

bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
  Map* map = GcSafeMapOfCodeSpaceObject(code);
  DCHECK(map == code->GetHeap()->code_map());
#ifdef V8_EMBEDDED_BUILTINS
  if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
#endif
  Address start = code->address();
  Address end = code->address() + code->SizeFromMap(map);
  return start <= addr && addr < end;
}

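// Finds the Code object containing inner_pointer. Embedded builtins and
// large-object pages are checked first; otherwise the owning code-space page
// is scanned linearly, starting from the skip-list hint, until the object
// covering the address is found.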
Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
#ifdef V8_EMBEDDED_BUILTINS
  Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
  if (code != nullptr) return code;
#endif

  // Check if the inner pointer points into a large object chunk.
  LargePage* large_page = lo_space()->FindPage(inner_pointer);
  if (large_page != nullptr) {
    return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
  }

  DCHECK(code_space()->Contains(inner_pointer));

  // Iterate through the page until we reach the end or find an object starting
  // after the inner pointer.
  Page* page = Page::FromAddress(inner_pointer);
  DCHECK_EQ(page->owner(), code_space());
  mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);

  Address addr = page->skip_list()->StartFor(inner_pointer);
  Address top = code_space()->top();
  Address limit = code_space()->limit();

  while (true) {
    if (addr == top && addr != limit) {
      addr = limit;
      continue;
    }

    HeapObject* obj = HeapObject::FromAddress(addr);
    int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
    Address next_addr = addr + obj_size;
    if (next_addr > inner_pointer)
      return GcSafeCastToCode(this, obj, inner_pointer);
    addr = next_addr;
  }
}

}  // namespace internal
}  // namespace v8