1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_HEAP_H_
6 #define V8_HEAP_HEAP_H_
7 
8 #include <cmath>
9 #include <map>
10 #include <unordered_map>
11 #include <unordered_set>
12 #include <vector>
13 
14 // Clients of this interface shouldn't depend on lots of heap internals.
15 // Do not include anything from src/heap here!
16 #include "include/v8.h"
17 #include "src/accessors.h"
18 #include "src/allocation.h"
19 #include "src/assert-scope.h"
20 #include "src/base/atomic-utils.h"
21 #include "src/external-reference-table.h"
22 #include "src/globals.h"
23 #include "src/heap-symbols.h"
24 #include "src/objects.h"
25 #include "src/objects/fixed-array.h"
26 #include "src/objects/string-table.h"
27 #include "src/visitors.h"
28 
29 namespace v8 {
30 
31 namespace debug {
32 typedef void (*OutOfMemoryCallback)(void* data);
33 }  // namespace debug
34 
35 namespace internal {
36 
37 namespace heap {
38 class HeapTester;
39 class TestMemoryAllocatorScope;
40 }  // namespace heap
41 
42 class BoilerplateDescription;
43 class BytecodeArray;
44 class CodeDataContainer;
45 class DeoptimizationData;
46 class HandlerTable;
47 class IncrementalMarking;
48 class JSArrayBuffer;
49 
50 using v8::MemoryPressureLevel;
51 
52 // Defines all the roots in Heap.
53 #define STRONG_ROOT_LIST(V)                                                    \
54   /* Cluster the most popular ones in a few cache lines here at the top.    */ \
55   /* The first 32 entries are most often used in the startup snapshot and   */ \
56   /* can use a shorter representation in the serialization format.          */ \
57   V(Map, free_space_map, FreeSpaceMap)                                         \
58   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
59   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
60   V(Oddball, uninitialized_value, UninitializedValue)                          \
61   V(Oddball, undefined_value, UndefinedValue)                                  \
62   V(Oddball, the_hole_value, TheHoleValue)                                     \
63   V(Oddball, null_value, NullValue)                                            \
64   V(Oddball, true_value, TrueValue)                                            \
65   V(Oddball, false_value, FalseValue)                                          \
66   V(String, empty_string, empty_string)                                        \
67   V(Map, meta_map, MetaMap)                                                    \
68   V(Map, byte_array_map, ByteArrayMap)                                         \
69   V(Map, fixed_array_map, FixedArrayMap)                                       \
70   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
71   V(Map, hash_table_map, HashTableMap)                                         \
72   V(Map, symbol_map, SymbolMap)                                                \
73   V(Map, one_byte_string_map, OneByteStringMap)                                \
74   V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
75   V(Map, scope_info_map, ScopeInfoMap)                                         \
76   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
77   V(Map, code_map, CodeMap)                                                    \
78   V(Map, function_context_map, FunctionContextMap)                             \
79   V(Map, cell_map, CellMap)                                                    \
80   V(Map, weak_cell_map, WeakCellMap)                                           \
81   V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
82   V(Map, foreign_map, ForeignMap)                                              \
83   V(Map, heap_number_map, HeapNumberMap)                                       \
84   V(Map, transition_array_map, TransitionArrayMap)                             \
85   V(Map, feedback_vector_map, FeedbackVectorMap)                               \
86   V(ScopeInfo, empty_scope_info, EmptyScopeInfo)                               \
87   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
88   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
89   /* Entries beyond the first 32                                            */ \
90   /* The roots above this line should be boring from a GC point of view.    */ \
91   /* This means they are never in new space and never on a page that is     */ \
92   /* being compacted.*/                                                        \
93   /* Oddballs */                                                               \
94   V(Oddball, arguments_marker, ArgumentsMarker)                                \
95   V(Oddball, exception, Exception)                                             \
96   V(Oddball, termination_exception, TerminationException)                      \
97   V(Oddball, optimized_out, OptimizedOut)                                      \
98   V(Oddball, stale_register, StaleRegister)                                    \
99   /* Context maps */                                                           \
100   V(Map, native_context_map, NativeContextMap)                                 \
101   V(Map, module_context_map, ModuleContextMap)                                 \
102   V(Map, eval_context_map, EvalContextMap)                                     \
103   V(Map, script_context_map, ScriptContextMap)                                 \
104   V(Map, block_context_map, BlockContextMap)                                   \
105   V(Map, catch_context_map, CatchContextMap)                                   \
106   V(Map, with_context_map, WithContextMap)                                     \
107   V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
108   V(Map, script_context_table_map, ScriptContextTableMap)                      \
109   /* Maps */                                                                   \
110   V(Map, feedback_metadata_map, FeedbackMetadataArrayMap)                      \
111   V(Map, array_list_map, ArrayListMap)                                         \
112   V(Map, bigint_map, BigIntMap)                                                \
113   V(Map, boilerplate_description_map, BoilerplateDescriptionMap)               \
114   V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
115   V(Map, code_data_container_map, CodeDataContainerMap)                        \
116   V(Map, descriptor_array_map, DescriptorArrayMap)                             \
117   V(Map, external_map, ExternalMap)                                            \
118   V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
119   V(Map, global_dictionary_map, GlobalDictionaryMap)                           \
120   V(Map, many_closures_cell_map, ManyClosuresCellMap)                          \
121   V(Map, message_object_map, JSMessageObjectMap)                               \
122   V(Map, module_info_map, ModuleInfoMap)                                       \
123   V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
124   V(Map, name_dictionary_map, NameDictionaryMap)                               \
125   V(Map, no_closures_cell_map, NoClosuresCellMap)                              \
126   V(Map, number_dictionary_map, NumberDictionaryMap)                           \
127   V(Map, one_closure_cell_map, OneClosureCellMap)                              \
128   V(Map, ordered_hash_map_map, OrderedHashMapMap)                              \
129   V(Map, ordered_hash_set_map, OrderedHashSetMap)                              \
130   V(Map, property_array_map, PropertyArrayMap)                                 \
131   V(Map, side_effect_call_handler_info_map, SideEffectCallHandlerInfoMap)      \
132   V(Map, side_effect_free_call_handler_info_map,                               \
133     SideEffectFreeCallHandlerInfoMap)                                          \
134   V(Map, next_call_side_effect_free_call_handler_info_map,                     \
135     NextCallSideEffectFreeCallHandlerInfoMap)                                  \
136   V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap)              \
137   V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
138   V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap)                   \
139   V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap)                   \
140   V(Map, string_table_map, StringTableMap)                                     \
141   V(Map, weak_fixed_array_map, WeakFixedArrayMap)                              \
142   V(Map, weak_array_list_map, WeakArrayListMap)                                \
143   /* String maps */                                                            \
144   V(Map, native_source_string_map, NativeSourceStringMap)                      \
145   V(Map, string_map, StringMap)                                                \
146   V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
147   V(Map, cons_string_map, ConsStringMap)                                       \
148   V(Map, thin_one_byte_string_map, ThinOneByteStringMap)                       \
149   V(Map, thin_string_map, ThinStringMap)                                       \
150   V(Map, sliced_string_map, SlicedStringMap)                                   \
151   V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                   \
152   V(Map, external_string_map, ExternalStringMap)                               \
153   V(Map, external_string_with_one_byte_data_map,                               \
154     ExternalStringWithOneByteDataMap)                                          \
155   V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
156   V(Map, short_external_string_map, ShortExternalStringMap)                    \
157   V(Map, short_external_string_with_one_byte_data_map,                         \
158     ShortExternalStringWithOneByteDataMap)                                     \
159   V(Map, internalized_string_map, InternalizedStringMap)                       \
160   V(Map, external_internalized_string_map, ExternalInternalizedStringMap)      \
161   V(Map, external_internalized_string_with_one_byte_data_map,                  \
162     ExternalInternalizedStringWithOneByteDataMap)                              \
163   V(Map, external_one_byte_internalized_string_map,                            \
164     ExternalOneByteInternalizedStringMap)                                      \
165   V(Map, short_external_internalized_string_map,                               \
166     ShortExternalInternalizedStringMap)                                        \
167   V(Map, short_external_internalized_string_with_one_byte_data_map,            \
168     ShortExternalInternalizedStringWithOneByteDataMap)                         \
169   V(Map, short_external_one_byte_internalized_string_map,                      \
170     ShortExternalOneByteInternalizedStringMap)                                 \
171   V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
172   /* Array element maps */                                                     \
173   V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
174   V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
175   V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
176   V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                            \
177   V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                          \
178   V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                            \
179   V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
180   V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
181   V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
182   V(Map, fixed_biguint64_array_map, FixedBigUint64ArrayMap)                    \
183   V(Map, fixed_bigint64_array_map, FixedBigInt64ArrayMap)                      \
184   /* Oddball maps */                                                           \
185   V(Map, undefined_map, UndefinedMap)                                          \
186   V(Map, the_hole_map, TheHoleMap)                                             \
187   V(Map, null_map, NullMap)                                                    \
188   V(Map, boolean_map, BooleanMap)                                              \
189   V(Map, uninitialized_map, UninitializedMap)                                  \
190   V(Map, arguments_marker_map, ArgumentsMarkerMap)                             \
191   V(Map, exception_map, ExceptionMap)                                          \
192   V(Map, termination_exception_map, TerminationExceptionMap)                   \
193   V(Map, optimized_out_map, OptimizedOutMap)                                   \
194   V(Map, stale_register_map, StaleRegisterMap)                                 \
195   V(Map, self_reference_marker_map, SelfReferenceMarkerMap)                    \
196   /* Canonical empty values */                                                 \
197   V(EnumCache, empty_enum_cache, EmptyEnumCache)                               \
198   V(PropertyArray, empty_property_array, EmptyPropertyArray)                   \
199   V(ByteArray, empty_byte_array, EmptyByteArray)                               \
200   V(BoilerplateDescription, empty_boilerplate_description,                     \
201     EmptyBoilerplateDescription)                                               \
202   V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
203   V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
204   V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
205   V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)        \
206   V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)      \
207   V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)        \
208   V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)    \
209   V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
210   V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
211     EmptyFixedUint8ClampedArray)                                               \
212   V(FixedTypedArrayBase, empty_fixed_biguint64_array,                          \
213     EmptyFixedBigUint64Array)                                                  \
214   V(FixedTypedArrayBase, empty_fixed_bigint64_array, EmptyFixedBigInt64Array)  \
215   V(Script, empty_script, EmptyScript)                                         \
216   V(FeedbackCell, many_closures_cell, ManyClosuresCell)                        \
217   V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
218   V(NumberDictionary, empty_slow_element_dictionary,                           \
219     EmptySlowElementDictionary)                                                \
220   V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap)                   \
221   V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet)                   \
222   V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata)          \
223   V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
224   V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
225   V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell)       \
226   V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo)               \
227   V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray)               \
228   V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList)                  \
229   /* Protectors */                                                             \
230   V(Cell, array_constructor_protector, ArrayConstructorProtector)              \
231   V(PropertyCell, no_elements_protector, NoElementsProtector)                  \
232   V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
233   V(PropertyCell, array_species_protector, ArraySpeciesProtector)              \
234   V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector)   \
235   V(PropertyCell, promise_species_protector, PromiseSpeciesProtector)          \
236   V(Cell, string_length_protector, StringLengthProtector)                      \
237   V(PropertyCell, array_iterator_protector, ArrayIteratorProtector)            \
238   V(PropertyCell, array_buffer_neutering_protector,                            \
239     ArrayBufferNeuteringProtector)                                             \
240   V(PropertyCell, promise_hook_protector, PromiseHookProtector)                \
241   V(Cell, promise_resolve_protector, PromiseResolveProtector)                  \
242   V(PropertyCell, promise_then_protector, PromiseThenProtector)                \
243   /* Special numbers */                                                        \
244   V(HeapNumber, nan_value, NanValue)                                           \
245   V(HeapNumber, hole_nan_value, HoleNanValue)                                  \
246   V(HeapNumber, infinity_value, InfinityValue)                                 \
247   V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
248   V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
249   /* Marker for self-references during code-generation */                      \
250   V(HeapObject, self_reference_marker, SelfReferenceMarker)                    \
251   /* Caches */                                                                 \
252   V(FixedArray, number_string_cache, NumberStringCache)                        \
253   V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
254   V(FixedArray, string_split_cache, StringSplitCache)                          \
255   V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
256   /* Lists and dictionaries */                                                 \
257   V(NameDictionary, empty_property_dictionary, EmptyPropertyDictionary)        \
258   V(NameDictionary, public_symbol_table, PublicSymbolTable)                    \
259   V(NameDictionary, api_symbol_table, ApiSymbolTable)                          \
260   V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable)           \
261   V(Object, script_list, ScriptList)                                           \
262   V(SimpleNumberDictionary, code_stubs, CodeStubs)                             \
263   V(FixedArray, materialized_objects, MaterializedObjects)                     \
264   V(FixedArray, microtask_queue, MicrotaskQueue)                               \
265   V(FixedArray, detached_contexts, DetachedContexts)                           \
266   V(HeapObject, retaining_path_targets, RetainingPathTargets)                  \
267   V(WeakArrayList, retained_maps, RetainedMaps)                                \
268   /* Indirection lists for isolate-independent builtins */                     \
269   V(FixedArray, builtins_constants_table, BuiltinsConstantsTable)              \
270   /* Feedback vectors that we need for code coverage or type profile */        \
271   V(Object, feedback_vectors_for_profiling_tools,                              \
272     FeedbackVectorsForProfilingTools)                                          \
273   V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
274   V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
275   V(FixedArray, serialized_objects, SerializedObjects)                         \
276   V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes)     \
277   V(TemplateList, message_listeners, MessageListeners)                         \
278   /* DeserializeLazy handlers for lazy bytecode deserialization */             \
279   V(Object, deserialize_lazy_handler, DeserializeLazyHandler)                  \
280   V(Object, deserialize_lazy_handler_wide, DeserializeLazyHandlerWide)         \
281   V(Object, deserialize_lazy_handler_extra_wide,                               \
282     DeserializeLazyHandlerExtraWide)                                           \
283   /* Hash seed */                                                              \
284   V(ByteArray, hash_seed, HashSeed)                                            \
285   /* JS Entries */                                                             \
286   V(Code, js_entry_code, JsEntryCode)                                          \
287   V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
288   V(Code, js_run_microtasks_entry_code, JsRunMicrotasksEntryCode)
289 
290 // Entries in this list are limited to Smis and are not visited during GC.
291 #define SMI_ROOT_LIST(V)                                                       \
292   V(Smi, stack_limit, StackLimit)                                              \
293   V(Smi, real_stack_limit, RealStackLimit)                                     \
294   V(Smi, last_script_id, LastScriptId)                                         \
295   V(Smi, last_debugging_id, LastDebuggingId)                                   \
296   /* To distinguish the function templates, so that we can find them in the */ \
297   /* function cache of the native context. */                                  \
298   V(Smi, next_template_serial_number, NextTemplateSerialNumber)                \
299   V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
300   V(Smi, construct_stub_create_deopt_pc_offset,                                \
301     ConstructStubCreateDeoptPCOffset)                                          \
302   V(Smi, construct_stub_invoke_deopt_pc_offset,                                \
303     ConstructStubInvokeDeoptPCOffset)                                          \
304   V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
305 
306 #define ROOT_LIST(V)  \
307   STRONG_ROOT_LIST(V) \
308   SMI_ROOT_LIST(V)    \
309   V(StringTable, string_table, StringTable)
310 
311 
312 // Heap roots that are known to be immortal immovable, for which we can safely
313 // skip write barriers. This list is not complete and has omissions.
314 #define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
315   V(ArgumentsMarker)                    \
316   V(ArgumentsMarkerMap)                 \
317   V(ArrayBufferNeuteringProtector)      \
318   V(ArrayIteratorProtector)             \
319   V(BigIntMap)                          \
320   V(BlockContextMap)                    \
321   V(BoilerplateDescriptionMap)          \
322   V(BooleanMap)                         \
323   V(ByteArrayMap)                       \
324   V(BytecodeArrayMap)                   \
325   V(CatchContextMap)                    \
326   V(CellMap)                            \
327   V(CodeMap)                            \
328   V(DebugEvaluateContextMap)            \
329   V(DescriptorArrayMap)                 \
330   V(EmptyByteArray)                     \
331   V(EmptyDescriptorArray)               \
332   V(EmptyFixedArray)                    \
333   V(EmptyFixedFloat32Array)             \
334   V(EmptyFixedFloat64Array)             \
335   V(EmptyFixedInt16Array)               \
336   V(EmptyFixedInt32Array)               \
337   V(EmptyFixedInt8Array)                \
338   V(EmptyFixedUint16Array)              \
339   V(EmptyFixedUint32Array)              \
340   V(EmptyFixedUint8Array)               \
341   V(EmptyFixedUint8ClampedArray)        \
342   V(EmptyOrderedHashMap)                \
343   V(EmptyOrderedHashSet)                \
344   V(EmptyPropertyCell)                  \
345   V(EmptyScopeInfo)                     \
346   V(EmptyScript)                        \
347   V(EmptySloppyArgumentsElements)       \
348   V(EmptySlowElementDictionary)         \
349   V(EmptyWeakCell)                      \
350   V(EvalContextMap)                     \
351   V(Exception)                          \
352   V(FalseValue)                         \
353   V(FixedArrayMap)                      \
354   V(FixedCOWArrayMap)                   \
355   V(FixedDoubleArrayMap)                \
356   V(ForeignMap)                         \
357   V(FreeSpaceMap)                       \
358   V(FunctionContextMap)                 \
359   V(GlobalDictionaryMap)                \
360   V(GlobalPropertyCellMap)              \
361   V(HashTableMap)                       \
362   V(HeapNumberMap)                      \
363   V(HoleNanValue)                       \
364   V(InfinityValue)                      \
365   V(IsConcatSpreadableProtector)        \
366   V(JSMessageObjectMap)                 \
367   V(JsConstructEntryCode)               \
368   V(JsEntryCode)                        \
369   V(ManyClosuresCell)                   \
370   V(ManyClosuresCellMap)                \
371   V(MetaMap)                            \
372   V(MinusInfinityValue)                 \
373   V(MinusZeroValue)                     \
374   V(ModuleContextMap)                   \
375   V(ModuleInfoMap)                      \
376   V(MutableHeapNumberMap)               \
377   V(NameDictionaryMap)                  \
378   V(NanValue)                           \
379   V(NativeContextMap)                   \
380   V(NoClosuresCellMap)                  \
381   V(NoElementsProtector)                \
382   V(NullMap)                            \
383   V(NullValue)                          \
384   V(NumberDictionaryMap)                \
385   V(OneClosureCellMap)                  \
386   V(OnePointerFillerMap)                \
387   V(OptimizedOut)                       \
388   V(OrderedHashMapMap)                  \
389   V(OrderedHashSetMap)                  \
390   V(PropertyArrayMap)                   \
391   V(ScopeInfoMap)                       \
392   V(ScriptContextMap)                   \
393   V(ScriptContextTableMap)              \
394   V(SelfReferenceMarker)                \
395   V(SharedFunctionInfoMap)              \
396   V(SimpleNumberDictionaryMap)          \
397   V(SloppyArgumentsElementsMap)         \
398   V(SmallOrderedHashMapMap)             \
399   V(SmallOrderedHashSetMap)             \
400   V(ArraySpeciesProtector)              \
401   V(TypedArraySpeciesProtector)         \
402   V(PromiseSpeciesProtector)            \
403   V(StaleRegister)                      \
404   V(StringLengthProtector)              \
405   V(StringTableMap)                     \
406   V(SymbolMap)                          \
407   V(TerminationException)               \
408   V(TheHoleMap)                         \
409   V(TheHoleValue)                       \
410   V(TransitionArrayMap)                 \
411   V(TrueValue)                          \
412   V(TwoPointerFillerMap)                \
413   V(UndefinedMap)                       \
414   V(UndefinedValue)                     \
415   V(UninitializedMap)                   \
416   V(UninitializedValue)                 \
417   V(WeakCellMap)                        \
418   V(WeakFixedArrayMap)                  \
419   V(WeakArrayListMap)                   \
420   V(WithContextMap)                     \
421   V(empty_string)                       \
422   PRIVATE_SYMBOL_LIST(V)
423 
424 #define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
425   do {                                                                 \
426     heap->RecordFixedArrayElements(array, start, length);              \
427     heap->incremental_marking()->RecordWrites(array);                  \
428   } while (false)
429 
430 class AllocationObserver;
431 class ArrayBufferCollector;
432 class ArrayBufferTracker;
433 class ConcurrentMarking;
434 class GCIdleTimeAction;
435 class GCIdleTimeHandler;
436 class GCIdleTimeHeapState;
437 class GCTracer;
438 class HeapObjectAllocationTracker;
439 class HeapObjectsFilter;
440 class HeapStats;
441 class HistogramTimer;
442 class Isolate;
443 class LocalEmbedderHeapTracer;
444 class MemoryAllocator;
445 class MemoryReducer;
446 class MinorMarkCompactCollector;
447 class ObjectIterator;
448 class ObjectStats;
449 class Page;
450 class PagedSpace;
451 class RootVisitor;
452 class ScavengeJob;
453 class Scavenger;
454 class Space;
455 class StoreBuffer;
456 class StressScavengeObserver;
457 class TracePossibleWrapperReporter;
458 class WeakObjectRetainer;
459 
460 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
461 
462 enum ArrayStorageAllocationMode {
463   DONT_INITIALIZE_ARRAY_ELEMENTS,
464   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
465 };
466 
467 enum class ClearRecordedSlots { kYes, kNo };
468 
469 enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
470 
471 enum class FixedArrayVisitationMode { kRegular, kIncremental };
472 
473 enum class TraceRetainingPathMode { kEnabled, kDisabled };
474 
475 enum class RetainingPathOption { kDefault, kTrackEphemeralPath };
476 
477 enum class GarbageCollectionReason {
478   kUnknown = 0,
479   kAllocationFailure = 1,
480   kAllocationLimit = 2,
481   kContextDisposal = 3,
482   kCountersExtension = 4,
483   kDebugger = 5,
484   kDeserializer = 6,
485   kExternalMemoryPressure = 7,
486   kFinalizeMarkingViaStackGuard = 8,
487   kFinalizeMarkingViaTask = 9,
488   kFullHashtable = 10,
489   kHeapProfiler = 11,
490   kIdleTask = 12,
491   kLastResort = 13,
492   kLowMemoryNotification = 14,
493   kMakeHeapIterable = 15,
494   kMemoryPressure = 16,
495   kMemoryReducer = 17,
496   kRuntime = 18,
497   kSamplingProfiler = 19,
498   kSnapshotCreator = 20,
499   kTesting = 21
500   // If you add new items here, then update the incremental_marking_reason,
501   // mark_compact_reason, and scavenge_reason counters in counters.h.
502   // Also update src/tools/metrics/histograms/histograms.xml in chromium.
503 };
504 
505 enum class YoungGenerationHandling {
506   kRegularScavenge = 0,
507   kFastPromotionDuringScavenge = 1,
508   // Histogram::InspectConstructionArguments in chromium requires us to have at
509   // least three buckets.
510   kUnusedBucket = 2,
511   // If you add new items here, then update the young_generation_handling in
512   // counters.h.
513   // Also update src/tools/metrics/histograms/histograms.xml in chromium.
514 };
515 
516 class AllocationResult {
517  public:
518   static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
519     return AllocationResult(space);
520   }
521 
522   // Implicit constructor from Object*.
AllocationResult(Object * object)523   AllocationResult(Object* object)  // NOLINT
524       : object_(object) {
525     // AllocationResults can't return Smis, which are used to represent
526     // failure and the space to retry in.
527     CHECK(!object->IsSmi());
528   }
529 
AllocationResult()530   AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
531 
IsRetry()532   inline bool IsRetry() { return object_->IsSmi(); }
533   inline HeapObject* ToObjectChecked();
534   inline AllocationSpace RetrySpace();
535 
536   template <typename T>
To(T ** obj)537   bool To(T** obj) {
538     if (IsRetry()) return false;
539     *obj = T::cast(object_);
540     return true;
541   }
542 
543  private:
AllocationResult(AllocationSpace space)544   explicit AllocationResult(AllocationSpace space)
545       : object_(Smi::FromInt(static_cast<int>(space))) {}
546 
547   Object* object_;
548 };
549 
550 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
551 
552 #ifdef DEBUG
553 struct CommentStatistic {
554   const char* comment;
555   int size;
556   int count;
ClearCommentStatistic557   void Clear() {
558     comment = nullptr;
559     size = 0;
560     count = 0;
561   }
562   // Must be small, since an iteration is used for lookup.
563   static const int kMaxComments = 64;
564 };
565 #endif
566 
567 class Heap {
568  public:
569   // Declare all the root indices.  This defines the root list order.
570   // clang-format off
571   enum RootListIndex {
572 #define DECL(type, name, camel_name) k##camel_name##RootIndex,
573     STRONG_ROOT_LIST(DECL)
574 #undef DECL
575 
576 #define DECL(name, str) k##name##RootIndex,
577     INTERNALIZED_STRING_LIST(DECL)
578 #undef DECL
579 
580 #define DECL(name) k##name##RootIndex,
581     PRIVATE_SYMBOL_LIST(DECL)
582 #undef DECL
583 
584 #define DECL(name, description) k##name##RootIndex,
585     PUBLIC_SYMBOL_LIST(DECL)
586     WELL_KNOWN_SYMBOL_LIST(DECL)
587 #undef DECL
588 
589 #define DECL(accessor_name, AccessorName) k##AccessorName##AccessorRootIndex,
590     ACCESSOR_INFO_LIST(DECL)
591 #undef DECL
592 
593 #define DECL(NAME, Name, name) k##Name##MapRootIndex,
594     STRUCT_LIST(DECL)
595 #undef DECL
596 
597 #define DECL(NAME, Name, Size, name) k##Name##Size##MapRootIndex,
598     DATA_HANDLER_LIST(DECL)
599 #undef DECL
600 
601     kStringTableRootIndex,
602 
603 #define DECL(type, name, camel_name) k##camel_name##RootIndex,
604     SMI_ROOT_LIST(DECL)
605 #undef DECL
606 
607     kRootListLength,
608     kStrongRootListLength = kStringTableRootIndex,
609     kSmiRootsStart = kStringTableRootIndex + 1
610   };
611   // clang-format on
612 
613   enum FindMementoMode { kForRuntime, kForGC };
614 
615   enum HeapState {
616     NOT_IN_GC,
617     SCAVENGE,
618     MARK_COMPACT,
619     MINOR_MARK_COMPACT,
620     TEAR_DOWN
621   };
622 
623   using PretenuringFeedbackMap = std::unordered_map<AllocationSite*, size_t>;
624 
625   // Taking this mutex prevents the GC from entering a phase that relocates
626   // object references.
relocation_mutex()627   base::Mutex* relocation_mutex() { return &relocation_mutex_; }
628 
629   // Support for partial snapshots.  After calling this we have a linear
630   // space to write objects in each space.
631   struct Chunk {
632     uint32_t size;
633     Address start;
634     Address end;
635   };
636   typedef std::vector<Chunk> Reservation;
637 
638   static const int kInitalOldGenerationLimitFactor = 2;
639 
640 #if V8_OS_ANDROID
641   // Don't apply pointer multiplier on Android since it has no swap space and
642   // should instead adapt it's heap size based on available physical memory.
643   static const int kPointerMultiplier = 1;
644 #else
645   static const int kPointerMultiplier = i::kPointerSize / 4;
646 #endif
647 
648   // Semi-space size needs to be a multiple of page size.
649   static const size_t kMinSemiSpaceSizeInKB =
650       1 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
651   static const size_t kMaxSemiSpaceSizeInKB =
652       16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
653 
654   // The old space size has to be a multiple of Page::kPageSize.
655   // Sizes are in MB.
656   static const size_t kMinOldGenerationSize = 128 * kPointerMultiplier;
657   static const size_t kMaxOldGenerationSize = 1024 * kPointerMultiplier;
658 
659   static const int kTraceRingBufferSize = 512;
660   static const int kStacktraceBufferSize = 512;
661 
662   V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
663   V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
664   static const double kMaxHeapGrowingFactorMemoryConstrained;
665   static const double kMaxHeapGrowingFactorIdle;
666   static const double kConservativeHeapGrowingFactor;
667   static const double kTargetMutatorUtilization;
668 
669   static const int kNoGCFlags = 0;
670   static const int kReduceMemoryFootprintMask = 1;
671   static const int kAbortIncrementalMarkingMask = 2;
672   static const int kFinalizeIncrementalMarkingMask = 4;
673 
674   // Making the heap iterable requires us to abort incremental marking.
675   static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
676 
677   // The roots that have an index less than this are always in old space.
678   static const int kOldSpaceRoots = 0x20;
679 
680   // The minimum size of a HeapObject on the heap.
681   static const int kMinObjectSizeInWords = 2;
682 
683   static const int kMinPromotedPercentForFastPromotionMode = 90;
684 
685   STATIC_ASSERT(kUndefinedValueRootIndex ==
686                 Internals::kUndefinedValueRootIndex);
687   STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
688   STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
689   STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
690   STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
691   STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
692 
693   // Calculates the maximum amount of filler that could be required by the
694   // given alignment.
695   static int GetMaximumFillToAlign(AllocationAlignment alignment);
696   // Calculates the actual amount of filler required for a given address at the
697   // given alignment.
698   static int GetFillToAlign(Address address, AllocationAlignment alignment);
699 
700   void FatalProcessOutOfMemory(const char* location);
701 
702   V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(int root_index);
703 
704   // Checks whether the space is valid.
705   static bool IsValidAllocationSpace(AllocationSpace space);
706 
707   // Generated code can embed direct references to non-writable roots if
708   // they are in new space.
709   static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
710 
711   // Zapping is needed for verify heap, and always done in debug builds.
ShouldZapGarbage()712   static inline bool ShouldZapGarbage() {
713 #ifdef DEBUG
714     return true;
715 #else
716 #ifdef VERIFY_HEAP
717     return FLAG_verify_heap;
718 #else
719     return false;
720 #endif
721 #endif
722   }
723 
IsYoungGenerationCollector(GarbageCollector collector)724   static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
725     return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
726   }
727 
YoungGenerationCollector()728   static inline GarbageCollector YoungGenerationCollector() {
729 #if ENABLE_MINOR_MC
730     return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
731 #else
732     return SCAVENGER;
733 #endif  // ENABLE_MINOR_MC
734   }
735 
CollectorName(GarbageCollector collector)736   static inline const char* CollectorName(GarbageCollector collector) {
737     switch (collector) {
738       case SCAVENGER:
739         return "Scavenger";
740       case MARK_COMPACTOR:
741         return "Mark-Compact";
742       case MINOR_MARK_COMPACTOR:
743         return "Minor Mark-Compact";
744     }
745     return "Unknown collector";
746   }
747 
748   V8_EXPORT_PRIVATE static double MaxHeapGrowingFactor(
749       size_t max_old_generation_size);
750   V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
751                                                     double mutator_speed,
752                                                     double max_factor);
753 
754   // Copy block of memory from src to dst. Size of block should be aligned
755   // by pointer size.
756   static inline void CopyBlock(Address dst, Address src, int byte_size);
757 
758   // Notifies the heap that is ok to start marking or other activities that
759   // should not happen during deserialization.
760   void NotifyDeserializationComplete();
761 
762   inline Address* NewSpaceAllocationTopAddress();
763   inline Address* NewSpaceAllocationLimitAddress();
764   inline Address* OldSpaceAllocationTopAddress();
765   inline Address* OldSpaceAllocationLimitAddress();
766 
767   // FreeSpace objects have a null map after deserialization. Update the map.
768   void RepairFreeListsAfterDeserialization();
769 
770   // Move len elements within a given array from src_index index to dst_index
771   // index.
772   void MoveElements(FixedArray* array, int dst_index, int src_index, int len,
773                     WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
774 
775   // Initialize a filler object to keep the ability to iterate over the heap
776   // when introducing gaps within pages. If slots could have been recorded in
777   // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
778   // pass ClearRecordedSlots::kNo. If the memory after the object header of
779   // the filler should be cleared, pass in kClearFreedMemory. The default is
780   // kDontClearFreedMemory.
781   V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(
782       Address addr, int size, ClearRecordedSlots clear_slots_mode,
783       ClearFreedMemoryMode clear_memory_mode =
784           ClearFreedMemoryMode::kDontClearFreedMemory);
785 
786   template <typename T>
787   void CreateFillerForArray(T* object, int elements_to_trim, int bytes_to_trim);
788 
789   bool CanMoveObjectStart(HeapObject* object);
790 
791   static bool IsImmovable(HeapObject* object);
792 
793   // Trim the given array from the left. Note that this relocates the object
794   // start and hence is only valid if there is only a single reference to it.
795   FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
796 
797   // Trim the given array from the right.
798   void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
799   void RightTrimWeakFixedArray(WeakFixedArray* obj, int elements_to_trim);
800 
801   // Converts the given boolean condition to JavaScript boolean value.
802   inline Oddball* ToBoolean(bool condition);
803 
804   // Notify the heap that a context has been disposed.
805   int NotifyContextDisposed(bool dependant_context);
806 
set_native_contexts_list(Object * object)807   void set_native_contexts_list(Object* object) {
808     native_contexts_list_ = object;
809   }
native_contexts_list()810   Object* native_contexts_list() const { return native_contexts_list_; }
811 
set_allocation_sites_list(Object * object)812   void set_allocation_sites_list(Object* object) {
813     allocation_sites_list_ = object;
814   }
allocation_sites_list()815   Object* allocation_sites_list() { return allocation_sites_list_; }
816 
817   // Used in CreateAllocationSiteStub and the (de)serializer.
allocation_sites_list_address()818   Object** allocation_sites_list_address() { return &allocation_sites_list_; }
819 
set_encountered_weak_collections(Object * weak_collection)820   void set_encountered_weak_collections(Object* weak_collection) {
821     encountered_weak_collections_ = weak_collection;
822   }
encountered_weak_collections()823   Object* encountered_weak_collections() const {
824     return encountered_weak_collections_;
825   }
826   void IterateEncounteredWeakCollections(RootVisitor* visitor);
827 
828   // Number of mark-sweeps.
ms_count()829   int ms_count() const { return ms_count_; }
830 
831   // Checks whether the given object is allowed to be migrated from it's
832   // current space into the given destination space. Used for debugging.
833   bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
834 
835   void CheckHandleCount();
836 
837   // Number of "runtime allocations" done so far.
allocations_count()838   uint32_t allocations_count() { return allocations_count_; }
839 
840   // Print short heap statistics.
841   void PrintShortHeapStatistics();
842 
write_protect_code_memory()843   bool write_protect_code_memory() const { return write_protect_code_memory_; }
844 
code_space_memory_modification_scope_depth()845   uintptr_t code_space_memory_modification_scope_depth() {
846     return code_space_memory_modification_scope_depth_;
847   }
848 
increment_code_space_memory_modification_scope_depth()849   void increment_code_space_memory_modification_scope_depth() {
850     code_space_memory_modification_scope_depth_++;
851   }
852 
decrement_code_space_memory_modification_scope_depth()853   void decrement_code_space_memory_modification_scope_depth() {
854     code_space_memory_modification_scope_depth_--;
855   }
856 
857   void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
858   void UnprotectAndRegisterMemoryChunk(HeapObject* object);
859   void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
860   V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
861 
EnableUnprotectedMemoryChunksRegistry()862   void EnableUnprotectedMemoryChunksRegistry() {
863     unprotected_memory_chunks_registry_enabled_ = true;
864   }
865 
DisableUnprotectedMemoryChunksRegistry()866   void DisableUnprotectedMemoryChunksRegistry() {
867     unprotected_memory_chunks_registry_enabled_ = false;
868   }
869 
unprotected_memory_chunks_registry_enabled()870   bool unprotected_memory_chunks_registry_enabled() {
871     return unprotected_memory_chunks_registry_enabled_;
872   }
873 
gc_state()874   inline HeapState gc_state() { return gc_state_; }
875   void SetGCState(HeapState state);
IsTearingDown()876   bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }
877 
IsInGCPostProcessing()878   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
879 
880   // If an object has an AllocationMemento trailing it, return it, otherwise
881   // return nullptr;
882   template <FindMementoMode mode>
883   inline AllocationMemento* FindAllocationMemento(Map* map, HeapObject* object);
884 
885   // Returns false if not able to reserve.
886   bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
887 
888   //
889   // Support for the API.
890   //
891 
892   void CreateApiObjects();
893 
894   // Implements the corresponding V8 API function.
895   bool IdleNotification(double deadline_in_seconds);
896   bool IdleNotification(int idle_time_in_ms);
897 
898   void MemoryPressureNotification(MemoryPressureLevel level,
899                                   bool is_isolate_locked);
900   void CheckMemoryPressure();
901 
902   void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
903   void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
904                                    size_t heap_limit);
905 
906   double MonotonicallyIncreasingTimeInMs();
907 
908   void RecordStats(HeapStats* stats, bool take_snapshot = false);
909 
910   // Check new space expansion criteria and expand semispaces if it was hit.
911   void CheckNewSpaceExpansionCriteria();
912 
913   void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
914 
915   // An object should be promoted if the object has survived a
916   // scavenge operation.
917   inline bool ShouldBePromoted(Address old_address);
918 
919   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
920 
921   inline uint64_t HashSeed();
922 
923   inline int NextScriptId();
924   inline int NextDebuggingId();
925   inline int GetNextTemplateSerialNumber();
926 
927   void SetSerializedObjects(FixedArray* objects);
928   void SetSerializedGlobalProxySizes(FixedArray* sizes);
929 
930   // For post mortem debugging.
931   void RememberUnmappedPage(Address page, bool compacted);
932 
external_memory_hard_limit()933   int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
934 
external_memory()935   int64_t external_memory() { return external_memory_; }
update_external_memory(int64_t delta)936   void update_external_memory(int64_t delta) { external_memory_ += delta; }
937 
update_external_memory_concurrently_freed(intptr_t freed)938   void update_external_memory_concurrently_freed(intptr_t freed) {
939     external_memory_concurrently_freed_ += freed;
940   }
941 
account_external_memory_concurrently_freed()942   void account_external_memory_concurrently_freed() {
943     external_memory_ -= external_memory_concurrently_freed_;
944     external_memory_concurrently_freed_ = 0;
945   }
946 
947   void CompactFixedArraysOfWeakCells();
948 
949   void AddRetainedMap(Handle<Map> map);
950 
951   // This event is triggered after successful allocation of a new object made
952   // by runtime. Allocations of target space for object evacuation do not
953   // trigger the event. In order to track ALL allocations one must turn off
954   // FLAG_inline_new.
955   inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
956 
957   // This event is triggered after object is moved to a new place.
958   inline void OnMoveEvent(HeapObject* target, HeapObject* source,
959                           int size_in_bytes);
960 
961   inline bool CanAllocateInReadOnlySpace();
deserialization_complete()962   bool deserialization_complete() const { return deserialization_complete_; }
963 
964   bool HasLowAllocationRate();
965   bool HasHighFragmentation();
966   bool HasHighFragmentation(size_t used, size_t committed);
967 
968   void ActivateMemoryReducerIfNeeded();
969 
970   bool ShouldOptimizeForMemoryUsage();
971 
HighMemoryPressure()972   bool HighMemoryPressure() {
973     return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
974   }
975 
RestoreHeapLimit(size_t heap_limit)976   void RestoreHeapLimit(size_t heap_limit) {
977     // Do not set the limit lower than the live size + some slack.
978     size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
979     max_old_generation_size_ =
980         Min(max_old_generation_size_, Max(heap_limit, min_limit));
981   }
982 
983   // ===========================================================================
984   // Initialization. ===========================================================
985   // ===========================================================================
986 
987   // Configure heap sizes
988   // max_semi_space_size_in_kb: maximum semi-space size in KB
989   // max_old_generation_size_in_mb: maximum old generation size in MB
990   // code_range_size_in_mb: code range size in MB
991   // Return false if the heap has been set up already.
992   bool ConfigureHeap(size_t max_semi_space_size_in_kb,
993                      size_t max_old_generation_size_in_mb,
994                      size_t code_range_size_in_mb);
995   bool ConfigureHeapDefault();
996 
997   // Prepares the heap, setting up memory areas that are needed in the isolate
998   // without actually creating any objects.
999   bool SetUp();
1000 
1001   // (Re-)Initialize hash seed from flag or RNG.
1002   void InitializeHashSeed();
1003 
1004   // Bootstraps the object heap with the core set of objects required to run.
1005   // Returns whether it succeeded.
1006   bool CreateHeapObjects();
1007 
1008   // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
1009   void CreateObjectStats();
1010 
1011   // Sets the TearDown state, so no new GC tasks get posted.
1012   void StartTearDown();
1013 
1014   // Destroys all memory allocated by the heap.
1015   void TearDown();
1016 
1017   // Returns whether SetUp has been called.
1018   bool HasBeenSetUp();
1019 
1020   // ===========================================================================
1021   // Getters for spaces. =======================================================
1022   // ===========================================================================
1023 
1024   inline Address NewSpaceTop();
1025 
new_space()1026   NewSpace* new_space() { return new_space_; }
old_space()1027   OldSpace* old_space() { return old_space_; }
code_space()1028   CodeSpace* code_space() { return code_space_; }
map_space()1029   MapSpace* map_space() { return map_space_; }
lo_space()1030   LargeObjectSpace* lo_space() { return lo_space_; }
read_only_space()1031   ReadOnlySpace* read_only_space() { return read_only_space_; }
1032 
1033   inline PagedSpace* paged_space(int idx);
1034   inline Space* space(int idx);
1035 
1036   // Returns name of the space.
1037   const char* GetSpaceName(int idx);
1038 
1039   // ===========================================================================
1040   // Getters to other components. ==============================================
1041   // ===========================================================================
1042 
tracer()1043   GCTracer* tracer() { return tracer_; }
1044 
memory_allocator()1045   MemoryAllocator* memory_allocator() { return memory_allocator_; }
1046 
1047   inline Isolate* isolate();
1048 
mark_compact_collector()1049   MarkCompactCollector* mark_compact_collector() {
1050     return mark_compact_collector_;
1051   }
1052 
minor_mark_compact_collector()1053   MinorMarkCompactCollector* minor_mark_compact_collector() {
1054     return minor_mark_compact_collector_;
1055   }
1056 
array_buffer_collector()1057   ArrayBufferCollector* array_buffer_collector() {
1058     return array_buffer_collector_;
1059   }
1060 
1061   // ===========================================================================
1062   // Root set access. ==========================================================
1063   // ===========================================================================
1064 
1065   // Heap root getters.
1066 #define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
1067   ROOT_LIST(ROOT_ACCESSOR)
1068 #undef ROOT_ACCESSOR
1069 
1070   // Utility type maps.
1071 #define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
STRUCT_LIST(STRUCT_MAP_ACCESSOR)1072   STRUCT_LIST(STRUCT_MAP_ACCESSOR)
1073 #undef STRUCT_MAP_ACCESSOR
1074 
1075 #define DATA_HANDLER_MAP_ACCESSOR(NAME, Name, Size, name) \
1076   inline Map* name##_map();
1077   DATA_HANDLER_LIST(DATA_HANDLER_MAP_ACCESSOR)
1078 #undef DATA_HANDLER_MAP_ACCESSOR
1079 
1080 #define STRING_ACCESSOR(name, str) inline String* name();
1081   INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
1082 #undef STRING_ACCESSOR
1083 
1084 #define SYMBOL_ACCESSOR(name) inline Symbol* name();
1085   PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
1086 #undef SYMBOL_ACCESSOR
1087 
1088 #define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
1089   PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
1090   WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
1091 #undef SYMBOL_ACCESSOR
1092 
1093 #define ACCESSOR_INFO_ACCESSOR(accessor_name, AccessorName) \
1094   inline AccessorInfo* accessor_name##_accessor();
1095   ACCESSOR_INFO_LIST(ACCESSOR_INFO_ACCESSOR)
1096 #undef ACCESSOR_INFO_ACCESSOR
1097 
1098   Object* root(RootListIndex index) { return roots_[index]; }
root_handle(RootListIndex index)1099   Handle<Object> root_handle(RootListIndex index) {
1100     return Handle<Object>(&roots_[index]);
1101   }
1102   template <typename T>
IsRootHandle(Handle<T> handle,RootListIndex * index)1103   bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
1104     Object** const handle_location = bit_cast<Object**>(handle.address());
1105     if (handle_location >= &roots_[kRootListLength]) return false;
1106     if (handle_location < &roots_[0]) return false;
1107     *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
1108     return true;
1109   }
1110 
1111   // Generated code can embed this address to get access to the roots.
roots_array_start()1112   Object** roots_array_start() { return roots_; }
1113 
external_reference_table()1114   ExternalReferenceTable* external_reference_table() {
1115     DCHECK(external_reference_table_.is_initialized());
1116     return &external_reference_table_;
1117   }
1118 
roots_to_external_reference_table_offset()1119   static constexpr int roots_to_external_reference_table_offset() {
1120     return kRootsExternalReferenceTableOffset;
1121   }
1122 
1123   // Sets the stub_cache_ (only used when expanding the dictionary).
1124   void SetRootCodeStubs(SimpleNumberDictionary* value);
1125 
SetRootMaterializedObjects(FixedArray * objects)1126   void SetRootMaterializedObjects(FixedArray* objects) {
1127     roots_[kMaterializedObjectsRootIndex] = objects;
1128   }
1129 
SetRootScriptList(Object * value)1130   void SetRootScriptList(Object* value) {
1131     roots_[kScriptListRootIndex] = value;
1132   }
1133 
SetRootStringTable(StringTable * value)1134   void SetRootStringTable(StringTable* value) {
1135     roots_[kStringTableRootIndex] = value;
1136   }
1137 
SetRootNoScriptSharedFunctionInfos(Object * value)1138   void SetRootNoScriptSharedFunctionInfos(Object* value) {
1139     roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
1140   }
1141 
SetMessageListeners(TemplateList * value)1142   void SetMessageListeners(TemplateList* value) {
1143     roots_[kMessageListenersRootIndex] = value;
1144   }
1145 
1146   // Set the stack limit in the roots_ array.  Some architectures generate
1147   // code that looks here, because it is faster than loading from the static
1148   // jslimit_/real_jslimit_ variable in the StackGuard.
1149   void SetStackLimits();
1150 
1151   // The stack limit is thread-dependent. To be able to reproduce the same
1152   // snapshot blob, we need to reset it before serializing.
1153   void ClearStackLimits();
1154 
1155   // Generated code can treat direct references to this root as constant.
1156   bool RootCanBeTreatedAsConstant(RootListIndex root_index);
1157 
1158   Map* MapForFixedTypedArray(ExternalArrayType array_type);
1159   Map* MapForFixedTypedArray(ElementsKind elements_kind);
1160   FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
1161 
1162   void RegisterStrongRoots(Object** start, Object** end);
1163   void UnregisterStrongRoots(Object** start);
1164 
1165   bool IsDeserializeLazyHandler(Code* code);
1166   void SetDeserializeLazyHandler(Code* code);
1167   void SetDeserializeLazyHandlerWide(Code* code);
1168   void SetDeserializeLazyHandlerExtraWide(Code* code);
1169 
1170   void SetBuiltinsConstantsTable(FixedArray* cache);
1171 
1172   // ===========================================================================
1173   // Inline allocation. ========================================================
1174   // ===========================================================================
1175 
1176   // Indicates whether inline bump-pointer allocation has been disabled.
inline_allocation_disabled()1177   bool inline_allocation_disabled() { return inline_allocation_disabled_; }
1178 
1179   // Switch whether inline bump-pointer allocation should be used.
1180   void EnableInlineAllocation();
1181   void DisableInlineAllocation();
1182 
1183   // ===========================================================================
1184   // Methods triggering GCs. ===================================================
1185   // ===========================================================================
1186 
1187   // Performs garbage collection operation.
1188   // Returns whether there is a chance that another major GC could
1189   // collect more garbage.
1190   bool CollectGarbage(
1191       AllocationSpace space, GarbageCollectionReason gc_reason,
1192       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1193 
1194   // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
1195   // non-zero, then the slower precise sweeper is used, which leaves the heap
1196   // in a state where we can iterate over the heap visiting all objects.
1197   void CollectAllGarbage(
1198       int flags, GarbageCollectionReason gc_reason,
1199       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1200 
1201   // Last hope GC, should try to squeeze as much as possible.
1202   void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);
1203 
1204   // Reports and external memory pressure event, either performs a major GC or
1205   // completes incremental marking in order to free external resources.
1206   void ReportExternalMemoryPressure();
1207 
1208   typedef v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback
1209       GetExternallyAllocatedMemoryInBytesCallback;
1210 
SetGetExternallyAllocatedMemoryInBytesCallback(GetExternallyAllocatedMemoryInBytesCallback callback)1211   void SetGetExternallyAllocatedMemoryInBytesCallback(
1212       GetExternallyAllocatedMemoryInBytesCallback callback) {
1213     external_memory_callback_ = callback;
1214   }
1215 
1216   // Invoked when GC was requested via the stack guard.
1217   void HandleGCRequest();
1218 
1219   // ===========================================================================
1220   // Iterators. ================================================================
1221   // ===========================================================================
1222 
1223   void IterateRoots(RootVisitor* v, VisitMode mode);
1224   void IterateStrongRoots(RootVisitor* v, VisitMode mode);
1225   // Iterates over entries in the smi roots list.  Only interesting to the
1226   // serializer/deserializer, since GC does not care about smis.
1227   void IterateSmiRoots(RootVisitor* v);
1228   // Iterates over weak string tables.
1229   void IterateWeakRoots(RootVisitor* v, VisitMode mode);
1230   // Iterates over weak global handles.
1231   void IterateWeakGlobalHandles(RootVisitor* v);
1232 
1233   // ===========================================================================
1234   // Store buffer API. =========================================================
1235   // ===========================================================================
1236 
1237   // Write barrier support for object[offset] = o;
1238   inline void RecordWrite(Object* object, MaybeObject** slot,
1239                           MaybeObject* value);
1240   inline void RecordWrite(Object* object, Object** slot, Object* value);
1241   inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
1242   void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
1243   void RecordWritesIntoCode(Code* code);
1244   inline void RecordFixedArrayElements(FixedArray* array, int offset,
1245                                        int length);
1246 
1247   // Used for query incremental marking status in generated code.
IsMarkingFlagAddress()1248   Address* IsMarkingFlagAddress() {
1249     return reinterpret_cast<Address*>(&is_marking_flag_);
1250   }
1251 
SetIsMarkingFlag(uint8_t flag)1252   void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
1253 
1254   inline Address* store_buffer_top_address();
1255 
1256   void ClearRecordedSlot(HeapObject* object, Object** slot);
1257   void ClearRecordedSlotRange(Address start, Address end);
1258 
1259   bool HasRecordedSlot(HeapObject* object, Object** slot);
1260 
1261   // ===========================================================================
1262   // Incremental marking API. ==================================================
1263   // ===========================================================================
1264 
GCFlagsForIncrementalMarking()1265   int GCFlagsForIncrementalMarking() {
1266     return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
1267                                           : kNoGCFlags;
1268   }
1269 
1270   // Start incremental marking and ensure that idle time handler can perform
1271   // incremental steps.
1272   void StartIdleIncrementalMarking(
1273       GarbageCollectionReason gc_reason,
1274       GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
1275 
1276   // Starts incremental marking assuming incremental marking is currently
1277   // stopped.
1278   void StartIncrementalMarking(
1279       int gc_flags, GarbageCollectionReason gc_reason,
1280       GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
1281 
1282   void StartIncrementalMarkingIfAllocationLimitIsReached(
1283       int gc_flags,
1284       GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
1285 
1286   void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
1287 
1288   void RegisterDeserializedObjectsForBlackAllocation(
1289       Reservation* reservations, const std::vector<HeapObject*>& large_objects,
1290       const std::vector<Address>& maps);
1291 
incremental_marking()1292   IncrementalMarking* incremental_marking() { return incremental_marking_; }
1293 
1294   // ===========================================================================
1295   // Concurrent marking API. ===================================================
1296   // ===========================================================================
1297 
concurrent_marking()1298   ConcurrentMarking* concurrent_marking() { return concurrent_marking_; }
1299 
1300   // The runtime uses this function to notify potentially unsafe object layout
1301   // changes that require special synchronization with the concurrent marker.
1302   // The old size is the size of the object before layout change.
1303   void NotifyObjectLayoutChange(HeapObject* object, int old_size,
1304                                 const DisallowHeapAllocation&);
1305 
1306 #ifdef VERIFY_HEAP
1307   // This function checks that either
1308   // - the map transition is safe,
1309   // - or it was communicated to GC using NotifyObjectLayoutChange.
1310   void VerifyObjectLayoutChange(HeapObject* object, Map* new_map);
1311 #endif
1312 
1313   // ===========================================================================
1314   // Deoptimization support API. ===============================================
1315   // ===========================================================================
1316 
1317   // Setters for code offsets of well-known deoptimization targets.
1318   void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
1319   void SetConstructStubCreateDeoptPCOffset(int pc_offset);
1320   void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
1321   void SetInterpreterEntryReturnPCOffset(int pc_offset);
1322 
1323   // Invalidates references in the given {code} object that are directly
1324   // embedded within the instruction stream. Mutates write-protected code.
1325   void InvalidateCodeEmbeddedObjects(Code* code);
1326 
1327   // Invalidates references in the given {code} object that are referenced
1328   // transitively from the deoptimization data. Mutates write-protected code.
1329   void InvalidateCodeDeoptimizationData(Code* code);
1330 
1331   void DeoptMarkedAllocationSites();
1332 
1333   bool DeoptMaybeTenuredAllocationSites();
1334 
1335   // ===========================================================================
1336   // Embedder heap tracer support. =============================================
1337   // ===========================================================================
1338 
local_embedder_heap_tracer()1339   LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
1340     return local_embedder_heap_tracer_;
1341   }
1342   void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
1343   void TracePossibleWrapper(JSObject* js_object);
1344   void RegisterExternallyReferencedObject(Object** object);
1345 
1346   // ===========================================================================
1347   // External string table API. ================================================
1348   // ===========================================================================
1349 
1350   // Registers an external string.
1351   inline void RegisterExternalString(String* string);
1352 
1353   // Finalizes an external string by deleting the associated external
1354   // data and clearing the resource pointer.
1355   inline void FinalizeExternalString(String* string);
1356 
1357   // ===========================================================================
1358   // Methods checking/returning the space of a given object/address. ===========
1359   // ===========================================================================
1360 
1361   // Returns whether the object resides in new space.
1362   inline bool InNewSpace(Object* object);
1363   inline bool InNewSpace(MaybeObject* object);
1364   inline bool InNewSpace(HeapObject* heap_object);
1365   inline bool InFromSpace(Object* object);
1366   inline bool InFromSpace(MaybeObject* object);
1367   inline bool InFromSpace(HeapObject* heap_object);
1368   inline bool InToSpace(Object* object);
1369   inline bool InToSpace(MaybeObject* object);
1370   inline bool InToSpace(HeapObject* heap_object);
1371 
1372   // Returns whether the object resides in old space.
1373   inline bool InOldSpace(Object* object);
1374 
1375   // Returns whether the object resides in read-only space.
1376   inline bool InReadOnlySpace(Object* object);
1377 
1378   // Checks whether an address/object in the heap (including auxiliary
1379   // area and unused area).
1380   bool Contains(HeapObject* value);
1381 
1382   // Checks whether an address/object in a space.
1383   // Currently used by tests, serialization and heap verification only.
1384   bool InSpace(HeapObject* value, AllocationSpace space);
1385 
1386   // Slow methods that can be used for verification as they can also be used
1387   // with off-heap Addresses.
1388   bool ContainsSlow(Address addr);
1389   bool InSpaceSlow(Address addr, AllocationSpace space);
1390   inline bool InNewSpaceSlow(Address address);
1391   inline bool InOldSpaceSlow(Address address);
1392 
1393   // ===========================================================================
1394   // Object statistics tracking. ===============================================
1395   // ===========================================================================
1396 
1397   // Returns the number of buckets used by object statistics tracking during a
1398   // major GC. Note that the following methods fail gracefully when the bounds
1399   // are exceeded though.
1400   size_t NumberOfTrackedHeapObjectTypes();
1401 
1402   // Returns object statistics about count and size at the last major GC.
1403   // Objects are being grouped into buckets that roughly resemble existing
1404   // instance types.
1405   size_t ObjectCountAtLastGC(size_t index);
1406   size_t ObjectSizeAtLastGC(size_t index);
1407 
1408   // Retrieves names of buckets used by object statistics tracking.
1409   bool GetObjectTypeName(size_t index, const char** object_type,
1410                          const char** object_sub_type);
1411 
1412   // The total number of native contexts object on the heap.
1413   size_t NumberOfNativeContexts();
1414   // The total number of native contexts that were detached but were not
1415   // garbage collected yet.
1416   size_t NumberOfDetachedContexts();
1417 
1418   // ===========================================================================
1419   // Code statistics. ==========================================================
1420   // ===========================================================================
1421 
1422   // Collect code (Code and BytecodeArray objects) statistics.
1423   void CollectCodeStatistics();
1424 
1425   // ===========================================================================
1426   // GC statistics. ============================================================
1427   // ===========================================================================
1428 
1429   // Returns the maximum amount of memory reserved for the heap.
1430   size_t MaxReserved();
MaxSemiSpaceSize()1431   size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
InitialSemiSpaceSize()1432   size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
MaxOldGenerationSize()1433   size_t MaxOldGenerationSize() { return max_old_generation_size_; }
1434 
ComputeMaxOldGenerationSize(uint64_t physical_memory)1435   static size_t ComputeMaxOldGenerationSize(uint64_t physical_memory) {
1436     const size_t old_space_physical_memory_factor = 4;
1437     size_t computed_size = static_cast<size_t>(
1438         physical_memory / i::MB / old_space_physical_memory_factor *
1439         kPointerMultiplier);
1440     return Max(Min(computed_size, kMaxOldGenerationSize),
1441                kMinOldGenerationSize);
1442   }
1443 
ComputeMaxSemiSpaceSize(uint64_t physical_memory)1444   static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
1445     const uint64_t min_physical_memory = 512 * MB;
1446     const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB);
1447 
1448     uint64_t capped_physical_memory =
1449         Max(Min(physical_memory, max_physical_memory), min_physical_memory);
1450     // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
1451     size_t semi_space_size_in_kb =
1452         static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
1453                              (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
1454                                 (max_physical_memory - min_physical_memory) +
1455                             kMinSemiSpaceSizeInKB);
1456     return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
1457   }
1458 
1459   // Returns the capacity of the heap in bytes w/o growing. Heap grows when
1460   // more spaces are needed until it reaches the limit.
1461   size_t Capacity();
1462 
1463   // Returns the capacity of the old generation.
1464   size_t OldGenerationCapacity();
1465 
1466   // Returns the amount of memory currently committed for the heap.
1467   size_t CommittedMemory();
1468 
1469   // Returns the amount of memory currently committed for the old space.
1470   size_t CommittedOldGenerationMemory();
1471 
1472   // Returns the amount of executable memory currently committed for the heap.
1473   size_t CommittedMemoryExecutable();
1474 
1475   // Returns the amount of phyical memory currently committed for the heap.
1476   size_t CommittedPhysicalMemory();
1477 
1478   // Returns the maximum amount of memory ever committed for the heap.
MaximumCommittedMemory()1479   size_t MaximumCommittedMemory() { return maximum_committed_; }
1480 
1481   // Updates the maximum committed memory for the heap. Should be called
1482   // whenever a space grows.
1483   void UpdateMaximumCommitted();
1484 
1485   // Returns the available bytes in space w/o growing.
1486   // Heap doesn't guarantee that it can allocate an object that requires
1487   // all available bytes. Check MaxHeapObjectSize() instead.
1488   size_t Available();
1489 
1490   // Returns of size of all objects residing in the heap.
1491   size_t SizeOfObjects();
1492 
1493   void UpdateSurvivalStatistics(int start_new_space_size);
1494 
IncrementPromotedObjectsSize(size_t object_size)1495   inline void IncrementPromotedObjectsSize(size_t object_size) {
1496     promoted_objects_size_ += object_size;
1497   }
promoted_objects_size()1498   inline size_t promoted_objects_size() { return promoted_objects_size_; }
1499 
IncrementSemiSpaceCopiedObjectSize(size_t object_size)1500   inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
1501     semi_space_copied_object_size_ += object_size;
1502   }
semi_space_copied_object_size()1503   inline size_t semi_space_copied_object_size() {
1504     return semi_space_copied_object_size_;
1505   }
1506 
SurvivedNewSpaceObjectSize()1507   inline size_t SurvivedNewSpaceObjectSize() {
1508     return promoted_objects_size_ + semi_space_copied_object_size_;
1509   }
1510 
IncrementNodesDiedInNewSpace()1511   inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
1512 
IncrementNodesCopiedInNewSpace()1513   inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
1514 
IncrementNodesPromoted()1515   inline void IncrementNodesPromoted() { nodes_promoted_++; }
1516 
IncrementYoungSurvivorsCounter(size_t survived)1517   inline void IncrementYoungSurvivorsCounter(size_t survived) {
1518     survived_last_scavenge_ = survived;
1519     survived_since_last_expansion_ += survived;
1520   }
1521 
OldGenerationObjectsAndPromotedExternalMemorySize()1522   inline uint64_t OldGenerationObjectsAndPromotedExternalMemorySize() {
1523     return OldGenerationSizeOfObjects() + PromotedExternalMemorySize();
1524   }
1525 
1526   inline void UpdateNewSpaceAllocationCounter();
1527 
1528   inline size_t NewSpaceAllocationCounter();
1529 
1530   // This should be used only for testing.
set_new_space_allocation_counter(size_t new_value)1531   void set_new_space_allocation_counter(size_t new_value) {
1532     new_space_allocation_counter_ = new_value;
1533   }
1534 
UpdateOldGenerationAllocationCounter()1535   void UpdateOldGenerationAllocationCounter() {
1536     old_generation_allocation_counter_at_last_gc_ =
1537         OldGenerationAllocationCounter();
1538     old_generation_size_at_last_gc_ = 0;
1539   }
1540 
OldGenerationAllocationCounter()1541   size_t OldGenerationAllocationCounter() {
1542     return old_generation_allocation_counter_at_last_gc_ +
1543            PromotedSinceLastGC();
1544   }
1545 
1546   // This should be used only for testing.
set_old_generation_allocation_counter_at_last_gc(size_t new_value)1547   void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
1548     old_generation_allocation_counter_at_last_gc_ = new_value;
1549   }
1550 
PromotedSinceLastGC()1551   size_t PromotedSinceLastGC() {
1552     size_t old_generation_size = OldGenerationSizeOfObjects();
1553     DCHECK_GE(old_generation_size, old_generation_size_at_last_gc_);
1554     return old_generation_size - old_generation_size_at_last_gc_;
1555   }
1556 
1557   // This is called by the sweeper when it discovers more free space
1558   // than expected at the end of the preceding GC.
NotifyRefinedOldGenerationSize(size_t decreased_bytes)1559   void NotifyRefinedOldGenerationSize(size_t decreased_bytes) {
1560     if (old_generation_size_at_last_gc_ != 0) {
1561       // OldGenerationSizeOfObjects() is now smaller by |decreased_bytes|.
1562       // Adjust old_generation_size_at_last_gc_ too, so that PromotedSinceLastGC
1563       // continues to increase monotonically, rather than decreasing here.
1564       DCHECK_GE(old_generation_size_at_last_gc_, decreased_bytes);
1565       old_generation_size_at_last_gc_ -= decreased_bytes;
1566     }
1567   }
1568 
gc_count()1569   int gc_count() const { return gc_count_; }
1570 
1571   // Returns the size of objects residing in non-new spaces.
1572   // Excludes external memory held by those objects.
1573   size_t OldGenerationSizeOfObjects();
1574 
1575   // ===========================================================================
1576   // Prologue/epilogue callback methods.========================================
1577   // ===========================================================================
1578 
1579   void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
1580                              GCType gc_type_filter, void* data);
1581   void RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
1582                                 void* data);
1583 
1584   void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
1585                              GCType gc_type_filter, void* data);
1586   void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
1587                                 void* data);
1588 
1589   void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
1590   void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
1591 
1592   // ===========================================================================
1593   // Allocation methods. =======================================================
1594   // ===========================================================================
1595 
1596   // Creates a filler object and returns a heap object immediately after it.
1597   V8_WARN_UNUSED_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
1598                                                       int filler_size);
1599 
1600   // Creates a filler object if needed for alignment and returns a heap object
1601   // immediately after it. If any space is left after the returned object,
1602   // another filler object is created so the over allocated memory is iterable.
1603   V8_WARN_UNUSED_RESULT HeapObject* AlignWithFiller(
1604       HeapObject* object, int object_size, int allocation_size,
1605       AllocationAlignment alignment);
1606 
1607   // ===========================================================================
1608   // ArrayBuffer tracking. =====================================================
1609   // ===========================================================================
1610 
1611   // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
1612   // in the registration/unregistration APIs. Consider dropping the "New" from
1613   // "RegisterNewArrayBuffer" because one can re-register a previously
1614   // unregistered buffer, too, and the name is confusing.
1615   void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
1616   void UnregisterArrayBuffer(JSArrayBuffer* buffer);
1617 
1618   // ===========================================================================
1619   // Allocation site tracking. =================================================
1620   // ===========================================================================
1621 
1622   // Updates the AllocationSite of a given {object}. The entry (including the
1623   // count) is cached on the local pretenuring feedback.
1624   inline void UpdateAllocationSite(
1625       Map* map, HeapObject* object,
1626       PretenuringFeedbackMap* pretenuring_feedback);
1627 
1628   // Merges local pretenuring feedback into the global one. Note that this
1629   // method needs to be called after evacuation, as allocation sites may be
1630   // evacuated and this method resolves forward pointers accordingly.
1631   void MergeAllocationSitePretenuringFeedback(
1632       const PretenuringFeedbackMap& local_pretenuring_feedback);
1633 
1634   // ===========================================================================
1635   // Allocation tracking. ======================================================
1636   // ===========================================================================
1637 
1638   // Adds {new_space_observer} to new space and {observer} to any other space.
1639   void AddAllocationObserversToAllSpaces(
1640       AllocationObserver* observer, AllocationObserver* new_space_observer);
1641 
1642   // Removes {new_space_observer} from new space and {observer} from any other
1643   // space.
1644   void RemoveAllocationObserversFromAllSpaces(
1645       AllocationObserver* observer, AllocationObserver* new_space_observer);
1646 
allocation_step_in_progress()1647   bool allocation_step_in_progress() { return allocation_step_in_progress_; }
set_allocation_step_in_progress(bool val)1648   void set_allocation_step_in_progress(bool val) {
1649     allocation_step_in_progress_ = val;
1650   }
1651 
1652   // ===========================================================================
1653   // Heap object allocation tracking. ==========================================
1654   // ===========================================================================
1655 
1656   void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
1657   void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
has_heap_object_allocation_tracker()1658   bool has_heap_object_allocation_tracker() const {
1659     return !allocation_trackers_.empty();
1660   }
1661 
1662   // Retaining path tracking. ==================================================
1663   // ===========================================================================
1664 
1665   // Adds the given object to the weak table of retaining path targets.
1666   // On each GC if the marker discovers the object, it will print the retaining
1667   // path. This requires --track-retaining-path flag.
1668   void AddRetainingPathTarget(Handle<HeapObject> object,
1669                               RetainingPathOption option);
1670 
1671   // ===========================================================================
1672   // Stack frame support. ======================================================
1673   // ===========================================================================
1674 
1675   // Returns the Code object for a given interior pointer. Returns nullptr if
1676   // {inner_pointer} is not contained within a Code object.
1677   Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
1678 
1679   // Returns true if {addr} is contained within {code} and false otherwise.
1680   // Mostly useful for debugging.
1681   bool GcSafeCodeContains(HeapObject* code, Address addr);
1682 
1683 // =============================================================================
1684 #ifdef VERIFY_HEAP
1685   // Verify the heap is in its normal state before or after a GC.
1686   void Verify();
1687   void VerifyRememberedSetFor(HeapObject* object);
1688 #endif
1689 
1690 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
set_allocation_timeout(int timeout)1691   void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
1692 #endif
1693 
1694 #ifdef DEBUG
1695   void VerifyCountersAfterSweeping();
1696   void VerifyCountersBeforeConcurrentSweeping();
1697 
1698   void Print();
1699   void PrintHandles();
1700 
1701   // Report code statistics.
1702   void ReportCodeStatistics(const char* title);
1703 #endif
GetRandomMmapAddr()1704   void* GetRandomMmapAddr() {
1705     void* result = v8::internal::GetRandomMmapAddr();
1706 #if V8_TARGET_ARCH_X64
1707 #if V8_OS_MACOSX
1708     // The Darwin kernel [as of macOS 10.12.5] does not clean up page
1709     // directory entries [PDE] created from mmap or mach_vm_allocate, even
1710     // after the region is destroyed. Using a virtual address space that is
1711     // too large causes a leak of about 1 wired [can never be paged out] page
1712     // per call to mmap(). The page is only reclaimed when the process is
1713     // killed. Confine the hint to a 32-bit section of the virtual address
1714     // space. See crbug.com/700928.
1715     uintptr_t offset =
1716         reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
1717         kMmapRegionMask;
1718     result = reinterpret_cast<void*>(mmap_region_base_ + offset);
1719 #endif  // V8_OS_MACOSX
1720 #endif  // V8_TARGET_ARCH_X64
1721     return result;
1722   }
1723 
1724   static const char* GarbageCollectionReasonToString(
1725       GarbageCollectionReason gc_reason);
1726 
1727  private:
1728   class SkipStoreBufferScope;
1729 
1730   typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
1731                                                         Object** pointer);
1732 
1733   // External strings table is a place where all external strings are
1734   // registered.  We need to keep track of such strings to properly
1735   // finalize them.
1736   class ExternalStringTable {
1737    public:
ExternalStringTable(Heap * heap)1738     explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
1739 
1740     // Registers an external string.
1741     inline void AddString(String* string);
1742 
1743     void IterateAll(RootVisitor* v);
1744     void IterateNewSpaceStrings(RootVisitor* v);
1745     void PromoteAllNewSpaceStrings();
1746 
1747     // Restores internal invariant and gets rid of collected strings. Must be
1748     // called after each Iterate*() that modified the strings.
1749     void CleanUpAll();
1750     void CleanUpNewSpaceStrings();
1751 
1752     // Finalize all registered external strings and clear tables.
1753     void TearDown();
1754 
1755     void UpdateNewSpaceReferences(
1756         Heap::ExternalStringTableUpdaterCallback updater_func);
1757     void UpdateReferences(
1758         Heap::ExternalStringTableUpdaterCallback updater_func);
1759 
1760    private:
1761     void Verify();
1762 
1763     Heap* const heap_;
1764 
1765     // To speed up scavenge collections new space string are kept
1766     // separate from old space strings.
1767     std::vector<Object*> new_space_strings_;
1768     std::vector<Object*> old_space_strings_;
1769 
1770     DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
1771   };
1772 
1773   struct StrongRootsList;
1774 
1775   struct StringTypeTable {
1776     InstanceType type;
1777     int size;
1778     RootListIndex index;
1779   };
1780 
1781   struct ConstantStringTable {
1782     const char* contents;
1783     RootListIndex index;
1784   };
1785 
1786   struct StructTable {
1787     InstanceType type;
1788     int size;
1789     RootListIndex index;
1790   };
1791 
1792   struct GCCallbackTuple {
GCCallbackTupleGCCallbackTuple1793     GCCallbackTuple(v8::Isolate::GCCallbackWithData callback, GCType gc_type,
1794                     void* data)
1795         : callback(callback), gc_type(gc_type), data(data) {}
1796 
1797     bool operator==(const GCCallbackTuple& other) const;
1798     GCCallbackTuple& operator=(const GCCallbackTuple& other);
1799 
1800     v8::Isolate::GCCallbackWithData callback;
1801     GCType gc_type;
1802     void* data;
1803   };
1804 
1805   static const int kInitialStringTableSize = StringTable::kMinCapacity;
1806   static const int kInitialEvalCacheSize = 64;
1807   static const int kInitialNumberStringCacheSize = 256;
1808 
1809   static const int kRememberedUnmappedPages = 128;
1810 
1811   static const StringTypeTable string_type_table[];
1812   static const ConstantStringTable constant_string_table[];
1813   static const StructTable struct_table[];
1814 
1815   static const int kYoungSurvivalRateHighThreshold = 90;
1816   static const int kYoungSurvivalRateAllowedDeviation = 15;
1817   static const int kOldSurvivalRateLowThreshold = 10;
1818 
1819   static const int kMaxMarkCompactsInIdleRound = 7;
1820   static const int kIdleScavengeThreshold = 5;
1821 
1822   static const int kInitialFeedbackCapacity = 256;
1823 
1824   static const int kMaxScavengerTasks = 8;
1825 
1826   Heap();
1827 
1828   static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
1829       Heap* heap, Object** pointer);
1830 
1831   // Selects the proper allocation space based on the pretenuring decision.
SelectSpace(PretenureFlag pretenure)1832   static AllocationSpace SelectSpace(PretenureFlag pretenure) {
1833     switch (pretenure) {
1834       case TENURED_READ_ONLY:
1835         return RO_SPACE;
1836       case TENURED:
1837         return OLD_SPACE;
1838       case NOT_TENURED:
1839         return NEW_SPACE;
1840       default:
1841         UNREACHABLE();
1842     }
1843   }
1844 
DefaultGetExternallyAllocatedMemoryInBytesCallback()1845   static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
1846     return 0;
1847   }
1848 
1849 #define ROOT_ACCESSOR(type, name, camel_name) \
1850   inline void set_##name(type* value);
ROOT_LIST(ROOT_ACCESSOR)1851   ROOT_LIST(ROOT_ACCESSOR)
1852 #undef ROOT_ACCESSOR
1853 
1854   StoreBuffer* store_buffer() { return store_buffer_; }
1855 
set_current_gc_flags(int flags)1856   void set_current_gc_flags(int flags) {
1857     current_gc_flags_ = flags;
1858     DCHECK(!ShouldFinalizeIncrementalMarking() ||
1859            !ShouldAbortIncrementalMarking());
1860   }
1861 
ShouldReduceMemory()1862   inline bool ShouldReduceMemory() const {
1863     return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
1864   }
1865 
ShouldAbortIncrementalMarking()1866   inline bool ShouldAbortIncrementalMarking() const {
1867     return (current_gc_flags_ & kAbortIncrementalMarkingMask) != 0;
1868   }
1869 
ShouldFinalizeIncrementalMarking()1870   inline bool ShouldFinalizeIncrementalMarking() const {
1871     return (current_gc_flags_ & kFinalizeIncrementalMarkingMask) != 0;
1872   }
1873 
1874   int NumberOfScavengeTasks();
1875 
1876   void PreprocessStackTraces();
1877 
1878   // Checks whether a global GC is necessary
1879   GarbageCollector SelectGarbageCollector(AllocationSpace space,
1880                                           const char** reason);
1881 
1882   // Make sure there is a filler value behind the top of the new space
1883   // so that the GC does not confuse some unintialized/stale memory
1884   // with the allocation memento of the object at the top
1885   void EnsureFillerObjectAtTop();
1886 
1887   // Ensure that we have swept all spaces in such a way that we can iterate
1888   // over all objects.  May cause a GC.
1889   void MakeHeapIterable();
1890 
1891   // Performs garbage collection
1892   // Returns whether there is a chance another major GC could
1893   // collect more garbage.
1894   bool PerformGarbageCollection(
1895       GarbageCollector collector,
1896       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1897 
1898   inline void UpdateOldSpaceLimits();
1899 
1900   bool CreateInitialMaps();
1901   void CreateInternalAccessorInfoObjects();
1902   void CreateInitialObjects();
1903 
1904   // These five Create*EntryStub functions are here and forced to not be inlined
1905   // because of a gcc-4.4 bug that assigns wrong vtable entries.
1906   NO_INLINE(void CreateJSEntryStub());
1907   NO_INLINE(void CreateJSConstructEntryStub());
1908   NO_INLINE(void CreateJSRunMicrotasksEntryStub());
1909 
1910   void CreateFixedStubs();
1911 
1912   // Commits from space if it is uncommitted.
1913   void EnsureFromSpaceIsCommitted();
1914 
1915   // Uncommit unused semi space.
1916   bool UncommitFromSpace();
1917 
1918   // Fill in bogus values in from space
1919   void ZapFromSpace();
1920 
1921   // Zaps the memory of a code object.
1922   void ZapCodeObject(Address start_address, int size_in_bytes);
1923 
1924   // Deopts all code that contains allocation instruction which are tenured or
1925   // not tenured. Moreover it clears the pretenuring allocation site statistics.
1926   void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
1927 
1928   // Evaluates local pretenuring for the old space and calls
1929   // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
1930   // the old space.
1931   void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
1932 
1933   // Record statistics after garbage collection.
1934   void ReportStatisticsAfterGC();
1935 
1936   // Creates and installs the full-sized number string cache.
1937   int FullSizeNumberStringCacheLength();
1938   // Flush the number to string cache.
1939   void FlushNumberStringCache();
1940 
1941   void ConfigureInitialOldGenerationSize();
1942 
1943   bool HasLowYoungGenerationAllocationRate();
1944   bool HasLowOldGenerationAllocationRate();
1945   double YoungGenerationMutatorUtilization();
1946   double OldGenerationMutatorUtilization();
1947 
1948   void ReduceNewSpaceSize();
1949 
1950   GCIdleTimeHeapState ComputeHeapState();
1951 
1952   bool PerformIdleTimeAction(GCIdleTimeAction action,
1953                              GCIdleTimeHeapState heap_state,
1954                              double deadline_in_ms);
1955 
1956   void IdleNotificationEpilogue(GCIdleTimeAction action,
1957                                 GCIdleTimeHeapState heap_state, double start_ms,
1958                                 double deadline_in_ms);
1959 
1960   int NextAllocationTimeout(int current_timeout = 0);
1961   inline void UpdateAllocationsHash(HeapObject* object);
1962   inline void UpdateAllocationsHash(uint32_t value);
1963   void PrintAllocationsHash();
1964 
1965   void PrintMaxMarkingLimitReached();
1966   void PrintMaxNewSpaceSizeReached();
1967 
1968   int NextStressMarkingLimit();
1969 
1970   void AddToRingBuffer(const char* string);
1971   void GetFromRingBuffer(char* buffer);
1972 
1973   void CompactRetainedMaps(WeakArrayList* retained_maps);
1974 
1975   void CollectGarbageOnMemoryPressure();
1976 
1977   bool InvokeNearHeapLimitCallback();
1978 
1979   void ComputeFastPromotionMode();
1980 
1981   // Attempt to over-approximate the weak closure by marking object groups and
1982   // implicit references from global handles, but don't atomically complete
1983   // marking. If we continue to mark incrementally, we might have marked
1984   // objects that die later.
1985   void FinalizeIncrementalMarking(GarbageCollectionReason gc_reason);
1986 
1987   // Returns the timer used for a given GC type.
1988   // - GCScavenger: young generation GC
1989   // - GCCompactor: full GC
1990   // - GCFinalzeMC: finalization of incremental full GC
1991   // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
1992   // memory reduction
1993   HistogramTimer* GCTypeTimer(GarbageCollector collector);
1994   HistogramTimer* GCTypePriorityTimer(GarbageCollector collector);
1995 
1996   // ===========================================================================
1997   // Pretenuring. ==============================================================
1998   // ===========================================================================
1999 
2000   // Pretenuring decisions are made based on feedback collected during new space
2001   // evacuation. Note that between feedback collection and calling this method
2002   // object in old space must not move.
2003   void ProcessPretenuringFeedback();
2004 
2005   // Removes an entry from the global pretenuring storage.
2006   void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
2007 
2008   // ===========================================================================
2009   // Actual GC. ================================================================
2010   // ===========================================================================
2011 
2012   // Code that should be run before and after each GC.  Includes some
2013   // reporting/verification activities when compiled with DEBUG set.
2014   void GarbageCollectionPrologue();
2015   void GarbageCollectionEpilogue();
2016 
2017   // Performs a major collection in the whole heap.
2018   void MarkCompact();
2019   // Performs a minor collection of just the young generation.
2020   void MinorMarkCompact();
2021 
2022   // Code to be run before and after mark-compact.
2023   void MarkCompactPrologue();
2024   void MarkCompactEpilogue();
2025 
2026   // Performs a minor collection in new generation.
2027   void Scavenge();
2028   void EvacuateYoungGeneration();
2029 
2030   void UpdateNewSpaceReferencesInExternalStringTable(
2031       ExternalStringTableUpdaterCallback updater_func);
2032 
2033   void UpdateReferencesInExternalStringTable(
2034       ExternalStringTableUpdaterCallback updater_func);
2035 
2036   void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
2037   void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
2038   void ProcessNativeContexts(WeakObjectRetainer* retainer);
2039   void ProcessAllocationSites(WeakObjectRetainer* retainer);
2040   void ProcessWeakListRoots(WeakObjectRetainer* retainer);
2041 
2042   // ===========================================================================
2043   // GC statistics. ============================================================
2044   // ===========================================================================
2045 
OldGenerationSpaceAvailable()2046   inline size_t OldGenerationSpaceAvailable() {
2047     if (old_generation_allocation_limit_ <=
2048         OldGenerationObjectsAndPromotedExternalMemorySize())
2049       return 0;
2050     return old_generation_allocation_limit_ -
2051            static_cast<size_t>(
2052                OldGenerationObjectsAndPromotedExternalMemorySize());
2053   }
2054 
2055   // We allow incremental marking to overshoot the allocation limit for
2056   // performace reasons. If the overshoot is too large then we are more
2057   // eager to finalize incremental marking.
AllocationLimitOvershotByLargeMargin()2058   inline bool AllocationLimitOvershotByLargeMargin() {
2059     // This guards against too eager finalization in small heaps.
2060     // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
2061     size_t kMarginForSmallHeaps = 32u * MB;
2062     if (old_generation_allocation_limit_ >=
2063         OldGenerationObjectsAndPromotedExternalMemorySize())
2064       return false;
2065     uint64_t overshoot = OldGenerationObjectsAndPromotedExternalMemorySize() -
2066                          old_generation_allocation_limit_;
2067     // Overshoot margin is 50% of allocation limit or half-way to the max heap
2068     // with special handling of small heaps.
2069     uint64_t margin =
2070         Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
2071             (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
2072     return overshoot >= margin;
2073   }
2074 
2075   void UpdateTotalGCTime(double duration);
2076 
MaximumSizeScavenge()2077   bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
2078 
2079   bool IsIneffectiveMarkCompact(size_t old_generation_size,
2080                                 double mutator_utilization);
2081   void CheckIneffectiveMarkCompact(size_t old_generation_size,
2082                                    double mutator_utilization);
2083 
2084   // ===========================================================================
2085   // Growing strategy. =========================================================
2086   // ===========================================================================
2087 
2088   // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
2089   // This constant limits the effect of load RAIL mode on GC.
2090   // The value is arbitrary and chosen as the largest load time observed in
2091   // v8 browsing benchmarks.
2092   static const int kMaxLoadTimeMs = 7000;
2093 
2094   bool ShouldOptimizeForLoadTime();
2095 
2096   // Decrease the allocation limit if the new limit based on the given
2097   // parameters is lower than the current limit.
2098   void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
2099                                           double mutator_speed);
2100 
2101   // Calculates the allocation limit based on a given growing factor and a
2102   // given old generation size.
2103   size_t CalculateOldGenerationAllocationLimit(double factor,
2104                                                size_t old_gen_size);
2105 
2106   // Sets the allocation limit to trigger the next full garbage collection.
2107   void SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
2108                                        double mutator_speed);
2109 
2110   size_t MinimumAllocationLimitGrowingStep();
2111 
old_generation_allocation_limit()2112   size_t old_generation_allocation_limit() const {
2113     return old_generation_allocation_limit_;
2114   }
2115 
always_allocate()2116   bool always_allocate() { return always_allocate_scope_count_ != 0; }
2117 
2118   bool CanExpandOldGeneration(size_t size);
2119 
2120   bool ShouldExpandOldGenerationOnSlowAllocation();
2121 
2122   enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
2123   IncrementalMarkingLimit IncrementalMarkingLimitReached();
2124 
2125   // ===========================================================================
2126   // Idle notification. ========================================================
2127   // ===========================================================================
2128 
2129   bool RecentIdleNotificationHappened();
2130   void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
2131 
2132   // ===========================================================================
2133   // HeapIterator helpers. =====================================================
2134   // ===========================================================================
2135 
heap_iterator_start()2136   void heap_iterator_start() { heap_iterator_depth_++; }
2137 
heap_iterator_end()2138   void heap_iterator_end() { heap_iterator_depth_--; }
2139 
in_heap_iterator()2140   bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
2141 
2142   // ===========================================================================
2143   // Allocation methods. =======================================================
2144   // ===========================================================================
2145 
2146   // Allocates a JS Map in the heap.
2147   V8_WARN_UNUSED_RESULT AllocationResult
2148   AllocateMap(InstanceType instance_type, int instance_size,
2149               ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
2150               int inobject_properties = 0);
2151 
2152   // Allocate an uninitialized object.  The memory is non-executable if the
2153   // hardware and OS allow.  This is the single choke-point for allocations
2154   // performed by the runtime and should not be bypassed (to extend this to
2155   // inlined allocations, use the Heap::DisableInlineAllocation() support).
2156   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
2157       int size_in_bytes, AllocationSpace space,
2158       AllocationAlignment aligment = kWordAligned);
2159 
2160   // This method will try to perform an allocation of a given size in a given
2161   // space. If the allocation fails, a regular full garbage collection is
2162   // triggered and the allocation is retried. This is performed multiple times.
2163   // If after that retry procedure the allocation still fails nullptr is
2164   // returned.
2165   HeapObject* AllocateRawWithLigthRetry(
2166       int size, AllocationSpace space,
2167       AllocationAlignment alignment = kWordAligned);
2168 
2169   // This method will try to perform an allocation of a given size in a given
2170   // space. If the allocation fails, a regular full garbage collection is
2171   // triggered and the allocation is retried. This is performed multiple times.
2172   // If after that retry procedure the allocation still fails a "hammer"
2173   // garbage collection is triggered which tries to significantly reduce memory.
2174   // If the allocation still fails after that a fatal error is thrown.
2175   HeapObject* AllocateRawWithRetryOrFail(
2176       int size, AllocationSpace space,
2177       AllocationAlignment alignment = kWordAligned);
2178   HeapObject* AllocateRawCodeInLargeObjectSpace(int size);
2179 
2180   // Allocates a heap object based on the map.
2181   V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map* map,
2182                                                   AllocationSpace space);
2183 
2184   // Takes a code object and checks if it is on memory which is not subject to
2185   // compaction. This method will return a new code object on an immovable
2186   // memory location if the original code object was movable.
2187   HeapObject* EnsureImmovableCode(HeapObject* heap_object, int object_size);
2188 
2189   // Allocates a partial map for bootstrapping.
2190   V8_WARN_UNUSED_RESULT AllocationResult
2191   AllocatePartialMap(InstanceType instance_type, int instance_size);
2192 
2193   void FinalizePartialMap(Map* map);
2194 
2195   // Allocate empty fixed typed array of given type.
2196   V8_WARN_UNUSED_RESULT AllocationResult
2197   AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
2198 
set_force_oom(bool value)2199   void set_force_oom(bool value) { force_oom_ = value; }
2200 
2201   // ===========================================================================
2202   // Retaining path tracing ====================================================
2203   // ===========================================================================
2204 
2205   void AddRetainer(HeapObject* retainer, HeapObject* object);
2206   void AddEphemeralRetainer(HeapObject* retainer, HeapObject* object);
2207   void AddRetainingRoot(Root root, HeapObject* object);
2208   // Returns true if the given object is a target of retaining path tracking.
2209   // Stores the option corresponding to the object in the provided *option.
2210   bool IsRetainingPathTarget(HeapObject* object, RetainingPathOption* option);
2211   void PrintRetainingPath(HeapObject* object, RetainingPathOption option);
2212 
2213   // The amount of external memory registered through the API.
2214   int64_t external_memory_;
2215 
2216   // The limit when to trigger memory pressure from the API.
2217   int64_t external_memory_limit_;
2218 
2219   // Caches the amount of external memory registered at the last MC.
2220   int64_t external_memory_at_last_mark_compact_;
2221 
2222   // The amount of memory that has been freed concurrently.
2223   std::atomic<intptr_t> external_memory_concurrently_freed_;
2224 
2225   // This can be calculated directly from a pointer to the heap; however, it is
2226   // more expedient to get at the isolate directly from within Heap methods.
2227   Isolate* isolate_;
2228 
2229   Object* roots_[kRootListLength];
2230 
2231   // This table is accessed from builtin code compiled into the snapshot, and
2232   // thus its offset from roots_ must remain static. This is verified in
2233   // Isolate::Init() using runtime checks.
2234   static constexpr int kRootsExternalReferenceTableOffset =
2235       kRootListLength * kPointerSize;
2236   ExternalReferenceTable external_reference_table_;
2237 
2238   size_t code_range_size_;
2239   size_t max_semi_space_size_;
2240   size_t initial_semispace_size_;
2241   size_t max_old_generation_size_;
2242   size_t initial_max_old_generation_size_;
2243   size_t initial_old_generation_size_;
2244   bool old_generation_size_configured_;
2245   size_t maximum_committed_;
2246 
2247   // For keeping track of how much data has survived
2248   // scavenge since last new space expansion.
2249   size_t survived_since_last_expansion_;
2250 
2251   // ... and since the last scavenge.
2252   size_t survived_last_scavenge_;
2253 
2254   // This is not the depth of nested AlwaysAllocateScope's but rather a single
2255   // count, as scopes can be acquired from multiple tasks (read: threads).
2256   std::atomic<size_t> always_allocate_scope_count_;
2257 
2258   // Stores the memory pressure level that set by MemoryPressureNotification
2259   // and reset by a mark-compact garbage collection.
2260   base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
2261 
2262   std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
2263       near_heap_limit_callbacks_;
2264 
2265   // For keeping track of context disposals.
2266   int contexts_disposed_;
2267 
2268   // The length of the retained_maps array at the time of context disposal.
2269   // This separates maps in the retained_maps array that were created before
2270   // and after context disposal.
2271   int number_of_disposed_maps_;
2272 
2273   NewSpace* new_space_;
2274   OldSpace* old_space_;
2275   CodeSpace* code_space_;
2276   MapSpace* map_space_;
2277   LargeObjectSpace* lo_space_;
2278   ReadOnlySpace* read_only_space_;
2279   // Map from the space id to the space.
2280   Space* space_[LAST_SPACE + 1];
2281 
2282   // Determines whether code space is write-protected. This is essentially a
2283   // race-free copy of the {FLAG_write_protect_code_memory} flag.
2284   bool write_protect_code_memory_;
2285 
2286   // Holds the number of open CodeSpaceMemoryModificationScopes.
2287   uintptr_t code_space_memory_modification_scope_depth_;
2288 
2289   HeapState gc_state_;
2290   int gc_post_processing_depth_;
2291 
2292   // Returns the amount of external memory registered since last global gc.
2293   uint64_t PromotedExternalMemorySize();
2294 
2295   // How many "runtime allocations" happened.
2296   uint32_t allocations_count_;
2297 
2298   // Running hash over allocations performed.
2299   uint32_t raw_allocations_hash_;
2300 
2301   // Starts marking when stress_marking_percentage_% of the marking start limit
2302   // is reached.
2303   int stress_marking_percentage_;
2304 
2305   // Observer that causes more frequent checks for reached incremental marking
2306   // limit.
2307   AllocationObserver* stress_marking_observer_;
2308 
2309   // Observer that can cause early scavenge start.
2310   StressScavengeObserver* stress_scavenge_observer_;
2311 
2312   bool allocation_step_in_progress_;
2313 
2314   // The maximum percent of the marking limit reached wihout causing marking.
2315   // This is tracked when specyfing --fuzzer-gc-analysis.
2316   double max_marking_limit_reached_;
2317 
2318   // How many mark-sweep collections happened.
2319   unsigned int ms_count_;
2320 
2321   // How many gc happened.
2322   unsigned int gc_count_;
2323 
2324   // The number of Mark-Compact garbage collections that are considered as
2325   // ineffective. See IsIneffectiveMarkCompact() predicate.
2326   int consecutive_ineffective_mark_compacts_;
2327 
2328   static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
2329   uintptr_t mmap_region_base_;
2330 
2331   // For post mortem debugging.
2332   int remembered_unmapped_pages_index_;
2333   Address remembered_unmapped_pages_[kRememberedUnmappedPages];
2334 
2335   // Limit that triggers a global GC on the next (normally caused) GC.  This
2336   // is checked when we have already decided to do a GC to help determine
2337   // which collector to invoke, before expanding a paged space in the old
2338   // generation and on every allocation in large object space.
2339   size_t old_generation_allocation_limit_;
2340 
2341   // Indicates that inline bump-pointer allocation has been globally disabled
2342   // for all spaces. This is used to disable allocations in generated code.
2343   bool inline_allocation_disabled_;
2344 
2345   // Weak list heads, threaded through the objects.
2346   // List heads are initialized lazily and contain the undefined_value at start.
2347   Object* native_contexts_list_;
2348   Object* allocation_sites_list_;
2349 
2350   // List of encountered weak collections (JSWeakMap and JSWeakSet) during
2351   // marking. It is initialized during marking, destroyed after marking and
2352   // contains Smi(0) while marking is not active.
2353   Object* encountered_weak_collections_;
2354 
2355   std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
2356   std::vector<GCCallbackTuple> gc_prologue_callbacks_;
2357 
2358   GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;
2359 
2360   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
2361 
2362   GCTracer* tracer_;
2363 
2364   size_t promoted_objects_size_;
2365   double promotion_ratio_;
2366   double promotion_rate_;
2367   size_t semi_space_copied_object_size_;
2368   size_t previous_semi_space_copied_object_size_;
2369   double semi_space_copied_rate_;
2370   int nodes_died_in_new_space_;
2371   int nodes_copied_in_new_space_;
2372   int nodes_promoted_;
2373 
2374   // This is the pretenuring trigger for allocation sites that are in maybe
2375   // tenure state. When we switched to the maximum new space size we deoptimize
2376   // the code that belongs to the allocation site and derive the lifetime
2377   // of the allocation site.
2378   unsigned int maximum_size_scavenges_;
2379 
2380   // Total time spent in GC.
2381   double total_gc_time_ms_;
2382 
2383   // Last time an idle notification happened.
2384   double last_idle_notification_time_;
2385 
2386   // Last time a garbage collection happened.
2387   double last_gc_time_;
2388 
2389   MarkCompactCollector* mark_compact_collector_;
2390   MinorMarkCompactCollector* minor_mark_compact_collector_;
2391 
2392   ArrayBufferCollector* array_buffer_collector_;
2393 
2394   MemoryAllocator* memory_allocator_;
2395 
2396   StoreBuffer* store_buffer_;
2397 
2398   IncrementalMarking* incremental_marking_;
2399   ConcurrentMarking* concurrent_marking_;
2400 
2401   GCIdleTimeHandler* gc_idle_time_handler_;
2402 
2403   MemoryReducer* memory_reducer_;
2404 
2405   ObjectStats* live_object_stats_;
2406   ObjectStats* dead_object_stats_;
2407 
2408   ScavengeJob* scavenge_job_;
2409   base::Semaphore parallel_scavenge_semaphore_;
2410 
2411   AllocationObserver* idle_scavenge_observer_;
2412 
2413   // This counter is increased before each GC and never reset.
2414   // To account for the bytes allocated since the last GC, use the
2415   // NewSpaceAllocationCounter() function.
2416   size_t new_space_allocation_counter_;
2417 
2418   // This counter is increased before each GC and never reset. To
2419   // account for the bytes allocated since the last GC, use the
2420   // OldGenerationAllocationCounter() function.
2421   size_t old_generation_allocation_counter_at_last_gc_;
2422 
2423   // The size of objects in old generation after the last MarkCompact GC.
2424   size_t old_generation_size_at_last_gc_;
2425 
2426   // The feedback storage is used to store allocation sites (keys) and how often
2427   // they have been visited (values) by finding a memento behind an object. The
2428   // storage is only alive temporary during a GC. The invariant is that all
2429   // pointers in this map are already fixed, i.e., they do not point to
2430   // forwarding pointers.
2431   PretenuringFeedbackMap global_pretenuring_feedback_;
2432 
2433   char trace_ring_buffer_[kTraceRingBufferSize];
2434 
2435   // Used as boolean.
2436   uint8_t is_marking_flag_;
2437 
2438   // If it's not full then the data is from 0 to ring_buffer_end_.  If it's
2439   // full then the data is from ring_buffer_end_ to the end of the buffer and
2440   // from 0 to ring_buffer_end_.
2441   bool ring_buffer_full_;
2442   size_t ring_buffer_end_;
2443 
2444   // Flag is set when the heap has been configured.  The heap can be repeatedly
2445   // configured through the API until it is set up.
2446   bool configured_;
2447 
2448   // Currently set GC flags that are respected by all GC components.
2449   int current_gc_flags_;
2450 
2451   // Currently set GC callback flags that are used to pass information between
2452   // the embedder and V8's GC.
2453   GCCallbackFlags current_gc_callback_flags_;
2454 
2455   ExternalStringTable external_string_table_;
2456 
2457   base::Mutex relocation_mutex_;
2458 
2459   int gc_callbacks_depth_;
2460 
2461   bool deserialization_complete_;
2462 
2463   StrongRootsList* strong_roots_list_;
2464 
2465   // The depth of HeapIterator nestings.
2466   int heap_iterator_depth_;
2467 
2468   LocalEmbedderHeapTracer* local_embedder_heap_tracer_;
2469 
2470   bool fast_promotion_mode_;
2471 
2472   // Used for testing purposes.
2473   bool force_oom_;
2474   bool delay_sweeper_tasks_for_testing_;
2475 
2476   HeapObject* pending_layout_change_object_;
2477 
2478   base::Mutex unprotected_memory_chunks_mutex_;
2479   std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
2480   bool unprotected_memory_chunks_registry_enabled_;
2481 
2482 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
2483   // If the --gc-interval flag is set to a positive value, this
2484   // variable holds the value indicating the number of allocations
2485   // remain until the next failure and garbage collection.
2486   int allocation_timeout_;
2487 #endif  // V8_ENABLE_ALLOCATION_TIMEOUT
2488 
2489   std::map<HeapObject*, HeapObject*> retainer_;
2490   std::map<HeapObject*, Root> retaining_root_;
2491   // If an object is retained by an ephemeron, then the retaining key of the
2492   // ephemeron is stored in this map.
2493   std::map<HeapObject*, HeapObject*> ephemeral_retainer_;
2494   // For each index inthe retaining_path_targets_ array this map
2495   // stores the option of the corresponding target.
2496   std::map<int, RetainingPathOption> retaining_path_target_option_;
2497 
2498   std::vector<HeapObjectAllocationTracker*> allocation_trackers_;
2499 
2500   // Classes in "heap" can be friends.
2501   friend class AlwaysAllocateScope;
2502   friend class ConcurrentMarking;
2503   friend class GCCallbacksScope;
2504   friend class GCTracer;
2505   friend class HeapIterator;
2506   friend class IdleScavengeObserver;
2507   friend class IncrementalMarking;
2508   friend class IncrementalMarkingJob;
2509   friend class LargeObjectSpace;
2510   template <FixedArrayVisitationMode fixed_array_mode,
2511             TraceRetainingPathMode retaining_path_mode, typename MarkingState>
2512   friend class MarkingVisitor;
2513   friend class MarkCompactCollector;
2514   friend class MarkCompactCollectorBase;
2515   friend class MinorMarkCompactCollector;
2516   friend class NewSpace;
2517   friend class ObjectStatsCollector;
2518   friend class Page;
2519   friend class PagedSpace;
2520   friend class Scavenger;
2521   friend class StoreBuffer;
2522   friend class Sweeper;
2523   friend class heap::TestMemoryAllocatorScope;
2524 
2525   // The allocator interface.
2526   friend class Factory;
2527 
2528   // The Isolate constructs us.
2529   friend class Isolate;
2530 
2531   // Used in cctest.
2532   friend class heap::HeapTester;
2533 
2534   DISALLOW_COPY_AND_ASSIGN(Heap);
2535 };
2536 
2537 
2538 class HeapStats {
2539  public:
2540   static const int kStartMarker = 0xDECADE00;
2541   static const int kEndMarker = 0xDECADE01;
2542 
2543   intptr_t* start_marker;                  //  0
2544   size_t* ro_space_size;                   //  1
2545   size_t* ro_space_capacity;               //  2
2546   size_t* new_space_size;                  //  3
2547   size_t* new_space_capacity;              //  4
2548   size_t* old_space_size;                  //  5
2549   size_t* old_space_capacity;              //  6
2550   size_t* code_space_size;                 //  7
2551   size_t* code_space_capacity;             //  8
2552   size_t* map_space_size;                  //  9
2553   size_t* map_space_capacity;              // 10
2554   size_t* lo_space_size;                   // 11
2555   size_t* global_handle_count;             // 12
2556   size_t* weak_global_handle_count;        // 13
2557   size_t* pending_global_handle_count;     // 14
2558   size_t* near_death_global_handle_count;  // 15
2559   size_t* free_global_handle_count;        // 16
2560   size_t* memory_allocator_size;           // 17
2561   size_t* memory_allocator_capacity;       // 18
2562   size_t* malloced_memory;                 // 19
2563   size_t* malloced_peak_memory;            // 20
2564   size_t* objects_per_type;                // 21
2565   size_t* size_per_type;                   // 22
2566   int* os_error;                           // 23
2567   char* last_few_messages;                 // 24
2568   char* js_stacktrace;                     // 25
2569   intptr_t* end_marker;                    // 26
2570 };
2571 
2572 
2573 class AlwaysAllocateScope {
2574  public:
2575   explicit inline AlwaysAllocateScope(Isolate* isolate);
2576   inline ~AlwaysAllocateScope();
2577 
2578  private:
2579   Heap* heap_;
2580 };
2581 
2582 // The CodeSpaceMemoryModificationScope can only be used by the main thread.
2583 class CodeSpaceMemoryModificationScope {
2584  public:
2585   explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
2586   inline ~CodeSpaceMemoryModificationScope();
2587 
2588  private:
2589   Heap* heap_;
2590 };
2591 
2592 // The CodePageCollectionMemoryModificationScope can only be used by the main
2593 // thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
2594 // already active.
2595 class CodePageCollectionMemoryModificationScope {
2596  public:
2597   explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
2598   inline ~CodePageCollectionMemoryModificationScope();
2599 
2600  private:
2601   Heap* heap_;
2602 };
2603 
2604 // The CodePageMemoryModificationScope does not check if tansitions to
2605 // writeable and back to executable are actually allowed, i.e. the MemoryChunk
2606 // was registered to be executable. It can be used by concurrent threads.
2607 class CodePageMemoryModificationScope {
2608  public:
2609   explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
2610   inline ~CodePageMemoryModificationScope();
2611 
2612  private:
2613   MemoryChunk* chunk_;
2614   bool scope_active_;
2615 
2616   // Disallow any GCs inside this scope, as a relocation of the underlying
2617   // object would change the {MemoryChunk} that this scope targets.
2618   DisallowHeapAllocation no_heap_allocation_;
2619 };
2620 
2621 // Visitor class to verify interior pointers in spaces that do not contain
2622 // or care about intergenerational references. All heap object pointers have to
2623 // point into the heap to a location that has a map pointer at its first word.
2624 // Caveat: Heap::Contains is an approximation because it can return true for
2625 // objects in a heap space but above the allocation pointer.
2626 class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
2627  public:
2628   void VisitPointers(HeapObject* host, Object** start, Object** end) override;
2629   void VisitPointers(HeapObject* host, MaybeObject** start,
2630                      MaybeObject** end) override;
2631   void VisitRootPointers(Root root, const char* description, Object** start,
2632                          Object** end) override;
2633 
2634  protected:
2635   virtual void VerifyPointers(HeapObject* host, MaybeObject** start,
2636                               MaybeObject** end);
2637 };
2638 
2639 
2640 // Verify that all objects are Smis.
2641 class VerifySmisVisitor : public RootVisitor {
2642  public:
2643   void VisitRootPointers(Root root, const char* description, Object** start,
2644                          Object** end) override;
2645 };
2646 
2647 // Space iterator for iterating over all the paged spaces of the heap: Map
2648 // space, old space, code space and optionally read only space. Returns each
2649 // space in turn, and null when it is done.
2650 class V8_EXPORT_PRIVATE PagedSpaces BASE_EMBEDDED {
2651  public:
2652   enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };
2653 
2654   explicit PagedSpaces(Heap* heap, SpacesSpecifier specifier =
2655                                        SpacesSpecifier::kSweepablePagedSpaces)
heap_(heap)2656       : heap_(heap),
2657         counter_(specifier == SpacesSpecifier::kAllPagedSpaces ? RO_SPACE
2658                                                                : OLD_SPACE) {}
2659   PagedSpace* next();
2660 
2661  private:
2662   Heap* heap_;
2663   int counter_;
2664 };
2665 
2666 
2667 class SpaceIterator : public Malloced {
2668  public:
2669   explicit SpaceIterator(Heap* heap);
2670   virtual ~SpaceIterator();
2671 
2672   bool has_next();
2673   Space* next();
2674 
2675  private:
2676   Heap* heap_;
2677   int current_space_;         // from enum AllocationSpace.
2678 };
2679 
2680 
2681 // A HeapIterator provides iteration over the whole heap. It
2682 // aggregates the specific iterators for the different spaces as
2683 // these can only iterate over one space only.
2684 //
2685 // HeapIterator ensures there is no allocation during its lifetime
2686 // (using an embedded DisallowHeapAllocation instance).
2687 //
2688 // HeapIterator can skip free list nodes (that is, de-allocated heap
2689 // objects that still remain in the heap). As implementation of free
2690 // nodes filtering uses GC marks, it can't be used during MS/MC GC
2691 // phases. Also, it is forbidden to interrupt iteration in this mode,
2692 // as this will leave heap objects marked (and thus, unusable).
2693 class HeapIterator BASE_EMBEDDED {
2694  public:
2695   enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
2696 
2697   explicit HeapIterator(Heap* heap,
2698                         HeapObjectsFiltering filtering = kNoFiltering);
2699   ~HeapIterator();
2700 
2701   HeapObject* next();
2702 
2703  private:
2704   HeapObject* NextObject();
2705 
2706   DisallowHeapAllocation no_heap_allocation_;
2707 
2708   Heap* heap_;
2709   HeapObjectsFiltering filtering_;
2710   HeapObjectsFilter* filter_;
2711   // Space iterator for iterating all the spaces.
2712   SpaceIterator* space_iterator_;
2713   // Object iterator for the space currently being iterated.
2714   std::unique_ptr<ObjectIterator> object_iterator_;
2715 };
2716 
2717 // Abstract base class for checking whether a weak object should be retained.
2718 class WeakObjectRetainer {
2719  public:
~WeakObjectRetainer()2720   virtual ~WeakObjectRetainer() {}
2721 
2722   // Return whether this object should be retained. If nullptr is returned the
2723   // object has no references. Otherwise the address of the retained object
2724   // should be returned as in some GC situations the object has been moved.
2725   virtual Object* RetainAs(Object* object) = 0;
2726 };
2727 
2728 // -----------------------------------------------------------------------------
2729 // Allows observation of allocations.
2730 class AllocationObserver {
2731  public:
AllocationObserver(intptr_t step_size)2732   explicit AllocationObserver(intptr_t step_size)
2733       : step_size_(step_size), bytes_to_next_step_(step_size) {
2734     DCHECK_LE(kPointerSize, step_size);
2735   }
~AllocationObserver()2736   virtual ~AllocationObserver() {}
2737 
2738   // Called each time the observed space does an allocation step. This may be
2739   // more frequently than the step_size we are monitoring (e.g. when there are
2740   // multiple observers, or when page or space boundary is encountered.)
2741   void AllocationStep(int bytes_allocated, Address soon_object, size_t size);
2742 
2743  protected:
step_size()2744   intptr_t step_size() const { return step_size_; }
bytes_to_next_step()2745   intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
2746 
2747   // Pure virtual method provided by the subclasses that gets called when at
2748   // least step_size bytes have been allocated. soon_object is the address just
2749   // allocated (but not yet initialized.) size is the size of the object as
2750   // requested (i.e. w/o the alignment fillers). Some complexities to be aware
2751   // of:
2752   // 1) soon_object will be nullptr in cases where we end up observing an
2753   //    allocation that happens to be a filler space (e.g. page boundaries.)
2754   // 2) size is the requested size at the time of allocation. Right-trimming
2755   //    may change the object size dynamically.
2756   // 3) soon_object may actually be the first object in an allocation-folding
2757   //    group. In such a case size is the size of the group rather than the
2758   //    first object.
2759   virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
2760 
2761   // Subclasses can override this method to make step size dynamic.
GetNextStepSize()2762   virtual intptr_t GetNextStepSize() { return step_size_; }
2763 
2764   intptr_t step_size_;
2765   intptr_t bytes_to_next_step_;
2766 
2767  private:
2768   friend class Space;
2769   DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
2770 };
2771 
2772 V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
2773 
2774 // -----------------------------------------------------------------------------
2775 // Allows observation of heap object allocations.
2776 class HeapObjectAllocationTracker {
2777  public:
2778   virtual void AllocationEvent(Address addr, int size) = 0;
MoveEvent(Address from,Address to,int size)2779   virtual void MoveEvent(Address from, Address to, int size) {}
UpdateObjectSizeEvent(Address addr,int size)2780   virtual void UpdateObjectSizeEvent(Address addr, int size) {}
2781   virtual ~HeapObjectAllocationTracker() = default;
2782 };
2783 
2784 }  // namespace internal
2785 }  // namespace v8
2786 
2787 #endif  // V8_HEAP_HEAP_H_
2788