1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_EXECUTION_ISOLATE_H_
6 #define V8_EXECUTION_ISOLATE_H_
7 
8 #include <atomic>
9 #include <cstddef>
10 #include <functional>
11 #include <memory>
12 #include <queue>
13 #include <unordered_map>
14 #include <vector>
15 
16 #include "include/v8-context.h"
17 #include "include/v8-internal.h"
18 #include "include/v8-isolate.h"
19 #include "include/v8-metrics.h"
20 #include "include/v8-snapshot.h"
21 #include "src/base/macros.h"
22 #include "src/base/platform/mutex.h"
23 #include "src/builtins/builtins.h"
24 #include "src/common/globals.h"
25 #include "src/debug/interface-types.h"
26 #include "src/execution/execution.h"
27 #include "src/execution/external-pointer-table.h"
28 #include "src/execution/futex-emulation.h"
29 #include "src/execution/isolate-data.h"
30 #include "src/execution/messages.h"
31 #include "src/execution/shared-mutex-guard-if-off-thread.h"
32 #include "src/execution/stack-guard.h"
33 #include "src/handles/handles.h"
34 #include "src/heap/factory.h"
35 #include "src/heap/heap.h"
36 #include "src/heap/read-only-heap.h"
37 #include "src/init/isolate-allocator.h"
38 #include "src/init/vm-cage.h"
39 #include "src/objects/code.h"
40 #include "src/objects/contexts.h"
41 #include "src/objects/debug-objects.h"
42 #include "src/runtime/runtime.h"
43 #include "src/strings/unicode.h"
44 #include "src/utils/allocation.h"
45 
46 #ifdef V8_INTL_SUPPORT
47 #include "unicode/uversion.h"  // Define U_ICU_NAMESPACE.
48 namespace U_ICU_NAMESPACE {
49 class UMemory;
50 }  // namespace U_ICU_NAMESPACE
51 #endif  // V8_INTL_SUPPORT
52 
53 namespace v8_inspector {
54 class V8Inspector;
55 }  // namespace v8_inspector
56 
57 namespace v8 {
58 
59 namespace base {
60 class RandomNumberGenerator;
61 }  // namespace base
62 
63 namespace bigint {
64 class Processor;
65 }
66 
67 namespace debug {
68 class ConsoleDelegate;
69 class AsyncEventDelegate;
70 }  // namespace debug
71 
72 namespace internal {
73 
74 namespace heap {
75 class HeapTester;
76 }  // namespace heap
77 
78 class AddressToIndexHashMap;
79 class AstStringConstants;
80 class Bootstrapper;
81 class BuiltinsConstantsTableBuilder;
82 class CancelableTaskManager;
83 class CodeEventDispatcher;
84 class CodeTracer;
85 class CommonFrame;
86 class CompilationCache;
87 class CompilationStatistics;
88 class Counters;
89 class Debug;
90 class Deoptimizer;
91 class DescriptorLookupCache;
92 class EmbeddedFileWriterInterface;
93 class EternalHandles;
94 class HandleScopeImplementer;
95 class HeapObjectToIndexHashMap;
96 class HeapProfiler;
97 class GlobalHandles;
98 class InnerPointerToCodeCache;
99 class LazyCompileDispatcher;
100 class LocalIsolate;
101 class Logger;
102 class MaterializedObjectStore;
103 class Microtask;
104 class MicrotaskQueue;
105 class OptimizingCompileDispatcher;
106 class PersistentHandles;
107 class PersistentHandlesList;
108 class ReadOnlyArtifacts;
109 class RegExpStack;
110 class RootVisitor;
111 class RuntimeProfiler;
112 class SetupIsolateDelegate;
113 class Simulator;
114 class SnapshotData;
115 class StringTable;
116 class StubCache;
117 class ThreadManager;
118 class ThreadState;
119 class ThreadVisitor;  // Defined in v8threads.h
120 class TracingCpuProfilerImpl;
121 class UnicodeCache;
122 struct ManagedPtrDestructor;
123 
124 template <StateTag Tag>
125 class VMState;
126 
127 namespace baseline {
128 class BaselineBatchCompiler;
129 }  // namespace baseline
130 
131 namespace interpreter {
132 class Interpreter;
133 }  // namespace interpreter
134 
135 namespace compiler {
136 class NodeObserver;
137 class PerIsolateCompilerCache;
138 }  // namespace compiler
139 
140 namespace win64_unwindinfo {
141 class BuiltinUnwindInfo;
142 }  // namespace win64_unwindinfo
143 
144 namespace metrics {
145 class Recorder;
146 }  // namespace metrics
147 
148 #define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
149   do {                                                 \
150     Isolate* __isolate__ = (isolate);                  \
151     DCHECK(!__isolate__->has_pending_exception());     \
152     if (__isolate__->has_scheduled_exception()) {      \
153       return __isolate__->PromoteScheduledException(); \
154     }                                                  \
155   } while (false)
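// Illustrative sketch only (added comment, not part of the upstream header):
// a hypothetical Object-returning runtime function would typically bail out on
// a scheduled exception like this; Runtime_HypotheticalOp is a made-up name.
//
//   RUNTIME_FUNCTION(Runtime_HypotheticalOp) {
//     HandleScope scope(isolate);
//     ...
//     RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
//     return ReadOnlyRoots(isolate).undefined_value();
//   }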
156 
157 // Macros for MaybeHandle.
158 
159 #define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
160   do {                                                      \
161     Isolate* __isolate__ = (isolate);                       \
162     DCHECK(!__isolate__->has_pending_exception());          \
163     if (__isolate__->has_scheduled_exception()) {           \
164       __isolate__->PromoteScheduledException();             \
165       return value;                                         \
166     }                                                       \
167   } while (false)
168 
169 #define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
170   RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
171 
172 #define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
173   do {                                                                        \
174     Isolate* __isolate__ = (isolate);                                         \
175     if (!(call).ToLocal(&dst)) {                                              \
176       DCHECK(__isolate__->has_scheduled_exception());                         \
177       __isolate__->PromoteScheduledException();                               \
178       return value;                                                           \
179     }                                                                         \
180   } while (false)
181 
182 #define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
183   do {                                                            \
184     Isolate* __isolate__ = (isolate);                             \
185     if ((call).IsNothing()) {                                     \
186       DCHECK(__isolate__->has_scheduled_exception());             \
187       __isolate__->PromoteScheduledException();                   \
188       return value;                                               \
189     }                                                             \
190   } while (false)
191 
192 /**
193  * RETURN_RESULT_OR_FAILURE is used in functions with return type Object (such
194  * as "RUNTIME_FUNCTION(...) {...}" or "BUILTIN(...) {...}" ) to return either
195  * the contents of a MaybeHandle<X>, or the "exception" sentinel value.
196  * Example usage:
197  *
198  * RUNTIME_FUNCTION(Runtime_Func) {
199  *   ...
200  *   RETURN_RESULT_OR_FAILURE(
201  *       isolate,
202  *       FunctionWithReturnTypeMaybeHandleX(...));
203  * }
204  *
205  * If inside a function with return type MaybeHandle<X> use RETURN_ON_EXCEPTION
206  * instead.
207  * If inside a function with return type Handle<X>, or Maybe<X> use
208  * RETURN_ON_EXCEPTION_VALUE instead.
209  */
210 #define RETURN_RESULT_OR_FAILURE(isolate, call)      \
211   do {                                               \
212     Handle<Object> __result__;                       \
213     Isolate* __isolate__ = (isolate);                \
214     if (!(call).ToHandle(&__result__)) {             \
215       DCHECK(__isolate__->has_pending_exception());  \
216       return ReadOnlyRoots(__isolate__).exception(); \
217     }                                                \
218     DCHECK(!__isolate__->has_pending_exception());   \
219     return *__result__;                              \
220   } while (false)
221 
222 #define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
223   do {                                                              \
224     if (!(call).ToHandle(&dst)) {                                   \
225       DCHECK((isolate)->has_pending_exception());                   \
226       return value;                                                 \
227     }                                                               \
228   } while (false)
229 
230 #define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)                \
231   do {                                                                        \
232     auto* __isolate__ = (isolate);                                            \
233     ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,                  \
234                                      ReadOnlyRoots(__isolate__).exception()); \
235   } while (false)
236 
237 #define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
238   ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
239 
240 #define THROW_NEW_ERROR(isolate, call, T)                                \
241   do {                                                                   \
242     auto* __isolate__ = (isolate);                                       \
243     return __isolate__->template Throw<T>(__isolate__->factory()->call); \
244   } while (false)
245 
246 #define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
247   do {                                                        \
248     auto* __isolate__ = (isolate);                            \
249     return __isolate__->Throw(*__isolate__->factory()->call); \
250   } while (false)
251 
252 #define THROW_NEW_ERROR_RETURN_VALUE(isolate, call, value) \
253   do {                                                     \
254     auto* __isolate__ = (isolate);                         \
255     __isolate__->Throw(*__isolate__->factory()->call);     \
256     return value;                                          \
257   } while (false)
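// Illustrative sketch only (added comment, not part of the upstream header):
// a hypothetical builtin might raise and propagate a TypeError as the failure
// sentinel roughly like this; the builtin name and message template are just
// examples.
//
//   BUILTIN(HypotheticalBuiltin) {
//     HandleScope scope(isolate);
//     if (!args.receiver()->IsJSReceiver()) {
//       THROW_NEW_ERROR_RETURN_FAILURE(
//           isolate, NewTypeError(MessageTemplate::kCalledOnNonObject));
//     }
//     ...
//   }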
258 
259 /**
260  * RETURN_ON_EXCEPTION_VALUE conditionally returns the given value when the
261  * given MaybeHandle is empty. It is typically used in functions with return
262  * type Maybe<X> or Handle<X>. Example usage:
263  *
264  * Handle<X> Func() {
265  *   ...
266  *   RETURN_ON_EXCEPTION_VALUE(
267  *       isolate,
268  *       FunctionWithReturnTypeMaybeHandleX(...),
269  *       Handle<X>());
270  *   // code to handle non exception
271  *   ...
272  * }
273  *
274  * Maybe<bool> Func() {
275  *   ..
276  *   RETURN_ON_EXCEPTION_VALUE(
277  *       isolate,
278  *       FunctionWithReturnTypeMaybeHandleX(...),
279  *       Nothing<bool>());
280  *   // code to handle non exception
281  *   return Just(true);
282  * }
283  *
284  * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
285  * instead.
286  * If inside a function with return type Object, use
287  * RETURN_FAILURE_ON_EXCEPTION instead.
288  */
289 #define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
290   do {                                                  \
291     if ((call).is_null()) {                             \
292       DCHECK((isolate)->has_pending_exception());       \
293       return value;                                     \
294     }                                                   \
295   } while (false)
296 
297 /**
298  * RETURN_FAILURE_ON_EXCEPTION conditionally returns the "exception" sentinel if
299  * the given MaybeHandle is empty; so it can only be used in functions with
300  * return type Object, such as RUNTIME_FUNCTION(...) {...} or BUILTIN(...)
301  * {...}. Example usage:
302  *
303  * RUNTIME_FUNCTION(Runtime_Func) {
304  *   ...
305  *   RETURN_FAILURE_ON_EXCEPTION(
306  *       isolate,
307  *       FunctionWithReturnTypeMaybeHandleX(...));
308  *   // code to handle non exception
309  *   ...
310  * }
311  *
312  * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
313  * instead.
314  * If inside a function with return type Maybe<X> or Handle<X>, use
315  * RETURN_ON_EXCEPTION_VALUE instead.
316  */
317 #define RETURN_FAILURE_ON_EXCEPTION(isolate, call)                     \
318   do {                                                                 \
319     Isolate* __isolate__ = (isolate);                                  \
320     RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                       \
321                               ReadOnlyRoots(__isolate__).exception()); \
322   } while (false);
323 
324 /**
325  * RETURN_ON_EXCEPTION conditionally returns an empty MaybeHandle<T> if the
326  * given MaybeHandle is empty. Use it to return immediately from a function with
327  * return type MaybeHandle when an exception was thrown. Example usage:
328  *
329  * MaybeHandle<X> Func() {
330  *   ...
331  *   RETURN_ON_EXCEPTION(
332  *       isolate,
333  *       FunctionWithReturnTypeMaybeHandleY(...),
334  *       X);
335  *   // code to handle non exception
336  *   ...
337  * }
338  *
339  * If inside a function with return type Object, use
340  * RETURN_FAILURE_ON_EXCEPTION instead.
341  * If inside a function with return type
342  * Maybe<X> or Handle<X>, use RETURN_ON_EXCEPTION_VALUE instead.
343  */
344 #define RETURN_ON_EXCEPTION(isolate, call, T) \
345   RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
346 
347 #define RETURN_FAILURE(isolate, should_throw, call) \
348   do {                                              \
349     if ((should_throw) == kDontThrow) {             \
350       return Just(false);                           \
351     } else {                                        \
352       isolate->Throw(*isolate->factory()->call);    \
353       return Nothing<bool>();                       \
354     }                                               \
355   } while (false)
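// Illustrative sketch only (added comment, not part of the upstream header):
// RETURN_FAILURE fits Maybe<bool>-returning operations that take a ShouldThrow
// argument; all names below are hypothetical.
//
//   Maybe<bool> HypotheticalDefineProperty(Isolate* isolate,
//                                          Handle<Object> property_name,
//                                          ShouldThrow should_throw) {
//     if (/* definition is not allowed */ false) {
//       RETURN_FAILURE(
//           isolate, should_throw,
//           NewTypeError(MessageTemplate::kRedefineDisallowed, property_name));
//     }
//     return Just(true);
//   }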
356 
357 #define MAYBE_RETURN(call, value)         \
358   do {                                    \
359     if ((call).IsNothing()) return value; \
360   } while (false)
361 
362 #define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
363 
364 #define MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
365   do {                                                               \
366     Isolate* __isolate__ = (isolate);                                \
367     if (!(call).To(&dst)) {                                          \
368       DCHECK(__isolate__->has_pending_exception());                  \
369       return ReadOnlyRoots(__isolate__).exception();                 \
370     }                                                                \
371   } while (false)
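// Illustrative sketch only (added comment, not part of the upstream header):
// a runtime function consuming a Maybe<bool> result might combine these macros
// as follows; the helper name is made up.
//
//   RUNTIME_FUNCTION(Runtime_HypotheticalHasProperty) {
//     HandleScope scope(isolate);
//     bool result;
//     MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
//         isolate, result, HypotheticalHasPropertyHelper(isolate, args));
//     return isolate->heap()->ToBoolean(result);
//   }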
372 
373 #define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
374                               limit_check, increment, body)                \
375   do {                                                                     \
376     loop_var_type init;                                                    \
377     loop_var_type for_with_handle_limit = loop_var;                        \
378     Isolate* for_with_handle_isolate = isolate;                            \
379     while (limit_check) {                                                  \
380       for_with_handle_limit += 1024;                                       \
381       HandleScope loop_scope(for_with_handle_isolate);                     \
382       for (; limit_check && loop_var < for_with_handle_limit; increment) { \
383         body                                                               \
384       }                                                                    \
385     }                                                                      \
386   } while (false)
387 
388 #define WHILE_WITH_HANDLE_SCOPE(isolate, limit_check, body)                  \
389   do {                                                                       \
390     Isolate* for_with_handle_isolate = isolate;                              \
391     while (limit_check) {                                                    \
392       HandleScope loop_scope(for_with_handle_isolate);                       \
393       for (int for_with_handle_it = 0;                                       \
394            limit_check && for_with_handle_it < 1024; ++for_with_handle_it) { \
395         body                                                                 \
396       }                                                                      \
397     }                                                                        \
398   } while (false)
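// Illustrative sketch only (added comment, not part of the upstream header):
// both loop macros bound the number of live handles by opening a fresh
// HandleScope every 1024 iterations; a hypothetical use of the FOR variant:
//
//   FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i++, {
//     Handle<Object> element = handle(array->get(i), isolate);
//     // handles created here die when the inner loop_scope is closed
//   });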
399 
400 #define FIELD_ACCESSOR(type, name)                \
401   inline void set_##name(type v) { name##_ = v; } \
402   inline type name() const { return name##_; }
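// For reference (added comment, not part of the upstream header):
// FIELD_ACCESSOR(uintptr_t, stack_limit) expands to
//   inline void set_stack_limit(uintptr_t v) { stack_limit_ = v; }
//   inline uintptr_t stack_limit() const { return stack_limit_; }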
403 
404 // Controls for manual embedded blob lifecycle management, used by tests and
405 // mksnapshot.
406 V8_EXPORT_PRIVATE void DisableEmbeddedBlobRefcounting();
407 V8_EXPORT_PRIVATE void FreeCurrentEmbeddedBlob();
408 
409 #ifdef DEBUG
410 
411 #define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
412   V(CommentStatistic, paged_space_comments_statistics, \
413     CommentStatistic::kMaxComments + 1)                \
414   V(int, code_kind_statistics, kCodeKindCount)
415 #else
416 
417 #define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
418 
419 #endif
420 
421 #define ISOLATE_INIT_ARRAY_LIST(V)                                             \
422   /* SerializerDeserializer state. */                                          \
423   V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
424   V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
425   V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
426   V(int, suffix_table, (kBMMaxShift + 1))                                      \
427   ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
428 
429 using DebugObjectCache = std::vector<Handle<HeapObject>>;
430 
431 #define ISOLATE_INIT_LIST(V)                                                  \
432   /* Assembler state. */                                                      \
433   V(FatalErrorCallback, exception_behavior, nullptr)                          \
434   V(OOMErrorCallback, oom_behavior, nullptr)                                  \
435   V(LogEventCallback, event_logger, nullptr)                                  \
436   V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
437   V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback,        \
438     nullptr)                                                                  \
439   V(ModifyCodeGenerationFromStringsCallback2, modify_code_gen_callback2,      \
440     nullptr)                                                                  \
441   V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr)   \
442   V(ExtensionCallback, wasm_module_callback, &NoExtension)                    \
443   V(ExtensionCallback, wasm_instance_callback, &NoExtension)                  \
444   V(SharedArrayBufferConstructorEnabledCallback,                              \
445     sharedarraybuffer_constructor_enabled_callback, nullptr)                  \
446   V(WasmStreamingCallback, wasm_streaming_callback, nullptr)                  \
447   V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr)        \
448   V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr)             \
449   V(WasmExceptionsEnabledCallback, wasm_exceptions_enabled_callback, nullptr) \
450   V(WasmDynamicTieringEnabledCallback, wasm_dynamic_tiering_enabled_callback, \
451     nullptr)                                                                  \
452   /* State for Relocatable. */                                                \
453   V(Relocatable*, relocatable_top, nullptr)                                   \
454   V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
455   V(Object, string_stream_current_security_token, Object())                   \
456   V(const intptr_t*, api_external_references, nullptr)                        \
457   V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
458   V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
459   V(MicrotaskQueue*, default_microtask_queue, nullptr)                        \
460   V(CompilationStatistics*, turbo_statistics, nullptr)                        \
461   V(CodeTracer*, code_tracer, nullptr)                                        \
462   V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
463   V(const v8::StartupData*, snapshot_blob, nullptr)                           \
464   V(int, code_and_metadata_size, 0)                                           \
465   V(int, bytecode_and_metadata_size, 0)                                       \
466   V(int, external_script_source_size, 0)                                      \
467   /* Number of CPU profilers running on the isolate. */                       \
468   V(size_t, num_cpu_profilers, 0)                                             \
469   /* true if a trace is being formatted through Error.prepareStackTrace. */   \
470   V(bool, formatting_stack_trace, false)                                      \
471   /* Perform side effect checks on function call and API callbacks. */        \
472   V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints)  \
473   V(debug::TypeProfileMode, type_profile_mode, debug::TypeProfileMode::kNone) \
474   V(bool, disable_bytecode_flushing, false)                                   \
475   V(int, last_console_context_id, 0)                                          \
476   V(v8_inspector::V8Inspector*, inspector, nullptr)                           \
477   V(bool, next_v8_call_is_safe_for_termination, false)                        \
478   V(bool, only_terminate_in_safe_scope, false)                                \
479   V(int, embedder_wrapper_type_index, -1)                                     \
480   V(int, embedder_wrapper_object_index, -1)                                   \
481   V(compiler::NodeObserver*, node_observer, nullptr)                          \
482   /* Used in combination with --script-run-delay-once */                      \
483   V(bool, did_run_script_delay, false)                                        \
484   V(bool, javascript_execution_assert, true)                                  \
485   V(bool, javascript_execution_throws, true)                                  \
486   V(bool, javascript_execution_dump, true)                                    \
487   V(bool, deoptimization_assert, true)                                        \
488   V(bool, compilation_assert, true)                                           \
489   V(bool, no_exception_assert, true)
490 
491 #define THREAD_LOCAL_TOP_ACCESSOR(type, name)                         \
492   inline void set_##name(type v) { thread_local_top()->name##_ = v; } \
493   inline type name() const { return thread_local_top()->name##_; }
494 
495 #define THREAD_LOCAL_TOP_ADDRESS(type, name) \
496   type* name##_address() { return &thread_local_top()->name##_; }
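// For reference (added comment, not part of the upstream header):
// THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception) expands to a
// setter/getter pair forwarding to thread_local_top()->external_caught_exception_,
// and THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception) expands to
//   Object* pending_exception_address() {
//     return &thread_local_top()->pending_exception_;
//   }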
497 
498 // HiddenFactory exists so Isolate can privately inherit from it without making
499 // Factory's members available to Isolate directly.
500 class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};
501 
502 class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
503   // These forward declarations are required to make the friend declarations in
504   // PerIsolateThreadData work on some older versions of gcc.
505   class ThreadDataTable;
506   class EntryStackItem;
507 
508  public:
509   Isolate(const Isolate&) = delete;
510   Isolate& operator=(const Isolate&) = delete;
511 
512   using HandleScopeType = HandleScope;
513   void* operator new(size_t) = delete;
514   void operator delete(void*) = delete;
515 
516   // A thread has a PerIsolateThreadData instance for each isolate that it has
517   // entered. That instance is allocated when the isolate is initially entered
518   // and reused on subsequent entries.
519   class PerIsolateThreadData {
520    public:
521     PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
522         : isolate_(isolate),
523           thread_id_(thread_id),
524           stack_limit_(0),
525           thread_state_(nullptr)
526 #if USE_SIMULATOR
527           ,
528           simulator_(nullptr)
529 #endif
530     {
531     }
532     ~PerIsolateThreadData();
533     PerIsolateThreadData(const PerIsolateThreadData&) = delete;
534     PerIsolateThreadData& operator=(const PerIsolateThreadData&) = delete;
535     Isolate* isolate() const { return isolate_; }
536     ThreadId thread_id() const { return thread_id_; }
537 
538     FIELD_ACCESSOR(uintptr_t, stack_limit)
539     FIELD_ACCESSOR(ThreadState*, thread_state)
540 
541 #if USE_SIMULATOR
542     FIELD_ACCESSOR(Simulator*, simulator)
543 #endif
544 
545     bool Matches(Isolate* isolate, ThreadId thread_id) const {
546       return isolate_ == isolate && thread_id_ == thread_id;
547     }
548 
549    private:
550     Isolate* isolate_;
551     ThreadId thread_id_;
552     uintptr_t stack_limit_;
553     ThreadState* thread_state_;
554 
555 #if USE_SIMULATOR
556     Simulator* simulator_;
557 #endif
558 
559     friend class Isolate;
560     friend class ThreadDataTable;
561     friend class EntryStackItem;
562   };
563 
564   static void InitializeOncePerProcess();
565 
566   // Creates Isolate object. Must be used instead of constructing Isolate with
567   // new operator.
568   static Isolate* New();
569 
570   // Creates a new shared Isolate object.
571   static Isolate* NewShared(const v8::Isolate::CreateParams& params);
572 
573   // Deletes Isolate object. Must be used instead of delete operator.
574   // Destroys the non-default isolates.
575   // Sets the default isolate into "has_been_disposed" state rather than
576   // destroying it, for legacy API reasons.
577   static void Delete(Isolate* isolate);
578 
579   void SetUpFromReadOnlyArtifacts(std::shared_ptr<ReadOnlyArtifacts> artifacts,
580                                   ReadOnlyHeap* ro_heap);
581   void set_read_only_heap(ReadOnlyHeap* ro_heap) { read_only_heap_ = ro_heap; }
582 
583   // Page allocator that must be used for allocating V8 heap pages.
584   v8::PageAllocator* page_allocator() const;
585 
586   // Returns the PerIsolateThreadData for the current thread (or nullptr if one
587   // is not currently set).
588   static PerIsolateThreadData* CurrentPerIsolateThreadData() {
589     return reinterpret_cast<PerIsolateThreadData*>(
590         base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
591   }
592 
593   // Returns the isolate inside which the current thread is running or nullptr.
594   V8_INLINE static Isolate* TryGetCurrent() {
595     DCHECK_EQ(true, isolate_key_created_.load(std::memory_order_relaxed));
596     return reinterpret_cast<Isolate*>(
597         base::Thread::GetExistingThreadLocal(isolate_key_));
598   }
599 
600   // Returns the isolate inside which the current thread is running.
601   V8_INLINE static Isolate* Current() {
602     Isolate* isolate = TryGetCurrent();
603     DCHECK_NOT_NULL(isolate);
604     return isolate;
605   }
606 
607   // Usually called by Init(), but can be called early e.g. to allow
608   // testing components that require logging but not the whole
609   // isolate.
610   //
611   // Safe to call more than once.
612   void InitializeLoggingAndCounters();
613   bool InitializeCounters();  // Returns false if already initialized.
614 
615   bool InitWithoutSnapshot();
616   bool InitWithSnapshot(SnapshotData* startup_snapshot_data,
617                         SnapshotData* read_only_snapshot_data, bool can_rehash);
618 
619   // True if at least one thread Enter'ed this isolate.
620   bool IsInUse() { return entry_stack_ != nullptr; }
621 
622   void ReleaseSharedPtrs();
623 
624   void ClearSerializerData();
625 
626   bool LogObjectRelocation();
627 
628   // Initializes the current thread to run this Isolate.
629   // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
630   // at the same time; this should be prevented using external locking.
631   void Enter();
632 
633   // Exits the current thread. The previously entered Isolate is restored
634   // for the thread.
635   // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
636   // at the same time; this should be prevented using external locking.
637   void Exit();
638 
639   // Find the PerThread for this particular (isolate, thread) combination.
640   // If one does not yet exist, allocate a new one.
641   PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
642 
643   // Find the PerThread for this particular (isolate, thread) combination
644   // If one does not yet exist, return null.
645   PerIsolateThreadData* FindPerThreadDataForThisThread();
646 
647   // Find the PerThread for given (isolate, thread) combination
648   // If one does not yet exist, return null.
649   PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
650 
651   // Discard the PerThread for this particular (isolate, thread) combination
652   // If one does not yet exist, no-op.
653   void DiscardPerThreadDataForThisThread();
654 
655   // Mutex for serializing access to break control structures.
656   base::RecursiveMutex* break_access() { return &break_access_; }
657 
658   // Shared mutex for allowing thread-safe concurrent reads of FeedbackVectors.
659   base::SharedMutex* feedback_vector_access() {
660     return &feedback_vector_access_;
661   }
662 
663   // Shared mutex for allowing thread-safe concurrent reads of
664   // InternalizedStrings.
665   base::SharedMutex* internalized_string_access() {
666     return &internalized_string_access_;
667   }
668 
669   // Shared mutex for allowing thread-safe concurrent reads of TransitionArrays
670   // of kind kFullTransitionArray.
671   base::SharedMutex* full_transition_array_access() {
672     return &full_transition_array_access_;
673   }
674 
675   // Shared mutex for allowing thread-safe concurrent reads of
676   // SharedFunctionInfos.
677   base::SharedMutex* shared_function_info_access() {
678     return &shared_function_info_access_;
679   }
680 
681   // Protects (most) map update operations, see also MapUpdater.
682   base::SharedMutex* map_updater_access() { return &map_updater_access_; }
683 
684   // Protects JSObject boilerplate migrations (i.e. calls to MigrateInstance on
685   // boilerplate objects; elements kind transitions are *not* protected).
686   // Note this lock interacts with `map_updater_access` as follows
687   //
688   // - boilerplate migrations may trigger map updates.
689   // - if so, `boilerplate_migration_access` is locked before
690   //   `map_updater_access`.
691   // - background threads must use the same lock order to avoid deadlocks.
692   base::SharedMutex* boilerplate_migration_access() {
693     return &boilerplate_migration_access_;
694   }
695 
696   // The isolate's string table.
697   StringTable* string_table() const { return string_table_.get(); }
698 
699   Address get_address_from_id(IsolateAddressId id);
700 
701   // Access to top context (where the current function object was created).
702   Context context() const { return thread_local_top()->context_; }
703   inline void set_context(Context context);
704   Context* context_address() { return &thread_local_top()->context_; }
705 
706   // Access to current thread id.
707   inline void set_thread_id(ThreadId id) {
708     thread_local_top()->thread_id_.store(id, std::memory_order_relaxed);
709   }
710   inline ThreadId thread_id() const {
711     return thread_local_top()->thread_id_.load(std::memory_order_relaxed);
712   }
713 
714   void InstallConditionalFeatures(Handle<Context> context);
715 
716   bool IsSharedArrayBufferConstructorEnabled(Handle<Context> context);
717 
718   bool IsWasmSimdEnabled(Handle<Context> context);
719   bool AreWasmExceptionsEnabled(Handle<Context> context);
720   bool IsWasmDynamicTieringEnabled();
721 
722   THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
723   THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
724   THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
725   THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
726   THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)
727 
728   THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
729 
730   v8::TryCatch* try_catch_handler() {
731     return thread_local_top()->try_catch_handler_;
732   }
733 
734   THREAD_LOCAL_TOP_ADDRESS(bool, external_caught_exception)
735 
736   // Interface to pending exception.
737   THREAD_LOCAL_TOP_ADDRESS(Object, pending_exception)
738   inline Object pending_exception();
739   inline void set_pending_exception(Object exception_obj);
740   inline void clear_pending_exception();
741   inline bool has_pending_exception();
742 
743   THREAD_LOCAL_TOP_ADDRESS(Object, pending_message)
744   inline void clear_pending_message();
745   inline Object pending_message();
746   inline bool has_pending_message();
747   inline void set_pending_message(Object message_obj);
748 
749   THREAD_LOCAL_TOP_ADDRESS(Object, scheduled_exception)
750   inline Object scheduled_exception();
751   inline bool has_scheduled_exception();
752   inline void clear_scheduled_exception();
753   inline void set_scheduled_exception(Object exception);
754 
755   bool IsJavaScriptHandlerOnTop(Object exception);
756   bool IsExternalHandlerOnTop(Object exception);
757 
758   inline bool is_catchable_by_javascript(Object exception);
759   inline bool is_catchable_by_wasm(Object exception);
760 
761   // JS execution stack (see frames.h).
762   static Address c_entry_fp(ThreadLocalTop* thread) {
763     return thread->c_entry_fp_;
764   }
765   static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
766   Address c_function() { return thread_local_top()->c_function_; }
767 
768   inline Address* c_entry_fp_address() {
769     return &thread_local_top()->c_entry_fp_;
770   }
771   static uint32_t c_entry_fp_offset() {
772     return static_cast<uint32_t>(
773         OFFSET_OF(Isolate, thread_local_top()->c_entry_fp_) -
774         isolate_root_bias());
775   }
776   inline Address* handler_address() { return &thread_local_top()->handler_; }
777   inline Address* c_function_address() {
778     return &thread_local_top()->c_function_;
779   }
780 
781 #if defined(DEBUG) || defined(VERIFY_HEAP)
782   // Count the number of active deserializers, so that the heap verifier knows
783   // whether there is currently an active deserialization happening.
784   //
785   // This is needed as the verifier currently doesn't support verifying objects
786   // which are partially deserialized.
787   //
788   // TODO(leszeks): Make the verifier a bit more deserialization compatible.
789   void RegisterDeserializerStarted() { ++num_active_deserializers_; }
790   void RegisterDeserializerFinished() {
791     CHECK_GE(--num_active_deserializers_, 0);
792   }
793   bool has_active_deserializer() const {
794     return num_active_deserializers_.load(std::memory_order_acquire) > 0;
795   }
796 #else
797   void RegisterDeserializerStarted() {}
798   void RegisterDeserializerFinished() {}
799   bool has_active_deserializer() const { UNREACHABLE(); }
800 #endif
801 
802   // Bottom JS entry.
803   Address js_entry_sp() { return thread_local_top()->js_entry_sp_; }
804   inline Address* js_entry_sp_address() {
805     return &thread_local_top()->js_entry_sp_;
806   }
807 
808   std::vector<MemoryRange>* GetCodePages() const;
809 
810   void SetCodePages(std::vector<MemoryRange>* new_code_pages);
811 
812   // Returns the global object of the current context. It could be
813   // a builtin object, or a JS global object.
814   inline Handle<JSGlobalObject> global_object();
815 
816   // Returns the global proxy object of the current context.
817   inline Handle<JSGlobalProxy> global_proxy();
818 
819   static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
820   void FreeThreadResources() { thread_local_top()->Free(); }
821 
822   // This method is called by the api after operations that may throw
823   // exceptions.  If an exception was thrown and not handled by an external
824   // handler, the exception is scheduled to be rethrown when we return to
825   // running JavaScript code.  If an exception is scheduled, true is returned.
826   bool OptionalRescheduleException(bool clear_exception);
827 
828   // Push and pop a promise and the current try-catch handler.
829   void PushPromise(Handle<JSObject> promise);
830   void PopPromise();
831 
832   // Return the relevant Promise that a throw/rejection pertains to, based
833   // on the contents of the Promise stack
834   Handle<Object> GetPromiseOnStackOnThrow();
835 
836   // Heuristically guess whether a Promise is handled by user catch handler
837   bool PromiseHasUserDefinedRejectHandler(Handle<JSPromise> promise);
838 
839   class V8_NODISCARD ExceptionScope {
840    public:
841     // Scope currently can only be used for regular exceptions,
842     // not termination exception.
843     inline explicit ExceptionScope(Isolate* isolate);
844     inline ~ExceptionScope();
845 
846    private:
847     Isolate* isolate_;
848     Handle<Object> pending_exception_;
849   };
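  // Illustrative sketch only (added comment, not part of the upstream header):
  // an ExceptionScope saves the pending exception on construction and restores
  // it on destruction, so a caller can run code that clobbers it:
  //
  //   {
  //     ExceptionScope exception_scope(isolate);
  //     ... code that may set and clear its own pending exception ...
  //   }  // the original pending exception is restored here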
850 
851   void SetCaptureStackTraceForUncaughtExceptions(
852       bool capture, int frame_limit, StackTrace::StackTraceOptions options);
853   bool get_capture_stack_trace_for_uncaught_exceptions() const;
854 
855   void SetAbortOnUncaughtExceptionCallback(
856       v8::Isolate::AbortOnUncaughtExceptionCallback callback);
857 
858   enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
859   void PrintCurrentStackTrace(std::ostream& out);
860   void PrintStack(StringStream* accumulator,
861                   PrintStackMode mode = kPrintStackVerbose);
862   void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
863   Handle<String> StackTraceString();
864   // Stores a stack trace in a stack-allocated temporary buffer which will
865   // end up in the minidump for debugging purposes.
866   V8_NOINLINE void PushStackTraceAndDie(void* ptr1 = nullptr,
867                                         void* ptr2 = nullptr,
868                                         void* ptr3 = nullptr,
869                                         void* ptr4 = nullptr);
870   Handle<FixedArray> CaptureCurrentStackTrace(
871       int frame_limit, StackTrace::StackTraceOptions options);
872   Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
873                                          FrameSkipMode mode,
874                                          Handle<Object> caller);
875   MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
876       Handle<JSReceiver> error_object);
877   MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
878       Handle<JSReceiver> error_object, FrameSkipMode mode,
879       Handle<Object> caller);
880   Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);
881 
882   Address GetAbstractPC(int* line, int* column);
883 
884   // Returns if the given context may access the given global object. If
885   // the result is false, the pending exception is guaranteed to be
886   // set.
887   bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);
888 
889   void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
890   void ReportFailedAccessCheck(Handle<JSObject> receiver);
891 
892   // Exception throwing support. The caller should use the result
893   // of Throw() as its return value.
894   Object Throw(Object exception) { return ThrowInternal(exception, nullptr); }
895   Object ThrowAt(Handle<JSObject> exception, MessageLocation* location);
896   Object ThrowIllegalOperation();
897 
898   template <typename T>
899   V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(Handle<Object> exception) {
900     Throw(*exception);
901     return MaybeHandle<T>();
902   }
903 
904   template <typename T>
905   V8_WARN_UNUSED_RESULT MaybeHandle<T> ThrowAt(Handle<JSObject> exception,
906                                                MessageLocation* location) {
907     ThrowAt(exception, location);
908     return MaybeHandle<T>();
909   }
910 
911   void FatalProcessOutOfHeapMemory(const char* location) {
912     heap()->FatalProcessOutOfMemory(location);
913   }
914 
915   void set_console_delegate(debug::ConsoleDelegate* delegate) {
916     console_delegate_ = delegate;
917   }
918   debug::ConsoleDelegate* console_delegate() { return console_delegate_; }
919 
920   void set_async_event_delegate(debug::AsyncEventDelegate* delegate) {
921     async_event_delegate_ = delegate;
922     PromiseHookStateUpdated();
923   }
924   void OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
925                                    debug::DebugAsyncActionType);
926 
927   // Re-throw an exception.  This involves no error reporting since error
928   // reporting was handled when the exception was thrown originally.
929   Object ReThrow(Object exception);
930 
931   // Find the correct handler for the current pending exception. This also
932   // clears and returns the current pending exception.
933   Object UnwindAndFindHandler();
934 
935   // Tries to predict whether an exception will be caught. Note that this can
936   // only produce an estimate, because it is undecidable whether a finally
937   // clause will consume or re-throw an exception.
938   enum CatchType {
939     NOT_CAUGHT,
940     CAUGHT_BY_JAVASCRIPT,
941     CAUGHT_BY_EXTERNAL,
942     CAUGHT_BY_PROMISE,
943     CAUGHT_BY_ASYNC_AWAIT
944   };
945   CatchType PredictExceptionCatcher();
946 
947   void ScheduleThrow(Object exception);
948   // Re-set pending message, script and positions reported to the TryCatch
949   // back to the TLS for re-use when rethrowing.
950   void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
951   // Un-schedule an exception that was caught by a TryCatch handler.
952   void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
953   void ReportPendingMessages();
954 
955   // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
956   Object PromoteScheduledException();
957 
958   // Attempts to compute the current source location, storing the
959   // result in the target out parameter. The source location is attached to a
960   // Message object as the location which should be shown to the user. It's
961   // typically the top-most meaningful location on the stack.
962   bool ComputeLocation(MessageLocation* target);
963   bool ComputeLocationFromException(MessageLocation* target,
964                                     Handle<Object> exception);
965   bool ComputeLocationFromStackTrace(MessageLocation* target,
966                                      Handle<Object> exception);
967 
968   Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
969                                         MessageLocation* location);
970   Handle<JSMessageObject> CreateMessageOrAbort(Handle<Object> exception,
971                                                MessageLocation* location);
972 
973   // Out of resource exception helpers.
974   Object StackOverflow();
975   Object TerminateExecution();
976   void CancelTerminateExecution();
977 
978   void RequestInterrupt(InterruptCallback callback, void* data);
979   void InvokeApiInterruptCallbacks();
980 
981   // Administration
982   void Iterate(RootVisitor* v);
983   void Iterate(RootVisitor* v, ThreadLocalTop* t);
984   char* Iterate(RootVisitor* v, char* t);
985   void IterateThread(ThreadVisitor* v, char* t);
986 
987   // Returns the current native context.
988   inline Handle<NativeContext> native_context();
989   inline NativeContext raw_native_context();
990 
991   Handle<Context> GetIncumbentContext();
992 
993   void RegisterTryCatchHandler(v8::TryCatch* that);
994   void UnregisterTryCatchHandler(v8::TryCatch* that);
995 
996   char* ArchiveThread(char* to);
997   char* RestoreThread(char* from);
998 
999   static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
1000   static const int kBMMaxShift = 250;        // See StringSearchBase.
1001 
1002   // Accessors.
1003 #define GLOBAL_ACCESSOR(type, name, initialvalue)                \
1004   inline type name() const {                                     \
1005     DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
1006     return name##_;                                              \
1007   }                                                              \
1008   inline void set_##name(type value) {                           \
1009     DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
1010     name##_ = value;                                             \
1011   }
1012   ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
1013 #undef GLOBAL_ACCESSOR
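  // For reference (added comment, not part of the upstream header): through
  // GLOBAL_ACCESSOR, each ISOLATE_INIT_LIST entry such as
  //   V(FatalErrorCallback, exception_behavior, nullptr)
  // produces exception_behavior() and set_exception_behavior() accessors backed
  // by an exception_behavior_ member initialized to nullptr.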
1014 
1015   void SetDetailedSourcePositionsForProfiling(bool value) {
1016     if (value) {
1017       CollectSourcePositionsForAllBytecodeArrays();
1018     }
1019     detailed_source_positions_for_profiling_ = value;
1020   }
1021 
1022   bool detailed_source_positions_for_profiling() const {
1023     return detailed_source_positions_for_profiling_;
1024   }
1025 
1026 #define GLOBAL_ARRAY_ACCESSOR(type, name, length)                \
1027   inline type* name() {                                          \
1028     DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
1029     return &(name##_)[0];                                        \
1030   }
1031   ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
1032 #undef GLOBAL_ARRAY_ACCESSOR
1033 
1034 #define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
1035   inline Handle<type> name();                            \
1036   inline bool is_##name(type value);
1037   NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
1038 #undef NATIVE_CONTEXT_FIELD_ACCESSOR
1039 
1040   Bootstrapper* bootstrapper() { return bootstrapper_; }
1041   // Use for updating counters on a foreground thread.
1042   Counters* counters() { return async_counters().get(); }
1043   // Use for updating counters on a background thread.
1044   const std::shared_ptr<Counters>& async_counters() {
1045     // Make sure InitializeCounters() has been called.
1046     DCHECK_NOT_NULL(async_counters_.get());
1047     return async_counters_;
1048   }
1049   const std::shared_ptr<metrics::Recorder>& metrics_recorder() {
1050     return metrics_recorder_;
1051   }
1052   RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
1053   CompilationCache* compilation_cache() { return compilation_cache_; }
1054   Logger* logger() {
1055     // Call InitializeLoggingAndCounters() if logging is needed before
1056     // the isolate is fully initialized.
1057     DCHECK_NOT_NULL(logger_);
1058     return logger_;
1059   }
1060   StackGuard* stack_guard() { return isolate_data()->stack_guard(); }
1061   Heap* heap() { return &heap_; }
1062   const Heap* heap() const { return &heap_; }
1063   ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
1064   static Isolate* FromHeap(Heap* heap) {
1065     return reinterpret_cast<Isolate*>(reinterpret_cast<Address>(heap) -
1066                                       OFFSET_OF(Isolate, heap_));
1067   }
1068 
1069   const IsolateData* isolate_data() const { return &isolate_data_; }
1070   IsolateData* isolate_data() { return &isolate_data_; }
1071 
1072   // When pointer compression is on, this is the base address of the pointer
1073   // compression cage, and the kPtrComprCageBaseRegister is set to this
1074   // value. When pointer compression is off, this is always kNullAddress.
1075   Address cage_base() const {
1076     DCHECK_IMPLIES(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL &&
1077                        !COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL,
1078                    isolate_data()->cage_base() == kNullAddress);
1079     return isolate_data()->cage_base();
1080   }
1081 
1082   Address code_cage_base() const { return cage_base(); }
1083 
1084   // When pointer compression is on, the PtrComprCage used by this
1085   // Isolate. Otherwise nullptr.
1086   VirtualMemoryCage* GetPtrComprCage() {
1087     return isolate_allocator_->GetPtrComprCage();
1088   }
1089   const VirtualMemoryCage* GetPtrComprCage() const {
1090     return isolate_allocator_->GetPtrComprCage();
1091   }
1092 
1093   // Generated code can embed this address to get access to the isolate-specific
1094   // data (for example, roots, external references, builtins, etc.).
1095   // The kRootRegister is set to this value.
1096   Address isolate_root() const { return isolate_data()->isolate_root(); }
1097   static size_t isolate_root_bias() {
1098     return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
1099   }
1100   static Isolate* FromRootAddress(Address isolate_root) {
1101     return reinterpret_cast<Isolate*>(isolate_root - isolate_root_bias());
1102   }
1103 
1104   RootsTable& roots_table() { return isolate_data()->roots(); }
1105   const RootsTable& roots_table() const { return isolate_data()->roots(); }
1106 
1107   // A sub-region of the Isolate object that has "predictable" layout which
1108   // depends only on the pointer size and therefore it's guaranteed that there
1109   // will be no compatibility issues because of different compilers used for
1110   // snapshot generator and actual V8 code.
1111   // Thus, kRootRegister may be used to address any location that falls into
1112   // this region.
1113   // See IsolateData::AssertPredictableLayout() for details.
1114   base::AddressRegion root_register_addressable_region() const {
1115     return base::AddressRegion(reinterpret_cast<Address>(&isolate_data_),
1116                                sizeof(IsolateData));
1117   }
1118 
1119   Object root(RootIndex index) const { return Object(roots_table()[index]); }
1120 
1121   Handle<Object> root_handle(RootIndex index) {
1122     return Handle<Object>(&roots_table()[index]);
1123   }
1124 
1125   ExternalReferenceTable* external_reference_table() {
1126     DCHECK(isolate_data()->external_reference_table()->is_initialized());
1127     return isolate_data()->external_reference_table();
1128   }
1129 
1130   Address* builtin_entry_table() { return isolate_data_.builtin_entry_table(); }
1131   V8_INLINE Address* builtin_table() { return isolate_data_.builtin_table(); }
1132 
1133   bool IsBuiltinTableHandleLocation(Address* handle_location);
1134 
1135   StubCache* load_stub_cache() const { return load_stub_cache_; }
1136   StubCache* store_stub_cache() const { return store_stub_cache_; }
1137   Deoptimizer* GetAndClearCurrentDeoptimizer() {
1138     Deoptimizer* result = current_deoptimizer_;
1139     CHECK_NOT_NULL(result);
1140     current_deoptimizer_ = nullptr;
1141     return result;
1142   }
1143   void set_current_deoptimizer(Deoptimizer* deoptimizer) {
1144     DCHECK_NULL(current_deoptimizer_);
1145     DCHECK_NOT_NULL(deoptimizer);
1146     current_deoptimizer_ = deoptimizer;
1147   }
1148   bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
1149   void set_deoptimizer_lazy_throw(bool value) {
1150     deoptimizer_lazy_throw_ = value;
1151   }
1152   void InitializeThreadLocal();
1153   ThreadLocalTop* thread_local_top() {
1154     return &isolate_data_.thread_local_top_;
1155   }
1156   ThreadLocalTop const* thread_local_top() const {
1157     return &isolate_data_.thread_local_top_;
1158   }
1159 
1160   static uint32_t thread_in_wasm_flag_address_offset() {
1161     // For WebAssembly trap handlers there is a flag in thread-local storage
1162     // which indicates that the executing thread executes WebAssembly code. To
1163     // access this flag directly from generated code, we store a pointer to the
1164     // flag in ThreadLocalTop in thread_in_wasm_flag_address_. This function
1165     // here returns the offset of that member from {isolate_root()}.
1166     return static_cast<uint32_t>(
1167         OFFSET_OF(Isolate, thread_local_top()->thread_in_wasm_flag_address_) -
1168         isolate_root_bias());
1169   }
1170 
1171   THREAD_LOCAL_TOP_ADDRESS(Address, thread_in_wasm_flag_address)
1172 
1173   MaterializedObjectStore* materialized_object_store() const {
1174     return materialized_object_store_;
1175   }
1176 
1177   DescriptorLookupCache* descriptor_lookup_cache() const {
1178     return descriptor_lookup_cache_;
1179   }
1180 
1181   HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
1182 
1183   HandleScopeImplementer* handle_scope_implementer() const {
1184     DCHECK(handle_scope_implementer_);
1185     return handle_scope_implementer_;
1186   }
1187 
1188   UnicodeCache* unicode_cache() const { return unicode_cache_; }
1189 
1190   InnerPointerToCodeCache* inner_pointer_to_code_cache() {
1191     return inner_pointer_to_code_cache_;
1192   }
1193 
1194   GlobalHandles* global_handles() const { return global_handles_; }
1195 
1196   EternalHandles* eternal_handles() const { return eternal_handles_; }
1197 
1198   ThreadManager* thread_manager() const { return thread_manager_; }
1199 
1200   bigint::Processor* bigint_processor() { return bigint_processor_; }
1201 
1202 #ifndef V8_INTL_SUPPORT
1203   unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
1204     return &jsregexp_uncanonicalize_;
1205   }
1206 
1207   unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
1208     return &jsregexp_canonrange_;
1209   }
1210 
1211   unibrow::Mapping<unibrow::Ecma262Canonicalize>*
1212   regexp_macro_assembler_canonicalize() {
1213     return &regexp_macro_assembler_canonicalize_;
1214   }
1215 #endif  // !V8_INTL_SUPPORT
1216 
1217   RuntimeState* runtime_state() { return &runtime_state_; }
1218 
1219   Builtins* builtins() { return &builtins_; }
1220 
1221   RegExpStack* regexp_stack() const { return regexp_stack_; }
1222 
1223   size_t total_regexp_code_generated() const {
1224     return total_regexp_code_generated_;
1225   }
1226   void IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code);
1227 
1228   std::vector<int>* regexp_indices() { return &regexp_indices_; }
1229 
1230   Debug* debug() const { return debug_; }
1231 
1232   void* is_profiling_address() { return &is_profiling_; }
1233 
is_profiling()1234   bool is_profiling() const {
1235     return is_profiling_.load(std::memory_order_relaxed);
1236   }
1237 
SetIsProfiling(bool enabled)1238   void SetIsProfiling(bool enabled) {
1239     if (enabled) {
1240       CollectSourcePositionsForAllBytecodeArrays();
1241     }
1242     is_profiling_.store(enabled, std::memory_order_relaxed);
1243   }
1244 
code_event_dispatcher()1245   CodeEventDispatcher* code_event_dispatcher() const {
1246     return code_event_dispatcher_.get();
1247   }
heap_profiler()1248   HeapProfiler* heap_profiler() const { return heap_profiler_; }
1249 
1250 #ifdef DEBUG
non_disposed_isolates()1251   static size_t non_disposed_isolates() { return non_disposed_isolates_; }
1252 #endif
1253 
factory()1254   v8::internal::Factory* factory() {
1255     // Upcast to the privately inherited base-class using c-style casts to avoid
1256     // undefined behavior (as static_cast cannot cast across private bases).
1257     return (v8::internal::Factory*)this;
1258   }
1259 
1260   static const int kJSRegexpStaticOffsetsVectorSize = 128;
1261 
THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope *,external_callback_scope)1262   THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
1263 
1264   THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
1265 
1266   void SetData(uint32_t slot, void* data) {
1267     DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
1268     isolate_data_.embedder_data_[slot] = data;
1269   }
GetData(uint32_t slot)1270   void* GetData(uint32_t slot) const {
1271     DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
1272     return isolate_data_.embedder_data_[slot];
1273   }
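  // Illustrative embedder use of the data slots (sketch; MyEmbedderState is a
  // hypothetical embedder-side type, and any slot index below
  // Internals::kNumIsolateDataSlots works):
  //   isolate->SetData(0, my_state);
  //   auto* state = static_cast<MyEmbedderState*>(isolate->GetData(0));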

  bool serializer_enabled() const { return serializer_enabled_; }

  void enable_serializer() { serializer_enabled_ = true; }

  bool snapshot_available() const {
    return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() const { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_optimizer();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool NeedsDetailedOptimizedCodeLineInfo() const;

  bool is_best_effort_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBestEffort;
  }

  bool is_precise_count_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kPreciseCount;
  }

  bool is_precise_binary_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kPreciseBinary;
  }

  bool is_block_count_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBlockCount;
  }

  bool is_block_binary_code_coverage() const {
    return code_coverage_mode() == debug::CoverageMode::kBlockBinary;
  }

  bool is_block_code_coverage() const {
    return is_block_count_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_binary_code_coverage() const {
    return is_precise_binary_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_count_code_coverage() const {
    return is_precise_count_code_coverage() || is_block_count_code_coverage();
  }

  bool is_collecting_type_profile() const {
    return type_profile_mode() == debug::TypeProfileMode::kCollect;
  }

  // Collect feedback vectors with data for code coverage or type profile.
  // Reset the list when neither code coverage nor type profile is needed
  // anymore. This keeps many feedback vectors alive, but code coverage and
  // type profile are used for debugging only, so the increase in memory usage
  // is expected.
  void SetFeedbackVectorsForProfilingTools(Object value);

  void MaybeInitializeVectorListFromHeap();

  double time_millis_since_init() const {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() const { return date_cache_; }

  void set_date_cache(DateCache* date_cache);

#ifdef V8_INTL_SUPPORT

  const std::string& default_locale() { return default_locale_; }

  void ResetDefaultLocale() { default_locale_.clear(); }

  void set_default_locale(const std::string& locale) {
    DCHECK_EQ(default_locale_.length(), 0);
    default_locale_ = locale;
  }

  enum class ICUObjectCacheType{
      kDefaultCollator, kDefaultNumberFormat, kDefaultSimpleDateFormat,
      kDefaultSimpleDateFormatForTime, kDefaultSimpleDateFormatForDate};
  static constexpr int kICUObjectCacheTypeCount = 5;

  icu::UMemory* get_cached_icu_object(ICUObjectCacheType cache_type,
                                      Handle<Object> locales);
  void set_icu_object_in_cache(ICUObjectCacheType cache_type,
                               Handle<Object> locales,
                               std::shared_ptr<icu::UMemory> obj);
  void clear_cached_icu_object(ICUObjectCacheType cache_type);
  void clear_cached_icu_objects();

#endif  // V8_INTL_SUPPORT

  enum class KnownPrototype { kNone, kObject, kArray, kString };

  KnownPrototype IsArrayOrObjectOrStringPrototype(Object object);

  // On intent to set an element in object, make sure that appropriate
  // notifications occur if the set is on the elements of the array or
  // object prototype. Also ensure that changes to prototype chain between
  // Array and Object fire notifications.
  void UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object);
  void UpdateNoElementsProtectorOnSetLength(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }

  // Returns true if array is the initial array prototype in any native context.
  inline bool IsAnyInitialArrayPrototype(JSArray array);

  std::unique_ptr<PersistentHandles> NewPersistentHandles();

  PersistentHandlesList* persistent_handles_list() const {
    return persistent_handles_list_.get();
  }

#ifdef DEBUG
  bool IsDeferredHandle(Address* location);
#endif  // DEBUG

  bool concurrent_recompilation_enabled() {
    // Thread is only available with flag enabled.
    DCHECK(optimizing_compile_dispatcher_ == nullptr ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != nullptr;
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    DCHECK_NOT_NULL(optimizing_compile_dispatcher_);
    return optimizing_compile_dispatcher_;
  }
  // Flushes all pending concurrent optimization jobs from the optimizing
  // compile dispatcher's queue.
  void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);

  int id() const { return id_; }

  CompilationStatistics* GetTurboStatistics();
  CodeTracer* GetCodeTracer();

  void DumpAndResetStats();

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  void set_force_slow_path(bool v) { force_slow_path_ = v; }
  bool force_slow_path() const { return force_slow_path_; }
  bool* force_slow_path_address() { return &force_slow_path_; }

  DebugInfo::ExecutionMode* debug_execution_mode_address() {
    return &debug_execution_mode_;
  }

  base::RandomNumberGenerator* random_number_generator();

  base::RandomNumberGenerator* fuzzer_rng();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Code FindCodeObject(Address a);

  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  // https://github.com/tc39/proposal-top-level-await/pull/159
  // TODO(syg): Update to actual spec link once merged.
  //
  // According to the spec, modules that depend on async modules (i.e. modules
  // with top-level await) must be evaluated in the order in which their
  // [[AsyncEvaluating]] flags were set to true. V8 tracks this global total
  // order with next_module_async_evaluating_ordinal_. Each module that sets its
  // [[AsyncEvaluating]] to true grabs the next ordinal.
  unsigned NextModuleAsyncEvaluatingOrdinal() {
    unsigned ordinal = next_module_async_evaluating_ordinal_++;
    CHECK_LT(ordinal, kMaxModuleAsyncEvaluatingOrdinal);
    return ordinal;
  }

  inline void DidFinishModuleAsyncEvaluation(unsigned ordinal);

  void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
  void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                   size_t heap_limit);
  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
    if (!thread_local_top()->CallDepthIsZero()) return;
    FireCallCompletedCallbackInternal(microtask_queue);
  }

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  void SetTerminationOnExternalTryCatch();

  Handle<Symbol> SymbolFor(RootIndex dictionary_index, Handle<String> name,
                           bool private_symbol);

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);

  static std::string GetTurboCfgFileName(Isolate* isolate);

  int GetNextScriptId();

#if V8_SFI_HAS_UNIQUE_ID
  int GetNextUniqueSharedFunctionInfoId() {
    int current_id = next_unique_sfi_id_.load(std::memory_order_relaxed);
    int next_id;
    do {
      if (current_id >= Smi::kMaxValue) {
        next_id = 0;
      } else {
        next_id = current_id + 1;
      }
    } while (!next_unique_sfi_id_.compare_exchange_weak(
        current_id, next_id, std::memory_order_relaxed));
    return current_id;
  }
#endif

  void SetHasContextPromiseHooks(bool context_promise_hook) {
    promise_hook_flags_ = PromiseHookFields::HasContextPromiseHook::update(
        promise_hook_flags_, context_promise_hook);
    PromiseHookStateUpdated();
  }

  bool HasContextPromiseHooks() const {
    return PromiseHookFields::HasContextPromiseHook::decode(
        promise_hook_flags_);
  }

  Address promise_hook_flags_address() {
    return reinterpret_cast<Address>(&promise_hook_flags_);
  }

  Address promise_hook_address() {
    return reinterpret_cast<Address>(&promise_hook_);
  }

  Address async_event_delegate_address() {
    return reinterpret_cast<Address>(&async_event_delegate_);
  }

  Address javascript_execution_assert_address() {
    return reinterpret_cast<Address>(&javascript_execution_assert_);
  }

  Address handle_scope_implementer_address() {
    return reinterpret_cast<Address>(&handle_scope_implementer_);
  }

  void SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
                              void* data);
  void RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
                              Handle<JSArrayBuffer> array_buffer,
                              size_t offset_in_bytes, int64_t value,
                              double timeout_in_ms,
                              AtomicsWaitWakeHandle* stop_handle);

  void SetPromiseHook(PromiseHook hook);
  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
                      Handle<Object> parent);
  void RunAllPromiseHooks(PromiseHookType type, Handle<JSPromise> promise,
                          Handle<Object> parent);
  void UpdatePromiseHookProtector();
  void PromiseHookStateUpdated();

  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  // Detach the environment from its outer global object.
  void DetachGlobal(Handle<Context> env);

  std::vector<Object>* startup_object_cache() { return &startup_object_cache_; }

  bool IsGeneratingEmbeddedBuiltins() const {
    return builtins_constants_table_builder() != nullptr;
  }

  BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
    return builtins_constants_table_builder_;
  }

  // Hashes bits of the Isolate that are relevant for embedded builtins. In
  // particular, the embedded blob requires builtin Code object layout and the
  // builtins constants table to remain unchanged from build-time.
  size_t HashIsolateForEmbeddedBlob();

  static const uint8_t* CurrentEmbeddedBlobCode();
  static uint32_t CurrentEmbeddedBlobCodeSize();
  static const uint8_t* CurrentEmbeddedBlobData();
  static uint32_t CurrentEmbeddedBlobDataSize();
  static bool CurrentEmbeddedBlobIsBinaryEmbedded();

  // These always return the same result as static methods above, but don't
  // access the global atomic variable (and thus *might be* slightly faster).
  const uint8_t* embedded_blob_code() const;
  uint32_t embedded_blob_code_size() const;
  const uint8_t* embedded_blob_data() const;
  uint32_t embedded_blob_data_size() const;

  // Returns true if the short builtin calls optimization is enabled for the
  // Isolate.
  bool is_short_builtin_calls_enabled() const {
    return V8_SHORT_BUILTIN_CALLS_BOOL && is_short_builtin_calls_enabled_;
  }

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }

  void set_array_buffer_allocator_shared(
      std::shared_ptr<v8::ArrayBuffer::Allocator> allocator) {
    array_buffer_allocator_shared_ = std::move(allocator);
  }
  std::shared_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_shared()
      const {
    return array_buffer_allocator_shared_;
  }

  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }

  CancelableTaskManager* cancelable_task_manager() {
    return cancelable_task_manager_;
  }

  const AstStringConstants* ast_string_constants() const {
    return ast_string_constants_;
  }

  interpreter::Interpreter* interpreter() const { return interpreter_; }

  compiler::PerIsolateCompilerCache* compiler_cache() const {
    return compiler_cache_;
  }
  void set_compiler_utils(compiler::PerIsolateCompilerCache* cache,
                          Zone* zone) {
    compiler_cache_ = cache;
    compiler_zone_ = zone;
  }

  AccountingAllocator* allocator() { return allocator_; }

  LazyCompileDispatcher* lazy_compile_dispatcher() const {
    return compiler_dispatcher_;
  }

  baseline::BaselineBatchCompiler* baseline_batch_compiler() const {
    return baseline_batch_compiler_;
  }

  bool IsInAnyContext(Object object, uint32_t index);

  void ClearKeptObjects();

  void SetHostImportModuleDynamicallyCallback(
      HostImportModuleDynamicallyWithImportAssertionsCallback callback);
  MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
      Handle<Script> referrer, Handle<Object> specifier,
      MaybeHandle<Object> maybe_import_assertions_argument);

  void SetHostInitializeImportMetaObjectCallback(
      HostInitializeImportMetaObjectCallback callback);
  MaybeHandle<JSObject> RunHostInitializeImportMetaObjectCallback(
      Handle<SourceTextModule> module);

  void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
    embedded_file_writer_ = writer;
  }

  int LookupOrAddExternallyCompiledFilename(const char* filename);
  const char* GetExternallyCompiledFilename(int index) const;
  int GetExternallyCompiledFilenameCount() const;
  // PrepareBuiltinSourcePositionMap is necessary in order to preserve the
  // builtin source positions before the corresponding code objects are
  // replaced with trampolines. Those source positions are used to
  // annotate the builtin blob with debugging information.
  void PrepareBuiltinSourcePositionMap();

  // Store the position of the labels that will be used in the list of allowed
  // return addresses.
  void PrepareBuiltinLabelInfoMap();

#if defined(V8_OS_WIN64)
  void SetBuiltinUnwindData(
      Builtin builtin,
      const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info);
#endif  // V8_OS_WIN64

  void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
  MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
                                                   Handle<JSObject> Error,
                                                   Handle<JSArray> sites);
  bool HasPrepareStackTraceCallback() const;

  void SetAddCrashKeyCallback(AddCrashKeyCallback callback);
  void AddCrashKey(CrashKeyId id, const std::string& value) {
    if (add_crash_key_callback_) {
      add_crash_key_callback_(id, value);
    }
  }

  void SetRAILMode(RAILMode rail_mode);

  RAILMode rail_mode() { return rail_mode_.load(); }

  void set_code_coverage_mode(debug::CoverageMode coverage_mode) {
    code_coverage_mode_.store(coverage_mode, std::memory_order_relaxed);
  }
  debug::CoverageMode code_coverage_mode() const {
    return code_coverage_mode_.load(std::memory_order_relaxed);
  }

  double LoadStartTimeMs();

  void UpdateLoadStartTime();

  void IsolateInForegroundNotification();

  void IsolateInBackgroundNotification();

  bool IsIsolateInBackground() { return is_isolate_in_background_; }

  void EnableMemorySavingsMode() { memory_savings_mode_active_ = true; }

  void DisableMemorySavingsMode() { memory_savings_mode_active_ = false; }

  bool IsMemorySavingsModeActive() { return memory_savings_mode_active_; }

  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);

  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
  bool allow_atomics_wait() { return allow_atomics_wait_; }

  // Register a finalizer to be called at isolate teardown.
  void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  // Removes a previously-registered shared object finalizer.
  void UnregisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);

  size_t elements_deletion_counter() { return elements_deletion_counter_; }
  void set_elements_deletion_counter(size_t value) {
    elements_deletion_counter_ = value;
  }

#if V8_ENABLE_WEBASSEMBLY
  void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);
#endif  // V8_ENABLE_WEBASSEMBLY

  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
    return top_backup_incumbent_scope_;
  }
  void set_top_backup_incumbent_scope(
      const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
    top_backup_incumbent_scope_ = top_backup_incumbent_scope;
  }

  void SetIdle(bool is_idle);

  // Changing various modes can cause differences in generated bytecode which
  // interferes with lazy source positions, so this should be called immediately
  // before such a mode change to ensure that this cannot happen.
  void CollectSourcePositionsForAllBytecodeArrays();

  void AddCodeMemoryChunk(MemoryChunk* chunk);
  void RemoveCodeMemoryChunk(MemoryChunk* chunk);
  void AddCodeRange(Address begin, size_t length_in_bytes);

  bool RequiresCodeRange() const;

  static Address load_from_stack_count_address(const char* function_name);
  static Address store_to_stack_count_address(const char* function_name);

  v8::metrics::Recorder::ContextId GetOrRegisterRecorderContextId(
      Handle<NativeContext> context);
  MaybeLocal<v8::Context> GetContextFromRecorderContextId(
      v8::metrics::Recorder::ContextId id);

  void UpdateLongTaskStats();
  v8::metrics::LongTaskStats* GetCurrentLongTaskStats();

  LocalIsolate* main_thread_local_isolate() {
    return main_thread_local_isolate_.get();
  }

  Isolate* AsIsolate() { return this; }
  LocalIsolate* AsLocalIsolate() { return main_thread_local_isolate(); }

  LocalHeap* main_thread_local_heap();
  LocalHeap* CurrentLocalHeap();

#ifdef V8_HEAP_SANDBOX
  ExternalPointerTable& external_pointer_table() {
    return isolate_data_.external_pointer_table_;
  }

  const ExternalPointerTable& external_pointer_table() const {
    return isolate_data_.external_pointer_table_;
  }

  Address external_pointer_table_address() {
    return reinterpret_cast<Address>(&isolate_data_.external_pointer_table_);
  }
#endif

  struct PromiseHookFields {
    using HasContextPromiseHook = base::BitField<bool, 0, 1>;
    using HasIsolatePromiseHook = HasContextPromiseHook::Next<bool, 1>;
    using HasAsyncEventDelegate = HasIsolatePromiseHook::Next<bool, 1>;
    using IsDebugActive = HasAsyncEventDelegate::Next<bool, 1>;
  };
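  // Illustrative decode of the packed promise-hook flags (sketch only):
  //   uint32_t flags = promise_hook_flags_;
  //   bool isolate_hook =
  //       PromiseHookFields::HasIsolatePromiseHook::decode(flags);
  //   bool debug_active = PromiseHookFields::IsDebugActive::decode(flags);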

  bool is_shared() { return is_shared_; }
  Isolate* shared_isolate() { return shared_isolate_; }

  void AttachToSharedIsolate(Isolate* shared);
  void DetachFromSharedIsolate();

  bool HasClientIsolates() const { return client_isolate_head_; }

  template <typename Callback>
  void IterateClientIsolates(Callback callback) {
    for (Isolate* current = client_isolate_head_; current;
         current = current->next_client_isolate_) {
      callback(current);
    }
  }
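  // Illustrative use from a shared isolate (sketch; the callback can be any
  // invocable taking an Isolate*):
  //   shared_isolate->IterateClientIsolates(
  //       [](Isolate* client) { /* e.g. inspect or notify the client */ });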

  base::Mutex* client_isolate_mutex() { return &client_isolate_mutex_; }

 private:
  explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
                   bool is_shared);
  ~Isolate();

  bool Init(SnapshotData* startup_snapshot_data,
            SnapshotData* read_only_snapshot_data, bool can_rehash);

  void CheckIsolateLayout();

  void InitializeCodeRanges();
  void AddCodeMemoryRange(MemoryRange range);

  // Common method to create an Isolate used by Isolate::New() and
  // Isolate::NewShared().
  static Isolate* Allocate(bool is_shared);

  static void RemoveContextIdCallback(const v8::WeakCallbackInfo<void>& data);

  void FireCallCompletedCallbackInternal(MicrotaskQueue* microtask_queue);

  class ThreadDataTable {
   public:
    ThreadDataTable() = default;

    PerIsolateThreadData* Lookup(ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads();

   private:
    struct Hasher {
      std::size_t operator()(const ThreadId& t) const {
        return std::hash<int>()(t.ToInteger());
      }
    };

    std::unordered_map<ThreadId, PerIsolateThreadData*, Hasher> table_;
  };

  // These items form a stack synchronously with threads Enter'ing and Exit'ing
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item pushed to the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate, EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) {}
    EntryStackItem(const EntryStackItem&) = delete;
    EntryStackItem& operator=(const EntryStackItem&) = delete;

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;
  };

  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
  static base::Thread::LocalStorageKey isolate_key_;

#ifdef DEBUG
  static std::atomic<bool> isolate_key_created_;
#endif

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  // Propagate pending exception message to the v8::TryCatch.
  // If there is no external try-catch or message was successfully propagated,
  // then return true.
  bool PropagatePendingExceptionToExternalTryCatch();

  void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
                                           Handle<JSPromise> promise);

  bool HasIsolatePromiseHooks() const {
    return PromiseHookFields::HasIsolatePromiseHook::decode(
        promise_hook_flags_);
  }

  bool HasAsyncEventDelegate() const {
    return PromiseHookFields::HasAsyncEventDelegate::decode(
        promise_hook_flags_);
  }

  const char* RAILModeName(RAILMode rail_mode) const {
    switch (rail_mode) {
      case PERFORMANCE_RESPONSE:
        return "RESPONSE";
      case PERFORMANCE_ANIMATION:
        return "ANIMATION";
      case PERFORMANCE_IDLE:
        return "IDLE";
      case PERFORMANCE_LOAD:
        return "LOAD";
    }
    return "";
  }

  void AddCrashKeysForIsolateAndHeapPointers();

  // Returns the Exception sentinel.
  Object ThrowInternal(Object exception, MessageLocation* location);

  // Methods for appending and removing to/from client isolates list.
  void AppendAsClientIsolate(Isolate* client);
  void RemoveAsClientIsolate(Isolate* client);

  // This class contains a collection of data accessible from both C++ runtime
  // and compiled code (including assembly stubs, builtins, interpreter bytecode
  // handlers and optimized code).
  IsolateData isolate_data_;

  std::unique_ptr<IsolateAllocator> isolate_allocator_;
  Heap heap_;
  ReadOnlyHeap* read_only_heap_ = nullptr;
  std::shared_ptr<ReadOnlyArtifacts> artifacts_;
  std::unique_ptr<StringTable> string_table_;

  const int id_;
  EntryStackItem* entry_stack_ = nullptr;
  int stack_trace_nesting_level_ = 0;
  StringStream* incomplete_message_ = nullptr;
  Address isolate_addresses_[kIsolateAddressCount + 1] = {};
  Bootstrapper* bootstrapper_ = nullptr;
  RuntimeProfiler* runtime_profiler_ = nullptr;
  CompilationCache* compilation_cache_ = nullptr;
  std::shared_ptr<Counters> async_counters_;
  base::RecursiveMutex break_access_;
  base::SharedMutex feedback_vector_access_;
  base::SharedMutex internalized_string_access_;
  base::SharedMutex full_transition_array_access_;
  base::SharedMutex shared_function_info_access_;
  base::SharedMutex map_updater_access_;
  base::SharedMutex boilerplate_migration_access_;
  Logger* logger_ = nullptr;
  StubCache* load_stub_cache_ = nullptr;
  StubCache* store_stub_cache_ = nullptr;
  Deoptimizer* current_deoptimizer_ = nullptr;
  bool deoptimizer_lazy_throw_ = false;
  MaterializedObjectStore* materialized_object_store_ = nullptr;
  bool capture_stack_trace_for_uncaught_exceptions_ = false;
  int stack_trace_for_uncaught_exceptions_frame_limit_ = 0;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_ =
      StackTrace::kOverview;
  DescriptorLookupCache* descriptor_lookup_cache_ = nullptr;
  HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_ = nullptr;
  UnicodeCache* unicode_cache_ = nullptr;
  AccountingAllocator* allocator_ = nullptr;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_ = nullptr;
  GlobalHandles* global_handles_ = nullptr;
  EternalHandles* eternal_handles_ = nullptr;
  ThreadManager* thread_manager_ = nullptr;
  bigint::Processor* bigint_processor_ = nullptr;
  RuntimeState runtime_state_;
  Builtins builtins_;
  SetupIsolateDelegate* setup_delegate_ = nullptr;
#if defined(DEBUG) || defined(VERIFY_HEAP)
  std::atomic<int> num_active_deserializers_;
#endif
#ifndef V8_INTL_SUPPORT
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
#endif  // !V8_INTL_SUPPORT
  RegExpStack* regexp_stack_ = nullptr;
  std::vector<int> regexp_indices_;
  DateCache* date_cache_ = nullptr;
  base::RandomNumberGenerator* random_number_generator_ = nullptr;
  base::RandomNumberGenerator* fuzzer_rng_ = nullptr;
  std::atomic<RAILMode> rail_mode_;
  v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
  void* atomics_wait_callback_data_ = nullptr;
  PromiseHook promise_hook_ = nullptr;
  HostImportModuleDynamicallyWithImportAssertionsCallback
      host_import_module_dynamically_with_import_assertions_callback_ = nullptr;
  std::atomic<debug::CoverageMode> code_coverage_mode_{
      debug::CoverageMode::kBestEffort};

  // Helper function for RunHostImportModuleDynamicallyCallback.
  // Unpacks import assertions, if present, from the second argument to dynamic
  // import() and returns them in a FixedArray, sorted by code point order of
  // the keys, in the form [key1, value1, key2, value2, ...]. Returns an empty
  // MaybeHandle if an error was thrown.  In this case, the host callback should
  // not be called and instead the caller should use the pending exception to
  // reject the import() call's Promise.
  MaybeHandle<FixedArray> GetImportAssertionsFromArgument(
      MaybeHandle<Object> maybe_import_assertions_argument);

  HostInitializeImportMetaObjectCallback
      host_initialize_import_meta_object_callback_ = nullptr;
  base::Mutex rail_mutex_;
  double load_start_time_ms_ = 0;

#ifdef V8_INTL_SUPPORT
  std::string default_locale_;

  // The cache stores the most recently accessed {locales,obj} pair for each
  // cache type.
  struct ICUObjectCacheEntry {
    std::string locales;
    std::shared_ptr<icu::UMemory> obj;

    ICUObjectCacheEntry() = default;
    ICUObjectCacheEntry(std::string locales, std::shared_ptr<icu::UMemory> obj)
        : locales(locales), obj(std::move(obj)) {}
  };

  ICUObjectCacheEntry icu_object_cache_[kICUObjectCacheTypeCount];
#endif  // V8_INTL_SUPPORT

  // true if being profiled. Causes collection of extra compile info.
  std::atomic<bool> is_profiling_{false};

  // Whether the isolate has been created for snapshotting.
  bool serializer_enabled_ = false;

  // True if fatal error has been signaled for this isolate.
  bool has_fatal_error_ = false;

  // True if this isolate was initialized from a snapshot.
  bool initialized_from_snapshot_ = false;

  // True if the short builtin calls optimization is enabled.
  bool is_short_builtin_calls_enabled_ = false;

  // True if the isolate is in background. This flag is used
  // to prioritize between memory usage and latency.
  bool is_isolate_in_background_ = false;

  // True if the isolate is in memory savings mode. This flag is used to
  // favor memory over runtime performance.
  bool memory_savings_mode_active_ = false;

  // Time stamp at initialization.
  double time_millis_at_init_ = 0;

#ifdef DEBUG
  static std::atomic<size_t> non_disposed_isolates_;

  JSObject::SpillInformation js_spill_information_;
#endif

  Debug* debug_ = nullptr;
  HeapProfiler* heap_profiler_ = nullptr;
  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;

  const AstStringConstants* ast_string_constants_ = nullptr;

  interpreter::Interpreter* interpreter_ = nullptr;

  compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
  // The following zone is for compiler-related objects that should live
  // through all compilations (and thus all JSHeapBroker instances).
  Zone* compiler_zone_ = nullptr;

  LazyCompileDispatcher* compiler_dispatcher_ = nullptr;
  baseline::BaselineBatchCompiler* baseline_batch_compiler_ = nullptr;

  using InterruptEntry = std::pair<InterruptCallback, void*>;
  std::queue<InterruptEntry> api_interrupts_queue_;

#define GLOBAL_BACKING_STORE(type, name, initialvalue) type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  bool detailed_source_positions_for_profiling_;

  OptimizingCompileDispatcher* optimizing_compile_dispatcher_ = nullptr;

  std::unique_ptr<PersistentHandlesList> persistent_handles_list_;

  // Counts deopt points if deopt_every_n_times is enabled.
  unsigned int stress_deopt_count_ = 0;

  bool force_slow_path_ = false;

  bool initialized_ = false;
  bool jitless_ = false;

  int next_optimization_id_ = 0;

#if V8_SFI_HAS_UNIQUE_ID
  std::atomic<int> next_unique_sfi_id_;
#endif

  unsigned next_module_async_evaluating_ordinal_;

  // Vector of callbacks before a Call starts execution.
  std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;

  // Vector of callbacks when a Call completes.
  std::vector<CallCompletedCallback> call_completed_callbacks_;

  v8::Isolate::UseCounterCallback use_counter_callback_ = nullptr;

  std::shared_ptr<metrics::Recorder> metrics_recorder_;
  uintptr_t last_recorder_context_id_ = 0;
  std::unordered_map<
      uintptr_t,
      Persistent<v8::Context, v8::CopyablePersistentTraits<v8::Context>>>
      recorder_context_id_map_;

  size_t last_long_task_stats_counter_ = 0;
  v8::metrics::LongTaskStats long_task_stats_;

  std::vector<Object> startup_object_cache_;

  // Used during builtins compilation to build the builtins constants table,
  // which is stored on the root list prior to serialization.
  BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;

  void InitializeDefaultEmbeddedBlob();
  void CreateAndSetEmbeddedBlob();
  void MaybeRemapEmbeddedBuiltinsIntoCodeRange();
  void TearDownEmbeddedBlob();
  void SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
                       const uint8_t* data, uint32_t data_size);
  void ClearEmbeddedBlob();

  const uint8_t* embedded_blob_code_ = nullptr;
  uint32_t embedded_blob_code_size_ = 0;
  const uint8_t* embedded_blob_data_ = nullptr;
  uint32_t embedded_blob_data_size_ = 0;

  v8::ArrayBuffer::Allocator* array_buffer_allocator_ = nullptr;
  std::shared_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_shared_;

  FutexWaitListNode futex_wait_list_node_;

  CancelableTaskManager* cancelable_task_manager_ = nullptr;

  debug::ConsoleDelegate* console_delegate_ = nullptr;

  debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
  uint32_t promise_hook_flags_ = 0;
  int async_task_count_ = 0;

  std::unique_ptr<LocalIsolate> main_thread_local_isolate_;

  v8::Isolate::AbortOnUncaughtExceptionCallback
      abort_on_uncaught_exception_callback_ = nullptr;

  bool allow_atomics_wait_ = true;

  base::Mutex managed_ptr_destructors_mutex_;
  ManagedPtrDestructor* managed_ptr_destructors_head_ = nullptr;

  size_t total_regexp_code_generated_ = 0;

  size_t elements_deletion_counter_ = 0;

  std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;

  EmbeddedFileWriterInterface* embedded_file_writer_ = nullptr;

  // The top entry of the v8::Context::BackupIncumbentScope stack.
  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
      nullptr;

  PrepareStackTraceCallback prepare_stack_trace_callback_ = nullptr;

  // TODO(kenton@cloudflare.com): This mutex can be removed if
  // thread_data_table_ is always accessed under the isolate lock. I do not
  // know if this is the case, so I'm preserving it for now.
  base::Mutex thread_data_table_mutex_;
  ThreadDataTable thread_data_table_;

  // Set to true if this isolate is used as shared heap.
  const bool is_shared_;

  // Stores the shared isolate for this client isolate. nullptr for shared
  // isolates or when no shared isolate is used.
  Isolate* shared_isolate_ = nullptr;

  // A shared isolate will use these two fields to track all its client
  // isolates.
  base::Mutex client_isolate_mutex_;
  Isolate* client_isolate_head_ = nullptr;

  // Used to form a linked list of all client isolates. Protected by
  // client_isolate_mutex_.
  Isolate* prev_client_isolate_ = nullptr;
  Isolate* next_client_isolate_ = nullptr;

  // A signal-safe vector of heap pages containing code. Used with the
  // v8::Unwinder API.
  std::atomic<std::vector<MemoryRange>*> code_pages_{nullptr};
  std::vector<MemoryRange> code_pages_buffer1_;
  std::vector<MemoryRange> code_pages_buffer2_;

  // Enables the host application to provide a mechanism for recording a
  // predefined set of data as crash keys to be used in postmortem debugging
  // in case of a crash.
  AddCrashKeyCallback add_crash_key_callback_ = nullptr;

  // Delete new/delete operators to ensure that Isolate::New() and
  // Isolate::Delete() are used for Isolate creation and deletion.
  void* operator new(size_t, void* ptr) { return ptr; }

  friend class heap::HeapTester;
  friend class TestSerializer;
};

#undef FIELD_ACCESSOR
#undef THREAD_LOCAL_TOP_ACCESSOR

class PromiseOnStack {
 public:
  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
      : promise_(promise), prev_(prev) {}
  Handle<JSObject> promise() { return promise_; }
  PromiseOnStack* prev() { return prev_; }

 private:
  Handle<JSObject> promise_;
  PromiseOnStack* prev_;
};

// SaveContext scopes save the current context on the Isolate on creation, and
// restore it on destruction.
class V8_EXPORT_PRIVATE SaveContext {
 public:
  explicit SaveContext(Isolate* isolate);

  ~SaveContext();

  Handle<Context> context() { return context_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(CommonFrame* frame);

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  Address c_entry_fp_;
};
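// Illustrative use (sketch): temporarily run with a different context and let
// the destructor restore the previous one.
//   {
//     SaveContext save(isolate);
//     isolate->set_context(*other_context);  // other_context: Handle<Context>
//     ...  // code that relies on other_context
//   }  // the saved context is restored here.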

// Like SaveContext, but also switches the Context to a new one in the
// constructor.
class V8_EXPORT_PRIVATE SaveAndSwitchContext : public SaveContext {
 public:
  SaveAndSwitchContext(Isolate* isolate, Context new_context);
};

// A scope which sets the given isolate's context to null for its lifetime to
// ensure that code does not make assumptions on a context being available.
class V8_NODISCARD NullContextScope : public SaveAndSwitchContext {
 public:
  explicit NullContextScope(Isolate* isolate)
      : SaveAndSwitchContext(isolate, Context()) {}
};
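// Illustrative use (sketch): ensure a block of code does not rely on a
// current context being set.
//   {
//     NullContextScope no_context_scope(isolate);
//     ...  // code that must not assume isolate->context() is usable
//   }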

class AssertNoContextChange {
#ifdef DEBUG
 public:
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() { DCHECK(isolate_->context() == *context_); }

 private:
  Isolate* isolate_;
  Handle<Context> context_;
#else
 public:
  explicit AssertNoContextChange(Isolate* isolate) {}
#endif
};

class ExecutionAccess {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access()->TryLock();
  }

 private:
  Isolate* isolate_;
};

// Support for checking for stack-overflows.
class StackLimitCheck {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) {}

  // Use this to check for stack-overflows in C++ code.
  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->real_climit();
  }
  static bool HasOverflowed(LocalIsolate* local_isolate);

  // Use this to check for interrupt request in C++ code.
  bool InterruptRequested() {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->climit();
  }

  // Use this to check for stack-overflow when entering runtime from JS code.
  bool JsHasOverflowed(uintptr_t gap = 0) const;

 private:
  Isolate* isolate_;
};
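// Illustrative use in C++ runtime code (sketch):
//   StackLimitCheck check(isolate);
//   if (check.JsHasOverflowed()) return isolate->StackOverflow();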

// This macro may be used in context that disallows JS execution.
// That is why it checks only for a stack overflow and termination.
#define STACK_CHECK(isolate, result_value)                   \
  do {                                                       \
    StackLimitCheck stack_check(isolate);                    \
    if (stack_check.InterruptRequested()) {                  \
      if (stack_check.HasOverflowed()) {                     \
        isolate->StackOverflow();                            \
        return result_value;                                 \
      }                                                      \
      if (isolate->stack_guard()->HasTerminationRequest()) { \
        isolate->TerminateExecution();                       \
        return result_value;                                 \
      }                                                      \
    }                                                        \
  } while (false)
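// Illustrative expansion site (sketch; WalkDeeply is a hypothetical helper):
// a function returning a MaybeHandle bails out with an empty result on
// overflow or termination.
//   MaybeHandle<Object> WalkDeeply(Isolate* isolate /*, ... */) {
//     STACK_CHECK(isolate, MaybeHandle<Object>());
//     ...
//   }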

class StackTraceFailureMessage {
 public:
  explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
                                    void* ptr2 = nullptr, void* ptr3 = nullptr,
                                    void* ptr4 = nullptr);

  V8_NOINLINE void Print() volatile;

  static const uintptr_t kStartMarker = 0xdecade30;
  static const uintptr_t kEndMarker = 0xdecade31;
  static const int kStacktraceBufferSize = 32 * KB;

  uintptr_t start_marker_ = kStartMarker;
  void* isolate_;
  void* ptr1_;
  void* ptr2_;
  void* ptr3_;
  void* ptr4_;
  void* code_objects_[4];
  char js_stack_trace_[kStacktraceBufferSize];
  uintptr_t end_marker_ = kEndMarker;
};

template <base::MutexSharedType kIsShared>
class V8_NODISCARD SharedMutexGuardIfOffThread<Isolate, kIsShared> final {
 public:
  SharedMutexGuardIfOffThread(base::SharedMutex* mutex, Isolate* isolate) {
    DCHECK_NOT_NULL(mutex);
    DCHECK_NOT_NULL(isolate);
    DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
  }

  SharedMutexGuardIfOffThread(const SharedMutexGuardIfOffThread&) = delete;
  SharedMutexGuardIfOffThread& operator=(const SharedMutexGuardIfOffThread&) =
      delete;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_EXECUTION_ISOLATE_H_