// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/handles/handles.h"

#include "src/api/api.h"
#include "src/base/logging.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/objects-inl.h"
#include "src/roots/roots-inl.h"
#include "src/utils/address-map.h"
#include "src/utils/identity-map.h"

#ifdef DEBUG
// For GetIsolateFromWritableHeapObject.
#include "src/heap/heap-write-barrier-inl.h"
#endif

namespace v8 {
namespace internal {

// Handles should be trivially copyable so that they can be efficiently passed
// by value. If they are not trivially copyable, they cannot be passed in
// registers.
ASSERT_TRIVIALLY_COPYABLE(HandleBase);
ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);

#ifdef DEBUG
// Debug-only check that dereferencing this handle's location is currently
// legal. Returns false when the dereference could observe a stale location or
// a handle owned by another (background) thread. The checks are ordered: the
// read-only-heap test must come before GetIsolateFromWritableObject, which is
// only valid for writable objects.
bool HandleBase::IsDereferenceAllowed() const {
  DCHECK_NOT_NULL(location_);
  Object object(*location_);
  // Smis are immediate values, not heap pointers; always safe to read.
  if (object.IsSmi()) return true;
  HeapObject heap_object = HeapObject::cast(object);
  // Objects in the read-only heap never move; always safe.
  if (IsReadOnlyHeapObject(heap_object)) return true;
  Isolate* isolate = GetIsolateFromWritableObject(heap_object);
  RootIndex root_index;
  // Handle locations inside the roots table that refer to immortal-immovable
  // roots remain valid for the whole isolate lifetime.
  if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
      RootsTable::IsImmortalImmovable(root_index)) {
    return true;
  }
  // Builtin-table slots are likewise isolate-lifetime locations.
  if (isolate->IsBuiltinTableHandleLocation(location_)) return true;
  // Respect an explicit DisallowHandleDereference scope.
  if (!AllowHandleDereference::IsAllowed()) return false;

  LocalHeap* local_heap = isolate->CurrentLocalHeap();

  // Local heap can't access handles when parked
  if (!local_heap->IsHandleDereferenceAllowed()) {
    StdoutStream{} << "Cannot dereference handle owned by "
                   << "non-running local heap\n";
    return false;
  }

  // We are pretty strict with handle dereferences on background threads: A
  // background local heap is only allowed to dereference its own local or
  // persistent handles.
  if (!local_heap->is_main_thread()) {
    // The current thread owns the handle and thus can dereference it.
    return local_heap->ContainsPersistentHandle(location_) ||
           local_heap->ContainsLocalHandle(location_);
  }
  // If LocalHeap::Current() is null, we're on the main thread -- if we were to
  // check main thread HandleScopes here, we should additionally check the
  // main-thread LocalHeap.
  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());

  // TODO(leszeks): Check if the main thread owns this handle.
  return true;
}
#endif  // DEBUG

NumberOfHandles(Isolate * isolate)76 int HandleScope::NumberOfHandles(Isolate* isolate) {
77 HandleScopeImplementer* impl = isolate->handle_scope_implementer();
78 int n = static_cast<int>(impl->blocks()->size());
79 if (n == 0) return 0;
80 return ((n - 1) * kHandleBlockSize) +
81 static_cast<int>(
82 (isolate->handle_scope_data()->next - impl->blocks()->back()));
83 }
Extend(Isolate * isolate)85 Address* HandleScope::Extend(Isolate* isolate) {
86 HandleScopeData* current = isolate->handle_scope_data();
87
88 Address* result = current->next;
89
90 DCHECK(result == current->limit);
91 // Make sure there's at least one scope on the stack and that the
92 // top of the scope stack isn't a barrier.
93 if (!Utils::ApiCheck(current->level != current->sealed_level,
94 "v8::HandleScope::CreateHandle()",
95 "Cannot create a handle without a HandleScope")) {
96 return nullptr;
97 }
98 HandleScopeImplementer* impl = isolate->handle_scope_implementer();
99 // If there's more room in the last block, we use that. This is used
100 // for fast creation of scopes after scope barriers.
101 if (!impl->blocks()->empty()) {
102 Address* limit = &impl->blocks()->back()[kHandleBlockSize];
103 if (current->limit != limit) {
104 current->limit = limit;
105 DCHECK_LT(limit - current->next, kHandleBlockSize);
106 }
107 }
108
109 // If we still haven't found a slot for the handle, we extend the
110 // current handle scope by allocating a new handle block.
111 if (result == current->limit) {
112 // If there's a spare block, use it for growing the current scope.
113 result = impl->GetSpareOrNewBlock();
114 // Add the extension to the global list of blocks, but count the
115 // extension as part of the current scope.
116 impl->blocks()->push_back(result);
117 current->limit = &result[kHandleBlockSize];
118 }
119
120 return result;
121 }
DeleteExtensions(Isolate * isolate)123 void HandleScope::DeleteExtensions(Isolate* isolate) {
124 HandleScopeData* current = isolate->handle_scope_data();
125 isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
126 }

#ifdef ENABLE_HANDLE_ZAPPING
ZapRange(Address * start,Address * end)129 void HandleScope::ZapRange(Address* start, Address* end) {
130 DCHECK_LE(end - start, kHandleBlockSize);
131 for (Address* p = start; p != end; p++) {
132 *p = static_cast<Address>(kHandleZapValue);
133 }
134 }
#endif  // ENABLE_HANDLE_ZAPPING

current_level_address(Isolate * isolate)137 Address HandleScope::current_level_address(Isolate* isolate) {
138 return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
139 }
current_next_address(Isolate * isolate)141 Address HandleScope::current_next_address(Isolate* isolate) {
142 return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
143 }
current_limit_address(Isolate * isolate)145 Address HandleScope::current_limit_address(Isolate* isolate) {
146 return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
147 }
// Opens a scope in which handle creation is canonicalized: handles created
// for the same object (at this scope's nesting level) share one location.
// The canonical-handles map is allocated in |info|'s zone when a compilation
// info is given; otherwise this scope creates and owns its own zone.
CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate,
                                           OptimizedCompilationInfo* info)
    : isolate_(isolate),
      info_(info),
      // Without a compilation info we own the zone and delete it in the dtor.
      zone_(info ? info->zone() : new Zone(isolate->allocator(), ZONE_NAME)) {
  HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
  // Link this scope into the isolate's chain (canonical scopes may nest).
  prev_canonical_scope_ = handle_scope_data->canonical_scope;
  handle_scope_data->canonical_scope = this;
  root_index_map_ = new RootIndexMap(isolate);
  identity_map_ = std::make_unique<CanonicalHandlesMap>(
      isolate->heap(), ZoneAllocationPolicy(zone_));
  // Only handles created at exactly this nesting level are canonicalized;
  // see Lookup().
  canonical_level_ = handle_scope_data->level;
}
// Unlinks this scope from the isolate and transfers (or disposes of) the
// canonical-handles map, depending on who owns the backing zone.
CanonicalHandleScope::~CanonicalHandleScope() {
  delete root_index_map_;
  if (info_) {
    // If we passed a compilation info as parameter, we created the identity map
    // on its zone(). Then, we pass it to the compilation info which is
    // responsible for the disposal.
    info_->set_canonical_handles(DetachCanonicalHandles());
  } else {
    // If we don't have a compilation info, we created the zone manually. To
    // properly dispose of said zone, we need to first free the identity_map_.
    // Then we do so manually even though identity_map_ is a unique_ptr.
    identity_map_.reset();
    delete zone_;
  }
  // Restore the previously active canonical scope (if any).
  isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
}
// Returns a canonical handle location for |object|: repeated lookups for the
// same object at the canonical nesting level yield the same location, either
// a stable roots-table handle or an entry in the identity map.
Address* CanonicalHandleScope::Lookup(Address object) {
  DCHECK_LE(canonical_level_, isolate_->handle_scope_data()->level);
  if (isolate_->handle_scope_data()->level != canonical_level_) {
    // We are in an inner handle scope. Do not canonicalize since we will leave
    // this handle scope while still being in the canonical scope.
    return HandleScope::CreateHandle(isolate_, object);
  }
  if (Internals::HasHeapObjectTag(object)) {
    RootIndex root_index;
    // Known roots already have a stable handle in the roots table; reuse it.
    if (root_index_map_->Lookup(object, &root_index)) {
      return isolate_->root_handle(root_index).location();
    }
  }
  auto find_result = identity_map_->FindOrInsert(Object(object));
  if (!find_result.already_exists) {
    // Allocate new handle location.
    *find_result.entry = HandleScope::CreateHandle(isolate_, object);
  }
  return *find_result.entry;
}
// Releases ownership of the canonical-handles map so it can outlive this
// scope (e.g. inside the OptimizedCompilationInfo; see the destructor).
std::unique_ptr<CanonicalHandlesMap>
CanonicalHandleScope::DetachCanonicalHandles() {
  return std::move(identity_map_);
}

}  // namespace internal
}  // namespace v8