// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-memory.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"

namespace v8 {
namespace internal {
namespace wasm {

namespace {

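// Attempts to allocate a backing store for a wasm memory of {size} bytes.
// Reserves address space with the WasmMemoryTracker (triggering GCs and
// retrying if the address space limit is hit), maps the reservation as
// inaccessible, then commits the first {size} bytes (rounded up to the wasm
// page size) as read-write and registers the allocation with the tracker.
// Returns the start of the usable memory, or nullptr on failure; the full
// reservation is reported via {allocation_base} and {allocation_length}.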
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
                              size_t size, bool require_full_guard_regions,
                              void** allocation_base,
                              size_t* allocation_length) {
  using AllocationStatus = WasmMemoryTracker::AllocationStatus;
#if V8_TARGET_ARCH_32_BIT
  DCHECK(!require_full_guard_regions);
#endif
  // We always allocate the largest possible offset into the heap, so the
  // addressable memory after the guard page can be made inaccessible.
  *allocation_length =
      require_full_guard_regions
          ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
          : RoundUp(
                base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
                kWasmPageSize);
  DCHECK_GE(*allocation_length, size);
  DCHECK_GE(*allocation_length, kWasmPageSize);

  // Let the WasmMemoryTracker know we are going to reserve a bunch of
  // address space.
  // Try up to three times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs.
  // TODO(gc): Fix this to only require one GC (crbug.com/v8/7621).
  bool did_retry = false;
  for (int trial = 0;; ++trial) {
    if (memory_tracker->ReserveAddressSpace(*allocation_length)) break;
    // Collect garbage and retry.
    heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
    did_retry = true;
    // After first and second GC: retry.
    if (trial < 2) continue;
    // We are over the address space limit. Fail.
    //
    // When running under the correctness fuzzer (i.e.
    // --abort-on-stack-or-string-length-overflow is present), we crash instead
    // so it is not incorrectly reported as a correctness violation. See
    // https://crbug.com/828293#c4
    if (FLAG_abort_on_stack_or_string_length_overflow) {
      FATAL("could not allocate wasm memory");
    }
    memory_tracker->AddAllocationStatusSample(
        AllocationStatus::kAddressSpaceLimitReachedFailure);
    return nullptr;
  }

  // The Reserve makes the whole region inaccessible by default.
  *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
                                   PageAllocator::kNoAccess);
  if (*allocation_base == nullptr) {
    memory_tracker->ReleaseReservation(*allocation_length);
    memory_tracker->AddAllocationStatusSample(AllocationStatus::kOtherFailure);
    return nullptr;
  }
  void* memory = *allocation_base;

  // Make the part we care about accessible.
  if (size > 0) {
    bool result = SetPermissions(memory, RoundUp(size, kWasmPageSize),
                                 PageAllocator::kReadWrite);
    // SetPermissions commits the extra memory, which may put us over the
    // process memory limit. If so, report this as an OOM.
    if (!result) {
      V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
    }
  }

  memory_tracker->RegisterAllocation(*allocation_base, *allocation_length,
                                     memory, size);
  memory_tracker->AddAllocationStatusSample(
      did_retry ? AllocationStatus::kSuccessAfterRetry
                : AllocationStatus::kSuccess);
  return memory;
}
}  // namespace

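// Frees the lazily allocated empty backing store (if any) and verifies that
// all other reservations and allocations have already been released.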
WasmMemoryTracker::~WasmMemoryTracker() {
  if (empty_backing_store_.allocation_base != nullptr) {
    CHECK(FreePages(empty_backing_store_.allocation_base,
                    empty_backing_store_.allocation_length));
    InternalReleaseAllocation(empty_backing_store_.buffer_start);
  }

  // All reserved address space should be released before the allocation tracker
  // is destroyed.
  DCHECK_EQ(reserved_address_space_, 0u);
  DCHECK_EQ(allocated_address_space_, 0u);
}

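// Atomically reserves {num_bytes} of address space against a platform-wide
// cap (1 TiB on 64-bit targets, 2 GiB elsewhere). Returns false and undoes
// the reservation if the cap would be exceeded.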
bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
// Address space reservations are currently only meaningful using guard
// regions, which is currently only supported on 64-bit systems. On other
// platforms, we always fall back on bounds checks.
#if V8_TARGET_ARCH_64_BIT
  constexpr size_t kAddressSpaceLimit = 0x10000000000L;  // 1 TiB
#else
  constexpr size_t kAddressSpaceLimit = 0x80000000;  // 2 GiB
#endif

  size_t const old_count = reserved_address_space_.fetch_add(num_bytes);
  DCHECK_GE(old_count + num_bytes, old_count);
  if (old_count + num_bytes <= kAddressSpaceLimit) {
    return true;
  }
  reserved_address_space_ -= num_bytes;
  return false;
}

void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
  size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
  USE(old_reserved);
  DCHECK_LE(num_bytes, old_reserved);
}

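// Records a successful allocation under the mutex so the buffer can later be
// looked up by its start address, queried, or released.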
void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
                                           size_t allocation_length,
                                           void* buffer_start,
                                           size_t buffer_length) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);

  allocated_address_space_ += allocation_length;
  AddAddressSpaceSample();

  allocations_.emplace(buffer_start,
                       AllocationData{allocation_base, allocation_length,
                                      buffer_start, buffer_length});
}

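// Removes the allocation starting at {buffer_start} from the tracker and
// returns its AllocationData so the caller can free the underlying pages.
// The shared empty backing store is never released this way.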
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
    const void* buffer_start) {
  if (IsEmptyBackingStore(buffer_start)) {
    return AllocationData();
  }
  return InternalReleaseAllocation(buffer_start);
}

WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation(
    const void* buffer_start) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);

  auto find_result = allocations_.find(buffer_start);
  CHECK_NE(find_result, allocations_.end());

  if (find_result != allocations_.end()) {
    size_t num_bytes = find_result->second.allocation_length;
    DCHECK_LE(num_bytes, reserved_address_space_);
    DCHECK_LE(num_bytes, allocated_address_space_);
    reserved_address_space_ -= num_bytes;
    allocated_address_space_ -= num_bytes;
    AddAddressSpaceSample();

    AllocationData allocation_data = find_result->second;
    allocations_.erase(find_result);
    return allocation_data;
  }
  UNREACHABLE();
}

const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
    const void* buffer_start) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);
  const auto& result = allocations_.find(buffer_start);
  if (result != allocations_.end()) {
    return &result->second;
  }
  return nullptr;
}

bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
  base::LockGuard<base::Mutex> scope_lock(&mutex_);
  return allocations_.find(buffer_start) != allocations_.end();
}

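// Lazily allocates a single zero-length backing store, caches it in
// {empty_backing_store_}, and returns its buffer start. Subsequent calls
// reuse the cached allocation.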
void* WasmMemoryTracker::GetEmptyBackingStore(void** allocation_base,
                                              size_t* allocation_length,
                                              Heap* heap) {
  if (empty_backing_store_.allocation_base == nullptr) {
    constexpr size_t buffer_length = 0;
    const bool require_full_guard_regions =
        trap_handler::IsTrapHandlerEnabled();
    void* local_allocation_base;
    size_t local_allocation_length;
    void* buffer_start = TryAllocateBackingStore(
        this, heap, buffer_length, require_full_guard_regions,
        &local_allocation_base, &local_allocation_length);

    empty_backing_store_ =
        AllocationData(local_allocation_base, local_allocation_length,
                       buffer_start, buffer_length);
  }
  *allocation_base = empty_backing_store_.allocation_base;
  *allocation_length = empty_backing_store_.allocation_length;
  return empty_backing_store_.buffer_start;
}

bool WasmMemoryTracker::IsEmptyBackingStore(const void* buffer_start) const {
  return buffer_start == empty_backing_store_.buffer_start;
}

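// Frees the backing store if {buffer_start} is a tracked wasm memory.
// Returns true if the tracker took responsibility for the memory (including
// the shared empty backing store, which is only freed at shutdown), so the
// caller must not free it again; returns false for unknown buffers.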
bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) {
  if (IsEmptyBackingStore(buffer_start)) {
    // We don't need to do anything for the empty backing store, because this
    // will be freed when WasmMemoryTracker shuts down. Return true so callers
    // will not try to free the buffer on their own.
    return true;
  }
  if (IsWasmMemory(buffer_start)) {
    const AllocationData allocation = ReleaseAllocation(buffer_start);
    CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
    return true;
  }
  return false;
}

void WasmMemoryTracker::AddAllocationStatusSample(AllocationStatus status) {
  if (allocation_result_) {
    allocation_result_->AddSample(static_cast<int>(status));
  }
}

void WasmMemoryTracker::AddAddressSpaceSample() {
  if (address_space_usage_mb_) {
    // Report address space usage in MiB so the full range fits in an int on all
    // platforms.
    address_space_usage_mb_->AddSample(
        static_cast<int>(allocated_address_space_ >> 20));
  }
}

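// Wraps an already-allocated wasm backing store in a tenured JSArrayBuffer
// that is marked as non-neuterable, growable wasm memory.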
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
                                       size_t size, bool is_external,
                                       SharedFlag shared) {
  Handle<JSArrayBuffer> buffer =
      isolate->factory()->NewJSArrayBuffer(shared, TENURED);
  DCHECK_GE(kMaxInt, size);
  if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
  constexpr bool is_wasm_memory = true;
  JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
                       static_cast<int>(size), shared, is_wasm_memory);
  buffer->set_is_neuterable(false);
  buffer->set_is_growable(true);
  return buffer;
}

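// Allocates a new backing store of {size} bytes (or reuses the shared empty
// backing store for size 0) and returns it wrapped in a JSArrayBuffer.
// Returns an empty MaybeHandle if the size exceeds the configured limits or
// the allocation fails.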
MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
                                          SharedFlag shared) {
  // Check against kMaxInt, since the byte length is stored as int in the
  // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
  // line, and we don't want to fail a CHECK then.
  if (size > FLAG_wasm_max_mem_pages * kWasmPageSize || size > kMaxInt) {
    // TODO(titzer): lift restriction on maximum memory allocated here.
    return {};
  }

  WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();

  // Set by TryAllocateBackingStore or GetEmptyBackingStore
  void* allocation_base = nullptr;
  size_t allocation_length = 0;

  void* memory;
  if (size == 0) {
    memory = memory_tracker->GetEmptyBackingStore(
        &allocation_base, &allocation_length, isolate->heap());
  } else {
#if V8_TARGET_ARCH_64_BIT
    bool require_full_guard_regions = true;
#else
    bool require_full_guard_regions = false;
#endif
    memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
                                     require_full_guard_regions,
                                     &allocation_base, &allocation_length);
    if (memory == nullptr && !trap_handler::IsTrapHandlerEnabled()) {
      // If we failed to allocate with full guard regions, fall back on
      // mini-guards.
      require_full_guard_regions = false;
      memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
                                       require_full_guard_regions,
                                       &allocation_base, &allocation_length);
    }
  }
  if (memory == nullptr) {
    return {};
  }

#if DEBUG
  // Double check the API allocator actually zero-initialized the memory.
  const byte* bytes = reinterpret_cast<const byte*>(memory);
  for (size_t i = 0; i < size; ++i) {
    DCHECK_EQ(0, bytes[i]);
  }
#endif

  reinterpret_cast<v8::Isolate*>(isolate)
      ->AdjustAmountOfExternalAllocatedMemory(size);

  constexpr bool is_external = false;
  return SetupArrayBuffer(isolate, memory, size, is_external, shared);
}

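// Detaches (neuters) a wasm memory buffer. Shared buffers cannot be detached.
// If the buffer still owns its backing store, it is externalized first and,
// when {free_memory} is set, the backing store is freed before the buffer is
// neutered.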
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
                        bool free_memory) {
  if (buffer->is_shared()) return;  // Detaching shared buffers is impossible.
  DCHECK(!buffer->is_neuterable());

  const bool is_external = buffer->is_external();
  DCHECK(!buffer->is_neuterable());
  if (!is_external) {
    buffer->set_is_external(true);
    isolate->heap()->UnregisterArrayBuffer(*buffer);
    if (free_memory) {
      // We need to free the memory before neutering the buffer because
      // FreeBackingStore reads buffer->allocation_base(), which is nulled out
      // by Neuter. This means there is a dangling pointer until we neuter the
      // buffer. Since there is no way for the user to directly call
      // FreeBackingStore, we can ensure this is safe.
      buffer->FreeBackingStoreFromMainThread();
    }
  }

  DCHECK(buffer->is_external());
  buffer->set_is_wasm_memory(false);
  buffer->set_is_neuterable(true);
  buffer->Neuter();
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8