// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/objects/backing-store.h"

#include <cstring>

#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/counters.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"

#define TRACE_BS(...)                                  \
  do {                                                 \
    if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \
  } while (false)

namespace v8 {
namespace internal {

namespace {
#if V8_TARGET_ARCH_64_BIT
constexpr bool kUseGuardRegions = true;
#else
constexpr bool kUseGuardRegions = false;
#endif

#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors, so the
// address space limit needs to be smaller.
constexpr size_t kAddressSpaceLimit = 0x8000000000L;  // 512 GiB
#elif V8_TARGET_ARCH_64_BIT
constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB
#else
constexpr size_t kAddressSpaceLimit = 0xC0000000;  // 3 GiB
#endif

constexpr uint64_t kOneGiB = 1024 * 1024 * 1024;
constexpr uint64_t kNegativeGuardSize = 2 * kOneGiB;

#if V8_TARGET_ARCH_64_BIT
constexpr uint64_t kFullGuardSize = 10 * kOneGiB;
#endif

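// Tracks the total address space currently reserved via
// ReserveAddressSpace(); new reservations are refused once
// kAddressSpaceLimit would be exceeded, and ReleaseReservation()
// returns the budget.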
std::atomic<uint64_t> reserved_address_space_{0};

// Allocation results are reported to UMA
//
// See wasm_memory_allocation_result in counters.h
enum class AllocationStatus {
  kSuccess,  // Succeeded on the first try

  kSuccessAfterRetry,  // Succeeded after garbage collection

  kAddressSpaceLimitReachedFailure,  // Failed because Wasm is at its address
                                     // space limit

  kOtherFailure  // Failed for an unknown reason
};

#if V8_TARGET_ARCH_64_BIT
base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) {
  // Guard regions always look like this:
  // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
  //              ^ buffer_start
  //                              ^ byte_length
  // ^ negative guard region           ^ positive guard region

  Address start = reinterpret_cast<Address>(buffer_start);
  DCHECK_EQ(8, sizeof(size_t));  // only use on 64-bit
  DCHECK_EQ(0, start % AllocatePageSize());
  return base::AddressRegion(start - (2 * kOneGiB),
                             static_cast<size_t>(kFullGuardSize));
}
#endif

base::AddressRegion GetRegion(bool has_guard_regions, void* buffer_start,
                              size_t byte_length, size_t byte_capacity) {
#if V8_TARGET_ARCH_64_BIT
  if (has_guard_regions) return GetGuardedRegion(buffer_start, byte_length);
#else
  DCHECK(!has_guard_regions);
#endif

  return base::AddressRegion(reinterpret_cast<Address>(buffer_start),
                             byte_capacity);
}

size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity) {
#if V8_TARGET_ARCH_64_BIT
  if (has_guard_regions) return kFullGuardSize;
#else
  DCHECK(!has_guard_regions);
#endif

  return byte_capacity;
}

void RecordStatus(Isolate* isolate, AllocationStatus status) {
  isolate->counters()->wasm_memory_allocation_result()->AddSample(
      static_cast<int>(status));
}

inline void DebugCheckZero(void* start, size_t byte_length) {
#if DEBUG
  // Double check memory is zero-initialized. Despite being DEBUG-only,
  // this function is somewhat optimized for the benefit of test suite
  // execution times (some tests allocate several gigabytes).
  const byte* bytes = reinterpret_cast<const byte*>(start);
  const size_t kBaseCase = 32;
  for (size_t i = 0; i < kBaseCase && i < byte_length; i++) {
    DCHECK_EQ(0, bytes[i]);
  }
  // Having checked the first kBaseCase bytes to be zero, we can now use
  // {memcmp} to compare the range against itself shifted by that amount,
  // thereby inductively checking the remaining bytes.
  if (byte_length > kBaseCase) {
    DCHECK_EQ(0, memcmp(bytes, bytes + kBaseCase, byte_length - kBaseCase));
  }
#endif
}
}  // namespace

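// Try to reserve {num_bytes} of address space for a new backing store,
// using a compare-exchange loop so that concurrent reservations never push
// the running total past kAddressSpaceLimit.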
bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
  uint64_t reservation_limit = kAddressSpaceLimit;
  uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
  while (true) {
    if (old_count > reservation_limit) return false;
    if (reservation_limit - old_count < num_bytes) return false;
    if (reserved_address_space_.compare_exchange_weak(
            old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
      return true;
    }
  }
}

void BackingStore::ReleaseReservation(uint64_t num_bytes) {
  uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
  USE(old_reserved);
  DCHECK_LE(num_bytes, old_reserved);
}

// The backing store for a Wasm shared memory remembers all the isolates
// with which it has been shared.
struct SharedWasmMemoryData {
  std::vector<Isolate*> isolates_;
};

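// Reset all fields to the empty state. This releases the shared_ptr to the
// embedder's ArrayBuffer::Allocator if one is held, but does not free the
// buffer itself (the destructor frees it before calling Clear()).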
void BackingStore::Clear() {
  buffer_start_ = nullptr;
  byte_length_ = 0;
  has_guard_regions_ = false;
  if (holds_shared_ptr_to_allocator_) {
    type_specific_data_.v8_api_array_buffer_allocator_shared
        .std::shared_ptr<v8::ArrayBuffer::Allocator>::~shared_ptr();
    holds_shared_ptr_to_allocator_ = false;
  }
  type_specific_data_.v8_api_array_buffer_allocator = nullptr;
}

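// Deallocation depends on how the backing store was created: Wasm memories
// go back to the page allocator, custom-deleter buffers invoke the embedder's
// callback, and ordinary JSArrayBuffer backing stores are freed through the
// embedder's ArrayBuffer::Allocator (unless free_on_destruct_ is false).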
BackingStore::~BackingStore() {
  GlobalBackingStoreRegistry::Unregister(this);

  if (buffer_start_ == nullptr) return;  // nothing to deallocate

  if (is_wasm_memory_) {
    DCHECK(free_on_destruct_);
    DCHECK(!custom_deleter_);
    TRACE_BS("BSw:free  bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
             buffer_start_, byte_length(), byte_capacity_);
    if (is_shared_) {
      // Deallocate the list of attached memory objects.
      SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data();
      delete shared_data;
      type_specific_data_.shared_wasm_memory_data = nullptr;
    }

    // Wasm memories are always allocated through the page allocator.
    auto region = GetRegion(has_guard_regions_, buffer_start_, byte_length_,
                            byte_capacity_);

    bool pages_were_freed =
        region.size() == 0 /* no need to free any pages */ ||
        FreePages(GetPlatformPageAllocator(),
                  reinterpret_cast<void*>(region.begin()), region.size());
    CHECK(pages_were_freed);
    BackingStore::ReleaseReservation(
        GetReservationSize(has_guard_regions_, byte_capacity_));
    Clear();
    return;
  }
  if (custom_deleter_) {
    DCHECK(free_on_destruct_);
    TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
             this, buffer_start_, byte_length(), byte_capacity_);
    type_specific_data_.deleter.callback(buffer_start_, byte_length_,
                                         type_specific_data_.deleter.data);
    Clear();
    return;
  }
  if (free_on_destruct_) {
    // JSArrayBuffer backing store. Deallocate through the embedder's allocator.
    auto allocator = get_v8_api_array_buffer_allocator();
    TRACE_BS("BS:free   bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
             buffer_start_, byte_length(), byte_capacity_);
    allocator->Free(buffer_start_, byte_length_);
  }
  Clear();
}

// Allocate a backing store using the array buffer allocator from the embedder.
std::unique_ptr<BackingStore> BackingStore::Allocate(
    Isolate* isolate, size_t byte_length, SharedFlag shared,
    InitializedFlag initialized) {
  void* buffer_start = nullptr;
  auto allocator = isolate->array_buffer_allocator();
  CHECK_NOT_NULL(allocator);
  if (byte_length != 0) {
    auto counters = isolate->counters();
    int mb_length = static_cast<int>(byte_length / MB);
    if (mb_length > 0) {
      counters->array_buffer_big_allocations()->AddSample(mb_length);
    }
    if (shared == SharedFlag::kShared) {
      counters->shared_array_allocations()->AddSample(mb_length);
    }
    auto allocate_buffer = [allocator, initialized](size_t byte_length) {
      if (initialized == InitializedFlag::kUninitialized) {
        return allocator->AllocateUninitialized(byte_length);
      }
      void* buffer_start = allocator->Allocate(byte_length);
      if (buffer_start) {
        // TODO(wasm): node does not implement the zero-initialization API.
        // Reenable this debug check when node does implement it properly.
        constexpr bool
            kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI = true;
        if ((!(kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI)) &&
            !FLAG_mock_arraybuffer_allocator) {
          DebugCheckZero(buffer_start, byte_length);
        }
      }
      return buffer_start;
    };

    buffer_start = isolate->heap()->AllocateExternalBackingStore(
        allocate_buffer, byte_length);

    if (buffer_start == nullptr) {
      // Allocation failed.
      counters->array_buffer_new_size_failures()->AddSample(mb_length);
      return {};
    }
  }

  auto result = new BackingStore(buffer_start,  // start
                                 byte_length,   // length
                                 byte_length,   // capacity
                                 shared,        // shared
                                 false,         // is_wasm_memory
                                 true,          // free_on_destruct
                                 false,         // has_guard_regions
                                 false,         // custom_deleter
                                 false);        // empty_deleter

  TRACE_BS("BS:alloc  bs=%p mem=%p (length=%zu)\n", result,
           result->buffer_start(), byte_length);
  result->SetAllocatorFromIsolate(isolate);
  return std::unique_ptr<BackingStore>(result);
}

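// Remember which embedder allocator should later free this buffer. If the
// isolate exposes its allocator as a shared_ptr, keep a reference so the
// allocator outlives the backing store; otherwise fall back to the raw
// pointer.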
void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
  if (auto allocator_shared = isolate->array_buffer_allocator_shared()) {
    holds_shared_ptr_to_allocator_ = true;
    new (&type_specific_data_.v8_api_array_buffer_allocator_shared)
        std::shared_ptr<v8::ArrayBuffer::Allocator>(
            std::move(allocator_shared));
  } else {
    type_specific_data_.v8_api_array_buffer_allocator =
        isolate->array_buffer_allocator();
  }
}

// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
    Isolate* isolate, size_t initial_pages, size_t maximum_pages,
    SharedFlag shared) {
  // Cannot reserve 0 pages on some OSes.
  if (maximum_pages == 0) maximum_pages = 1;

  TRACE_BS("BSw:try   %zu pages, %zu max\n", initial_pages, maximum_pages);

  bool guards = kUseGuardRegions;

  // For accounting purposes, whether a GC was necessary.
  bool did_retry = false;

  // A helper to try running a function up to 3 times, executing a GC
  // if the first and second attempts failed.
  auto gc_retry = [&](const std::function<bool()>& fn) {
    for (int i = 0; i < 3; i++) {
      if (fn()) return true;
      // Collect garbage and retry.
      did_retry = true;
      // TODO(wasm): try Heap::EagerlyFreeExternalMemory() first?
      isolate->heap()->MemoryPressureNotification(
          MemoryPressureLevel::kCritical, true);
    }
    return false;
  };

  // Compute size of reserved memory.

  size_t engine_max_pages = wasm::max_maximum_mem_pages();
  maximum_pages = std::min(engine_max_pages, maximum_pages);
  CHECK_LE(maximum_pages,
           std::numeric_limits<size_t>::max() / wasm::kWasmPageSize);
  size_t byte_capacity = maximum_pages * wasm::kWasmPageSize;
  size_t reservation_size = GetReservationSize(guards, byte_capacity);

  //--------------------------------------------------------------------------
  // 1. Enforce maximum address space reservation per engine.
  //--------------------------------------------------------------------------
  auto reserve_memory_space = [&] {
    return BackingStore::ReserveAddressSpace(reservation_size);
  };

  if (!gc_retry(reserve_memory_space)) {
    // Crash on out-of-memory if the correctness fuzzer is running.
    if (FLAG_correctness_fuzzer_suppressions) {
      FATAL("could not allocate wasm memory backing store");
    }
    RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure);
    TRACE_BS("BSw:try   failed to reserve address space\n");
    return {};
  }

  //--------------------------------------------------------------------------
  // 2. Allocate pages (inaccessible by default).
  //--------------------------------------------------------------------------
  void* allocation_base = nullptr;
  auto allocate_pages = [&] {
    allocation_base =
        AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
                      wasm::kWasmPageSize, PageAllocator::kNoAccess);
    return allocation_base != nullptr;
  };
  if (!gc_retry(allocate_pages)) {
    // Page allocator could not reserve enough pages.
    BackingStore::ReleaseReservation(reservation_size);
    RecordStatus(isolate, AllocationStatus::kOtherFailure);
    TRACE_BS("BSw:try   failed to allocate pages\n");
    return {};
  }

  // Get a pointer to the start of the buffer, skipping negative guard region
  // if necessary.
  byte* buffer_start = reinterpret_cast<byte*>(allocation_base) +
                       (guards ? kNegativeGuardSize : 0);

  //--------------------------------------------------------------------------
  // 3. Commit the initial pages (allow read/write).
  //--------------------------------------------------------------------------
  size_t byte_length = initial_pages * wasm::kWasmPageSize;
  auto commit_memory = [&] {
    return byte_length == 0 ||
           SetPermissions(GetPlatformPageAllocator(), buffer_start, byte_length,
                          PageAllocator::kReadWrite);
  };
  if (!gc_retry(commit_memory)) {
    // SetPermissions put us over the process memory limit.
    V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()");
    TRACE_BS("BSw:try   failed to set permissions\n");
  }

  DebugCheckZero(buffer_start, byte_length);  // touch the bytes.

  RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
                                  : AllocationStatus::kSuccess);

  auto result = new BackingStore(buffer_start,   // start
                                 byte_length,    // length
                                 byte_capacity,  // capacity
                                 shared,         // shared
                                 true,           // is_wasm_memory
                                 true,           // free_on_destruct
                                 guards,         // has_guard_regions
                                 false,          // custom_deleter
                                 false);         // empty_deleter

  TRACE_BS("BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu)\n", result,
           result->buffer_start(), byte_length, byte_capacity);

  // Shared Wasm memories need an anchor for the memory object list.
  if (shared == SharedFlag::kShared) {
    result->type_specific_data_.shared_wasm_memory_data =
        new SharedWasmMemoryData();
  }

  return std::unique_ptr<BackingStore>(result);
}

// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
    Isolate* isolate, size_t initial_pages, size_t maximum_pages,
    SharedFlag shared) {
  // Wasm pages must be a multiple of the allocation page size.
  DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize());

  // Enforce engine limitation on the maximum number of pages.
  if (initial_pages > wasm::kV8MaxWasmMemoryPages) return nullptr;

  // Trying to allocate 4 GiB on a 32-bit platform is guaranteed to fail.
  // We don't lower the official max_maximum_mem_pages() limit because that
  // would be observable upon instantiation; this way the effective limit
  // on 32-bit platforms is defined by the allocator.
  constexpr size_t kPlatformMax =
      std::numeric_limits<size_t>::max() / wasm::kWasmPageSize;
  if (initial_pages > kPlatformMax) return nullptr;

  auto backing_store =
      TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared);
  if (!backing_store && maximum_pages > initial_pages) {
    // If reserving {maximum_pages} failed, try with maximum = initial.
    backing_store =
        TryAllocateWasmMemory(isolate, initial_pages, initial_pages, shared);
  }
  return backing_store;
}

std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
                                                           size_t new_pages) {
  // Note that we could allocate uninitialized to save initialization cost
  // here, but since Wasm memories are allocated by the page allocator, the
  // zeroing cost is already built-in.
  // TODO(titzer): should we use a suitable maximum here?
  auto new_backing_store = BackingStore::AllocateWasmMemory(
      isolate, new_pages, new_pages,
      is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared);

  if (!new_backing_store ||
      new_backing_store->has_guard_regions() != has_guard_regions_) {
    return {};
  }

  if (byte_length_ > 0) {
    // If the allocation was successful, then the new buffer must be at least
    // as big as the old one.
    DCHECK_GE(new_pages * wasm::kWasmPageSize, byte_length_);
    memcpy(new_backing_store->buffer_start(), buffer_start_, byte_length_);
  }

  return new_backing_store;
}

// Try to grow the size of a wasm memory in place, without realloc + copy.
bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
                                         size_t max_pages) {
  DCHECK(is_wasm_memory_);
  max_pages = std::min(max_pages, byte_capacity_ / wasm::kWasmPageSize);

  if (delta_pages == 0) return true;          // degenerate grow.
  if (delta_pages > max_pages) return false;  // would never work.

  // Do a compare-exchange loop, because we also need to adjust page
  // permissions. Note that multiple racing grows both try to set page
  // permissions for the entire range (to be RW), so the operating system
  // should deal with that raciness. We know we succeeded when we can
  // compare/swap the old length with the new length.
  size_t old_length = byte_length_.load(std::memory_order_relaxed);
  size_t new_length = 0;
  while (true) {
    size_t current_pages = old_length / wasm::kWasmPageSize;

    // Check if we would exceed the supplied maximum.
    if (current_pages > (max_pages - delta_pages)) return false;

    new_length = (current_pages + delta_pages) * wasm::kWasmPageSize;

    // Try to adjust the permissions on the memory.
    if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
                           new_length, PageAllocator::kReadWrite)) {
      return false;
    }
    if (byte_length_.compare_exchange_weak(old_length, new_length,
                                           std::memory_order_acq_rel)) {
      // Successfully updated both the length and permissions.
      break;
    }
  }

  if (!is_shared_ && free_on_destruct_) {
    // Only do per-isolate accounting for non-shared backing stores.
    reinterpret_cast<v8::Isolate*>(isolate)
        ->AdjustAmountOfExternalAllocatedMemory(new_length - old_length);
  }
  return true;
}

void BackingStore::AttachSharedWasmMemoryObject(
    Isolate* isolate, Handle<WasmMemoryObject> memory_object) {
  DCHECK(is_wasm_memory_);
  DCHECK(is_shared_);
  // We need to take the global registry lock for this operation.
  GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this,
                                                        memory_object);
}

void BackingStore::BroadcastSharedWasmMemoryGrow(
    Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
    size_t new_pages) {
  GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
      isolate, backing_store, new_pages);
}

void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
  GlobalBackingStoreRegistry::Purge(isolate);
}

void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
  GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate);
}

std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
    Isolate* isolate, void* allocation_base, size_t allocation_length,
    SharedFlag shared, bool free_on_destruct) {
  auto result = new BackingStore(allocation_base,    // start
                                 allocation_length,  // length
                                 allocation_length,  // capacity
                                 shared,             // shared
                                 false,              // is_wasm_memory
                                 free_on_destruct,   // free_on_destruct
                                 false,              // has_guard_regions
                                 false,              // custom_deleter
                                 false);             // empty_deleter
  result->SetAllocatorFromIsolate(isolate);
  TRACE_BS("BS:wrap   bs=%p mem=%p (length=%zu)\n", result,
           result->buffer_start(), result->byte_length());
  return std::unique_ptr<BackingStore>(result);
}

std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
    void* allocation_base, size_t allocation_length,
    v8::BackingStore::DeleterCallback deleter, void* deleter_data,
    SharedFlag shared) {
  bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
  auto result = new BackingStore(allocation_base,    // start
                                 allocation_length,  // length
                                 allocation_length,  // capacity
                                 shared,             // shared
                                 false,              // is_wasm_memory
                                 true,               // free_on_destruct
                                 false,              // has_guard_regions
                                 true,               // custom_deleter
                                 is_empty_deleter);  // empty_deleter
  result->type_specific_data_.deleter = {deleter, deleter_data};
  TRACE_BS("BS:wrap   bs=%p mem=%p (length=%zu)\n", result,
           result->buffer_start(), result->byte_length());
  return std::unique_ptr<BackingStore>(result);
}

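// A backing store that owns no memory (length and capacity are zero). Since
// buffer_start() is nullptr, the destructor has nothing to free.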
std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
    SharedFlag shared) {
  auto result = new BackingStore(nullptr,  // start
                                 0,        // length
                                 0,        // capacity
                                 shared,   // shared
                                 false,    // is_wasm_memory
                                 true,     // free_on_destruct
                                 false,    // has_guard_regions
                                 false,    // custom_deleter
                                 false);   // empty_deleter

  return std::unique_ptr<BackingStore>(result);
}

bool BackingStore::Reallocate(Isolate* isolate, size_t new_byte_length) {
  CHECK(!is_wasm_memory_ && !custom_deleter_ && !globally_registered_ &&
        free_on_destruct_);
  auto allocator = get_v8_api_array_buffer_allocator();
  CHECK_EQ(isolate->array_buffer_allocator(), allocator);
  CHECK_EQ(byte_length_, byte_capacity_);
  void* new_start =
      allocator->Reallocate(buffer_start_, byte_length_, new_byte_length);
  if (!new_start) return false;
  buffer_start_ = new_start;
  byte_capacity_ = new_byte_length;
  byte_length_ = new_byte_length;
  return true;
}

v8::ArrayBuffer::Allocator* BackingStore::get_v8_api_array_buffer_allocator() {
  CHECK(!is_wasm_memory_);
  auto array_buffer_allocator =
      holds_shared_ptr_to_allocator_
          ? type_specific_data_.v8_api_array_buffer_allocator_shared.get()
          : type_specific_data_.v8_api_array_buffer_allocator;
  CHECK_NOT_NULL(array_buffer_allocator);
  return array_buffer_allocator;
}

SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() {
  CHECK(is_wasm_memory_ && is_shared_);
  auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data;
  CHECK(shared_wasm_memory_data);
  return shared_wasm_memory_data;
}

namespace {
// Implementation details of GlobalBackingStoreRegistry.
struct GlobalBackingStoreRegistryImpl {
  GlobalBackingStoreRegistryImpl() {}
  base::Mutex mutex_;
  std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_;
};
base::LazyInstance<GlobalBackingStoreRegistryImpl>::type global_registry_impl_ =
    LAZY_INSTANCE_INITIALIZER;
inline GlobalBackingStoreRegistryImpl* impl() {
  return global_registry_impl_.Pointer();
}
}  // namespace

void GlobalBackingStoreRegistry::Register(
    std::shared_ptr<BackingStore> backing_store) {
  if (!backing_store || !backing_store->buffer_start()) return;

  if (!backing_store->free_on_destruct()) {
    // If the backing store buffer is managed by the embedder,
    // then we don't have to guarantee that there is a single unique
    // BackingStore per buffer_start() because the destructor of
    // the BackingStore will be a no-op in that case.

    // All Wasm memory has to be registered.
    CHECK(!backing_store->is_wasm_memory());
    return;
  }

  base::MutexGuard scope_lock(&impl()->mutex_);
  if (backing_store->globally_registered_) return;
  TRACE_BS("BS:reg    bs=%p mem=%p (length=%zu, capacity=%zu)\n",
           backing_store.get(), backing_store->buffer_start(),
           backing_store->byte_length(), backing_store->byte_capacity());
  std::weak_ptr<BackingStore> weak = backing_store;
  auto result = impl()->map_.insert({backing_store->buffer_start(), weak});
  CHECK(result.second);
  backing_store->globally_registered_ = true;
}

void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) {
  if (!backing_store->globally_registered_) return;

  DCHECK_NOT_NULL(backing_store->buffer_start());

  base::MutexGuard scope_lock(&impl()->mutex_);
  const auto& result = impl()->map_.find(backing_store->buffer_start());
  if (result != impl()->map_.end()) {
    DCHECK(!result->second.lock());
    impl()->map_.erase(result);
  }
  backing_store->globally_registered_ = false;
}

std::shared_ptr<BackingStore> GlobalBackingStoreRegistry::Lookup(
    void* buffer_start, size_t length) {
  base::MutexGuard scope_lock(&impl()->mutex_);
  TRACE_BS("BS:lookup   mem=%p (%zu bytes)\n", buffer_start, length);
  const auto& result = impl()->map_.find(buffer_start);
  if (result == impl()->map_.end()) {
    return std::shared_ptr<BackingStore>();
  }
  auto backing_store = result->second.lock();
  CHECK_EQ(buffer_start, backing_store->buffer_start());
  if (backing_store->is_wasm_memory()) {
    // Grow calls on shared WebAssembly memories can be triggered from
    // different workers, so length equality cannot be guaranteed here.
    CHECK_LE(length, backing_store->byte_length());
  } else {
    CHECK_EQ(length, backing_store->byte_length());
  }
  return backing_store;
}

void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
  // We need to keep a reference to all backing stores that are inspected
  // in the purging loop below. Otherwise, we might get a deadlock
  // if the temporary backing store reference created in the loop is
  // the last reference. In that case the destructor of the backing store
  // may try to take the &impl()->mutex_ in order to unregister itself.
  std::vector<std::shared_ptr<BackingStore>> prevent_destruction_under_lock;
  base::MutexGuard scope_lock(&impl()->mutex_);
  // Purge all entries in the map that refer to the given isolate.
  for (auto& entry : impl()->map_) {
    auto backing_store = entry.second.lock();
    prevent_destruction_under_lock.emplace_back(backing_store);
    if (!backing_store) continue;  // skip entries where weak ptr is null
    if (!backing_store->is_wasm_memory()) continue;  // skip non-wasm memory
    if (!backing_store->is_shared()) continue;       // skip non-shared memory
    SharedWasmMemoryData* shared_data =
        backing_store->get_shared_wasm_memory_data();
    // Remove this isolate from the isolates list.
    auto& isolates = shared_data->isolates_;
    for (size_t i = 0; i < isolates.size(); i++) {
      if (isolates[i] == isolate) isolates[i] = nullptr;
    }
  }
}

void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
    Isolate* isolate, BackingStore* backing_store,
    Handle<WasmMemoryObject> memory_object) {
  // Add to the weak array list of shared memory objects in the isolate.
  isolate->AddSharedWasmMemory(memory_object);

  // Add the isolate to the list of isolates sharing this backing store.
  base::MutexGuard scope_lock(&impl()->mutex_);
  SharedWasmMemoryData* shared_data =
      backing_store->get_shared_wasm_memory_data();
  auto& isolates = shared_data->isolates_;
  int free_entry = -1;
  for (size_t i = 0; i < isolates.size(); i++) {
    if (isolates[i] == isolate) return;
    if (isolates[i] == nullptr) free_entry = static_cast<int>(i);
  }
  if (free_entry >= 0)
    isolates[free_entry] = isolate;
  else
    isolates.push_back(isolate);
}

void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
    Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
    size_t new_pages) {
  {
    // The global lock protects the list of isolates per backing store.
    base::MutexGuard scope_lock(&impl()->mutex_);
    SharedWasmMemoryData* shared_data =
        backing_store->get_shared_wasm_memory_data();
    for (Isolate* other : shared_data->isolates_) {
      if (other && other != isolate) {
        other->stack_guard()->RequestGrowSharedMemory();
      }
    }
  }
  // Update memory objects in this isolate.
  UpdateSharedWasmMemoryObjects(isolate);
}

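// Re-wrap every shared Wasm memory in this isolate in a fresh JSArrayBuffer
// so that its length reflects grows performed by other isolates (see the
// stack-guard request issued in BroadcastSharedWasmMemoryGrow above).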
void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(
    Isolate* isolate) {
  HandleScope scope(isolate);
  Handle<WeakArrayList> shared_wasm_memories =
      isolate->factory()->shared_wasm_memories();

  for (int i = 0; i < shared_wasm_memories->length(); i++) {
    HeapObject obj;
    if (!shared_wasm_memories->Get(i).GetHeapObject(&obj)) continue;

    Handle<WasmMemoryObject> memory_object(WasmMemoryObject::cast(obj),
                                           isolate);
    Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
    std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();

    Handle<JSArrayBuffer> new_buffer =
        isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store));
    memory_object->update_instances(isolate, new_buffer);
  }
}

}  // namespace internal
}  // namespace v8

#undef TRACE_BS