/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2017 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmProcess.h"

#include "mozilla/BinarySearch.h"
#include "mozilla/ScopeExit.h"

#include "gc/Memory.h"
#include "threading/ExclusiveData.h"
#include "vm/MutexIDs.h"
#include "vm/Runtime.h"
#ifdef ENABLE_WASM_CRANELIFT
#  include "wasm/cranelift/clifapi.h"
#endif
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmInstance.h"

using namespace js;
using namespace wasm;

using mozilla::BinarySearchIf;

// Per-process map from values of program-counter (pc) to CodeSegments.
//
// Whenever a new CodeSegment is ready to use, it has to be registered so that
// we can have fast lookups from pc to CodeSegments in numerous places. Since
// wasm compilation may be tiered, and the second tier doesn't have access to
// any JSContext/JS::Compartment/etc lying around, we have to use a process-wide
// map instead.
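//
// For instance, a sampling profiler thread that has only a raw pc in hand can
// ask (a sketch; LookupCodeSegment() is defined later in this file):
//
//   if (const wasm::CodeSegment* cs = wasm::LookupCodeSegment(pc)) {
//     // pc is inside registered wasm code
//   }
//
// without any JSContext or JS::Compartment available on the sampling thread.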

using CodeSegmentVector = Vector<const CodeSegment*, 0, SystemAllocPolicy>;

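// Flipped to true by insert() when the first CodeSegment is registered, and
// back to false by remove() when the last one is unregistered, so callers can
// cheaply test whether any wasm code exists at all without taking a lock.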
Atomic<bool> wasm::CodeExists(false);

// Because of profiling, the thread running wasm might need to know to which
// CodeSegment the current PC belongs, during a call to lookup(). A lookup
// is a read-only operation, and we don't want to take a lock then
// (otherwise, we could have a deadlock situation if an async lookup
// happened on a given thread that was holding mutatorsMutex_ while getting
// sampled). Since the writer could be modifying the data that is getting
// looked up, the writer functions spin-wait until there are no observers
// (i.e. calls to lookup()) of the atomic data before mutating it.

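// Number of lookup() calls currently in flight. Writers spin in swapAndWait()
// until this counter reaches zero before touching the vector that concurrent
// lookups may be reading.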
static Atomic<size_t> sNumActiveLookups(0);

class ProcessCodeSegmentMap {
  // Since writes (insertions or removals) can happen on any background
  // thread at the same time, we need a lock here.

  Mutex mutatorsMutex_;

  CodeSegmentVector segments1_;
  CodeSegmentVector segments2_;

  // Except during swapAndWait(), there are no lookup() observers of the
  // vector pointed to by mutableCodeSegments_.

  CodeSegmentVector* mutableCodeSegments_;
  Atomic<const CodeSegmentVector*> readonlyCodeSegments_;

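  // Comparator for BinarySearchIf over a sorted CodeSegmentVector: returns 0
  // when pc falls inside a segment's code range, and otherwise steers the
  // search left (-1) or right (1) by comparing pc against the segment's base.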
  struct CodeSegmentPC {
    const void* pc;
    explicit CodeSegmentPC(const void* pc) : pc(pc) {}
    int operator()(const CodeSegment* cs) const {
      if (cs->containsCodePC(pc)) {
        return 0;
      }
      if (pc < cs->base()) {
        return -1;
      }
      return 1;
    }
  };

  void swapAndWait() {
    // Both vectors are consistent for lookup at this point although their
    // contents are different: there is no way for the looked up PC to be
    // in the code segment that is getting registered, because the code
    // segment is not even fully created yet.

    // If a lookup happens before this instruction, then the
    // soon-to-become-former read-only pointer is used during the lookup,
    // which is valid.

    mutableCodeSegments_ = const_cast<CodeSegmentVector*>(
        readonlyCodeSegments_.exchange(mutableCodeSegments_));

    // If a lookup happens after this instruction, then the updated vector
    // is used, which is valid:
    // - in case of insertion, it means the new vector contains more data,
    //   but it's fine since the code segment is getting registered and thus
    //   isn't even fully created yet, so the code can't be running.
    // - in case of removal, it means the new vector contains one less
    //   entry, but it's fine since unregistering means the code segment
    //   isn't used by any live instance anymore, thus PC can't be in the
    //   to-be-removed code segment's range.

    // A lookup could have happened on any of the two vectors. Wait for
    // observers to be done using any vector before mutating.

    while (sNumActiveLookups > 0) {
    }
  }

 public:
  ProcessCodeSegmentMap()
      : mutatorsMutex_(mutexid::WasmCodeSegmentMap),
        mutableCodeSegments_(&segments1_),
        readonlyCodeSegments_(&segments2_) {}

  ~ProcessCodeSegmentMap() {
    MOZ_RELEASE_ASSERT(sNumActiveLookups == 0);
    MOZ_ASSERT(segments1_.empty());
    MOZ_ASSERT(segments2_.empty());
    segments1_.clearAndFree();
    segments2_.clearAndFree();
  }

  bool insert(const CodeSegment* cs) {
    LockGuard<Mutex> lock(mutatorsMutex_);

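    // The segment must not already be present; a failed binary search also
    // yields the index at which inserting cs keeps the vector sorted.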
    size_t index;
    MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0,
                                    mutableCodeSegments_->length(),
                                    CodeSegmentPC(cs->base()), &index));

    if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index,
                                      cs)) {
      return false;
    }

    CodeExists = true;

    swapAndWait();

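    // After the swap, mutableCodeSegments_ points at the other vector, which
    // does not contain cs yet. The debug check below recomputes the insertion
    // index there, and then the insertion is replayed so that the two vectors
    // end up with identical contents.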
#ifdef DEBUG
    size_t otherIndex;
    MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0,
                                    mutableCodeSegments_->length(),
                                    CodeSegmentPC(cs->base()), &otherIndex));
    MOZ_ASSERT(index == otherIndex);
#endif

    // Although we could simply revert the insertion in the read-only
    // vector, it is simpler to just crash; given that each CodeSegment
    // consumes multiple pages, it is unlikely this insert() would OOM in
    // practice.
    AutoEnterOOMUnsafeRegion oom;
    if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index,
                                      cs)) {
      oom.crash("when inserting a CodeSegment in the process-wide map");
    }

    return true;
  }

  void remove(const CodeSegment* cs) {
    LockGuard<Mutex> lock(mutatorsMutex_);

    size_t index;
    MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0,
                                   mutableCodeSegments_->length(),
                                   CodeSegmentPC(cs->base()), &index));

    mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);

    if (!mutableCodeSegments_->length()) {
      CodeExists = false;
    }

    swapAndWait();

#ifdef DEBUG
    size_t otherIndex;
    MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0,
                                   mutableCodeSegments_->length(),
                                   CodeSegmentPC(cs->base()), &otherIndex));
    MOZ_ASSERT(index == otherIndex);
#endif

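    // As in insert(), replay the removal in the vector that became mutable
    // after the swap so that both vectors stay in sync.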
    mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);
  }

  const CodeSegment* lookup(const void* pc) {
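    // Load the read-only vector exactly once. Our caller (LookupCodeSegment())
    // has already incremented sNumActiveLookups, so a concurrent writer will
    // spin in swapAndWait() instead of mutating what we read here.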
    const CodeSegmentVector* readonly = readonlyCodeSegments_;

    size_t index;
    if (!BinarySearchIf(*readonly, 0, readonly->length(), CodeSegmentPC(pc),
                        &index)) {
      return nullptr;
    }

    // It is fine returning a raw CodeSegment*, because we assume we are
    // looking up a live PC in code which is on the stack, keeping the
    // CodeSegment alive.

    return (*readonly)[index];
  }
};

// This field is only atomic to handle buggy scenarios where we crash during
// startup or shutdown and thus racily perform wasm::LookupCodeSegment() from
// the crashing thread.

static Atomic<ProcessCodeSegmentMap*> sProcessCodeSegmentMap(nullptr);

bool wasm::RegisterCodeSegment(const CodeSegment* cs) {
  MOZ_ASSERT(cs->codeTier().code().initialized());

  // This function cannot race with startup/shutdown.
  ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
  MOZ_RELEASE_ASSERT(map);
  return map->insert(cs);
}

void wasm::UnregisterCodeSegment(const CodeSegment* cs) {
  // This function cannot race with startup/shutdown.
  ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
  MOZ_RELEASE_ASSERT(map);
  map->remove(cs);
}

const CodeSegment* wasm::LookupCodeSegment(
    const void* pc, const CodeRange** codeRange /*= nullptr */) {
  // Since wasm::LookupCodeSegment() can race with wasm::ShutDown(), we must
  // additionally keep sNumActiveLookups above zero for the duration we're
  // using the ProcessCodeSegmentMap. wasm::ShutDown() spin-waits on
  // sNumActiveLookups getting to zero.

  auto decObserver = mozilla::MakeScopeExit([&] {
    MOZ_ASSERT(sNumActiveLookups > 0);
    sNumActiveLookups--;
  });
  sNumActiveLookups++;

  ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
  if (!map) {
    return nullptr;
  }

  if (const CodeSegment* found = map->lookup(pc)) {
    if (codeRange) {
      *codeRange = found->isModule() ? found->asModule()->lookupRange(pc)
                                     : found->asLazyStub()->lookupRange(pc);
    }
    return found;
  }

  if (codeRange) {
    *codeRange = nullptr;
  }

  return nullptr;
}

const Code* wasm::LookupCode(const void* pc,
                             const CodeRange** codeRange /* = nullptr */) {
  const CodeSegment* found = LookupCodeSegment(pc, codeRange);
  MOZ_ASSERT_IF(!found && codeRange, !*codeRange);
  return found ? &found->code() : nullptr;
}

bool wasm::InCompiledCode(void* pc) {
  if (LookupCodeSegment(pc)) {
    return true;
  }

  const CodeRange* codeRange;
  uint8_t* codeBase;
  return LookupBuiltinThunk(pc, &codeRange, &codeBase);
}

/**
 * ReadLockFlag maintains a flag that can be mutated multiple times before it
 * is read; once it has been read, set() fails and the flag keeps the value
 * that was observed.
 */
class ReadLockFlag {
 private:
  bool enabled_;
  bool read_;

 public:
  ReadLockFlag() : enabled_(false), read_(false) {}

  bool get() {
    read_ = true;
    return enabled_;
  }

  bool set(bool enabled) {
    if (read_) {
      return false;
    }
    enabled_ = enabled;
    return true;
  }
};

#ifdef WASM_SUPPORTS_HUGE_MEMORY
/*
 * Some 64 bit systems greatly limit the range of available virtual memory. We
 * require about 6GiB for each wasm huge memory (roughly: a 4GiB addressable
 * region plus guard pages covering the largest encodable offset), which can
 * exhaust the address spaces of these systems quickly. In order to avoid this,
 * we only enable huge memory if we observe a large enough address space.
 *
 * This number is conservatively chosen to continue using huge memory on our
 * smallest address space system, Android on ARM64 (39 bits), along with a bit
 * for error in detecting the address space limit.
 */
static const size_t MinAddressBitsForHugeMemory = 38;

/*
 * In addition to the above, some systems impose an independent limit on the
 * amount of virtual memory that may be used.
 */
static const size_t MinVirtualMemoryLimitForHugeMemory =
    size_t(1) << MinAddressBitsForHugeMemory;
#endif

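// Process-wide setting for whether wasm huge memory is in use, wrapped in
// ReadLockFlag so that once IsHugeMemoryEnabled() has sampled it, it can no
// longer change.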
static ExclusiveData<ReadLockFlag> sHugeMemoryEnabled(
    mutexid::WasmHugeMemoryEnabled);

static bool IsHugeMemoryEnabledHelper() {
  auto state = sHugeMemoryEnabled.lock();
  return state->get();
}

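// The helper's result is cached in a function-local static: the first call
// latches the flag (ReadLockFlag::get() marks it read), after which
// DisableHugeMemory() can no longer take effect.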
bool wasm::IsHugeMemoryEnabled() {
  static bool enabled = IsHugeMemoryEnabledHelper();
  return enabled;
}

bool wasm::DisableHugeMemory() {
  auto state = sHugeMemoryEnabled.lock();
  return state->set(false);
}

static void ConfigureHugeMemory() {
#ifdef WASM_SUPPORTS_HUGE_MEMORY
  if (gc::SystemAddressBits() < MinAddressBitsForHugeMemory) {
    return;
  }

  if (gc::VirtualMemoryLimit() != size_t(-1) &&
      gc::VirtualMemoryLimit() < MinVirtualMemoryLimitForHugeMemory) {
    return;
  }

  auto state = sHugeMemoryEnabled.lock();
  bool set = state->set(true);
  MOZ_RELEASE_ASSERT(set);
#endif
}

bool wasm::Init() {
  MOZ_RELEASE_ASSERT(!sProcessCodeSegmentMap);

  ConfigureHugeMemory();

#ifdef ENABLE_WASM_CRANELIFT
  cranelift_initialize();
#endif

  AutoEnterOOMUnsafeRegion oomUnsafe;
  ProcessCodeSegmentMap* map = js_new<ProcessCodeSegmentMap>();
  if (!map) {
    oomUnsafe.crash("js::wasm::Init");
  }

  sProcessCodeSegmentMap = map;
  return true;
}

void wasm::ShutDown() {
  // If there are live runtimes then we are already pretty much leaking the
  // world, so to avoid spurious assertions (which are valid and valuable when
  // there are not live JSRuntimes), don't bother releasing anything here.
  if (JSRuntime::hasLiveRuntimes()) {
    return;
  }

  // After signalling shutdown by clearing sProcessCodeSegmentMap, wait for
  // concurrent wasm::LookupCodeSegment()s to finish.
  ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
  MOZ_RELEASE_ASSERT(map);
  sProcessCodeSegmentMap = nullptr;
  while (sNumActiveLookups > 0) {
  }

  ReleaseBuiltinThunks();
  js_delete(map);
}