/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/ProcessExecutableMemory.h"

#include "mozilla/Array.h"
#include "mozilla/Atomics.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"
#include "mozilla/TaggedAnonymousMemory.h"
#include "mozilla/XorShift128PlusRNG.h"

#include <atomic>  // std::atomic_thread_fence in ReprotectRegion
#include <errno.h>

#include "jsfriendapi.h"
#include "jsmath.h"

#include "gc/Memory.h"
#include "jit/FlushICache.h"  // js::jit::FlushICache
#include "jit/JitOptions.h"
#include "threading/LockGuard.h"
#include "threading/Mutex.h"
#include "util/Memory.h"
#include "util/Poison.h"
#include "util/WindowsWrapper.h"
#include "vm/MutexIDs.h"

#ifdef XP_WIN
#  include "mozilla/StackWalk_windows.h"
#  include "mozilla/WindowsVersion.h"
#elif defined(__wasi__)
// Nothing.
#else
#  include <sys/mman.h>
#  include <unistd.h>
#endif

#ifdef MOZ_VALGRIND
#  include <valgrind/valgrind.h>
#endif

using namespace js;
using namespace js::jit;

#ifdef XP_WIN
#  if defined(HAVE_64BIT_BUILD)
#    define NEED_JIT_UNWIND_HANDLING
#  endif
static void* ComputeRandomAllocationAddress() {
  /*
   * Inspiration is V8's OS::Allocate in platform-win32.cc.
   *
   * VirtualAlloc takes 64K chunks out of the virtual address space, so we
   * keep 64 KiB alignment (the low 16 bits of every address are zero).
   *
   * x86: V8 comments say that keeping addresses in the [64MiB, 1GiB) range
   * tries to avoid system default DLL mapping space. In the end, we get 13
   * bits of randomness in our selection.
   * x64: [2GiB, 4TiB), with 25 bits of randomness.
   */
#  ifdef HAVE_64BIT_BUILD
  static const uintptr_t base = 0x0000000080000000;
  static const uintptr_t mask = 0x000003ffffff0000;
#  elif defined(_M_IX86) || defined(__i386__)
  static const uintptr_t base = 0x04000000;
  static const uintptr_t mask = 0x3fff0000;
#  else
#    error "Unsupported architecture"
#  endif

  uint64_t rand = js::GenerateRandomSeed();
  return (void*)(base | (rand & mask));
}
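
// Concretely, on x64 every address the function above can return is 64 KiB
// aligned (the low 16 bits of both base and mask are zero) and lies in
// [0x80000000, 0x000003ffffff0000], i.e. within the [2GiB, 4TiB) range
// described above, since base contributes bit 31 and mask covers bits 16-41.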

#  ifdef NEED_JIT_UNWIND_HANDLING
static js::JitExceptionHandler sJitExceptionHandler;
#  endif

JS_PUBLIC_API void js::SetJitExceptionHandler(JitExceptionHandler handler) {
#  ifdef NEED_JIT_UNWIND_HANDLING
  MOZ_ASSERT(!sJitExceptionHandler);
  sJitExceptionHandler = handler;
#  else
  // Just do nothing if unwind handling is disabled.
#  endif
}

#  ifdef NEED_JIT_UNWIND_HANDLING
#    if defined(_M_ARM64)
// See the ".xdata records" section of
// https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
// These records can have various fields present or absent depending on the
// bits set in the header. Our struct will use one 32-bit slot for unwind
// codes, and no slots for epilog scopes.
struct UnwindInfo {
  uint32_t functionLength : 18;
  uint32_t version : 2;
  uint32_t hasExceptionHandler : 1;
  uint32_t packedEpilog : 1;
  uint32_t epilogCount : 5;
  uint32_t codeWords : 5;
  uint8_t unwindCodes[4];
  uint32_t exceptionHandler;
};
static const unsigned ThunkLength = 20;
#    else
// From documentation for UNWIND_INFO on
// http://msdn.microsoft.com/en-us/library/ddssxxy8.aspx
struct UnwindInfo {
  uint8_t version : 3;
  uint8_t flags : 5;
  uint8_t sizeOfPrologue;
  uint8_t countOfUnwindCodes;
  uint8_t frameRegister : 4;
  uint8_t frameOffset : 4;
  ULONG exceptionHandler;
};
static const unsigned ThunkLength = 12;
#    endif

struct ExceptionHandlerRecord {
  RUNTIME_FUNCTION runtimeFunction;
  UnwindInfo unwindInfo;
  uint8_t thunk[ThunkLength];
};
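
// Rough layout of the reserved region once the record page is registered (a
// sketch; see RegisterExecutableMemory and ReserveProcessExecutableMemory
// below):
//
//   p                            p + pageSize             p + bytes
//   | ExceptionHandlerRecord     | JIT code ...           |
//   | (made read+execute once    | (committed and         |
//   |  it is fully written)      |  protected on demand)  |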

// This function must match the function pointer type PEXCEPTION_HANDLER
// mentioned in:
// http://msdn.microsoft.com/en-us/library/ssa62fwe.aspx.
// This type is rather elusive in documentation; Wine is the best I've found:
// http://source.winehq.org/source/include/winnt.h
static DWORD ExceptionHandler(PEXCEPTION_RECORD exceptionRecord,
                              _EXCEPTION_REGISTRATION_RECORD*, PCONTEXT context,
                              _EXCEPTION_REGISTRATION_RECORD**) {
  return sJitExceptionHandler(exceptionRecord, context);
}

PRUNTIME_FUNCTION RuntimeFunctionCallback(DWORD64 ControlPc, PVOID Context);

// For an explanation of the problem being solved here, see
// SetJitExceptionFilter in jsfriendapi.h.
static bool RegisterExecutableMemory(void* p, size_t bytes, size_t pageSize) {
  if (!VirtualAlloc(p, pageSize, MEM_COMMIT, PAGE_READWRITE)) {
    MOZ_CRASH();
  }

  ExceptionHandlerRecord* r = reinterpret_cast<ExceptionHandlerRecord*>(p);
  void* handler = JS_FUNC_TO_DATA_PTR(void*, ExceptionHandler);

  // Because the .xdata format on ARM64 can only encode sizes up to 1M (much
  // too small for our JIT code regions), we register a function table callback
  // to provide RUNTIME_FUNCTIONs at runtime. Windows doesn't seem to care
  // about the size fields on RUNTIME_FUNCTIONs that are created in this way,
  // so the same RUNTIME_FUNCTION can work for any address in the region. We'll
  // set up a generic one now and the callback can just return a pointer to it.

  // All these fields are specified to be offsets from the base of the
  // executable code (which is 'p'), even if they have 'Address' in their
  // names. In particular, exceptionHandler is a ULONG offset which is a
  // 32-bit integer. Since 'p' can be farther than INT32_MAX away from
  // sJitExceptionHandler, we must generate a little thunk inside the
  // record. The record is put on its own page so that we can take away write
  // access to protect against accidental clobbering.

#    if defined(_M_ARM64)
  r->runtimeFunction.BeginAddress = pageSize;
  r->runtimeFunction.UnwindData = offsetof(ExceptionHandlerRecord, unwindInfo);
  static_assert(offsetof(ExceptionHandlerRecord, unwindInfo) % 4 == 0,
                "The ARM64 .pdata format requires that exception information "
                "RVAs be 4-byte aligned.");

  memset(&r->unwindInfo, 0, sizeof(r->unwindInfo));
  r->unwindInfo.hasExceptionHandler = true;
  r->unwindInfo.exceptionHandler = offsetof(ExceptionHandlerRecord, thunk);

  // Use a fake unwind code to make the Windows unwinder do _something_. If the
  // PC and SP both stay unchanged, we'll fail the unwinder's sanity checks and
  // it won't call our exception handler.
  r->unwindInfo.codeWords = 1;  // one 32-bit word gives us up to 4 codes
  r->unwindInfo.unwindCodes[0] =
      0b00000001;  // alloc_s small stack of size 1*16
  r->unwindInfo.unwindCodes[1] = 0b11100100;  // end

  uint32_t* thunk = (uint32_t*)r->thunk;
  uint16_t* addr = (uint16_t*)&handler;

  // xip0/r16 should be safe to clobber: Windows just used it to call our
  // thunk.
  const uint8_t reg = 16;

  // Say `handler` is 0x4444333322221111, then:
  thunk[0] = 0xd2800000 | addr[0] << 5 | reg;  // mov xip0, 1111
  thunk[1] = 0xf2a00000 | addr[1] << 5 | reg;  // movk xip0, 2222 lsl #0x10
  thunk[2] = 0xf2c00000 | addr[2] << 5 | reg;  // movk xip0, 3333 lsl #0x20
  thunk[3] = 0xf2e00000 | addr[3] << 5 | reg;  // movk xip0, 4444 lsl #0x30
  thunk[4] = 0xd61f0000 | reg << 5;            // br xip0
#    else
  r->runtimeFunction.BeginAddress = pageSize;
  r->runtimeFunction.EndAddress = (DWORD)bytes;
  r->runtimeFunction.UnwindData = offsetof(ExceptionHandlerRecord, unwindInfo);

  r->unwindInfo.version = 1;
  r->unwindInfo.flags = UNW_FLAG_EHANDLER;
  r->unwindInfo.sizeOfPrologue = 0;
  r->unwindInfo.countOfUnwindCodes = 0;
  r->unwindInfo.frameRegister = 0;
  r->unwindInfo.frameOffset = 0;
  r->unwindInfo.exceptionHandler = offsetof(ExceptionHandlerRecord, thunk);

  // mov imm64, rax
  r->thunk[0] = 0x48;
  r->thunk[1] = 0xb8;
  memcpy(&r->thunk[2], &handler, 8);

  // jmp rax
  r->thunk[10] = 0xff;
  r->thunk[11] = 0xe0;
#    endif

  DWORD oldProtect;
  if (!VirtualProtect(p, pageSize, PAGE_EXECUTE_READ, &oldProtect)) {
    MOZ_CRASH();
  }

  // XXX NB: The profiler believes this function is only called from the main
  // thread. If that ever becomes untrue, the profiler must be updated
  // immediately.
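  // Note: per the RtlInstallFunctionTableCallback documentation, the two
  // low-order bits of the table identifier must be set, hence `| 0x3` on the
  // base address below.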
  AutoSuppressStackWalking suppress;
  return RtlInstallFunctionTableCallback((DWORD64)p | 0x3, (DWORD64)p, bytes,
                                         RuntimeFunctionCallback, NULL, NULL);
}

static void UnregisterExecutableMemory(void* p, size_t bytes, size_t pageSize) {
  // There's no such thing as RtlUninstallFunctionTableCallback, so there's
  // nothing to do here.
}
#  endif

static void* ReserveProcessExecutableMemory(size_t bytes) {
#  ifdef NEED_JIT_UNWIND_HANDLING
  size_t pageSize = gc::SystemPageSize();
  if (sJitExceptionHandler) {
    bytes += pageSize;
  }
#  endif

  void* p = nullptr;
  for (size_t i = 0; i < 10; i++) {
    void* randomAddr = ComputeRandomAllocationAddress();
    p = VirtualAlloc(randomAddr, bytes, MEM_RESERVE, PAGE_NOACCESS);
    if (p) {
      break;
    }
  }

  if (!p) {
    // Try again without randomization.
    p = VirtualAlloc(nullptr, bytes, MEM_RESERVE, PAGE_NOACCESS);
    if (!p) {
      return nullptr;
    }
  }

#  ifdef NEED_JIT_UNWIND_HANDLING
  if (sJitExceptionHandler) {
    if (!RegisterExecutableMemory(p, bytes, pageSize)) {
      VirtualFree(p, 0, MEM_RELEASE);
      return nullptr;
    }

    p = (uint8_t*)p + pageSize;
    bytes -= pageSize;
  }

  RegisterJitCodeRegion((uint8_t*)p, bytes);
#  endif

  return p;
}

static void DeallocateProcessExecutableMemory(void* addr, size_t bytes) {
#  ifdef NEED_JIT_UNWIND_HANDLING
  UnregisterJitCodeRegion((uint8_t*)addr, bytes);

  if (sJitExceptionHandler) {
    size_t pageSize = gc::SystemPageSize();
    addr = (uint8_t*)addr - pageSize;
    UnregisterExecutableMemory(addr, bytes, pageSize);
  }
#  endif

  VirtualFree(addr, 0, MEM_RELEASE);
}

static DWORD ProtectionSettingToFlags(ProtectionSetting protection) {
  switch (protection) {
    case ProtectionSetting::Protected:
      return PAGE_NOACCESS;
    case ProtectionSetting::Writable:
      return PAGE_READWRITE;
    case ProtectionSetting::Executable:
      return PAGE_EXECUTE_READ;
  }
  MOZ_CRASH();
}

[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
                                      ProtectionSetting protection) {
  void* p = VirtualAlloc(addr, bytes, MEM_COMMIT,
                         ProtectionSettingToFlags(protection));
  if (!p) {
    return false;
  }
  MOZ_RELEASE_ASSERT(p == addr);
  return true;
}

static void DecommitPages(void* addr, size_t bytes) {
  if (!VirtualFree(addr, bytes, MEM_DECOMMIT)) {
    MOZ_CRASH("DecommitPages failed");
  }
}
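
// Putting the Windows pieces together, the life cycle of a code region looks
// like this (a sketch of the calls made elsewhere in this file):
//
//   VirtualAlloc(hint, bytes, MEM_RESERVE, PAGE_NOACCESS)  // reserve
//   VirtualAlloc(addr, bytes, MEM_COMMIT, flags)           // CommitPages
//   VirtualFree(addr, bytes, MEM_DECOMMIT)                 // DecommitPages
//   VirtualFree(base, 0, MEM_RELEASE)                      // deallocate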
#elif defined(__wasi__)
static void* ReserveProcessExecutableMemory(size_t bytes) {
  MOZ_CRASH("NYI for WASI.");
  return nullptr;
}
static void DeallocateProcessExecutableMemory(void* addr, size_t bytes) {
  MOZ_CRASH("NYI for WASI.");
}
[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
                                      ProtectionSetting protection) {
  MOZ_CRASH("NYI for WASI.");
  return false;
}
static void DecommitPages(void* addr, size_t bytes) {
  MOZ_CRASH("NYI for WASI.");
}
#else  // !XP_WIN && !__wasi__
#  ifndef MAP_NORESERVE
#    define MAP_NORESERVE 0
#  endif

static void* ComputeRandomAllocationAddress() {
#  ifdef __OpenBSD__
  // OpenBSD already has random mmap, and the idea that all x64 CPUs have a
  // 48-bit address space is not correct. Returning nullptr allows OpenBSD to
  // do the right thing.
  return nullptr;
#  else
  uint64_t rand = js::GenerateRandomSeed();

#    ifdef HAVE_64BIT_BUILD
  // x64 CPUs have a 48-bit address space and on some platforms the OS will
  // give us access to 47 bits, so to be safe we right shift by 18 to leave
  // 46 bits.
  rand >>= 18;
#    else
  // On 32-bit, right shift by 34 to leave 30 bits, range [0, 1GiB). Then add
  // 512MiB to get range [512MiB, 1.5GiB), or [0x20000000, 0x60000000). This
  // is based on V8 comments in platform-posix.cc saying this range is
  // relatively unpopulated across a variety of kernels.
  rand >>= 34;
  rand += 512 * 1024 * 1024;
#    endif

  // Ensure page alignment.
  uintptr_t mask = ~uintptr_t(gc::SystemPageSize() - 1);
  return (void*)uintptr_t(rand & mask);
#  endif
}
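
// For instance, with 4 KiB pages the alignment mask above is ~uintptr_t(0xfff):
// the random hint keeps its high bits and drops the low 12.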

static void* ReserveProcessExecutableMemory(size_t bytes) {
  // Note that randomAddr is just a hint: if the address is not available
  // mmap will pick a different address.
  void* randomAddr = ComputeRandomAllocationAddress();
  void* p = MozTaggedAnonymousMmap(randomAddr, bytes, PROT_NONE,
                                   MAP_NORESERVE | MAP_PRIVATE | MAP_ANON, -1,
                                   0, "js-executable-memory");
  if (p == MAP_FAILED) {
    return nullptr;
  }
  return p;
}

static void DeallocateProcessExecutableMemory(void* addr, size_t bytes) {
  mozilla::DebugOnly<int> result = munmap(addr, bytes);
  MOZ_ASSERT(!result || errno == ENOMEM);
}

static unsigned ProtectionSettingToFlags(ProtectionSetting protection) {
#  ifdef MOZ_VALGRIND
  // If we're configured for Valgrind and running on it, use a slacker
  // scheme that doesn't change execute permissions, since doing so causes
  // Valgrind a lot of extra overhead re-JITting code that loses and later
  // regains execute permission. See bug 1338179.
  if (RUNNING_ON_VALGRIND) {
    switch (protection) {
      case ProtectionSetting::Protected:
        return PROT_NONE;
      case ProtectionSetting::Writable:
        return PROT_READ | PROT_WRITE | PROT_EXEC;
      case ProtectionSetting::Executable:
        return PROT_READ | PROT_EXEC;
    }
    MOZ_CRASH();
  }
  // If we get here, we're configured for Valgrind but not running on
  // it, so use the standard scheme.
#  endif
  switch (protection) {
    case ProtectionSetting::Protected:
      return PROT_NONE;
    case ProtectionSetting::Writable:
      return PROT_READ | PROT_WRITE;
    case ProtectionSetting::Executable:
      return PROT_READ | PROT_EXEC;
  }
  MOZ_CRASH();
}

[[nodiscard]] static bool CommitPages(void* addr, size_t bytes,
                                      ProtectionSetting protection) {
  void* p = MozTaggedAnonymousMmap(
      addr, bytes, ProtectionSettingToFlags(protection),
      MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0, "js-executable-memory");
  if (p == MAP_FAILED) {
    return false;
  }
  MOZ_RELEASE_ASSERT(p == addr);
  return true;
}

static void DecommitPages(void* addr, size_t bytes) {
  // Use mmap with MAP_FIXED and PROT_NONE. Inspired by jemalloc's
  // pages_decommit.
  void* p = MozTaggedAnonymousMmap(addr, bytes, PROT_NONE,
                                   MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0,
                                   "js-executable-memory");
  MOZ_RELEASE_ASSERT(addr == p);
}
#endif

template <size_t NumBits>
class PageBitSet {
  using WordType = uint32_t;
  static const size_t BitsPerWord = sizeof(WordType) * 8;

  static_assert((NumBits % BitsPerWord) == 0,
                "NumBits must be a multiple of BitsPerWord");
  static const size_t NumWords = NumBits / BitsPerWord;

  mozilla::Array<WordType, NumWords> words_;

  uint32_t indexToWord(uint32_t index) const {
    MOZ_ASSERT(index < NumBits);
    return index / BitsPerWord;
  }
  WordType indexToBit(uint32_t index) const {
    MOZ_ASSERT(index < NumBits);
    return WordType(1) << (index % BitsPerWord);
  }

 public:
  void init() { mozilla::PodArrayZero(words_); }
  bool contains(size_t index) const {
    uint32_t word = indexToWord(index);
    return words_[word] & indexToBit(index);
  }
  void insert(size_t index) {
    MOZ_ASSERT(!contains(index));
    uint32_t word = indexToWord(index);
    words_[word] |= indexToBit(index);
  }
  void remove(size_t index) {
    MOZ_ASSERT(contains(index));
    uint32_t word = indexToWord(index);
    words_[word] &= ~indexToBit(index);
  }

#ifdef DEBUG
  bool empty() const {
    for (size_t i = 0; i < NumWords; i++) {
      if (words_[i] != 0) {
        return false;
      }
    }
    return true;
  }
#endif
};
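
// Minimal usage sketch of PageBitSet (indices must be in [0, NumBits)):
//
//   PageBitSet<64> set;
//   set.init();                    // all bits clear
//   set.insert(3);                 // asserts the bit was clear
//   MOZ_ASSERT(set.contains(3));
//   set.remove(3);                 // asserts the bit was set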

// Per-process executable memory allocator. It reserves a block of memory of
// MaxCodeBytesPerProcess bytes, then allocates/deallocates pages from that.
//
// This has a number of benefits compared to raw mmap/VirtualAlloc:
//
// * More resilient against certain attacks.
//
// * Behaves more consistently across platforms: it avoids the 64K granularity
//   issues on Windows, for instance.
//
// * On x64, near jumps can be used for jumps to other JIT pages.
//
// * On Win64, we have to register the exception handler only once (at process
//   startup). This saves some memory and avoids RtlAddFunctionTable profiler
//   deadlocks.
class ProcessExecutableMemory {
  static_assert(
      (MaxCodeBytesPerProcess % ExecutableCodePageSize) == 0,
      "MaxCodeBytesPerProcess must be a multiple of ExecutableCodePageSize");
  static const size_t MaxCodePages =
      MaxCodeBytesPerProcess / ExecutableCodePageSize;

  // Start of the MaxCodeBytesPerProcess memory block or nullptr if
  // uninitialized. Note that this is NOT guaranteed to be aligned to
  // ExecutableCodePageSize.
  uint8_t* base_;

  // The fields below should only be accessed while we hold the lock.
  Mutex lock_;

  // pagesAllocated_ is an Atomic so that bytesAllocated does not have to
  // take the lock.
  mozilla::Atomic<size_t, mozilla::ReleaseAcquire> pagesAllocated_;

  // Page where we should try to allocate next.
  size_t cursor_;

  mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG> rng_;
  PageBitSet<MaxCodePages> pages_;

 public:
  ProcessExecutableMemory()
      : base_(nullptr),
        lock_(mutexid::ProcessExecutableRegion),
        pagesAllocated_(0),
        cursor_(0),
        rng_(),
        pages_() {}

  [[nodiscard]] bool init() {
    pages_.init();

    MOZ_RELEASE_ASSERT(!initialized());
    MOZ_RELEASE_ASSERT(HasJitBackend());
    MOZ_RELEASE_ASSERT(gc::SystemPageSize() <= ExecutableCodePageSize);

    void* p = ReserveProcessExecutableMemory(MaxCodeBytesPerProcess);
    if (!p) {
      return false;
    }

    base_ = static_cast<uint8_t*>(p);

    mozilla::Array<uint64_t, 2> seed;
    GenerateXorShift128PlusSeed(seed);
    rng_.emplace(seed[0], seed[1]);
    return true;
  }

  uint8_t* base() const { return base_; }

  bool initialized() const { return base_ != nullptr; }

  size_t bytesAllocated() const {
    MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);
    return pagesAllocated_ * ExecutableCodePageSize;
  }

  void release() {
    MOZ_ASSERT(initialized());
    MOZ_ASSERT(pages_.empty());
    MOZ_ASSERT(pagesAllocated_ == 0);
    DeallocateProcessExecutableMemory(base_, MaxCodeBytesPerProcess);
    base_ = nullptr;
    rng_.reset();
    MOZ_ASSERT(!initialized());
  }

  void assertValidAddress(void* p, size_t bytes) const {
    MOZ_RELEASE_ASSERT(p >= base_ &&
                       uintptr_t(p) + bytes <=
                           uintptr_t(base_) + MaxCodeBytesPerProcess);
  }

  bool containsAddress(const void* p) const {
    return p >= base_ &&
           uintptr_t(p) < uintptr_t(base_) + MaxCodeBytesPerProcess;
  }

  void* allocate(size_t bytes, ProtectionSetting protection,
                 MemCheckKind checkKind);
  void deallocate(void* addr, size_t bytes, bool decommit);
};
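
// Note: while lock_ is held, pagesAllocated_ equals the number of bits set in
// pages_; allocate() and deallocate() update both together under the lock.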

void* ProcessExecutableMemory::allocate(size_t bytes,
                                        ProtectionSetting protection,
                                        MemCheckKind checkKind) {
  MOZ_ASSERT(initialized());
  MOZ_ASSERT(HasJitBackend());
  MOZ_ASSERT(bytes > 0);
  MOZ_ASSERT((bytes % ExecutableCodePageSize) == 0);

  size_t numPages = bytes / ExecutableCodePageSize;

  // Take the lock and try to allocate.
  void* p = nullptr;
  {
    LockGuard<Mutex> guard(lock_);
    MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);

    // Check if we have enough pages available.
    if (pagesAllocated_ + numPages >= MaxCodePages) {
      return nullptr;
    }

    MOZ_ASSERT(bytes <= MaxCodeBytesPerProcess);

    // Maybe skip a page to make allocations less predictable.
    size_t page = cursor_ + (rng_.ref().next() % 2);

    for (size_t i = 0; i < MaxCodePages; i++) {
      // Make sure page + numPages - 1 is a valid index.
      if (page + numPages > MaxCodePages) {
        page = 0;
      }

      bool available = true;
      for (size_t j = 0; j < numPages; j++) {
        if (pages_.contains(page + j)) {
          available = false;
          break;
        }
      }
      if (!available) {
        page++;
        continue;
      }

      // Mark the pages as unavailable.
      for (size_t j = 0; j < numPages; j++) {
        pages_.insert(page + j);
      }

      pagesAllocated_ += numPages;
      MOZ_ASSERT(pagesAllocated_ <= MaxCodePages);

      // If we allocated a small number of pages, move cursor_ to the
      // next page. We don't do this for larger allocations to avoid
      // skipping a large number of small holes.
      if (numPages <= 2) {
        cursor_ = page + numPages;
      }

      p = base_ + page * ExecutableCodePageSize;
      break;
    }
    if (!p) {
      return nullptr;
    }
  }

  // Commit the pages after releasing the lock.
  if (!CommitPages(p, bytes, protection)) {
    deallocate(p, bytes, /* decommit = */ false);
    return nullptr;
  }

  SetMemCheckKind(p, bytes, checkKind);

  return p;
}

void ProcessExecutableMemory::deallocate(void* addr, size_t bytes,
                                         bool decommit) {
  MOZ_ASSERT(initialized());
  MOZ_ASSERT(addr);
  MOZ_ASSERT((uintptr_t(addr) % gc::SystemPageSize()) == 0);
  MOZ_ASSERT(bytes > 0);
  MOZ_ASSERT((bytes % ExecutableCodePageSize) == 0);

  assertValidAddress(addr, bytes);

  size_t firstPage =
      (static_cast<uint8_t*>(addr) - base_) / ExecutableCodePageSize;
  size_t numPages = bytes / ExecutableCodePageSize;

  // Decommit before taking the lock.
  MOZ_MAKE_MEM_NOACCESS(addr, bytes);
  if (decommit) {
    DecommitPages(addr, bytes);
  }

  LockGuard<Mutex> guard(lock_);
  MOZ_ASSERT(numPages <= pagesAllocated_);
  pagesAllocated_ -= numPages;

  for (size_t i = 0; i < numPages; i++) {
    pages_.remove(firstPage + i);
  }

  // Move the cursor back so we can reuse pages instead of fragmenting the
  // whole region.
  if (firstPage < cursor_) {
    cursor_ = firstPage;
  }
}

static ProcessExecutableMemory execMemory;

void* js::jit::AllocateExecutableMemory(size_t bytes,
                                        ProtectionSetting protection,
                                        MemCheckKind checkKind) {
  return execMemory.allocate(bytes, protection, checkKind);
}

void js::jit::DeallocateExecutableMemory(void* addr, size_t bytes) {
  execMemory.deallocate(addr, bytes, /* decommit = */ true);
}
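
// Hypothetical usage of the two entry points above, to illustrate the
// contract: sizes must be a positive multiple of ExecutableCodePageSize, and
// allocation can fail.
//
//   void* code = AllocateExecutableMemory(ExecutableCodePageSize,
//                                         ProtectionSetting::Writable,
//                                         MemCheckKind::MakeUndefined);
//   if (!code) { /* report OOM */ }
//   // ... emit code, then ReprotectRegion() to make it executable ...
//   DeallocateExecutableMemory(code, ExecutableCodePageSize);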

bool js::jit::InitProcessExecutableMemory() { return execMemory.init(); }

void js::jit::ReleaseProcessExecutableMemory() { execMemory.release(); }

size_t js::jit::LikelyAvailableExecutableMemory() {
  // Round down available memory to the closest MB.
  return MaxCodeBytesPerProcess -
         AlignBytes(execMemory.bytesAllocated(), 0x100000U);
}

bool js::jit::CanLikelyAllocateMoreExecutableMemory() {
  // Use an 8 MB buffer.
  static const size_t BufferSize = 8 * 1024 * 1024;

  MOZ_ASSERT(execMemory.bytesAllocated() <= MaxCodeBytesPerProcess);

  return execMemory.bytesAllocated() + BufferSize <= MaxCodeBytesPerProcess;
}

bool js::jit::AddressIsInExecutableMemory(const void* p) {
  return execMemory.containsAddress(p);
}

bool js::jit::ReprotectRegion(void* start, size_t size,
                              ProtectionSetting protection,
                              MustFlushICache flushICache) {
  // Flush ICache when making code executable, before we modify |size|.
  if (flushICache == MustFlushICache::LocalThreadOnly ||
      flushICache == MustFlushICache::AllThreads) {
    MOZ_ASSERT(protection == ProtectionSetting::Executable);
    bool codeIsThreadLocal = flushICache == MustFlushICache::LocalThreadOnly;
    jit::FlushICache(start, size, codeIsThreadLocal);
  }

  // Calculate the start of the page containing this region,
  // and account for this extra memory within size.
  size_t pageSize = gc::SystemPageSize();
  intptr_t startPtr = reinterpret_cast<intptr_t>(start);
  intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
  void* pageStart = reinterpret_cast<void*>(pageStartPtr);
  size += (startPtr - pageStartPtr);

  // Round size up.
  size += (pageSize - 1);
  size &= ~(pageSize - 1);
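
  // For example, with 4 KiB pages, start = 0x10042 and size = 0x20 become
  // pageStart = 0x10000 and size = 0x1000: size first grows by 0x42 to cover
  // the offset into the page, then rounds up to a whole page.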

  MOZ_ASSERT((uintptr_t(pageStart) % pageSize) == 0);

  execMemory.assertValidAddress(pageStart, size);

  // On weak memory systems, make sure new code is visible on all cores before
  // addresses of the code are made public. Now is the latest moment in time
  // when we can do that, and we're assuming that every other thread that has
  // written into the memory that is being reprotected here has synchronized
  // with this thread in such a way that the memory writes have become visible
  // and we therefore only need to execute the fence once here. See bug 1529933
  // for a longer discussion of why this is both necessary and sufficient.
  //
  // We use the C++ fence here -- and not AtomicOperations::fenceSeqCst() --
  // primarily because ReprotectRegion will be called while we construct our
  // own jitted atomics. But the C++ fence is sufficient and correct, too.
#ifdef __wasi__
  MOZ_CRASH("NYI FOR WASI.");
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);

#  ifdef XP_WIN
  DWORD oldProtect;
  DWORD flags = ProtectionSettingToFlags(protection);
  if (!VirtualProtect(pageStart, size, flags, &oldProtect)) {
    return false;
  }
#  else
  unsigned flags = ProtectionSettingToFlags(protection);
  if (mprotect(pageStart, size, flags)) {
    return false;
  }
#  endif
#endif  // __wasi__

  execMemory.assertValidAddress(pageStart, size);
  return true;
}

#if defined(XP_WIN) && defined(NEED_JIT_UNWIND_HANDLING)
static PRUNTIME_FUNCTION RuntimeFunctionCallback(DWORD64 ControlPc,
                                                 PVOID Context) {
  MOZ_ASSERT(sJitExceptionHandler);

  // RegisterExecutableMemory already set up the runtime function in the
  // exception-data page preceding the allocation.
  uint8_t* p = execMemory.base();
  if (!p) {
    return nullptr;
  }
  return (PRUNTIME_FUNCTION)(p - gc::SystemPageSize());
}
#endif