/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmCode.h"

#include "mozilla/Atomics.h"
#include "mozilla/BinarySearch.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/Sprintf.h"

#include <algorithm>

#include "jsnum.h"

#include "jit/Disassemble.h"
#include "jit/ExecutableAllocator.h"
#include "jit/MacroAssembler.h"
#ifdef JS_ION_PERF
#  include "jit/PerfSpewer.h"
#endif
#include "util/Poison.h"
#ifdef MOZ_VTUNE
#  include "vtune/VTuneWrapper.h"
#endif
#include "wasm/WasmModule.h"
#include "wasm/WasmProcess.h"
#include "wasm/WasmSerialize.h"
#include "wasm/WasmStubs.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::BinarySearch;
using mozilla::BinarySearchIf;
using mozilla::MakeEnumeratedRange;
using mozilla::PodAssign;

size_t LinkData::SymbolicLinkArray::serializedSize() const {
  size_t size = 0;
  for (const Uint32Vector& offsets : *this) {
    size += SerializedPodVectorSize(offsets);
  }
  return size;
}

uint8_t* LinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const {
  for (const Uint32Vector& offsets : *this) {
    cursor = SerializePodVector(cursor, offsets);
  }
  return cursor;
}

const uint8_t* LinkData::SymbolicLinkArray::deserialize(const uint8_t* cursor) {
  for (Uint32Vector& offsets : *this) {
    cursor = DeserializePodVector(cursor, &offsets);
    if (!cursor) {
      return nullptr;
    }
  }
  return cursor;
}

size_t LinkData::SymbolicLinkArray::sizeOfExcludingThis(
    MallocSizeOf mallocSizeOf) const {
  size_t size = 0;
  for (const Uint32Vector& offsets : *this) {
    size += offsets.sizeOfExcludingThis(mallocSizeOf);
  }
  return size;
}

size_t LinkData::serializedSize() const {
  return sizeof(pod()) + SerializedPodVectorSize(internalLinks) +
         symbolicLinks.serializedSize();
}

uint8_t* LinkData::serialize(uint8_t* cursor) const {
  MOZ_ASSERT(tier == Tier::Serialized);

  cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
  cursor = SerializePodVector(cursor, internalLinks);
  cursor = symbolicLinks.serialize(cursor);
  return cursor;
}

const uint8_t* LinkData::deserialize(const uint8_t* cursor) {
  MOZ_ASSERT(tier == Tier::Serialized);

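  // Each helper returns nullptr on failure, so this && chain stops at the
  // first failed read and leaves `cursor` null for the caller to check.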
  (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
      (cursor = DeserializePodVector(cursor, &internalLinks)) &&
      (cursor = symbolicLinks.deserialize(cursor));
  return cursor;
}

CodeSegment::~CodeSegment() {
  if (unregisterOnDestroy_) {
    UnregisterCodeSegment(this);
  }
}

static uint32_t RoundupCodeLength(uint32_t codeLength) {
  // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize.
  return RoundUp(codeLength, ExecutableCodePageSize);
}

/* static */
UniqueCodeBytes CodeSegment::AllocateCodeBytes(uint32_t codeLength) {
  if (codeLength > MaxCodeBytesPerProcess) {
    return nullptr;
  }

  static_assert(MaxCodeBytesPerProcess <= INT32_MAX, "rounding won't overflow");
  uint32_t roundedCodeLength = RoundupCodeLength(codeLength);

  void* p =
      AllocateExecutableMemory(roundedCodeLength, ProtectionSetting::Writable,
                               MemCheckKind::MakeUndefined);

  // If the allocation failed and the embedding gives us a last-ditch way to
  // purge all memory (which, in Gecko, does a purging GC/CC/GC), do so and
  // then retry the allocation.
  if (!p) {
    if (OnLargeAllocationFailure) {
      OnLargeAllocationFailure();
      p = AllocateExecutableMemory(roundedCodeLength,
                                   ProtectionSetting::Writable,
                                   MemCheckKind::MakeUndefined);
    }
  }

  if (!p) {
    return nullptr;
  }

  // Zero the padding.
  memset(((uint8_t*)p) + codeLength, 0, roundedCodeLength - codeLength);

  // We account for the bytes allocated in WasmModuleObject::create, where we
  // have the necessary JSContext.

  return UniqueCodeBytes((uint8_t*)p, FreeCode(roundedCodeLength));
}

bool CodeSegment::initialize(const CodeTier& codeTier) {
  MOZ_ASSERT(!initialized());
  codeTier_ = &codeTier;
  MOZ_ASSERT(initialized());

  // In the case of tiering, RegisterCodeSegment() immediately makes this code
  // segment visible to other threads executing the containing module, so only
  // call it once the CodeSegment is fully initialized.
  if (!RegisterCodeSegment(this)) {
    return false;
  }

  // This bool is only used by the destructor, which cannot be called racily,
  // so it is not a problem to mutate it after RegisterCodeSegment().
  MOZ_ASSERT(!unregisterOnDestroy_);
  unregisterOnDestroy_ = true;
  return true;
}

const Code& CodeSegment::code() const {
  MOZ_ASSERT(codeTier_);
  return codeTier_->code();
}

void CodeSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const {
  *code += RoundupCodeLength(length());
}

void FreeCode::operator()(uint8_t* bytes) {
  MOZ_ASSERT(codeLength);
  MOZ_ASSERT(codeLength == RoundupCodeLength(codeLength));

#ifdef MOZ_VTUNE
  vtune::UnmarkBytes(bytes, codeLength);
#endif
  DeallocateExecutableMemory(bytes, codeLength);
}

static bool StaticallyLink(const ModuleSegment& ms, const LinkData& linkData) {
  for (LinkData::InternalLink link : linkData.internalLinks) {
    CodeLabel label;
    label.patchAt()->bind(link.patchAtOffset);
    label.target()->bind(link.targetOffset);
#ifdef JS_CODELABEL_LINKMODE
    label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
#endif
    Assembler::Bind(ms.base(), label);
  }

  if (!EnsureBuiltinThunksInitialized()) {
    return false;
  }

  for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
    const Uint32Vector& offsets = linkData.symbolicLinks[imm];
    if (offsets.empty()) {
      continue;
    }

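    // Each symbolic-address patch site was emitted with a placeholder value
    // of -1; patch in the real target address, verifying the placeholder.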
    void* target = SymbolicAddressTarget(imm);
    for (uint32_t offset : offsets) {
      uint8_t* patchAt = ms.base() + offset;
      Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                         PatchedImmPtr(target),
                                         PatchedImmPtr((void*)-1));
    }
  }

  return true;
}

static void StaticallyUnlink(uint8_t* base, const LinkData& linkData) {
  for (LinkData::InternalLink link : linkData.internalLinks) {
    CodeLabel label;
    label.patchAt()->bind(link.patchAtOffset);
    label.target()->bind(-size_t(base));  // to reset immediate to null
#ifdef JS_CODELABEL_LINKMODE
    label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
#endif
    Assembler::Bind(base, label);
  }

  for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
    const Uint32Vector& offsets = linkData.symbolicLinks[imm];
    if (offsets.empty()) {
      continue;
    }

    void* target = SymbolicAddressTarget(imm);
    for (uint32_t offset : offsets) {
      uint8_t* patchAt = base + offset;
      Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                         PatchedImmPtr((void*)-1),
                                         PatchedImmPtr(target));
    }
  }
}

#ifdef JS_ION_PERF
static bool AppendToString(const char* str, UTF8Bytes* bytes) {
  return bytes->append(str, strlen(str)) && bytes->append('\0');
}
#endif

static void SendCodeRangesToProfiler(const ModuleSegment& ms,
                                     const Metadata& metadata,
                                     const CodeRangeVector& codeRanges) {
  bool enabled = false;
#ifdef JS_ION_PERF
  enabled |= PerfFuncEnabled();
#endif
#ifdef MOZ_VTUNE
  enabled |= vtune::IsProfilingActive();
#endif
  if (!enabled) {
    return;
  }

  for (const CodeRange& codeRange : codeRanges) {
    if (!codeRange.hasFuncIndex()) {
      continue;
    }

    uintptr_t start = uintptr_t(ms.base() + codeRange.begin());
    uintptr_t size = codeRange.end() - codeRange.begin();

    UTF8Bytes name;
    if (!metadata.getFuncNameStandalone(codeRange.funcIndex(), &name)) {
      return;
    }

    // Avoid "unused" warnings
    (void)start;
    (void)size;

#ifdef JS_ION_PERF
    if (PerfFuncEnabled()) {
      const char* file = metadata.filename.get();
      if (codeRange.isFunction()) {
        if (!name.append('\0')) {
          return;
        }
        unsigned line = codeRange.funcLineOrBytecode();
        writePerfSpewerWasmFunctionMap(start, size, file, line, name.begin());
      } else if (codeRange.isInterpEntry()) {
        if (!AppendToString(" slow entry", &name)) {
          return;
        }
        writePerfSpewerWasmMap(start, size, file, name.begin());
      } else if (codeRange.isJitEntry()) {
        if (!AppendToString(" fast entry", &name)) {
          return;
        }
        writePerfSpewerWasmMap(start, size, file, name.begin());
      } else if (codeRange.isImportInterpExit()) {
        if (!AppendToString(" slow exit", &name)) {
          return;
        }
        writePerfSpewerWasmMap(start, size, file, name.begin());
      } else if (codeRange.isImportJitExit()) {
        if (!AppendToString(" fast exit", &name)) {
          return;
        }
        writePerfSpewerWasmMap(start, size, file, name.begin());
      } else {
        MOZ_CRASH("unhandled perf hasFuncIndex type");
      }
    }
#endif
#ifdef MOZ_VTUNE
    if (!vtune::IsProfilingActive()) {
      continue;
    }
    if (!codeRange.isFunction()) {
      continue;
    }
    if (!name.append('\0')) {
      return;
    }
    vtune::MarkWasm(vtune::GenerateUniqueMethodID(), name.begin(), (void*)start,
                    size);
#endif
  }
}

ModuleSegment::ModuleSegment(Tier tier, UniqueCodeBytes codeBytes,
                             uint32_t codeLength, const LinkData& linkData)
    : CodeSegment(std::move(codeBytes), codeLength, CodeSegment::Kind::Module),
      tier_(tier),
      trapCode_(base() + linkData.trapOffset) {}

/* static */
UniqueModuleSegment ModuleSegment::create(Tier tier, MacroAssembler& masm,
                                          const LinkData& linkData) {
  uint32_t codeLength = masm.bytesNeeded();

  UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
  if (!codeBytes) {
    return nullptr;
  }

  masm.executableCopy(codeBytes.get());

  return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
                                       linkData);
}

/* static */
UniqueModuleSegment ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes,
                                          const LinkData& linkData) {
  uint32_t codeLength = unlinkedBytes.length();

  UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
  if (!codeBytes) {
    return nullptr;
  }

  memcpy(codeBytes.get(), unlinkedBytes.begin(), codeLength);

  return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
                                       linkData);
}

bool ModuleSegment::initialize(IsTier2 isTier2, const CodeTier& codeTier,
                               const LinkData& linkData,
                               const Metadata& metadata,
                               const MetadataTier& metadataTier) {
  if (!StaticallyLink(*this, linkData)) {
    return false;
  }

  // Optimized compilation finishes on a background thread, so we must make
  // sure to flush the icaches of all the executing threads.
  FlushICacheSpec flushIcacheSpec = isTier2 == IsTier2::Tier2
                                        ? FlushICacheSpec::AllThreads
                                        : FlushICacheSpec::LocalThreadOnly;

  // Reprotect the whole region to avoid having separate RW and RX mappings.
  if (!ExecutableAllocator::makeExecutableAndFlushICache(
          flushIcacheSpec, base(), RoundupCodeLength(length()))) {
    return false;
  }

  SendCodeRangesToProfiler(*this, metadata, metadataTier.codeRanges);

  // See comments in CodeSegment::initialize() for why this must be last.
  return CodeSegment::initialize(codeTier);
}

size_t ModuleSegment::serializedSize() const {
  return sizeof(uint32_t) + length();
}

void ModuleSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf,
                                  size_t* code, size_t* data) const {
  CodeSegment::addSizeOfMisc(mallocSizeOf, code);
  *data += mallocSizeOf(this);
}

uint8_t* ModuleSegment::serialize(uint8_t* cursor,
                                  const LinkData& linkData) const {
  MOZ_ASSERT(tier() == Tier::Serialized);

  cursor = WriteScalar<uint32_t>(cursor, length());
  uint8_t* serializedBase = cursor;
  cursor = WriteBytes(cursor, base(), length());
  StaticallyUnlink(serializedBase, linkData);
  return cursor;
}

/* static */ const uint8_t* ModuleSegment::deserialize(
    const uint8_t* cursor, const LinkData& linkData,
    UniqueModuleSegment* segment) {
  uint32_t length;
  cursor = ReadScalar<uint32_t>(cursor, &length);
  if (!cursor) {
    return nullptr;
  }

  UniqueCodeBytes bytes = AllocateCodeBytes(length);
  if (!bytes) {
    return nullptr;
  }

  cursor = ReadBytes(cursor, bytes.get(), length);
  if (!cursor) {
    return nullptr;
  }

  *segment = js::MakeUnique<ModuleSegment>(Tier::Serialized, std::move(bytes),
                                           length, linkData);
  if (!*segment) {
    return nullptr;
  }

  return cursor;
}

const CodeRange* ModuleSegment::lookupRange(const void* pc) const {
  return codeTier().lookupRange(pc);
}

size_t FuncExport::serializedSize() const {
  return funcType_.serializedSize() + sizeof(pod);
}

uint8_t* FuncExport::serialize(uint8_t* cursor) const {
  cursor = funcType_.serialize(cursor);
  cursor = WriteBytes(cursor, &pod, sizeof(pod));
  return cursor;
}

const uint8_t* FuncExport::deserialize(const uint8_t* cursor) {
  (cursor = funcType_.deserialize(cursor)) &&
      (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
  return cursor;
}

size_t FuncExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return funcType_.sizeOfExcludingThis(mallocSizeOf);
}

size_t FuncImport::serializedSize() const {
  return funcType_.serializedSize() + sizeof(pod);
}

uint8_t* FuncImport::serialize(uint8_t* cursor) const {
  cursor = funcType_.serialize(cursor);
  cursor = WriteBytes(cursor, &pod, sizeof(pod));
  return cursor;
}

const uint8_t* FuncImport::deserialize(const uint8_t* cursor) {
  (cursor = funcType_.deserialize(cursor)) &&
      (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
  return cursor;
}

size_t FuncImport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return funcType_.sizeOfExcludingThis(mallocSizeOf);
}

static size_t StringLengthWithNullChar(const char* chars) {
  return chars ? strlen(chars) + 1 : 0;
}

size_t CacheableChars::serializedSize() const {
  return sizeof(uint32_t) + StringLengthWithNullChar(get());
}

uint8_t* CacheableChars::serialize(uint8_t* cursor) const {
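  // Serialized layout: a uint32_t length (including the terminating '\0', or
  // 0 for a null string) followed by that many bytes.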
  uint32_t lengthWithNullChar = StringLengthWithNullChar(get());
  cursor = WriteScalar<uint32_t>(cursor, lengthWithNullChar);
  cursor = WriteBytes(cursor, get(), lengthWithNullChar);
  return cursor;
}

const uint8_t* CacheableChars::deserialize(const uint8_t* cursor) {
  uint32_t lengthWithNullChar;
  cursor = ReadBytes(cursor, &lengthWithNullChar, sizeof(uint32_t));

  if (lengthWithNullChar) {
    reset(js_pod_malloc<char>(lengthWithNullChar));
    if (!get()) {
      return nullptr;
    }

    cursor = ReadBytes(cursor, get(), lengthWithNullChar);
  } else {
    MOZ_ASSERT(!get());
  }

  return cursor;
}

size_t CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return mallocSizeOf(get());
}

size_t MetadataTier::serializedSize() const {
  return SerializedPodVectorSize(funcToCodeRange) +
         SerializedPodVectorSize(codeRanges) +
         SerializedPodVectorSize(callSites) +
#ifdef ENABLE_WASM_EXCEPTIONS
         SerializedPodVectorSize(tryNotes) +
#endif
         trapSites.serializedSize() + SerializedVectorSize(funcImports) +
         SerializedVectorSize(funcExports);
}

size_t MetadataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return funcToCodeRange.sizeOfExcludingThis(mallocSizeOf) +
         codeRanges.sizeOfExcludingThis(mallocSizeOf) +
         callSites.sizeOfExcludingThis(mallocSizeOf) +
#ifdef ENABLE_WASM_EXCEPTIONS
         tryNotes.sizeOfExcludingThis(mallocSizeOf) +
#endif
         trapSites.sizeOfExcludingThis(mallocSizeOf) +
         SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
         SizeOfVectorExcludingThis(funcExports, mallocSizeOf);
}

uint8_t* MetadataTier::serialize(uint8_t* cursor) const {
  cursor = SerializePodVector(cursor, funcToCodeRange);
  cursor = SerializePodVector(cursor, codeRanges);
  cursor = SerializePodVector(cursor, callSites);
#ifdef ENABLE_WASM_EXCEPTIONS
  cursor = SerializePodVector(cursor, tryNotes);
#endif
  cursor = trapSites.serialize(cursor);
  cursor = SerializeVector(cursor, funcImports);
  cursor = SerializeVector(cursor, funcExports);
  MOZ_ASSERT(debugTrapFarJumpOffsets.empty());
  return cursor;
}

/* static */ const uint8_t* MetadataTier::deserialize(const uint8_t* cursor) {
  (cursor = DeserializePodVector(cursor, &funcToCodeRange)) &&
      (cursor = DeserializePodVector(cursor, &codeRanges)) &&
      (cursor = DeserializePodVector(cursor, &callSites)) &&
#ifdef ENABLE_WASM_EXCEPTIONS
      (cursor = DeserializePodVector(cursor, &tryNotes)) &&
#endif
      (cursor = trapSites.deserialize(cursor)) &&
      (cursor = DeserializeVector(cursor, &funcImports)) &&
      (cursor = DeserializeVector(cursor, &funcExports));
  MOZ_ASSERT(debugTrapFarJumpOffsets.empty());
  return cursor;
}

UniqueLazyStubSegment LazyStubSegment::create(const CodeTier& codeTier,
                                              size_t length) {
  UniqueCodeBytes codeBytes = AllocateCodeBytes(length);
  if (!codeBytes) {
    return nullptr;
  }

  auto segment = js::MakeUnique<LazyStubSegment>(std::move(codeBytes), length);
  if (!segment || !segment->initialize(codeTier)) {
    return nullptr;
  }

  return segment;
}

bool LazyStubSegment::hasSpace(size_t bytes) const {
  MOZ_ASSERT(AlignBytesNeeded(bytes) == bytes);
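  // Written as `usedBytes_ <= length() - bytes` (rather than adding the two)
  // so the check cannot overflow.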
  return bytes <= length() && usedBytes_ <= length() - bytes;
}

bool LazyStubSegment::addStubs(size_t codeLength,
                               const Uint32Vector& funcExportIndices,
                               const FuncExportVector& funcExports,
                               const CodeRangeVector& codeRanges,
                               uint8_t** codePtr,
                               size_t* indexFirstInsertedCodeRange) {
  MOZ_ASSERT(hasSpace(codeLength));

  size_t offsetInSegment = usedBytes_;
  *codePtr = base() + usedBytes_;
  usedBytes_ += codeLength;

  *indexFirstInsertedCodeRange = codeRanges_.length();

  if (!codeRanges_.reserve(codeRanges_.length() + 2 * codeRanges.length())) {
    return false;
  }

  size_t i = 0;
  for (uint32_t funcExportIndex : funcExportIndices) {
    const CodeRange& interpRange = codeRanges[i];
    MOZ_ASSERT(interpRange.isInterpEntry());
    MOZ_ASSERT(interpRange.funcIndex() ==
               funcExports[funcExportIndex].funcIndex());

    codeRanges_.infallibleAppend(interpRange);
    codeRanges_.back().offsetBy(offsetInSegment);
    i++;

    if (!funcExports[funcExportIndex].canHaveJitEntry()) {
      continue;
    }

    const CodeRange& jitRange = codeRanges[i];
    MOZ_ASSERT(jitRange.isJitEntry());
    MOZ_ASSERT(jitRange.funcIndex() == interpRange.funcIndex());

    codeRanges_.infallibleAppend(jitRange);
    codeRanges_.back().offsetBy(offsetInSegment);
    i++;
  }

  return true;
}

const CodeRange* LazyStubSegment::lookupRange(const void* pc) const {
  // Do not search if the search will not find anything.  There can be many
  // segments, each with many entries.
  if (pc < base() || pc >= base() + length()) {
    return nullptr;
  }
  return LookupInSorted(codeRanges_,
                        CodeRange::OffsetInCode((uint8_t*)pc - base()));
}

void LazyStubSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                                    size_t* data) const {
  CodeSegment::addSizeOfMisc(mallocSizeOf, code);
  *data += codeRanges_.sizeOfExcludingThis(mallocSizeOf);
  *data += mallocSizeOf(this);
}

// When allocating a single stub to a page, we should not always place the stub
// at the beginning of the page as the stubs will tend to thrash the icache by
// creating conflicts (everything ends up in the same cache set).  Instead,
// locate stubs at different line offsets up to 3/4 the system page size (the
// code allocation quantum).
//
// This may be called on background threads, hence the atomic.

static void PadCodeForSingleStub(MacroAssembler& masm) {
  // Assume 64B icache line size
  static uint8_t zeroes[64];

  // The counter serves only to spread the code out; it has no other meaning
  // and can wrap around.
  static mozilla::Atomic<uint32_t, mozilla::MemoryOrdering::ReleaseAcquire>
      counter(0);

  uint32_t maxPadLines = ((gc::SystemPageSize() * 3) / 4) / sizeof(zeroes);
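  // For example, with a 4 KiB system page this gives up to 48 distinct
  // cache-line offsets (3072 / 64).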
  uint32_t padLines = counter++ % maxPadLines;
  for (uint32_t i = 0; i < padLines; i++) {
    masm.appendRawCode(zeroes, sizeof(zeroes));
  }
}

static constexpr unsigned LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE = 8 * 1024;

bool LazyStubTier::createManyEntryStubs(const Uint32Vector& funcExportIndices,
                                        const CodeTier& codeTier,
                                        bool flushAllThreadsIcaches,
                                        size_t* stubSegmentIndex) {
  MOZ_ASSERT(funcExportIndices.length());

  LifoAlloc lifo(LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE);
  TempAllocator alloc(&lifo);
  JitContext jitContext(&alloc);
  WasmMacroAssembler masm(alloc);

  if (funcExportIndices.length() == 1) {
    PadCodeForSingleStub(masm);
  }

  const MetadataTier& metadata = codeTier.metadata();
  const FuncExportVector& funcExports = metadata.funcExports;
  uint8_t* moduleSegmentBase = codeTier.segment().base();

  CodeRangeVector codeRanges;
  DebugOnly<uint32_t> numExpectedRanges = 0;
  for (uint32_t funcExportIndex : funcExportIndices) {
    const FuncExport& fe = funcExports[funcExportIndex];
    // Exports that don't support a jit entry get only the interp entry.
    numExpectedRanges += (fe.canHaveJitEntry() ? 2 : 1);
    void* calleePtr =
        moduleSegmentBase + metadata.codeRange(fe).funcUncheckedCallEntry();
    Maybe<ImmPtr> callee;
    callee.emplace(calleePtr, ImmPtr::NoCheckToken());
    if (!GenerateEntryStubs(masm, funcExportIndex, fe, callee,
                            /* asmjs */ false, &codeRanges)) {
      return false;
    }
  }
  MOZ_ASSERT(codeRanges.length() == numExpectedRanges,
             "incorrect number of entries per function");

  masm.finish();

  MOZ_ASSERT(masm.callSites().empty());
  MOZ_ASSERT(masm.callSiteTargets().empty());
  MOZ_ASSERT(masm.trapSites().empty());
#ifdef ENABLE_WASM_EXCEPTIONS
  MOZ_ASSERT(masm.tryNotes().empty());
#endif

  if (masm.oom()) {
    return false;
  }

  size_t codeLength = LazyStubSegment::AlignBytesNeeded(masm.bytesNeeded());

  if (!stubSegments_.length() ||
      !stubSegments_[lastStubSegmentIndex_]->hasSpace(codeLength)) {
    size_t newSegmentSize = std::max(codeLength, ExecutableCodePageSize);
    UniqueLazyStubSegment newSegment =
        LazyStubSegment::create(codeTier, newSegmentSize);
    if (!newSegment) {
      return false;
    }
    lastStubSegmentIndex_ = stubSegments_.length();
    if (!stubSegments_.emplaceBack(std::move(newSegment))) {
      return false;
    }
  }

  LazyStubSegment* segment = stubSegments_[lastStubSegmentIndex_].get();
  *stubSegmentIndex = lastStubSegmentIndex_;

  size_t interpRangeIndex;
  uint8_t* codePtr = nullptr;
  if (!segment->addStubs(codeLength, funcExportIndices, funcExports, codeRanges,
                         &codePtr, &interpRangeIndex)) {
    return false;
  }

  masm.executableCopy(codePtr);
  PatchDebugSymbolicAccesses(codePtr, masm);
  memset(codePtr + masm.bytesNeeded(), 0, codeLength - masm.bytesNeeded());

  for (const CodeLabel& label : masm.codeLabels()) {
    Assembler::Bind(codePtr, label);
  }

  // Optimized compilation finishes on a background thread, so we must make
  // sure to flush the icaches of all the executing threads.
  FlushICacheSpec flushIcacheSpec = flushAllThreadsIcaches
                                        ? FlushICacheSpec::AllThreads
                                        : FlushICacheSpec::LocalThreadOnly;
  if (!ExecutableAllocator::makeExecutableAndFlushICache(flushIcacheSpec,
                                                         codePtr, codeLength)) {
    return false;
  }

  // Create lazy function exports for funcIndex -> entry lookup.
  if (!exports_.reserve(exports_.length() + funcExportIndices.length())) {
    return false;
  }

  for (uint32_t funcExportIndex : funcExportIndices) {
    const FuncExport& fe = funcExports[funcExportIndex];

    DebugOnly<CodeRange> cr = segment->codeRanges()[interpRangeIndex];
    MOZ_ASSERT(cr.value.isInterpEntry());
    MOZ_ASSERT(cr.value.funcIndex() == fe.funcIndex());

    LazyFuncExport lazyExport(fe.funcIndex(), *stubSegmentIndex,
                              interpRangeIndex);

    size_t exportIndex;
    const uint32_t targetFunctionIndex = fe.funcIndex();
    MOZ_ALWAYS_FALSE(BinarySearchIf(
        exports_, 0, exports_.length(),
        [targetFunctionIndex](const LazyFuncExport& funcExport) {
          return targetFunctionIndex - funcExport.funcIndex;
        },
        &exportIndex));
    MOZ_ALWAYS_TRUE(
        exports_.insert(exports_.begin() + exportIndex, std::move(lazyExport)));

    // Exports that don't support a jit entry get only the interp entry.
    interpRangeIndex += (fe.canHaveJitEntry() ? 2 : 1);
  }

  return true;
}

bool LazyStubTier::createOneEntryStub(uint32_t funcExportIndex,
                                      const CodeTier& codeTier) {
  Uint32Vector funcExportIndexes;
  if (!funcExportIndexes.append(funcExportIndex)) {
    return false;
  }

  // This happens on the executing thread (when createOneEntryStub is called
  // from GetInterpEntryAndEnsureStubs), so no need to flush the icaches on all
  // the threads.
  bool flushAllThreadIcaches = false;

  size_t stubSegmentIndex;
  if (!createManyEntryStubs(funcExportIndexes, codeTier, flushAllThreadIcaches,
                            &stubSegmentIndex)) {
    return false;
  }

  const UniqueLazyStubSegment& segment = stubSegments_[stubSegmentIndex];
  const CodeRangeVector& codeRanges = segment->codeRanges();

  // Exports that don't support a jit entry get only the interp entry.
  if (!codeTier.metadata().funcExports[funcExportIndex].canHaveJitEntry()) {
    MOZ_ASSERT(codeRanges.length() >= 1);
    MOZ_ASSERT(codeRanges.back().isInterpEntry());
    return true;
  }

  MOZ_ASSERT(codeRanges.length() >= 2);
  MOZ_ASSERT(codeRanges[codeRanges.length() - 2].isInterpEntry());

  const CodeRange& cr = codeRanges[codeRanges.length() - 1];
  MOZ_ASSERT(cr.isJitEntry());

  codeTier.code().setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
  return true;
}

bool LazyStubTier::createTier2(const Uint32Vector& funcExportIndices,
                               const CodeTier& codeTier,
                               Maybe<size_t>* outStubSegmentIndex) {
  if (!funcExportIndices.length()) {
    return true;
  }

  // This compilation happens on a background compiler thread, so the icache
  // may need to be flushed on all the threads.
  bool flushAllThreadIcaches = true;

  size_t stubSegmentIndex;
  if (!createManyEntryStubs(funcExportIndices, codeTier, flushAllThreadIcaches,
                            &stubSegmentIndex)) {
    return false;
  }

  outStubSegmentIndex->emplace(stubSegmentIndex);
  return true;
}

void LazyStubTier::setJitEntries(const Maybe<size_t>& stubSegmentIndex,
                                 const Code& code) {
  if (!stubSegmentIndex) {
    return;
  }
  const UniqueLazyStubSegment& segment = stubSegments_[*stubSegmentIndex];
  for (const CodeRange& cr : segment->codeRanges()) {
    if (!cr.isJitEntry()) {
      continue;
    }
    code.setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
  }
}

bool LazyStubTier::hasEntryStub(uint32_t funcIndex) const {
  size_t match;
  return BinarySearchIf(
      exports_, 0, exports_.length(),
      [funcIndex](const LazyFuncExport& funcExport) {
        return funcIndex - funcExport.funcIndex;
      },
      &match);
}

void* LazyStubTier::lookupInterpEntry(uint32_t funcIndex) const {
  size_t match;
  if (!BinarySearchIf(
          exports_, 0, exports_.length(),
          [funcIndex](const LazyFuncExport& funcExport) {
            return funcIndex - funcExport.funcIndex;
          },
          &match)) {
    return nullptr;
  }
  const LazyFuncExport& fe = exports_[match];
  const LazyStubSegment& stub = *stubSegments_[fe.lazyStubSegmentIndex];
  return stub.base() + stub.codeRanges()[fe.funcCodeRangeIndex].begin();
}

void LazyStubTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                                 size_t* data) const {
  *data += sizeof(*this);
  *data += exports_.sizeOfExcludingThis(mallocSizeOf);
  for (const UniqueLazyStubSegment& stub : stubSegments_) {
    stub->addSizeOfMisc(mallocSizeOf, code, data);
  }
}

bool MetadataTier::clone(const MetadataTier& src) {
  if (!funcToCodeRange.appendAll(src.funcToCodeRange)) {
    return false;
  }
  if (!codeRanges.appendAll(src.codeRanges)) {
    return false;
  }
  if (!callSites.appendAll(src.callSites)) {
    return false;
  }
  if (!debugTrapFarJumpOffsets.appendAll(src.debugTrapFarJumpOffsets)) {
    return false;
  }
#ifdef ENABLE_WASM_EXCEPTIONS
  if (!tryNotes.appendAll(src.tryNotes)) {
    return false;
  }
#endif

  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    if (!trapSites[trap].appendAll(src.trapSites[trap])) {
      return false;
    }
  }

  if (!funcImports.resize(src.funcImports.length())) {
    return false;
  }
  for (size_t i = 0; i < src.funcImports.length(); i++) {
    funcImports[i].clone(src.funcImports[i]);
  }

  if (!funcExports.resize(src.funcExports.length())) {
    return false;
  }
  for (size_t i = 0; i < src.funcExports.length(); i++) {
    funcExports[i].clone(src.funcExports[i]);
  }

  return true;
}

size_t Metadata::serializedSize() const {
  return sizeof(pod()) + SerializedVectorSize(types) +
         SerializedPodVectorSize(typesRenumbering) +
         SerializedVectorSize(globals) + SerializedPodVectorSize(tables) +
#ifdef ENABLE_WASM_EXCEPTIONS
         SerializedVectorSize(tags) +
#endif
         sizeof(moduleName) + SerializedPodVectorSize(funcNames) +
         filename.serializedSize() + sourceMapURL.serializedSize();
}

uint8_t* Metadata::serialize(uint8_t* cursor) const {
  MOZ_ASSERT(!debugEnabled && debugFuncArgTypes.empty() &&
             debugFuncReturnTypes.empty());
  cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
  cursor = SerializeVector(cursor, types);
  cursor = SerializePodVector(cursor, typesRenumbering);
  cursor = SerializeVector(cursor, globals);
  cursor = SerializePodVector(cursor, tables);
#ifdef ENABLE_WASM_EXCEPTIONS
  cursor = SerializeVector(cursor, tags);
#endif
  cursor = WriteBytes(cursor, &moduleName, sizeof(moduleName));
  cursor = SerializePodVector(cursor, funcNames);
  cursor = filename.serialize(cursor);
  cursor = sourceMapURL.serialize(cursor);
  return cursor;
}

/* static */ const uint8_t* Metadata::deserialize(const uint8_t* cursor) {
  (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
      (cursor = DeserializeVector(cursor, &types)) &&
      (cursor = DeserializePodVector(cursor, &typesRenumbering)) &&
      (cursor = DeserializeVector(cursor, &globals)) &&
      (cursor = DeserializePodVector(cursor, &tables)) &&
#ifdef ENABLE_WASM_EXCEPTIONS
      (cursor = DeserializeVector(cursor, &tags)) &&
#endif
      (cursor = ReadBytes(cursor, &moduleName, sizeof(moduleName))) &&
      (cursor = DeserializePodVector(cursor, &funcNames)) &&
      (cursor = filename.deserialize(cursor)) &&
      (cursor = sourceMapURL.deserialize(cursor));
  debugEnabled = false;
  debugFuncArgTypes.clear();
  debugFuncReturnTypes.clear();
  return cursor;
}

size_t Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return SizeOfVectorExcludingThis(types, mallocSizeOf) +
         typesRenumbering.sizeOfExcludingThis(mallocSizeOf) +
         globals.sizeOfExcludingThis(mallocSizeOf) +
         tables.sizeOfExcludingThis(mallocSizeOf) +
#ifdef ENABLE_WASM_EXCEPTIONS
         tags.sizeOfExcludingThis(mallocSizeOf) +
#endif
         funcNames.sizeOfExcludingThis(mallocSizeOf) +
         filename.sizeOfExcludingThis(mallocSizeOf) +
         sourceMapURL.sizeOfExcludingThis(mallocSizeOf);
}

struct ProjectFuncIndex {
  const FuncExportVector& funcExports;
  explicit ProjectFuncIndex(const FuncExportVector& funcExports)
      : funcExports(funcExports) {}
  uint32_t operator[](size_t index) const {
    return funcExports[index].funcIndex();
  }
};

FuncExport& MetadataTier::lookupFuncExport(
    uint32_t funcIndex, size_t* funcExportIndex /* = nullptr */) {
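  // funcExports is kept sorted by function index, which is what makes the
  // binary search below valid.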
  size_t match;
  if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(),
                    funcIndex, &match)) {
    MOZ_CRASH("missing function export");
  }
  if (funcExportIndex) {
    *funcExportIndex = match;
  }
  return funcExports[match];
}

const FuncExport& MetadataTier::lookupFuncExport(
    uint32_t funcIndex, size_t* funcExportIndex) const {
  return const_cast<MetadataTier*>(this)->lookupFuncExport(funcIndex,
                                                           funcExportIndex);
}

static bool AppendName(const Bytes& namePayload, const Name& name,
                       UTF8Bytes* bytes) {
  MOZ_RELEASE_ASSERT(name.offsetInNamePayload <= namePayload.length());
  MOZ_RELEASE_ASSERT(name.length <=
                     namePayload.length() - name.offsetInNamePayload);
  return bytes->append(
      (const char*)namePayload.begin() + name.offsetInNamePayload, name.length);
}

static bool AppendFunctionIndexName(uint32_t funcIndex, UTF8Bytes* bytes) {
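  // The synthesized name has the form "wasm-function[<index>]", for example
  // "wasm-function[42]".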
  const char beforeFuncIndex[] = "wasm-function[";
  const char afterFuncIndex[] = "]";

  ToCStringBuf cbuf;
  const char* funcIndexStr = NumberToCString(nullptr, &cbuf, funcIndex);
  MOZ_ASSERT(funcIndexStr);

  return bytes->append(beforeFuncIndex, strlen(beforeFuncIndex)) &&
         bytes->append(funcIndexStr, strlen(funcIndexStr)) &&
         bytes->append(afterFuncIndex, strlen(afterFuncIndex));
}

bool Metadata::getFuncName(NameContext ctx, uint32_t funcIndex,
                           UTF8Bytes* name) const {
  if (moduleName && moduleName->length != 0) {
    if (!AppendName(namePayload->bytes, *moduleName, name)) {
      return false;
    }
    if (!name->append('.')) {
      return false;
    }
  }

  if (funcIndex < funcNames.length() && funcNames[funcIndex].length != 0) {
    return AppendName(namePayload->bytes, funcNames[funcIndex], name);
  }

  if (ctx == NameContext::BeforeLocation) {
    return true;
  }

  return AppendFunctionIndexName(funcIndex, name);
}

bool CodeTier::initialize(IsTier2 isTier2, const Code& code,
                          const LinkData& linkData, const Metadata& metadata) {
  MOZ_ASSERT(!initialized());
  code_ = &code;

  MOZ_ASSERT(lazyStubs_.readLock()->entryStubsEmpty());

  // See comments in CodeSegment::initialize() for why this must be last.
  if (!segment_->initialize(isTier2, *this, linkData, metadata, *metadata_)) {
    return false;
  }

  MOZ_ASSERT(initialized());
  return true;
}

size_t CodeTier::serializedSize() const {
  return segment_->serializedSize() + metadata_->serializedSize();
}

uint8_t* CodeTier::serialize(uint8_t* cursor, const LinkData& linkData) const {
  cursor = metadata_->serialize(cursor);
  cursor = segment_->serialize(cursor, linkData);
  return cursor;
}

/* static */ const uint8_t* CodeTier::deserialize(const uint8_t* cursor,
                                                  const LinkData& linkData,
                                                  UniqueCodeTier* codeTier) {
  auto metadata = js::MakeUnique<MetadataTier>(Tier::Serialized);
  if (!metadata) {
    return nullptr;
  }
  cursor = metadata->deserialize(cursor);
  if (!cursor) {
    return nullptr;
  }

  UniqueModuleSegment segment;
  cursor = ModuleSegment::deserialize(cursor, linkData, &segment);
  if (!cursor) {
    return nullptr;
  }

  *codeTier = js::MakeUnique<CodeTier>(std::move(metadata), std::move(segment));
  if (!*codeTier) {
    return nullptr;
  }

  return cursor;
}

void CodeTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                             size_t* data) const {
  segment_->addSizeOfMisc(mallocSizeOf, code, data);
  lazyStubs_.readLock()->addSizeOfMisc(mallocSizeOf, code, data);
  *data += metadata_->sizeOfExcludingThis(mallocSizeOf);
}

const CodeRange* CodeTier::lookupRange(const void* pc) const {
  CodeRange::OffsetInCode target((uint8_t*)pc - segment_->base());
  return LookupInSorted(metadata_->codeRanges, target);
}

#ifdef ENABLE_WASM_EXCEPTIONS
const wasm::WasmTryNote* CodeTier::lookupWasmTryNote(const void* pc) const {
  size_t target = (uint8_t*)pc - segment_->base();
  const WasmTryNoteVector& tryNotes = metadata_->tryNotes;

  // We find the first hit (there may be multiple) to obtain the innermost
  // handler, which is why we cannot binary search here.
  for (const auto& tryNote : tryNotes) {
    if (target > tryNote.begin && target <= tryNote.end) {
      return &tryNote;
    }
  }

  return nullptr;
}
#endif

bool JumpTables::init(CompileMode mode, const ModuleSegment& ms,
                      const CodeRangeVector& codeRanges) {
  static_assert(JSScript::offsetOfJitCodeRaw() == 0,
                "wasm fast jit entry is at (void*) jit[funcIndex]");

  mode_ = mode;

  size_t numFuncs = 0;
  for (const CodeRange& cr : codeRanges) {
    if (cr.isFunction()) {
      numFuncs++;
    }
  }

  numFuncs_ = numFuncs;

  if (mode_ == CompileMode::Tier1) {
    tiering_ = TablePointer(js_pod_calloc<void*>(numFuncs));
    if (!tiering_) {
      return false;
    }
  }

  // The number of jit entries is overestimated, but it is simpler when
  // filling/looking up the jit entries and safe (worst case we'll crash
  // because of a null deref when trying to call the jit entry of an
  // unexported function).
  jit_ = TablePointer(js_pod_calloc<void*>(numFuncs));
  if (!jit_) {
    return false;
  }

  uint8_t* codeBase = ms.base();
  for (const CodeRange& cr : codeRanges) {
    if (cr.isFunction()) {
      setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry());
    } else if (cr.isJitEntry()) {
      setJitEntry(cr.funcIndex(), codeBase + cr.begin());
    }
  }
  return true;
}

Code::Code(UniqueCodeTier tier1, const Metadata& metadata,
           JumpTables&& maybeJumpTables)
    : tier1_(std::move(tier1)),
      metadata_(&metadata),
      profilingLabels_(mutexid::WasmCodeProfilingLabels,
                       CacheableCharsVector()),
      jumpTables_(std::move(maybeJumpTables)) {}

bool Code::initialize(const LinkData& linkData) {
  MOZ_ASSERT(!initialized());

  if (!tier1_->initialize(IsTier2::NotTier2, *this, linkData, *metadata_)) {
    return false;
  }

  MOZ_ASSERT(initialized());
  return true;
}

bool Code::setAndBorrowTier2(UniqueCodeTier tier2, const LinkData& linkData,
                             const CodeTier** borrowedTier) const {
  MOZ_RELEASE_ASSERT(!hasTier2());
  MOZ_RELEASE_ASSERT(tier2->tier() == Tier::Optimized &&
                     tier1_->tier() == Tier::Baseline);

  if (!tier2->initialize(IsTier2::Tier2, *this, linkData, *metadata_)) {
    return false;
  }

  tier2_ = std::move(tier2);
  *borrowedTier = &*tier2_;

  return true;
}

void Code::commitTier2() const {
  MOZ_RELEASE_ASSERT(!hasTier2());
  hasTier2_ = true;
  MOZ_ASSERT(hasTier2());

  // To maintain the invariant that tier2_ is never read without the tier
  // having been committed, this checks tier2_ here instead of before setting
  // hasTier2_ (as would be natural).  See comment in WasmCode.h.
  MOZ_RELEASE_ASSERT(tier2_.get());
}

uint32_t Code::getFuncIndex(JSFunction* fun) const {
  MOZ_ASSERT(fun->isWasm() || fun->isAsmJSNative());
  if (!fun->isWasmWithJitEntry()) {
    return fun->wasmFuncIndex();
  }
  return jumpTables_.funcIndexFromJitEntry(fun->wasmJitEntry());
}

Tiers Code::tiers() const {
  if (hasTier2()) {
    return Tiers(tier1_->tier(), tier2_->tier());
  }
  return Tiers(tier1_->tier());
}

bool Code::hasTier(Tier t) const {
  if (hasTier2() && tier2_->tier() == t) {
    return true;
  }
  return tier1_->tier() == t;
}

Tier Code::stableTier() const { return tier1_->tier(); }

Tier Code::bestTier() const {
  if (hasTier2()) {
    return tier2_->tier();
  }
  return tier1_->tier();
}

const CodeTier& Code::codeTier(Tier tier) const {
  switch (tier) {
    case Tier::Baseline:
      if (tier1_->tier() == Tier::Baseline) {
        MOZ_ASSERT(tier1_->initialized());
        return *tier1_;
      }
      MOZ_CRASH("No code segment at this tier");
    case Tier::Optimized:
      if (tier1_->tier() == Tier::Optimized) {
        MOZ_ASSERT(tier1_->initialized());
        return *tier1_;
      }
      // It is incorrect to ask for the optimized tier without there being such
      // a tier and the tier having been committed.  The guard here could
      // instead be `if (hasTier2()) ... ` but codeTier(t) should not be called
      // in contexts where that test is necessary.
      MOZ_RELEASE_ASSERT(hasTier2());
      MOZ_ASSERT(tier2_->initialized());
      return *tier2_;
  }
  MOZ_CRASH();
}

bool Code::containsCodePC(const void* pc) const {
  for (Tier t : tiers()) {
    const ModuleSegment& ms = segment(t);
    if (ms.containsCodePC(pc)) {
      return true;
    }
  }
  return false;
}

struct CallSiteRetAddrOffset {
  const CallSiteVector& callSites;
  explicit CallSiteRetAddrOffset(const CallSiteVector& callSites)
      : callSites(callSites) {}
  uint32_t operator[](size_t index) const {
    return callSites[index].returnAddressOffset();
  }
};

const CallSite* Code::lookupCallSite(void* returnAddress) const {
  for (Tier t : tiers()) {
    uint32_t target = ((uint8_t*)returnAddress) - segment(t).base();
    size_t lowerBound = 0;
    size_t upperBound = metadata(t).callSites.length();

    size_t match;
    if (BinarySearch(CallSiteRetAddrOffset(metadata(t).callSites), lowerBound,
                     upperBound, target, &match)) {
      return &metadata(t).callSites[match];
    }
  }

  return nullptr;
}

const CodeRange* Code::lookupFuncRange(void* pc) const {
  for (Tier t : tiers()) {
    const CodeRange* result = codeTier(t).lookupRange(pc);
    if (result && result->isFunction()) {
      return result;
    }
  }
  return nullptr;
}

const StackMap* Code::lookupStackMap(uint8_t* nextPC) const {
  for (Tier t : tiers()) {
    const StackMap* result = metadata(t).stackMaps.findMap(nextPC);
    if (result) {
      return result;
    }
  }
  return nullptr;
}

#ifdef ENABLE_WASM_EXCEPTIONS
const wasm::WasmTryNote* Code::lookupWasmTryNote(void* pc, Tier* tier) const {
  for (Tier t : tiers()) {
    const WasmTryNote* result = codeTier(t).lookupWasmTryNote(pc);
    if (result) {
      *tier = t;
      return result;
    }
  }
  return nullptr;
}
#endif

struct TrapSitePCOffset {
  const TrapSiteVector& trapSites;
  explicit TrapSitePCOffset(const TrapSiteVector& trapSites)
      : trapSites(trapSites) {}
  uint32_t operator[](size_t index) const { return trapSites[index].pcOffset; }
};

bool Code::lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const {
  for (Tier t : tiers()) {
    const TrapSiteVectorArray& trapSitesArray = metadata(t).trapSites;
    for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
      const TrapSiteVector& trapSites = trapSitesArray[trap];

      uint32_t target = ((uint8_t*)pc) - segment(t).base();
      size_t lowerBound = 0;
      size_t upperBound = trapSites.length();

      size_t match;
      if (BinarySearch(TrapSitePCOffset(trapSites), lowerBound, upperBound,
                       target, &match)) {
        MOZ_ASSERT(segment(t).containsCodePC(pc));
        *trapOut = trap;
        *bytecode = trapSites[match].bytecode;
        return true;
      }
    }
  }

  return false;
}

// When enabled, generate profiling labels for every name in funcNames_ that is
// the name of some Function CodeRange. This involves malloc() so do it now
// since, once we start sampling, we'll be in a signal-handling context where
// we cannot malloc.
void Code::ensureProfilingLabels(bool profilingEnabled) const {
  auto labels = profilingLabels_.lock();

  if (!profilingEnabled) {
    labels->clear();
    return;
  }

  if (!labels->empty()) {
    return;
  }

  // Any tier will do; we only need tier-invariant data that are incidentally
  // stored with the code ranges.

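  // Each label has the form "<funcName> (<filename>:<lineOrBytecode>)".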
  for (const CodeRange& codeRange : metadata(stableTier()).codeRanges) {
    if (!codeRange.isFunction()) {
      continue;
    }

    ToCStringBuf cbuf;
    const char* bytecodeStr =
        NumberToCString(nullptr, &cbuf, codeRange.funcLineOrBytecode());
    MOZ_ASSERT(bytecodeStr);

    UTF8Bytes name;
    if (!metadata().getFuncNameStandalone(codeRange.funcIndex(), &name)) {
      return;
    }
    if (!name.append(" (", 2)) {
      return;
    }

    if (const char* filename = metadata().filename.get()) {
      if (!name.append(filename, strlen(filename))) {
        return;
      }
    } else {
      if (!name.append('?')) {
        return;
      }
    }

    if (!name.append(':') || !name.append(bytecodeStr, strlen(bytecodeStr)) ||
        !name.append(")\0", 2)) {
      return;
    }

    UniqueChars label(name.extractOrCopyRawBuffer());
    if (!label) {
      return;
    }

    if (codeRange.funcIndex() >= labels->length()) {
      if (!labels->resize(codeRange.funcIndex() + 1)) {
        return;
      }
    }

    ((CacheableCharsVector&)labels)[codeRange.funcIndex()] = std::move(label);
  }
}

profilingLabel(uint32_t funcIndex) const1497 const char* Code::profilingLabel(uint32_t funcIndex) const {
1498   auto labels = profilingLabels_.lock();
1499 
1500   if (funcIndex >= labels->length() ||
1501       !((CacheableCharsVector&)labels)[funcIndex]) {
1502     return "?";
1503   }
1504   return ((CacheableCharsVector&)labels)[funcIndex].get();
1505 }
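
// Illustrative sketch only (hypothetical caller, not part of this file): a
// profiler integration would typically warm the label cache before sampling
// starts and then read labels from the sampling context, where allocation is
// forbidden. `code` and `funcIndex` are assumed names supplied by the embedder.
//
//   code->ensureProfilingLabels(/* profilingEnabled = */ true);
//   ...
//   // Later, inside the sampler (no malloc allowed here):
//   const char* label = code->profilingLabel(funcIndex);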

void Code::addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
                                  Metadata::SeenSet* seenMetadata,
                                  Code::SeenSet* seenCode, size_t* code,
                                  size_t* data) const {
  auto p = seenCode->lookupForAdd(this);
  if (p) {
    return;
  }
  bool ok = seenCode->add(p, this);
  (void)ok;  // oh well

  *data += mallocSizeOf(this) +
           metadata().sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenMetadata) +
           profilingLabels_.lock()->sizeOfExcludingThis(mallocSizeOf) +
           jumpTables_.sizeOfMiscExcludingThis();

  for (auto t : tiers()) {
    codeTier(t).addSizeOfMisc(mallocSizeOf, code, data);
  }
}

size_t Code::serializedSize() const {
  return metadata().serializedSize() +
         codeTier(Tier::Serialized).serializedSize();
}

uint8_t* Code::serialize(uint8_t* cursor, const LinkData& linkData) const {
  MOZ_RELEASE_ASSERT(!metadata().debugEnabled);

  cursor = metadata().serialize(cursor);
  cursor = codeTier(Tier::Serialized).serialize(cursor, linkData);
  return cursor;
}

/* static */ const uint8_t* Code::deserialize(const uint8_t* cursor,
                                              const LinkData& linkData,
                                              Metadata& metadata,
                                              SharedCode* out) {
  cursor = metadata.deserialize(cursor);
  if (!cursor) {
    return nullptr;
  }

  UniqueCodeTier codeTier;
  cursor = CodeTier::deserialize(cursor, linkData, &codeTier);
  if (!cursor) {
    return nullptr;
  }

  JumpTables jumpTables;
  if (!jumpTables.init(CompileMode::Once, codeTier->segment(),
                       codeTier->metadata().codeRanges)) {
    return nullptr;
  }

  MutableCode code =
      js_new<Code>(std::move(codeTier), metadata, std::move(jumpTables));
  if (!code || !code->initialize(linkData)) {
    return nullptr;
  }

  *out = code;
  return cursor;
}

void Code::disassemble(JSContext* cx, Tier tier, int kindSelection,
                       PrintCallback printString) const {
  const MetadataTier& metadataTier = metadata(tier);
  const CodeTier& codeTier = this->codeTier(tier);
  const ModuleSegment& segment = codeTier.segment();

  for (const CodeRange& range : metadataTier.codeRanges) {
    if (kindSelection & (1 << range.kind())) {
      MOZ_ASSERT(range.begin() < segment.length());
      MOZ_ASSERT(range.end() < segment.length());

      const char* kind;
      char kindbuf[128];
      switch (range.kind()) {
        case CodeRange::Function:
          kind = "Function";
          break;
        case CodeRange::InterpEntry:
          kind = "InterpEntry";
          break;
        case CodeRange::JitEntry:
          kind = "JitEntry";
          break;
        case CodeRange::ImportInterpExit:
          kind = "ImportInterpExit";
          break;
        case CodeRange::ImportJitExit:
          kind = "ImportJitExit";
          break;
        default:
          SprintfLiteral(kindbuf, "CodeRange::Kind(%d)", range.kind());
          kind = kindbuf;
          break;
      }
      const char* separator =
          "\n--------------------------------------------------\n";
      // The buffer is quite large in order to accommodate mangled C++ names;
      // lengths over 3500 have been observed in the wild.
      char buf[4096];
      if (range.hasFuncIndex()) {
        const char* funcName = "(unknown)";
        UTF8Bytes namebuf;
        if (metadata().getFuncNameStandalone(range.funcIndex(), &namebuf) &&
            namebuf.append('\0')) {
          funcName = namebuf.begin();
        }
        SprintfLiteral(buf, "%sKind = %s, index = %d, name = %s:\n", separator,
                       kind, range.funcIndex(), funcName);
      } else {
        SprintfLiteral(buf, "%sKind = %s\n", separator, kind);
      }
      printString(buf);

      uint8_t* theCode = segment.base() + range.begin();
      jit::Disassemble(theCode, range.end() - range.begin(), printString);
    }
  }
}
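
// Illustrative sketch only (hypothetical caller): `kindSelection` is a bitmask
// over CodeRange::Kind values, so disassembling just the function bodies of
// the stable tier could look like the following. `cx`, `code`, and
// `printToStderr` (a PrintCallback) are assumed to be provided by the caller.
//
//   int kinds = 1 << CodeRange::Function;
//   code->disassemble(cx, code->stableTier(), kinds, printToStderr);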

void wasm::PatchDebugSymbolicAccesses(uint8_t* codeBase, MacroAssembler& masm) {
#ifdef WASM_CODEGEN_DEBUG
  for (auto& access : masm.symbolicAccesses()) {
    switch (access.target) {
      case SymbolicAddress::PrintI32:
      case SymbolicAddress::PrintPtr:
      case SymbolicAddress::PrintF32:
      case SymbolicAddress::PrintF64:
      case SymbolicAddress::PrintText:
        break;
      default:
        MOZ_CRASH("unexpected symbol in PatchDebugSymbolicAccesses");
    }
    ABIFunctionType abiType;
    void* target = AddressOf(access.target, &abiType);
    uint8_t* patchAt = codeBase + access.patchAt.offset();
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                       PatchedImmPtr(target),
                                       PatchedImmPtr((void*)-1));
  }
#else
  MOZ_ASSERT(masm.symbolicAccesses().empty());
#endif
}