/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmGenerator.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/SHA1.h"

#include <algorithm>

#include "util/Memory.h"
#include "util/Text.h"
#include "vm/HelperThreads.h"
#include "vm/Time.h"
#include "vm/TraceLogging.h"
#include "vm/TraceLoggingTypes.h"
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmCompile.h"
#include "wasm/WasmCraneliftCompile.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmIonCompile.h"
#include "wasm/WasmStubs.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using mozilla::CheckedInt;
using mozilla::MakeEnumeratedRange;

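// Transfer all compilation artifacts out of the MacroAssembler into this
// CompiledCode: the code bytes plus their call-site, trap-site,
// symbolic-access, code-label and (when enabled) try-note metadata. The masm
// is left empty and reusable.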
bool CompiledCode::swap(MacroAssembler& masm) {
  MOZ_ASSERT(bytes.empty());
  if (!masm.swapBuffer(bytes)) {
    return false;
  }

  callSites.swap(masm.callSites());
  callSiteTargets.swap(masm.callSiteTargets());
  trapSites.swap(masm.trapSites());
  symbolicAccesses.swap(masm.symbolicAccesses());
#ifdef ENABLE_WASM_EXCEPTIONS
  tryNotes.swap(masm.tryNotes());
#endif
  codeLabels.swap(masm.codeLabels());
  return true;
}

bool CompiledCode::swapCranelift(MacroAssembler& masm,
                                 CraneliftReusableData& data) {
  if (!swap(masm)) {
    return false;
  }
  std::swap(data, craneliftReusableData);
  return true;
}

// ****************************************************************************
// ModuleGenerator

static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
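// Sentinel stored in funcToCodeRange for functions whose code has not been
// linked into the module yet; every entry must be replaced by a real
// code-range index before module generation completes.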
static const uint32_t BAD_CODE_RANGE = UINT32_MAX;

ModuleGenerator::ModuleGenerator(const CompileArgs& args,
                                 ModuleEnvironment* moduleEnv,
                                 CompilerEnvironment* compilerEnv,
                                 const Atomic<bool>* cancelled,
                                 UniqueChars* error)
    : compileArgs_(&args),
      error_(error),
      cancelled_(cancelled),
      moduleEnv_(moduleEnv),
      compilerEnv_(compilerEnv),
      linkData_(nullptr),
      metadataTier_(nullptr),
      lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
      masmAlloc_(&lifo_),
      masm_(masmAlloc_, *moduleEnv, /* limitedSize= */ false),
      debugTrapCodeOffset_(),
      lastPatchedCallSite_(0),
      startOfUnpatchedCallsites_(0),
      parallel_(false),
      outstanding_(0),
      currentTask_(nullptr),
      batchedBytecode_(0),
      finishedFuncDefs_(false) {
  MOZ_ASSERT(IsCompilingWasm());
}

ModuleGenerator::~ModuleGenerator() {
  MOZ_ASSERT_IF(finishedFuncDefs_, !batchedBytecode_);
  MOZ_ASSERT_IF(finishedFuncDefs_, !currentTask_);

  if (parallel_) {
    if (outstanding_) {
      AutoLockHelperThreadState lock;

      // Remove any pending compilation tasks from the worklist.
      size_t removed = RemovePendingWasmCompileTasks(taskState_, mode(), lock);
      MOZ_ASSERT(outstanding_ >= removed);
      outstanding_ -= removed;

      // Wait until all active compilation tasks have finished.
      while (true) {
        MOZ_ASSERT(outstanding_ >= taskState_.finished().length());
        outstanding_ -= taskState_.finished().length();
        taskState_.finished().clear();

        MOZ_ASSERT(outstanding_ >= taskState_.numFailed());
        outstanding_ -= taskState_.numFailed();
        taskState_.numFailed() = 0;

        if (!outstanding_) {
          break;
        }

        taskState_.condVar().wait(lock); /* failed or finished */
      }
    }
  } else {
    MOZ_ASSERT(!outstanding_);
  }

  // Propagate error state.
  if (error_ && !*error_) {
    AutoLockHelperThreadState lock;
    *error_ = std::move(taskState_.errorMessage());
  }
}

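// Worked example of the alignment logic below (hypothetical numbers): with
// metadata_->globalDataLength == 6, align == 8, and bytes == 4,
// ComputeByteAlignment adds 2 bytes of padding, the returned
// *globalDataOffset is 8, and the new globalDataLength is 12.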
bool ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align,
                                          uint32_t* globalDataOffset) {
  CheckedInt<uint32_t> newGlobalDataLength(metadata_->globalDataLength);

  newGlobalDataLength +=
      ComputeByteAlignment(newGlobalDataLength.value(), align);
  if (!newGlobalDataLength.isValid()) {
    return false;
  }

  *globalDataOffset = newGlobalDataLength.value();
  newGlobalDataLength += bytes;

  if (!newGlobalDataLength.isValid()) {
    return false;
  }

  metadata_->globalDataLength = newGlobalDataLength.value();
  return true;
}

bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata) {
  // Perform fallible metadata, linkdata, assumption allocations.

  MOZ_ASSERT(isAsmJS() == !!maybeAsmJSMetadata);
  if (maybeAsmJSMetadata) {
    metadata_ = maybeAsmJSMetadata;
  } else {
    metadata_ = js_new<Metadata>();
    if (!metadata_) {
      return false;
    }
  }

  if (compileArgs_->scriptedCaller.filename) {
    metadata_->filename =
        DuplicateString(compileArgs_->scriptedCaller.filename.get());
    if (!metadata_->filename) {
      return false;
    }

    metadata_->filenameIsURL = compileArgs_->scriptedCaller.filenameIsURL;
  } else {
    MOZ_ASSERT(!compileArgs_->scriptedCaller.filenameIsURL);
  }

  if (compileArgs_->sourceMapURL) {
    metadata_->sourceMapURL = DuplicateString(compileArgs_->sourceMapURL.get());
    if (!metadata_->sourceMapURL) {
      return false;
    }
  }

  linkData_ = js::MakeUnique<LinkData>(tier());
  if (!linkData_) {
    return false;
  }

  metadataTier_ = js::MakeUnique<MetadataTier>(tier());
  if (!metadataTier_) {
    return false;
  }

  // funcToCodeRange maps function indices to code-range indices; all of its
  // elements will be initialized by the time module generation is finished.

  if (!metadataTier_->funcToCodeRange.appendN(BAD_CODE_RANGE,
                                              moduleEnv_->funcs.length())) {
    return false;
  }

  // Pre-reserve space for large Vectors to avoid the significant cost of the
  // final reallocs. In particular, the MacroAssembler can be enormous, so be
  // extra conservative. Since large over-reservations may fail when the
  // actual allocations would succeed, ignore OOM failures. Note that the
  // shrinkStorageToFit calls at the end will trim off unneeded capacity.

  size_t codeSectionSize =
      moduleEnv_->codeSection ? moduleEnv_->codeSection->size : 0;

  size_t estimatedCodeSize =
      1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize);
  (void)masm_.reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess));

  (void)metadataTier_->codeRanges.reserve(2 * moduleEnv_->numFuncDefs());

  const size_t ByteCodesPerCallSite = 50;
  (void)metadataTier_->callSites.reserve(codeSectionSize /
                                         ByteCodesPerCallSite);

  const size_t ByteCodesPerOOBTrap = 10;
  (void)metadataTier_->trapSites[Trap::OutOfBounds].reserve(
      codeSectionSize / ByteCodesPerOOBTrap);
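
  // These reservation heuristics (two code ranges per function, one call site
  // per ~50 bytecode bytes, one out-of-bounds trap site per ~10 bytecode
  // bytes) are deliberately rough: under-reservation only costs extra
  // reallocs, and over-reservation is trimmed by shrinkStorageToFit at the
  // end of compilation.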

  // Allocate space in TlsData for declarations that need it.

  MOZ_ASSERT(metadata_->globalDataLength == 0);

  for (size_t i = 0; i < moduleEnv_->funcImportGlobalDataOffsets.length();
       i++) {
    uint32_t globalDataOffset;
    if (!allocateGlobalBytes(sizeof(FuncImportTls), sizeof(void*),
                             &globalDataOffset)) {
      return false;
    }

    moduleEnv_->funcImportGlobalDataOffsets[i] = globalDataOffset;

    FuncType copy;
    if (!copy.clone(*moduleEnv_->funcs[i].type)) {
      return false;
    }
    if (!metadataTier_->funcImports.emplaceBack(std::move(copy),
                                                globalDataOffset)) {
      return false;
    }
  }

  for (TableDesc& table : moduleEnv_->tables) {
    if (!allocateGlobalBytes(sizeof(TableTls), sizeof(void*),
                             &table.globalDataOffset)) {
      return false;
    }
  }

  if (!isAsmJS()) {
    // Copy the type definitions that are required at runtime into the
    // metadata, allocating global data so that codegen can find the type ids
    // at runtime.
    for (uint32_t typeIndex = 0; typeIndex < moduleEnv_->types.length();
         typeIndex++) {
      const TypeDef& typeDef = moduleEnv_->types[typeIndex];
      TypeIdDesc& typeId = moduleEnv_->typeIds[typeIndex];

      if (TypeIdDesc::isGlobal(typeDef)) {
        uint32_t globalDataOffset;
        if (!allocateGlobalBytes(sizeof(void*), sizeof(void*),
                                 &globalDataOffset)) {
          return false;
        }

        typeId = TypeIdDesc::global(typeDef, globalDataOffset);

        TypeDef copy;
        if (!copy.clone(typeDef)) {
          return false;
        }

        if (!metadata_->types.emplaceBack(std::move(copy), typeId)) {
          return false;
        }
      } else {
        typeId = TypeIdDesc::immediate(typeDef);
      }
    }

    // If we allow type indices, then we need to rewrite the index space to
    // account for types that are omitted from metadata, such as function
    // types that fit in an immediate.
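    // For example (hypothetical type section): if types 0 and 2 require
    // global type ids while type 1 fits in an immediate, the map built below
    // is {0 -> 0, 2 -> 1}, compacting the metadata's type index space.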
    if (moduleEnv_->functionReferencesEnabled()) {
      // Do a linear pass to create a map from src index to dest index.
      RenumberMap map;
      for (uint32_t srcIndex = 0, destIndex = 0;
           srcIndex < moduleEnv_->types.length(); srcIndex++) {
        const TypeDef& typeDef = moduleEnv_->types[srcIndex];
        if (!TypeIdDesc::isGlobal(typeDef)) {
          continue;
        }
        if (!map.put(srcIndex, destIndex++)) {
          return false;
        }
      }

      // Apply the map.
      for (TypeDefWithId& typeDef : metadata_->types) {
        typeDef.renumber(map);
      }
    }
  }

  for (GlobalDesc& global : moduleEnv_->globals) {
    if (global.isConstant()) {
      continue;
    }

    uint32_t width =
        global.isIndirect() ? sizeof(void*) : SizeOf(global.type());

    uint32_t globalDataOffset;
    if (!allocateGlobalBytes(width, width, &globalDataOffset)) {
      return false;
    }

    global.setOffset(globalDataOffset);
  }

  // Accumulate all exported functions, i.e. functions that are:
  //  - explicitly marked as such;
  //  - implicitly exported by being an element of a function table;
  //  - implicitly exported by being the start function;
  //  - implicitly exported by being used in a global ref.func initializer.
  // ModuleEnvironment accumulates this information for us during decoding;
  // here we transfer it to the FuncExportVector stored in Metadata.

  uint32_t exportedFuncCount = 0;
  for (const FuncDesc& func : moduleEnv_->funcs) {
    if (func.isExported()) {
      exportedFuncCount++;
    }
  }
  if (!metadataTier_->funcExports.reserve(exportedFuncCount)) {
    return false;
  }

  for (uint32_t funcIndex = 0; funcIndex < moduleEnv_->funcs.length();
       funcIndex++) {
    const FuncDesc& func = moduleEnv_->funcs[funcIndex];

    if (!func.isExported()) {
      continue;
    }

    FuncType funcType;
    if (!funcType.clone(*func.type)) {
      return false;
    }
    metadataTier_->funcExports.infallibleEmplaceBack(std::move(funcType),
                                                     funcIndex, func.isEager());
  }

  // Determine whether parallel or sequential compilation is to be used and
  // initialize the CompileTasks that will be used in either mode.

  MOZ_ASSERT(GetHelperThreadCount() > 1);

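  // Sizing heuristic: twice the maximum number of wasm compilation threads,
  // so that the generator can usually keep batching new function bodies into
  // a free task while helper threads drain the tasks already in flight.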
  uint32_t numTasks;
  if (CanUseExtraThreads() && GetHelperThreadCPUCount() > 1) {
    parallel_ = true;
    numTasks = 2 * GetMaxWasmCompilationThreads();
  } else {
    numTasks = 1;
  }

  if (!tasks_.initCapacity(numTasks)) {
    return false;
  }
  for (size_t i = 0; i < numTasks; i++) {
    tasks_.infallibleEmplaceBack(*moduleEnv_, *compilerEnv_, taskState_,
                                 COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
  }

  if (!freeTasks_.reserve(numTasks)) {
    return false;
  }
  for (size_t i = 0; i < numTasks; i++) {
    freeTasks_.infallibleAppend(&tasks_[i]);
  }

  // Fill in function stubs for each import so that imported functions can be
  // used in all the places that normal function definitions can (table
  // elements, export calls, etc).

  CompiledCode& importCode = tasks_[0].output;
  MOZ_ASSERT(importCode.empty());

  if (!GenerateImportFunctions(*moduleEnv_, metadataTier_->funcImports,
                               &importCode)) {
    return false;
  }

  if (!linkCompiledCode(importCode)) {
    return false;
  }

  importCode.clear();
  return true;
}

bool ModuleGenerator::funcIsCompiled(uint32_t funcIndex) const {
  return metadataTier_->funcToCodeRange[funcIndex] != BAD_CODE_RANGE;
}

const CodeRange& ModuleGenerator::funcCodeRange(uint32_t funcIndex) const {
  MOZ_ASSERT(funcIsCompiled(funcIndex));
  const CodeRange& cr =
      metadataTier_->codeRanges[metadataTier_->funcToCodeRange[funcIndex]];
  MOZ_ASSERT(cr.isFunction());
  return cr;
}

static bool InRange(uint32_t caller, uint32_t callee) {
  // We assume JumpImmediateRange is defined conservatively enough that the
  // slight difference between 'caller' (which is really the return address
  // offset) and the actual base of the relative displacement computation
  // isn't significant.
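  // The unsigned subtractions below are ordered so the smaller offset is
  // always subtracted from the larger one, avoiding wrap-around. For example
  // (hypothetical range value): with range == 0x08000000, a caller at offset
  // 0x09000000 can directly reach callees in roughly
  // (0x01000000, 0x11000000).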
  uint32_t range = std::min(JitOptions.jumpThreshold, JumpImmediateRange);
  if (caller < callee) {
    return callee - caller < range;
  }
  return caller - callee < range;
}

using OffsetMap =
    HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
using TrapMaybeOffsetArray =
    EnumeratedArray<Trap, Trap::Limit, Maybe<uint32_t>>;

bool ModuleGenerator::linkCallSites() {
  masm_.haltingAlign(CodeAlignment);

  // Create far jumps for calls that have relative offsets that may otherwise
  // go out of range. This method is called both between function bodies (at a
  // frequency determined by the ISA's jump range) and once at the very end of
  // a module's codegen after all possible calls/traps have been emitted.
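  // In outline: a Func call site whose callee is already compiled and in
  // range is patched directly; otherwise we emit (or reuse) a far-jump
  // island, patch the call to target the island, and patch the island itself
  // once the callee's code range is known (see finishCodegen).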

  OffsetMap existingCallFarJumps;
  for (; lastPatchedCallSite_ < metadataTier_->callSites.length();
       lastPatchedCallSite_++) {
    const CallSite& callSite = metadataTier_->callSites[lastPatchedCallSite_];
    const CallSiteTarget& target = callSiteTargets_[lastPatchedCallSite_];
    uint32_t callerOffset = callSite.returnAddressOffset();
    switch (callSite.kind()) {
      case CallSiteDesc::Dynamic:
      case CallSiteDesc::Symbolic:
        break;
      case CallSiteDesc::Func: {
        if (funcIsCompiled(target.funcIndex())) {
          uint32_t calleeOffset =
              funcCodeRange(target.funcIndex()).funcUncheckedCallEntry();
          if (InRange(callerOffset, calleeOffset)) {
            masm_.patchCall(callerOffset, calleeOffset);
            break;
          }
        }

        OffsetMap::AddPtr p =
            existingCallFarJumps.lookupForAdd(target.funcIndex());
        if (!p) {
          Offsets offsets;
          offsets.begin = masm_.currentOffset();
          if (!callFarJumps_.emplaceBack(target.funcIndex(),
                                         masm_.farJumpWithPatch())) {
            return false;
          }
          offsets.end = masm_.currentOffset();
          if (masm_.oom()) {
            return false;
          }
          if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
                                                     offsets)) {
            return false;
          }
          if (!existingCallFarJumps.add(p, target.funcIndex(), offsets.begin)) {
            return false;
          }
        }

        masm_.patchCall(callerOffset, p->value());
        break;
      }
      case CallSiteDesc::Breakpoint:
      case CallSiteDesc::EnterFrame:
      case CallSiteDesc::LeaveFrame: {
        Uint32Vector& jumps = metadataTier_->debugTrapFarJumpOffsets;
        if (jumps.empty() || !InRange(jumps.back(), callerOffset)) {
          Offsets offsets;
          offsets.begin = masm_.currentOffset();
          CodeOffset jumpOffset = masm_.farJumpWithPatch();
          offsets.end = masm_.currentOffset();
          if (masm_.oom()) {
            return false;
          }
          if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
                                                     offsets)) {
            return false;
          }
          if (!debugTrapFarJumps_.emplaceBack(jumpOffset)) {
            return false;
          }
          if (!jumps.emplaceBack(offsets.begin)) {
            return false;
          }
        }
        break;
      }
    }
  }

  masm_.flushBuffer();
  return !masm_.oom();
}

void ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex,
                                    const CodeRange& codeRange) {
  switch (codeRange.kind()) {
    case CodeRange::Function:
      MOZ_ASSERT(metadataTier_->funcToCodeRange[codeRange.funcIndex()] ==
                 BAD_CODE_RANGE);
      metadataTier_->funcToCodeRange[codeRange.funcIndex()] = codeRangeIndex;
      break;
    case CodeRange::InterpEntry:
      metadataTier_->lookupFuncExport(codeRange.funcIndex())
          .initEagerInterpEntryOffset(codeRange.begin());
      break;
    case CodeRange::JitEntry:
      // Nothing to do: jit entries are linked in the jump tables.
      break;
    case CodeRange::ImportJitExit:
      metadataTier_->funcImports[codeRange.funcIndex()].initJitExitOffset(
          codeRange.begin());
      break;
    case CodeRange::ImportInterpExit:
      metadataTier_->funcImports[codeRange.funcIndex()].initInterpExitOffset(
          codeRange.begin());
      break;
    case CodeRange::DebugTrap:
      MOZ_ASSERT(!debugTrapCodeOffset_);
      debugTrapCodeOffset_ = codeRange.begin();
      break;
    case CodeRange::TrapExit:
      MOZ_ASSERT(!linkData_->trapOffset);
      linkData_->trapOffset = codeRange.begin();
      break;
    case CodeRange::Throw:
      // Jumped to by other stubs, so nothing to do.
      break;
    case CodeRange::FarJumpIsland:
    case CodeRange::BuiltinThunk:
      MOZ_CRASH("Unexpected CodeRange kind");
  }
}

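// Append the contents of srcVec to dstVec, then invoke op(index, element) on
// each newly appended element, where index is the element's position in
// dstVec. Used below to translate per-task code offsets while merging.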
template <class Vec, class Op>
static bool AppendForEach(Vec* dstVec, const Vec& srcVec, Op op) {
  if (!dstVec->growByUninitialized(srcVec.length())) {
    return false;
  }

  using T = typename Vec::ElementType;

  const T* src = srcVec.begin();

  T* dstBegin = dstVec->begin();
  T* dstEnd = dstVec->end();
  T* dstStart = dstEnd - srcVec.length();

  for (T* dst = dstStart; dst != dstEnd; dst++, src++) {
    new (dst) T(*src);
    op(dst - dstBegin, dst);
  }

  return true;
}

bool ModuleGenerator::linkCompiledCode(CompiledCode& code) {
  // Before merging in new code, if calls in a prior code range might go out of
  // range, insert far jumps to extend the range.

  if (!InRange(startOfUnpatchedCallsites_,
               masm_.size() + code.bytes.length())) {
    startOfUnpatchedCallsites_ = masm_.size();
    if (!linkCallSites()) {
      return false;
    }
  }

  // All code offsets in 'code' are relative to the start of the appended
  // code, so they must be translated by the offset at which the code lands in
  // the overall module.

  masm_.haltingAlign(CodeAlignment);
  const size_t offsetInModule = masm_.size();
  if (!masm_.appendRawCode(code.bytes.begin(), code.bytes.length())) {
    return false;
  }

  auto codeRangeOp = [=](uint32_t codeRangeIndex, CodeRange* codeRange) {
    codeRange->offsetBy(offsetInModule);
    noteCodeRange(codeRangeIndex, *codeRange);
  };
  if (!AppendForEach(&metadataTier_->codeRanges, code.codeRanges,
                     codeRangeOp)) {
    return false;
  }

  auto callSiteOp = [=](uint32_t, CallSite* cs) {
    cs->offsetBy(offsetInModule);
  };
  if (!AppendForEach(&metadataTier_->callSites, code.callSites, callSiteOp)) {
    return false;
  }

  if (!callSiteTargets_.appendAll(code.callSiteTargets)) {
    return false;
  }

  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    auto trapSiteOp = [=](uint32_t, TrapSite* ts) {
      ts->offsetBy(offsetInModule);
    };
    if (!AppendForEach(&metadataTier_->trapSites[trap], code.trapSites[trap],
                       trapSiteOp)) {
      return false;
    }
  }

  for (const SymbolicAccess& access : code.symbolicAccesses) {
    uint32_t patchAt = offsetInModule + access.patchAt.offset();
    if (!linkData_->symbolicLinks[access.target].append(patchAt)) {
      return false;
    }
  }

  for (const CodeLabel& codeLabel : code.codeLabels) {
    LinkData::InternalLink link;
    link.patchAtOffset = offsetInModule + codeLabel.patchAt().offset();
    link.targetOffset = offsetInModule + codeLabel.target().offset();
#ifdef JS_CODELABEL_LINKMODE
    link.mode = codeLabel.linkMode();
#endif
    if (!linkData_->internalLinks.append(link)) {
      return false;
    }
  }

  for (size_t i = 0; i < code.stackMaps.length(); i++) {
    StackMaps::Maplet maplet = code.stackMaps.move(i);
    maplet.offsetBy(offsetInModule);
    if (!metadataTier_->stackMaps.add(maplet)) {
      // This function is now the only owner of maplet.map, so we'd better
      // free it right now.
      maplet.map->destroy();
      return false;
    }
  }

#ifdef ENABLE_WASM_EXCEPTIONS
  auto tryNoteOp = [=](uint32_t, WasmTryNote* tn) {
    tn->offsetBy(offsetInModule);
  };
  if (!AppendForEach(&metadataTier_->tryNotes, code.tryNotes, tryNoteOp)) {
    return false;
  }
#endif

  return true;
}

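// Compile all the function bodies batched in 'task' with the configured tier
// and backend. Called both from helper threads (see runHelperThreadTask) and
// from the generator's own thread (see locallyCompileCurrentTask).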
static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
  MOZ_ASSERT(task->lifo.isEmpty());
  MOZ_ASSERT(task->output.empty());

  switch (task->compilerEnv.tier()) {
    case Tier::Optimized:
      switch (task->compilerEnv.optimizedBackend()) {
        case OptimizedBackend::Cranelift:
          if (!CraneliftCompileFunctions(task->moduleEnv, task->compilerEnv,
                                         task->lifo, task->inputs,
                                         &task->output, error)) {
            return false;
          }
          break;
        case OptimizedBackend::Ion:
          if (!IonCompileFunctions(task->moduleEnv, task->compilerEnv,
                                   task->lifo, task->inputs, &task->output,
                                   error)) {
            return false;
          }
          break;
      }
      break;
    case Tier::Baseline:
      if (!BaselineCompileFunctions(task->moduleEnv, task->compilerEnv,
                                    task->lifo, task->inputs, &task->output,
                                    error)) {
        return false;
      }
      break;
  }

  MOZ_ASSERT(task->lifo.isEmpty());
  MOZ_ASSERT(task->inputs.length() == task->output.codeRanges.length());
  task->inputs.clear();
  return true;
}

void CompileTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
  TraceLoggerThread* logger = TraceLoggerForCurrentThread();
  AutoTraceLog logCompile(logger, TraceLogger_WasmCompilation);

  UniqueChars error;
  bool ok;

  {
    AutoUnlockHelperThreadState unlock(lock);
    ok = ExecuteCompileTask(this, &error);
  }

  // Don't release the lock between updating our state and returning from this
  // method.

  if (!ok || !state.finished().append(this)) {
    state.numFailed()++;
    if (!state.errorMessage()) {
      state.errorMessage() = std::move(error);
    }
  }

  state.condVar().notify_one(); /* failed or finished */
}

ThreadType CompileTask::threadType() {
  switch (compilerEnv.mode()) {
    case CompileMode::Once:
    case CompileMode::Tier1:
      return ThreadType::THREAD_TYPE_WASM_COMPILE_TIER1;
    case CompileMode::Tier2:
      return ThreadType::THREAD_TYPE_WASM_COMPILE_TIER2;
    default:
      MOZ_CRASH();
  }
}

bool ModuleGenerator::locallyCompileCurrentTask() {
  if (!ExecuteCompileTask(currentTask_, error_)) {
    return false;
  }
  if (!finishTask(currentTask_)) {
    return false;
  }
  currentTask_ = nullptr;
  batchedBytecode_ = 0;
  return true;
}

bool ModuleGenerator::finishTask(CompileTask* task) {
  masm_.haltingAlign(CodeAlignment);

  if (!linkCompiledCode(task->output)) {
    return false;
  }

  task->output.clear();

  MOZ_ASSERT(task->inputs.empty());
  MOZ_ASSERT(task->output.empty());
  MOZ_ASSERT(task->lifo.isEmpty());
  freeTasks_.infallibleAppend(task);
  return true;
}

bool ModuleGenerator::launchBatchCompile() {
  MOZ_ASSERT(currentTask_);

  if (cancelled_ && *cancelled_) {
    return false;
  }

  if (!parallel_) {
    return locallyCompileCurrentTask();
  }

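  // Ownership of currentTask_ passes to the helper-thread machinery here; the
  // task returns to the generator via taskState_.finished() (see
  // finishOutstandingTask) and is recycled into freeTasks_ by finishTask.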
  if (!StartOffThreadWasmCompile(currentTask_, mode())) {
    return false;
  }
  outstanding_++;
  currentTask_ = nullptr;
  batchedBytecode_ = 0;
  return true;
}

bool ModuleGenerator::finishOutstandingTask() {
  MOZ_ASSERT(parallel_);

  CompileTask* task = nullptr;
  {
    AutoLockHelperThreadState lock;
    while (true) {
      MOZ_ASSERT(outstanding_ > 0);

      if (taskState_.numFailed() > 0) {
        return false;
      }

      if (!taskState_.finished().empty()) {
        outstanding_--;
        task = taskState_.finished().popCopy();
        break;
      }

      taskState_.condVar().wait(lock); /* failed or finished */
    }
  }

  // Call finishTask() outside of the helper-thread lock.
  return finishTask(task);
}

bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
                                     uint32_t lineOrBytecode,
                                     const uint8_t* begin, const uint8_t* end,
                                     Uint32Vector&& lineNums) {
  MOZ_ASSERT(!finishedFuncDefs_);
  MOZ_ASSERT(funcIndex < moduleEnv_->numFuncs());

  uint32_t threshold;
  switch (tier()) {
    case Tier::Baseline:
      threshold = JitOptions.wasmBatchBaselineThreshold;
      break;
    case Tier::Optimized:
      switch (compilerEnv_->optimizedBackend()) {
        case OptimizedBackend::Ion:
          threshold = JitOptions.wasmBatchIonThreshold;
          break;
        case OptimizedBackend::Cranelift:
          threshold = JitOptions.wasmBatchCraneliftThreshold;
          break;
        default:
          MOZ_CRASH("Invalid optimizedBackend value");
      }
      break;
    default:
      MOZ_CRASH("Invalid tier value");
      break;
  }

  uint32_t funcBytecodeLength = end - begin;

  // Do not go over the threshold if we can avoid it: spin off the compilation
  // before appending the function if we would go over. (Very large single
  // functions may still exceed the threshold but this is fine; it'll be very
  // uncommon and is in any case safely handled by the MacroAssembler's buffer
  // limit logic.)
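  // E.g. (hypothetical numbers): with threshold == 10000 and
  // batchedBytecode_ == 9500, a 600-byte function body launches the current
  // 9500-byte batch first and then starts a fresh batch with the new
  // function.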

  if (currentTask_ && currentTask_->inputs.length() &&
      batchedBytecode_ + funcBytecodeLength > threshold) {
    if (!launchBatchCompile()) {
      return false;
    }
  }

  if (!currentTask_) {
    if (freeTasks_.empty() && !finishOutstandingTask()) {
      return false;
    }
    currentTask_ = freeTasks_.popCopy();
  }

  if (!currentTask_->inputs.emplaceBack(funcIndex, lineOrBytecode, begin, end,
                                        std::move(lineNums))) {
    return false;
  }

  batchedBytecode_ += funcBytecodeLength;
  MOZ_ASSERT(batchedBytecode_ <= MaxCodeSectionBytes);
  return true;
}

bool ModuleGenerator::finishFuncDefs() {
  MOZ_ASSERT(!finishedFuncDefs_);

  if (currentTask_ && !locallyCompileCurrentTask()) {
    return false;
  }

  finishedFuncDefs_ = true;
  return true;
}

bool ModuleGenerator::finishCodegen() {
  // Now that all functions and stubs are generated and their CodeRanges
  // known, patch all calls (which can emit far jumps) and far jumps. Linking
  // can emit tiny far-jump stubs, so there is an ordering dependency here.

  if (!linkCallSites()) {
    return false;
  }

  for (CallFarJump far : callFarJumps_) {
    masm_.patchFarJump(far.jump,
                       funcCodeRange(far.funcIndex).funcUncheckedCallEntry());
  }

  for (CodeOffset farJump : debugTrapFarJumps_) {
    masm_.patchFarJump(farJump, debugTrapCodeOffset_);
  }

  // None of the linking or far-jump operations should emit masm metadata.

  MOZ_ASSERT(masm_.callSites().empty());
  MOZ_ASSERT(masm_.callSiteTargets().empty());
  MOZ_ASSERT(masm_.trapSites().empty());
  MOZ_ASSERT(masm_.symbolicAccesses().empty());
#ifdef ENABLE_WASM_EXCEPTIONS
  MOZ_ASSERT(masm_.tryNotes().empty());
#endif
  MOZ_ASSERT(masm_.codeLabels().empty());

  masm_.finish();
  return !masm_.oom();
}

bool ModuleGenerator::finishMetadataTier() {
  // The stackmaps aren't yet sorted. Do so now, since we'll need to
  // binary-search them at GC time.
  metadataTier_->stackMaps.sort();

  // The try notes also need to be sorted to simplify lookup.
#ifdef ENABLE_WASM_EXCEPTIONS
  std::sort(metadataTier_->tryNotes.begin(), metadataTier_->tryNotes.end());
#endif

#ifdef DEBUG
  // Check that the stackmap contains no duplicates, since that could lead to
  // ambiguities about stack slot pointerness.
  uint8_t* previousNextInsnAddr = nullptr;
  for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
    const StackMaps::Maplet& maplet = metadataTier_->stackMaps.get(i);
    MOZ_ASSERT_IF(i > 0, uintptr_t(maplet.nextInsnAddr) >
                             uintptr_t(previousNextInsnAddr));
    previousNextInsnAddr = maplet.nextInsnAddr;
  }

  // Assert that the remaining sorted metadata is in fact sorted.
  uint32_t last = 0;
  for (const CodeRange& codeRange : metadataTier_->codeRanges) {
    MOZ_ASSERT(codeRange.begin() >= last);
    last = codeRange.end();
  }

  last = 0;
  for (const CallSite& callSite : metadataTier_->callSites) {
    MOZ_ASSERT(callSite.returnAddressOffset() >= last);
    last = callSite.returnAddressOffset();
  }

  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    last = 0;
    for (const TrapSite& trapSite : metadataTier_->trapSites[trap]) {
      MOZ_ASSERT(trapSite.pcOffset >= last);
      last = trapSite.pcOffset;
    }
  }

  last = 0;
  for (uint32_t debugTrapFarJumpOffset :
       metadataTier_->debugTrapFarJumpOffsets) {
    MOZ_ASSERT(debugTrapFarJumpOffset >= last);
    last = debugTrapFarJumpOffset;
  }

  // Try notes should be sorted so that the ends of their ranges are in
  // ascending order; this way the innermost enclosing catch handler is found
  // first during lookup.
# ifdef ENABLE_WASM_EXCEPTIONS
  last = 0;
  for (const WasmTryNote& tryNote : metadataTier_->tryNotes) {
    MOZ_ASSERT(tryNote.end >= last);
    MOZ_ASSERT(tryNote.end > tryNote.begin);
    last = tryNote.end;
  }
# endif
#endif

  // These Vectors can get large and the excess capacity can be significant,
  // so realloc them down to size.

  metadataTier_->funcToCodeRange.shrinkStorageToFit();
  metadataTier_->codeRanges.shrinkStorageToFit();
  metadataTier_->callSites.shrinkStorageToFit();
  metadataTier_->trapSites.shrinkStorageToFit();
  metadataTier_->debugTrapFarJumpOffsets.shrinkStorageToFit();
#ifdef ENABLE_WASM_EXCEPTIONS
  metadataTier_->tryNotes.shrinkStorageToFit();
#endif
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    metadataTier_->trapSites[trap].shrinkStorageToFit();
  }

  return true;
}

UniqueCodeTier ModuleGenerator::finishCodeTier() {
  MOZ_ASSERT(finishedFuncDefs_);

  while (outstanding_ > 0) {
    if (!finishOutstandingTask()) {
      return nullptr;
    }
  }

#ifdef DEBUG
  for (uint32_t codeRangeIndex : metadataTier_->funcToCodeRange) {
    MOZ_ASSERT(codeRangeIndex != BAD_CODE_RANGE);
  }
#endif

  // Now that all imports/exports are known, we can generate a special
  // CompiledCode containing stubs.

  CompiledCode& stubCode = tasks_[0].output;
  MOZ_ASSERT(stubCode.empty());

  if (!GenerateStubs(*moduleEnv_, metadataTier_->funcImports,
                     metadataTier_->funcExports, &stubCode)) {
    return nullptr;
  }

  if (!linkCompiledCode(stubCode)) {
    return nullptr;
  }

  // Finish linking and metadata.

  if (!finishCodegen()) {
    return nullptr;
  }

  if (!finishMetadataTier()) {
    return nullptr;
  }

  UniqueModuleSegment segment =
      ModuleSegment::create(tier(), masm_, *linkData_);
  if (!segment) {
    return nullptr;
  }

  metadataTier_->stackMaps.offsetBy(uintptr_t(segment->base()));

#ifdef DEBUG
  // Check that each stackmap is associated with a plausible instruction.
  for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
    MOZ_ASSERT(IsValidStackMapKey(compilerEnv_->debugEnabled(),
                                  metadataTier_->stackMaps.get(i).nextInsnAddr),
               "wasm stackmap does not reference a valid insn");
  }
#endif

  return js::MakeUnique<CodeTier>(std::move(metadataTier_), std::move(segment));
}

SharedMetadata ModuleGenerator::finishMetadata(const Bytes& bytecode) {
  // Finish initialization of Metadata, which is only needed for constructing
  // the initial Module, not for tier-2 compilation.
  MOZ_ASSERT(mode() != CompileMode::Tier2);

  // Copy over data from the ModuleEnvironment.

  metadata_->memory = moduleEnv_->memory;
  metadata_->startFuncIndex = moduleEnv_->startFuncIndex;
  metadata_->tables = std::move(moduleEnv_->tables);
  metadata_->globals = std::move(moduleEnv_->globals);
#ifdef ENABLE_WASM_EXCEPTIONS
  metadata_->events = std::move(moduleEnv_->events);
#endif
  metadata_->nameCustomSectionIndex = moduleEnv_->nameCustomSectionIndex;
  metadata_->moduleName = moduleEnv_->moduleName;
  metadata_->funcNames = std::move(moduleEnv_->funcNames);
  metadata_->omitsBoundsChecks = moduleEnv_->hugeMemoryEnabled();
  metadata_->usesDuplicateImports = moduleEnv_->usesDuplicateImports;

  // Copy over additional debug information.

  if (compilerEnv_->debugEnabled()) {
    metadata_->debugEnabled = true;

    const size_t numFuncs = moduleEnv_->funcs.length();
    if (!metadata_->debugFuncArgTypes.resize(numFuncs)) {
      return nullptr;
    }
    if (!metadata_->debugFuncReturnTypes.resize(numFuncs)) {
      return nullptr;
    }
    for (size_t i = 0; i < numFuncs; i++) {
      if (!metadata_->debugFuncArgTypes[i].appendAll(
              moduleEnv_->funcs[i].type->args())) {
        return nullptr;
      }
      if (!metadata_->debugFuncReturnTypes[i].appendAll(
              moduleEnv_->funcs[i].type->results())) {
        return nullptr;
      }
    }

    static_assert(sizeof(ModuleHash) <= sizeof(mozilla::SHA1Sum::Hash),
                  "The ModuleHash size shall not exceed the SHA1 hash size.");
    mozilla::SHA1Sum::Hash hash;
    mozilla::SHA1Sum sha1Sum;
    sha1Sum.update(bytecode.begin(), bytecode.length());
    sha1Sum.finish(hash);
    memcpy(metadata_->debugHash, hash, sizeof(ModuleHash));
  }

  MOZ_ASSERT_IF(moduleEnv_->nameCustomSectionIndex, !!metadata_->namePayload);

  // Metadata shouldn't be mutably modified after finishMetadata().
  SharedMetadata metadata = metadata_;
  metadata_ = nullptr;
  return metadata;
}

SharedModule ModuleGenerator::finishModule(
    const ShareableBytes& bytecode,
    JS::OptimizedEncodingListener* maybeTier2Listener) {
  MOZ_ASSERT(mode() == CompileMode::Once || mode() == CompileMode::Tier1);

  UniqueCodeTier codeTier = finishCodeTier();
  if (!codeTier) {
    return nullptr;
  }

  JumpTables jumpTables;
  if (!jumpTables.init(mode(), codeTier->segment(),
                       codeTier->metadata().codeRanges)) {
    return nullptr;
  }

  // Copy over data from the Bytecode, which is going away at the end of
  // compilation.

  DataSegmentVector dataSegments;
  if (!dataSegments.reserve(moduleEnv_->dataSegments.length())) {
    return nullptr;
  }
  for (const DataSegmentEnv& srcSeg : moduleEnv_->dataSegments) {
    MutableDataSegment dstSeg = js_new<DataSegment>();
    if (!dstSeg) {
      return nullptr;
    }
    if (!dstSeg->init(bytecode, srcSeg)) {
      return nullptr;
    }
    dataSegments.infallibleAppend(std::move(dstSeg));
  }

  CustomSectionVector customSections;
  if (!customSections.reserve(moduleEnv_->customSections.length())) {
    return nullptr;
  }
  for (const CustomSectionEnv& srcSec : moduleEnv_->customSections) {
    CustomSection sec;
    if (!sec.name.append(bytecode.begin() + srcSec.nameOffset,
                         srcSec.nameLength)) {
      return nullptr;
    }
    MutableBytes payload = js_new<ShareableBytes>();
    if (!payload) {
      return nullptr;
    }
    if (!payload->append(bytecode.begin() + srcSec.payloadOffset,
                         srcSec.payloadLength)) {
      return nullptr;
    }
    sec.payload = std::move(payload);
    customSections.infallibleAppend(std::move(sec));
  }

  if (moduleEnv_->nameCustomSectionIndex) {
    metadata_->namePayload =
        customSections[*moduleEnv_->nameCustomSectionIndex].payload;
  }

  SharedMetadata metadata = finishMetadata(bytecode.bytes);
  if (!metadata) {
    return nullptr;
  }

  MutableCode code =
      js_new<Code>(std::move(codeTier), *metadata, std::move(jumpTables));
  if (!code || !code->initialize(*linkData_)) {
    return nullptr;
  }

  // See Module debugCodeClaimed_ comments for why we need to make a separate
  // debug copy.

  UniqueBytes debugUnlinkedCode;
  UniqueLinkData debugLinkData;
  const ShareableBytes* debugBytecode = nullptr;
  if (compilerEnv_->debugEnabled()) {
    MOZ_ASSERT(mode() == CompileMode::Once);
    MOZ_ASSERT(tier() == Tier::Debug);

    debugUnlinkedCode = js::MakeUnique<Bytes>();
    if (!debugUnlinkedCode || !debugUnlinkedCode->resize(masm_.bytesNeeded())) {
      return nullptr;
    }

    masm_.executableCopy(debugUnlinkedCode->begin());

    debugLinkData = std::move(linkData_);
    debugBytecode = &bytecode;
  }

  // All the components are finished, so create the complete Module and start
  // tier-2 compilation if requested.

  MutableModule module = js_new<Module>(
      *code, std::move(moduleEnv_->imports), std::move(moduleEnv_->exports),
      std::move(dataSegments), std::move(moduleEnv_->elemSegments),
      std::move(customSections), std::move(debugUnlinkedCode),
      std::move(debugLinkData), debugBytecode);
  if (!module) {
    return nullptr;
  }

  if (mode() == CompileMode::Tier1) {
    module->startTier2(*compileArgs_, bytecode, maybeTier2Listener);
  } else if (tier() == Tier::Serialized && maybeTier2Listener) {
    module->serialize(*linkData_, *maybeTier2Listener);
  }

  return module;
}

bool ModuleGenerator::finishTier2(const Module& module) {
  MOZ_ASSERT(mode() == CompileMode::Tier2);
  MOZ_ASSERT(tier() == Tier::Optimized);
  MOZ_ASSERT(!compilerEnv_->debugEnabled());

  if (cancelled_ && *cancelled_) {
    return false;
  }

  UniqueCodeTier codeTier = finishCodeTier();
  if (!codeTier) {
    return false;
  }

  if (MOZ_UNLIKELY(JitOptions.wasmDelayTier2)) {
    // Introduce an artificial delay when testing wasmDelayTier2, since we
    // want to exercise both tier1 and tier2 code in this case.
    ThisThread::SleepMilliseconds(500);
  }

  return module.finishTier2(*linkData_, std::move(codeTier));
}

size_t CompiledCode::sizeOfExcludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  size_t trapSitesSize = 0;
  for (const TrapSiteVector& vec : trapSites) {
    trapSitesSize += vec.sizeOfExcludingThis(mallocSizeOf);
  }

  return bytes.sizeOfExcludingThis(mallocSizeOf) +
         codeRanges.sizeOfExcludingThis(mallocSizeOf) +
         callSites.sizeOfExcludingThis(mallocSizeOf) +
         callSiteTargets.sizeOfExcludingThis(mallocSizeOf) + trapSitesSize +
         symbolicAccesses.sizeOfExcludingThis(mallocSizeOf) +
#ifdef ENABLE_WASM_EXCEPTIONS
         tryNotes.sizeOfExcludingThis(mallocSizeOf) +
#endif
         codeLabels.sizeOfExcludingThis(mallocSizeOf);
}

size_t CompileTask::sizeOfExcludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  return lifo.sizeOfExcludingThis(mallocSizeOf) +
         inputs.sizeOfExcludingThis(mallocSizeOf) +
         output.sizeOfExcludingThis(mallocSizeOf);
}