/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmGenerator.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/SHA1.h"
#include "mozilla/Unused.h"

#include <algorithm>
#include <thread>

#include "util/Memory.h"
#include "util/Text.h"
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmCompile.h"
#include "wasm/WasmCraneliftCompile.h"
#include "wasm/WasmIonCompile.h"
#include "wasm/WasmStubs.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using mozilla::CheckedInt;
using mozilla::MakeEnumeratedRange;
using mozilla::Unused;

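// Transfer the assembled code bytes and the per-code metadata (call sites,
// trap sites, symbolic accesses, code labels) out of the MacroAssembler and
// into this CompiledCode.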
bool CompiledCode::swap(MacroAssembler& masm) {
  MOZ_ASSERT(bytes.empty());
  if (!masm.swapBuffer(bytes)) {
    return false;
  }

  callSites.swap(masm.callSites());
  callSiteTargets.swap(masm.callSiteTargets());
  trapSites.swap(masm.trapSites());
  symbolicAccesses.swap(masm.symbolicAccesses());
  codeLabels.swap(masm.codeLabels());
  return true;
}

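// As swap(), but additionally exchange the reusable Cranelift compiler state
// so that the next compilation task can recycle it.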
bool CompiledCode::swapCranelift(MacroAssembler& masm,
                                 CraneliftReusableData& data) {
  if (!swap(masm)) {
    return false;
  }
  std::swap(data, craneliftReusableData);
  return true;
}

// ****************************************************************************
// ModuleGenerator

static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
static const uint32_t BAD_CODE_RANGE = UINT32_MAX;

ModuleGenerator::ModuleGenerator(const CompileArgs& args,
                                 ModuleEnvironment* env,
                                 const Atomic<bool>* cancelled,
                                 UniqueChars* error)
    : compileArgs_(&args),
      error_(error),
      cancelled_(cancelled),
      env_(env),
      linkData_(nullptr),
      metadataTier_(nullptr),
      taskState_(mutexid::WasmCompileTaskState),
      lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
      masmAlloc_(&lifo_),
      masm_(masmAlloc_, /* limitedSize= */ false),
      debugTrapCodeOffset_(),
      lastPatchedCallSite_(0),
      startOfUnpatchedCallsites_(0),
      parallel_(false),
      outstanding_(0),
      currentTask_(nullptr),
      batchedBytecode_(0),
      finishedFuncDefs_(false) {
  MOZ_ASSERT(IsCompilingWasm());
}

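// The destructor must tear down any in-flight parallel compilation: tasks
// still queued are removed from the helper-thread worklist, tasks already
// running are waited on, and any error they reported is propagated.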
ModuleGenerator::~ModuleGenerator() {
  MOZ_ASSERT_IF(finishedFuncDefs_, !batchedBytecode_);
  MOZ_ASSERT_IF(finishedFuncDefs_, !currentTask_);

  if (parallel_) {
    if (outstanding_) {
      // Remove any pending compilation tasks from the worklist.
      {
        AutoLockHelperThreadState lock;
        CompileTaskPtrFifo& worklist =
            HelperThreadState().wasmWorklist(lock, mode());
        auto pred = [this](CompileTask* task) {
          return &task->state == &taskState_;
        };
        size_t removed = worklist.eraseIf(pred);
        MOZ_ASSERT(outstanding_ >= removed);
        outstanding_ -= removed;
      }

      // Wait until all active compilation tasks have finished.
      {
        auto taskState = taskState_.lock();
        while (true) {
          MOZ_ASSERT(outstanding_ >= taskState->finished.length());
          outstanding_ -= taskState->finished.length();
          taskState->finished.clear();

          MOZ_ASSERT(outstanding_ >= taskState->numFailed);
          outstanding_ -= taskState->numFailed;
          taskState->numFailed = 0;

          if (!outstanding_) {
            break;
          }

          taskState.wait(/* failed or finished */);
        }
      }
    }
  } else {
    MOZ_ASSERT(!outstanding_);
  }

  // Propagate error state.
  if (error_ && !*error_) {
    *error_ = std::move(taskState_.lock()->errorMessage);
  }
}

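// Reserve 'bytes' bytes at 'align' alignment in the module's global data
// area, bump-allocating from the current globalDataLength and failing on
// uint32_t overflow.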
bool ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align,
                                          uint32_t* globalDataOffset) {
  CheckedInt<uint32_t> newGlobalDataLength(metadata_->globalDataLength);

  newGlobalDataLength +=
      ComputeByteAlignment(newGlobalDataLength.value(), align);
  if (!newGlobalDataLength.isValid()) {
    return false;
  }

  *globalDataOffset = newGlobalDataLength.value();
  newGlobalDataLength += bytes;

  if (!newGlobalDataLength.isValid()) {
    return false;
  }

  metadata_->globalDataLength = newGlobalDataLength.value();
  return true;
}

bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata) {
  // Perform fallible metadata, linkdata, assumption allocations.

  MOZ_ASSERT(isAsmJS() == !!maybeAsmJSMetadata);
  if (maybeAsmJSMetadata) {
    metadata_ = maybeAsmJSMetadata;
  } else {
    metadata_ = js_new<Metadata>();
    if (!metadata_) {
      return false;
    }
  }

  if (compileArgs_->scriptedCaller.filename) {
    metadata_->filename =
        DuplicateString(compileArgs_->scriptedCaller.filename.get());
    if (!metadata_->filename) {
      return false;
    }

    metadata_->filenameIsURL = compileArgs_->scriptedCaller.filenameIsURL;
  } else {
    MOZ_ASSERT(!compileArgs_->scriptedCaller.filenameIsURL);
  }

  if (compileArgs_->sourceMapURL) {
    metadata_->sourceMapURL = DuplicateString(compileArgs_->sourceMapURL.get());
    if (!metadata_->sourceMapURL) {
      return false;
    }
  }

  linkData_ = js::MakeUnique<LinkData>(tier());
  if (!linkData_) {
    return false;
  }

  metadataTier_ = js::MakeUnique<MetadataTier>(tier());
  if (!metadataTier_) {
    return false;
  }

  // funcToCodeRange maps function indices to code-range indices and all
  // elements will be initialized by the time module generation is finished.

  if (!metadataTier_->funcToCodeRange.appendN(BAD_CODE_RANGE,
                                              env_->funcTypes.length())) {
    return false;
  }

  // Pre-reserve space for large Vectors to avoid the significant cost of the
  // final reallocs. In particular, the MacroAssembler can be enormous, so be
  // extra conservative. Since large over-reservations may fail when the
  // actual allocations will succeed, ignore OOM failures. Note,
  // shrinkStorageToFit calls at the end will trim off unneeded capacity.

  size_t codeSectionSize = env_->codeSection ? env_->codeSection->size : 0;

  size_t estimatedCodeSize =
      1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize);
  Unused << masm_.reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess));

  Unused << metadataTier_->codeRanges.reserve(2 * env_->numFuncDefs());

  const size_t ByteCodesPerCallSite = 50;
  Unused << metadataTier_->callSites.reserve(codeSectionSize /
                                             ByteCodesPerCallSite);

  const size_t ByteCodesPerOOBTrap = 10;
  Unused << metadataTier_->trapSites[Trap::OutOfBounds].reserve(
      codeSectionSize / ByteCodesPerOOBTrap);

  // Allocate space in TlsData for declarations that need it.

  MOZ_ASSERT(metadata_->globalDataLength == 0);

  for (size_t i = 0; i < env_->funcImportGlobalDataOffsets.length(); i++) {
    uint32_t globalDataOffset;
    if (!allocateGlobalBytes(sizeof(FuncImportTls), sizeof(void*),
                             &globalDataOffset)) {
      return false;
    }

    env_->funcImportGlobalDataOffsets[i] = globalDataOffset;

    FuncType copy;
    if (!copy.clone(*env_->funcTypes[i])) {
      return false;
    }
    if (!metadataTier_->funcImports.emplaceBack(std::move(copy),
                                                globalDataOffset)) {
      return false;
    }
  }

  for (TableDesc& table : env_->tables) {
    if (!allocateGlobalBytes(sizeof(TableTls), sizeof(void*),
                             &table.globalDataOffset)) {
      return false;
    }
  }

  if (!isAsmJS()) {
    for (TypeDef& td : env_->types) {
      if (!td.isFuncType()) {
        continue;
      }

      FuncTypeWithId& funcType = td.funcType();
      if (FuncTypeIdDesc::isGlobal(funcType)) {
        uint32_t globalDataOffset;
        if (!allocateGlobalBytes(sizeof(void*), sizeof(void*),
                                 &globalDataOffset)) {
          return false;
        }

        funcType.id = FuncTypeIdDesc::global(funcType, globalDataOffset);

        FuncType copy;
        if (!copy.clone(funcType)) {
          return false;
        }

        if (!metadata_->funcTypeIds.emplaceBack(std::move(copy), funcType.id)) {
          return false;
        }
      } else {
        funcType.id = FuncTypeIdDesc::immediate(funcType);
      }
    }
  }

  for (GlobalDesc& global : env_->globals) {
    if (global.isConstant()) {
      continue;
    }

    uint32_t width =
        global.isIndirect() ? sizeof(void*) : SizeOf(global.type());

    uint32_t globalDataOffset;
    if (!allocateGlobalBytes(width, width, &globalDataOffset)) {
      return false;
    }

    global.setOffset(globalDataOffset);
  }

  // Accumulate all exported functions:
  // - explicitly marked as such;
  // - implicitly exported by being an element of function tables;
  // - implicitly exported by being the start function;
  // The FuncExportVector stored in Metadata needs to be sorted (to allow
  // O(log(n)) lookup at runtime) and deduplicated. Use a vector with invalid
  // entries for every single function, that we'll fill as we go through the
  // exports, and in which we'll remove invalid entries after the fact.

  static_assert(((uint64_t(MaxFuncs) << 1) | 1) < uint64_t(UINT32_MAX),
                "bit packing won't work in ExportedFunc");

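  // An ExportedFunc packs a function index and an "explicitly exported" bit
  // into a single uint32_t; UINT32_MAX is reserved as the invalid sentinel.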
  class ExportedFunc {
    uint32_t value;

   public:
    ExportedFunc() : value(UINT32_MAX) {}
    ExportedFunc(uint32_t index, bool isExplicit)
        : value((index << 1) | (isExplicit ? 1 : 0)) {}
    uint32_t index() const { return value >> 1; }
    bool isExplicit() const { return value & 0x1; }
    bool operator<(const ExportedFunc& other) const {
      return index() < other.index();
    }
    bool operator==(const ExportedFunc& other) const {
      return index() == other.index();
    }
    bool isInvalid() const { return value == UINT32_MAX; }
    void mergeExplicit(bool explicitBit) {
      if (!isExplicit() && explicitBit) {
        value |= 0x1;
      }
    }
  };

  Vector<ExportedFunc, 8, SystemAllocPolicy> exportedFuncs;
  if (!exportedFuncs.resize(env_->numFuncs())) {
    return false;
  }

  auto addOrMerge = [&exportedFuncs](ExportedFunc newEntry) {
    uint32_t index = newEntry.index();
    if (exportedFuncs[index].isInvalid()) {
      exportedFuncs[index] = newEntry;
    } else {
      exportedFuncs[index].mergeExplicit(newEntry.isExplicit());
    }
  };

  for (const Export& exp : env_->exports) {
    if (exp.kind() == DefinitionKind::Function) {
      addOrMerge(ExportedFunc(exp.funcIndex(), true));
    }
  }

  if (env_->startFuncIndex) {
    addOrMerge(ExportedFunc(*env_->startFuncIndex, true));
  }

  for (const ElemSegment* seg : env_->elemSegments) {
    // For now, the segments always carry function indices regardless of the
    // segment's declared element type; this works because the only legal
    // element types are funcref and anyref and the only legal values are
    // functions and null. We always add functions in segments as exported
    // functions, regardless of the segment's type. In the future, if we make
    // the representation of AnyRef segments different, we will have to
    // consider function values in those segments specially.
    bool isAsmJS =
        seg->active() && env_->tables[seg->tableIndex].kind == TableKind::AsmJS;
    if (!isAsmJS) {
      for (uint32_t funcIndex : seg->elemFuncIndices) {
        if (funcIndex != NullFuncIndex) {
          addOrMerge(ExportedFunc(funcIndex, false));
        }
      }
    }
  }

  for (const GlobalDesc& global : env_->globals) {
    if (global.isVariable() &&
        global.initExpr().kind() == InitExpr::Kind::RefFunc) {
      addOrMerge(ExportedFunc(global.initExpr().refFuncIndex(), false));
    }
  }

  auto* newEnd =
      std::remove_if(exportedFuncs.begin(), exportedFuncs.end(),
                     [](const ExportedFunc& exp) { return exp.isInvalid(); });
  exportedFuncs.erase(newEnd, exportedFuncs.end());

  if (!metadataTier_->funcExports.reserve(exportedFuncs.length())) {
    return false;
  }

  for (const ExportedFunc& funcIndex : exportedFuncs) {
    FuncType funcType;
    if (!funcType.clone(*env_->funcTypes[funcIndex.index()])) {
      return false;
    }
    metadataTier_->funcExports.infallibleEmplaceBack(
        std::move(funcType), funcIndex.index(), funcIndex.isExplicit());
  }

  // Determine whether parallel or sequential compilation is to be used and
  // initialize the CompileTasks that will be used in either mode.

  GlobalHelperThreadState& threads = HelperThreadState();
  MOZ_ASSERT(threads.threadCount > 1);

  uint32_t numTasks;
  if (CanUseExtraThreads() && threads.cpuCount > 1) {
    parallel_ = true;
    numTasks = 2 * threads.maxWasmCompilationThreads();
  } else {
    numTasks = 1;
  }

  if (!tasks_.initCapacity(numTasks)) {
    return false;
  }
  for (size_t i = 0; i < numTasks; i++) {
    tasks_.infallibleEmplaceBack(*env_, taskState_,
                                 COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
  }

  if (!freeTasks_.reserve(numTasks)) {
    return false;
  }
  for (size_t i = 0; i < numTasks; i++) {
    freeTasks_.infallibleAppend(&tasks_[i]);
  }

  // Fill in function stubs for each import so that imported functions can be
  // used in all the places that normal function definitions can (table
  // elements, export calls, etc).

  CompiledCode& importCode = tasks_[0].output;
  MOZ_ASSERT(importCode.empty());

  if (!GenerateImportFunctions(*env_, metadataTier_->funcImports,
                               &importCode)) {
    return false;
  }

  if (!linkCompiledCode(importCode)) {
    return false;
  }

  importCode.clear();
  return true;
}

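// A function is considered compiled once its funcToCodeRange entry has been
// replaced by a real code-range index.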
bool ModuleGenerator::funcIsCompiled(uint32_t funcIndex) const {
  return metadataTier_->funcToCodeRange[funcIndex] != BAD_CODE_RANGE;
}

const CodeRange& ModuleGenerator::funcCodeRange(uint32_t funcIndex) const {
  MOZ_ASSERT(funcIsCompiled(funcIndex));
  const CodeRange& cr =
      metadataTier_->codeRanges[metadataTier_->funcToCodeRange[funcIndex]];
  MOZ_ASSERT(cr.isFunction());
  return cr;
}

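// Whether a relative jump or call from code offset 'caller' to code offset
// 'callee' fits in the platform's immediate jump range.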
static bool InRange(uint32_t caller, uint32_t callee) {
  // We assume JumpImmediateRange is defined conservatively enough that the
  // slight difference between 'caller' (which is really the return address
  // offset) and the actual base of the relative displacement computation
  // isn't significant.
  uint32_t range = std::min(JitOptions.jumpThreshold, JumpImmediateRange);
  if (caller < callee) {
    return callee - caller < range;
  }
  return caller - callee < range;
}

typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>
    OffsetMap;
typedef EnumeratedArray<Trap, Trap::Limit, Maybe<uint32_t>>
    TrapMaybeOffsetArray;

bool ModuleGenerator::linkCallSites() {
  masm_.haltingAlign(CodeAlignment);

  // Create far jumps for calls that have relative offsets that may otherwise
  // go out of range. This method is called both between function bodies (at a
  // frequency determined by the ISA's jump range) and once at the very end of
  // a module's codegen after all possible calls/traps have been emitted.

  OffsetMap existingCallFarJumps;
  for (; lastPatchedCallSite_ < metadataTier_->callSites.length();
       lastPatchedCallSite_++) {
    const CallSite& callSite = metadataTier_->callSites[lastPatchedCallSite_];
    const CallSiteTarget& target = callSiteTargets_[lastPatchedCallSite_];
    uint32_t callerOffset = callSite.returnAddressOffset();
    switch (callSite.kind()) {
      case CallSiteDesc::Dynamic:
      case CallSiteDesc::Symbolic:
        break;
      case CallSiteDesc::Func: {
        if (funcIsCompiled(target.funcIndex())) {
          uint32_t calleeOffset =
              funcCodeRange(target.funcIndex()).funcNormalEntry();
          if (InRange(callerOffset, calleeOffset)) {
            masm_.patchCall(callerOffset, calleeOffset);
            break;
          }
        }

        OffsetMap::AddPtr p =
            existingCallFarJumps.lookupForAdd(target.funcIndex());
        if (!p) {
          Offsets offsets;
          offsets.begin = masm_.currentOffset();
          if (!callFarJumps_.emplaceBack(target.funcIndex(),
                                         masm_.farJumpWithPatch())) {
            return false;
          }
          offsets.end = masm_.currentOffset();
          if (masm_.oom()) {
            return false;
          }
          if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
                                                     offsets)) {
            return false;
          }
          if (!existingCallFarJumps.add(p, target.funcIndex(), offsets.begin)) {
            return false;
          }
        }

        masm_.patchCall(callerOffset, p->value());
        break;
      }
      case CallSiteDesc::Breakpoint:
      case CallSiteDesc::EnterFrame:
      case CallSiteDesc::LeaveFrame: {
        Uint32Vector& jumps = metadataTier_->debugTrapFarJumpOffsets;
        if (jumps.empty() || !InRange(jumps.back(), callerOffset)) {
          // See BaseCompiler::insertBreakablePoint for why we must
          // reload the TLS register on this path.
          Offsets offsets;
          offsets.begin = masm_.currentOffset();
          masm_.loadPtr(Address(FramePointer, offsetof(Frame, tls)),
                        WasmTlsReg);
          CodeOffset jumpOffset = masm_.farJumpWithPatch();
          offsets.end = masm_.currentOffset();
          if (masm_.oom()) {
            return false;
          }
          if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
                                                     offsets)) {
            return false;
          }
          if (!debugTrapFarJumps_.emplaceBack(jumpOffset)) {
            return false;
          }
          if (!jumps.emplaceBack(offsets.begin)) {
            return false;
          }
        }
        break;
      }
    }
  }

  masm_.flushBuffer();
  return !masm_.oom();
}

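// Record the bookkeeping for a newly linked CodeRange: function bodies fill
// in funcToCodeRange, import/export entry and exit stubs record their offsets
// in the tier's metadata, and singleton stubs record their unique offsets.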
void ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex,
                                    const CodeRange& codeRange) {
  switch (codeRange.kind()) {
    case CodeRange::Function:
      MOZ_ASSERT(metadataTier_->funcToCodeRange[codeRange.funcIndex()] ==
                 BAD_CODE_RANGE);
      metadataTier_->funcToCodeRange[codeRange.funcIndex()] = codeRangeIndex;
      break;
    case CodeRange::InterpEntry:
      metadataTier_->lookupFuncExport(codeRange.funcIndex())
          .initEagerInterpEntryOffset(codeRange.begin());
      break;
    case CodeRange::JitEntry:
      // Nothing to do: jit entries are linked in the jump tables.
      break;
    case CodeRange::ImportJitExit:
      metadataTier_->funcImports[codeRange.funcIndex()].initJitExitOffset(
          codeRange.begin());
      break;
    case CodeRange::ImportInterpExit:
      metadataTier_->funcImports[codeRange.funcIndex()].initInterpExitOffset(
          codeRange.begin());
      break;
    case CodeRange::DebugTrap:
      MOZ_ASSERT(!debugTrapCodeOffset_);
      debugTrapCodeOffset_ = codeRange.begin();
      break;
    case CodeRange::TrapExit:
      MOZ_ASSERT(!linkData_->trapOffset);
      linkData_->trapOffset = codeRange.begin();
      break;
    case CodeRange::Throw:
      // Jumped to by other stubs, so nothing to do.
      break;
    case CodeRange::FarJumpIsland:
    case CodeRange::BuiltinThunk:
      MOZ_CRASH("Unexpected CodeRange kind");
  }
}

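// Append all elements of srcVec to dstVec, copy-constructing each element in
// place and invoking op(indexInDstVec, element) on it so that the caller can
// relocate any embedded offsets.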
template <class Vec, class Op>
static bool AppendForEach(Vec* dstVec, const Vec& srcVec, Op op) {
  if (!dstVec->growByUninitialized(srcVec.length())) {
    return false;
  }

  using T = typename Vec::ElementType;

  const T* src = srcVec.begin();

  T* dstBegin = dstVec->begin();
  T* dstEnd = dstVec->end();
  T* dstStart = dstEnd - srcVec.length();

  for (T* dst = dstStart; dst != dstEnd; dst++, src++) {
    new (dst) T(*src);
    op(dst - dstBegin, dst);
  }

  return true;
}

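// Merge a batch of compiled code into the module image: append the raw bytes
// to the MacroAssembler and rebase all code ranges, call sites, trap sites,
// symbolic accesses, code labels and stack maps by the code's new offset in
// the module.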
bool ModuleGenerator::linkCompiledCode(CompiledCode& code) {
  // Before merging in new code, if calls in a prior code range might go out of
  // range, insert far jumps to extend the range.

  if (!InRange(startOfUnpatchedCallsites_,
               masm_.size() + code.bytes.length())) {
    startOfUnpatchedCallsites_ = masm_.size();
    if (!linkCallSites()) {
      return false;
    }
  }

  // All code offsets in 'code' must be incremented by their position in the
  // overall module when the code was appended.

  masm_.haltingAlign(CodeAlignment);
  const size_t offsetInModule = masm_.size();
  if (!masm_.appendRawCode(code.bytes.begin(), code.bytes.length())) {
    return false;
  }

  auto codeRangeOp = [=](uint32_t codeRangeIndex, CodeRange* codeRange) {
    codeRange->offsetBy(offsetInModule);
    noteCodeRange(codeRangeIndex, *codeRange);
  };
  if (!AppendForEach(&metadataTier_->codeRanges, code.codeRanges,
                     codeRangeOp)) {
    return false;
  }

  auto callSiteOp = [=](uint32_t, CallSite* cs) {
    cs->offsetBy(offsetInModule);
  };
  if (!AppendForEach(&metadataTier_->callSites, code.callSites, callSiteOp)) {
    return false;
  }

  if (!callSiteTargets_.appendAll(code.callSiteTargets)) {
    return false;
  }

  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    auto trapSiteOp = [=](uint32_t, TrapSite* ts) {
      ts->offsetBy(offsetInModule);
    };
    if (!AppendForEach(&metadataTier_->trapSites[trap], code.trapSites[trap],
                       trapSiteOp)) {
      return false;
    }
  }

  for (const SymbolicAccess& access : code.symbolicAccesses) {
    uint32_t patchAt = offsetInModule + access.patchAt.offset();
    if (!linkData_->symbolicLinks[access.target].append(patchAt)) {
      return false;
    }
  }

  for (const CodeLabel& codeLabel : code.codeLabels) {
    LinkData::InternalLink link;
    link.patchAtOffset = offsetInModule + codeLabel.patchAt().offset();
    link.targetOffset = offsetInModule + codeLabel.target().offset();
#ifdef JS_CODELABEL_LINKMODE
    link.mode = codeLabel.linkMode();
#endif
    if (!linkData_->internalLinks.append(link)) {
      return false;
    }
  }

  for (size_t i = 0; i < code.stackMaps.length(); i++) {
    StackMaps::Maplet maplet = code.stackMaps.move(i);
    maplet.offsetBy(offsetInModule);
    if (!metadataTier_->stackMaps.add(maplet)) {
      // This function is now the only owner of maplet.map, so we'd better
      // free it right now.
      maplet.map->destroy();
      return false;
    }
  }

  return true;
}

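// Compile a task's batched function inputs with the backend selected by the
// task's compilation environment: baseline, or Ion/Cranelift for the
// optimized tier.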
static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
  MOZ_ASSERT(task->lifo.isEmpty());
  MOZ_ASSERT(task->output.empty());

  switch (task->env.tier()) {
    case Tier::Optimized:
      switch (task->env.optimizedBackend()) {
        case OptimizedBackend::Cranelift:
          if (!CraneliftCompileFunctions(task->env, task->lifo, task->inputs,
                                         &task->output, error)) {
            return false;
          }
          break;
        case OptimizedBackend::Ion:
          if (!IonCompileFunctions(task->env, task->lifo, task->inputs,
                                   &task->output, error)) {
            return false;
          }
          break;
      }
      break;
    case Tier::Baseline:
      if (!BaselineCompileFunctions(task->env, task->lifo, task->inputs,
                                    &task->output, error)) {
        return false;
      }
      break;
  }

  MOZ_ASSERT(task->lifo.isEmpty());
  MOZ_ASSERT(task->inputs.length() == task->output.codeRanges.length());
  task->inputs.clear();
  return true;
}

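// Helper-thread entry point: run the compile task, then publish the result
// (or record the first error) under the task-state lock and wake the module
// generator.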
void wasm::ExecuteCompileTaskFromHelperThread(CompileTask* task) {
  TraceLoggerThread* logger = TraceLoggerForCurrentThread();
  AutoTraceLog logCompile(logger, TraceLogger_WasmCompilation);

  UniqueChars error;
  bool ok = ExecuteCompileTask(task, &error);

  auto taskState = task->state.lock();

  if (!ok || !taskState->finished.append(task)) {
    taskState->numFailed++;
    if (!taskState->errorMessage) {
      taskState->errorMessage = std::move(error);
    }
  }

  taskState.notify_one(/* failed or finished */);
}

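// Compile the current batch synchronously on this thread and link the result
// into the module.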
bool ModuleGenerator::locallyCompileCurrentTask() {
  if (!ExecuteCompileTask(currentTask_, error_)) {
    return false;
  }
  if (!finishTask(currentTask_)) {
    return false;
  }
  currentTask_ = nullptr;
  batchedBytecode_ = 0;
  return true;
}

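// Link a finished task's output into the module image and recycle the task
// onto the free list.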
bool ModuleGenerator::finishTask(CompileTask* task) {
  masm_.haltingAlign(CodeAlignment);

  if (!linkCompiledCode(task->output)) {
    return false;
  }

  task->output.clear();

  MOZ_ASSERT(task->inputs.empty());
  MOZ_ASSERT(task->output.empty());
  MOZ_ASSERT(task->lifo.isEmpty());
  freeTasks_.infallibleAppend(task);
  return true;
}

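// Dispatch the current batch of functions: off-thread when parallel
// compilation is enabled, otherwise synchronously on this thread.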
bool ModuleGenerator::launchBatchCompile() {
  MOZ_ASSERT(currentTask_);

  if (cancelled_ && *cancelled_) {
    return false;
  }

  if (!parallel_) {
    return locallyCompileCurrentTask();
  }

  if (!StartOffThreadWasmCompile(currentTask_, mode())) {
    return false;
  }
  outstanding_++;
  currentTask_ = nullptr;
  batchedBytecode_ = 0;
  return true;
}

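// Block until a helper thread has finished some outstanding task, then link
// its output. Returns false as soon as any task has failed.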
bool ModuleGenerator::finishOutstandingTask() {
  MOZ_ASSERT(parallel_);

  CompileTask* task = nullptr;
  {
    auto taskState = taskState_.lock();
    while (true) {
      MOZ_ASSERT(outstanding_ > 0);

      if (taskState->numFailed > 0) {
        return false;
      }

      if (!taskState->finished.empty()) {
        outstanding_--;
        task = taskState->finished.popCopy();
        break;
      }

      taskState.wait(/* failed or finished */);
    }
  }

  // Call outside of the compilation lock.
  return finishTask(task);
}

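// Queue a single function definition for compilation, batching bytecode up to
// a per-tier threshold before spinning off a compile task.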
bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
                                     uint32_t lineOrBytecode,
                                     const uint8_t* begin, const uint8_t* end,
                                     Uint32Vector&& lineNums) {
  MOZ_ASSERT(!finishedFuncDefs_);
  MOZ_ASSERT(funcIndex < env_->numFuncs());

  uint32_t threshold;
  switch (tier()) {
    case Tier::Baseline:
      threshold = JitOptions.wasmBatchBaselineThreshold;
      break;
    case Tier::Optimized:
      switch (env_->optimizedBackend()) {
        case OptimizedBackend::Ion:
          threshold = JitOptions.wasmBatchIonThreshold;
          break;
        case OptimizedBackend::Cranelift:
          threshold = JitOptions.wasmBatchCraneliftThreshold;
          break;
        default:
          MOZ_CRASH("Invalid optimizedBackend value");
      }
      break;
    default:
      MOZ_CRASH("Invalid tier value");
      break;
  }

  uint32_t funcBytecodeLength = end - begin;

  // Do not go over the threshold if we can avoid it: spin off the compilation
  // before appending the function if we would go over. (Very large single
  // functions may still exceed the threshold but this is fine; it'll be very
  // uncommon and is in any case safely handled by the MacroAssembler's buffer
  // limit logic.)

  if (currentTask_ && currentTask_->inputs.length() &&
      batchedBytecode_ + funcBytecodeLength > threshold) {
    if (!launchBatchCompile()) {
      return false;
    }
  }

  if (!currentTask_) {
    if (freeTasks_.empty() && !finishOutstandingTask()) {
      return false;
    }
    currentTask_ = freeTasks_.popCopy();
  }

  if (!currentTask_->inputs.emplaceBack(funcIndex, lineOrBytecode, begin, end,
                                        std::move(lineNums))) {
    return false;
  }

  batchedBytecode_ += funcBytecodeLength;
  MOZ_ASSERT(batchedBytecode_ <= MaxCodeSectionBytes);
  return true;
}

bool ModuleGenerator::finishFuncDefs() {
  MOZ_ASSERT(!finishedFuncDefs_);

  if (currentTask_ && !locallyCompileCurrentTask()) {
    return false;
  }

  finishedFuncDefs_ = true;
  return true;
}

bool ModuleGenerator::finishCodegen() {
  // Now that all functions and stubs are generated and their CodeRanges
  // known, patch all calls (which can emit far jumps) and far jumps. Linking
  // can emit tiny far-jump stubs, so there is an ordering dependency here.

  if (!linkCallSites()) {
    return false;
  }

  for (CallFarJump far : callFarJumps_) {
    masm_.patchFarJump(far.jump,
                       funcCodeRange(far.funcIndex).funcNormalEntry());
  }

  for (CodeOffset farJump : debugTrapFarJumps_) {
    masm_.patchFarJump(farJump, debugTrapCodeOffset_);
  }

  // None of the linking or far-jump operations should emit masm metadata.

  MOZ_ASSERT(masm_.callSites().empty());
  MOZ_ASSERT(masm_.callSiteTargets().empty());
  MOZ_ASSERT(masm_.trapSites().empty());
  MOZ_ASSERT(masm_.symbolicAccesses().empty());
  MOZ_ASSERT(masm_.codeLabels().empty());

  masm_.finish();
  return !masm_.oom();
}

bool ModuleGenerator::finishMetadataTier() {
  // The stack maps aren't yet sorted. Do so now, since we'll need to
  // binary-search them at GC time.
  metadataTier_->stackMaps.sort();

#ifdef DEBUG
  // Check that the stack map contains no duplicates, since that could lead to
  // ambiguities about stack slot pointerness.
  uint8_t* previousNextInsnAddr = nullptr;
  for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
    const StackMaps::Maplet& maplet = metadataTier_->stackMaps.get(i);
    MOZ_ASSERT_IF(i > 0, uintptr_t(maplet.nextInsnAddr) >
                             uintptr_t(previousNextInsnAddr));
    previousNextInsnAddr = maplet.nextInsnAddr;
  }

  // Assert all sorted metadata is sorted.
  uint32_t last = 0;
  for (const CodeRange& codeRange : metadataTier_->codeRanges) {
    MOZ_ASSERT(codeRange.begin() >= last);
    last = codeRange.end();
  }

  last = 0;
  for (const CallSite& callSite : metadataTier_->callSites) {
    MOZ_ASSERT(callSite.returnAddressOffset() >= last);
    last = callSite.returnAddressOffset();
  }

  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    last = 0;
    for (const TrapSite& trapSite : metadataTier_->trapSites[trap]) {
      MOZ_ASSERT(trapSite.pcOffset >= last);
      last = trapSite.pcOffset;
    }
  }

  last = 0;
  for (uint32_t debugTrapFarJumpOffset :
       metadataTier_->debugTrapFarJumpOffsets) {
    MOZ_ASSERT(debugTrapFarJumpOffset >= last);
    last = debugTrapFarJumpOffset;
  }
#endif

  // These Vectors can get large and the excess capacity can be significant,
  // so realloc them down to size.

  metadataTier_->funcToCodeRange.shrinkStorageToFit();
  metadataTier_->codeRanges.shrinkStorageToFit();
  metadataTier_->callSites.shrinkStorageToFit();
  metadataTier_->trapSites.shrinkStorageToFit();
  metadataTier_->debugTrapFarJumpOffsets.shrinkStorageToFit();
  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    metadataTier_->trapSites[trap].shrinkStorageToFit();
  }

  return true;
}

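// Complete compilation of this tier: wait for all outstanding tasks, generate
// the remaining stubs, finish linking and metadata, and create the executable
// module segment.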
UniqueCodeTier ModuleGenerator::finishCodeTier() {
  MOZ_ASSERT(finishedFuncDefs_);

  while (outstanding_ > 0) {
    if (!finishOutstandingTask()) {
      return nullptr;
    }
  }

#ifdef DEBUG
  for (uint32_t codeRangeIndex : metadataTier_->funcToCodeRange) {
    MOZ_ASSERT(codeRangeIndex != BAD_CODE_RANGE);
  }
#endif

  // Now that all imports/exports are known, we can generate a special
  // CompiledCode containing stubs.

  CompiledCode& stubCode = tasks_[0].output;
  MOZ_ASSERT(stubCode.empty());

  if (!GenerateStubs(*env_, metadataTier_->funcImports,
                     metadataTier_->funcExports, &stubCode)) {
    return nullptr;
  }

  if (!linkCompiledCode(stubCode)) {
    return nullptr;
  }

  // Finish linking and metadata.

  if (!finishCodegen()) {
    return nullptr;
  }

  if (!finishMetadataTier()) {
    return nullptr;
  }

  UniqueModuleSegment segment =
      ModuleSegment::create(tier(), masm_, *linkData_);
  if (!segment) {
    return nullptr;
  }

  metadataTier_->stackMaps.offsetBy(uintptr_t(segment->base()));

#ifdef DEBUG
  // Check that each stack map is associated with a plausible instruction.
  for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
    MOZ_ASSERT(IsValidStackMapKey(env_->debugEnabled(),
                                  metadataTier_->stackMaps.get(i).nextInsnAddr),
               "wasm stack map does not reference a valid insn");
  }
#endif

  return js::MakeUnique<CodeTier>(std::move(metadataTier_), std::move(segment));
}

SharedMetadata ModuleGenerator::finishMetadata(const Bytes& bytecode) {
  // Finish initialization of Metadata, which is only needed for constructing
  // the initial Module, not for tier-2 compilation.
  MOZ_ASSERT(mode() != CompileMode::Tier2);

  // Copy over data from the ModuleEnvironment.

  metadata_->memoryUsage = env_->memoryUsage;
  metadata_->minMemoryLength = env_->minMemoryLength;
  metadata_->maxMemoryLength = env_->maxMemoryLength;
  metadata_->startFuncIndex = env_->startFuncIndex;
  metadata_->tables = std::move(env_->tables);
  metadata_->globals = std::move(env_->globals);
  metadata_->nameCustomSectionIndex = env_->nameCustomSectionIndex;
  metadata_->moduleName = env_->moduleName;
  metadata_->funcNames = std::move(env_->funcNames);
  metadata_->omitsBoundsChecks = env_->hugeMemoryEnabled();
  metadata_->v128Enabled = env_->v128Enabled();

  // Copy over additional debug information.

  if (env_->debugEnabled()) {
    metadata_->debugEnabled = true;

    const size_t numFuncTypes = env_->funcTypes.length();
    if (!metadata_->debugFuncArgTypes.resize(numFuncTypes)) {
      return nullptr;
    }
    if (!metadata_->debugFuncReturnTypes.resize(numFuncTypes)) {
      return nullptr;
    }
    for (size_t i = 0; i < numFuncTypes; i++) {
      if (!metadata_->debugFuncArgTypes[i].appendAll(
              env_->funcTypes[i]->args())) {
        return nullptr;
      }
      if (!metadata_->debugFuncReturnTypes[i].appendAll(
              env_->funcTypes[i]->results())) {
        return nullptr;
      }
    }

    static_assert(sizeof(ModuleHash) <= sizeof(mozilla::SHA1Sum::Hash),
                  "The ModuleHash size shall not exceed the SHA1 hash size.");
    mozilla::SHA1Sum::Hash hash;
    mozilla::SHA1Sum sha1Sum;
    sha1Sum.update(bytecode.begin(), bytecode.length());
    sha1Sum.finish(hash);
    memcpy(metadata_->debugHash, hash, sizeof(ModuleHash));
  }

  MOZ_ASSERT_IF(env_->nameCustomSectionIndex, !!metadata_->namePayload);

  // Metadata shouldn't be mutably modified after finishMetadata().
  SharedMetadata metadata = metadata_;
  metadata_ = nullptr;
  return metadata;
}

SharedModule ModuleGenerator::finishModule(
    const ShareableBytes& bytecode,
    JS::OptimizedEncodingListener* maybeTier2Listener) {
  MOZ_ASSERT(mode() == CompileMode::Once || mode() == CompileMode::Tier1);

  UniqueCodeTier codeTier = finishCodeTier();
  if (!codeTier) {
    return nullptr;
  }

  JumpTables jumpTables;
  if (!jumpTables.init(mode(), codeTier->segment(),
                       codeTier->metadata().codeRanges)) {
    return nullptr;
  }

  // Copy over data from the Bytecode, which is going away at the end of
  // compilation.

  DataSegmentVector dataSegments;
  if (!dataSegments.reserve(env_->dataSegments.length())) {
    return nullptr;
  }
  for (const DataSegmentEnv& srcSeg : env_->dataSegments) {
    MutableDataSegment dstSeg = js_new<DataSegment>(srcSeg);
    if (!dstSeg) {
      return nullptr;
    }
    if (!dstSeg->bytes.append(bytecode.begin() + srcSeg.bytecodeOffset,
                              srcSeg.length)) {
      return nullptr;
    }
    dataSegments.infallibleAppend(std::move(dstSeg));
  }

  CustomSectionVector customSections;
  if (!customSections.reserve(env_->customSections.length())) {
    return nullptr;
  }
  for (const CustomSectionEnv& srcSec : env_->customSections) {
    CustomSection sec;
    if (!sec.name.append(bytecode.begin() + srcSec.nameOffset,
                         srcSec.nameLength)) {
      return nullptr;
    }
    MutableBytes payload = js_new<ShareableBytes>();
    if (!payload) {
      return nullptr;
    }
    if (!payload->append(bytecode.begin() + srcSec.payloadOffset,
                         srcSec.payloadLength)) {
      return nullptr;
    }
    sec.payload = std::move(payload);
    customSections.infallibleAppend(std::move(sec));
  }

  if (env_->nameCustomSectionIndex) {
    metadata_->namePayload =
        customSections[*env_->nameCustomSectionIndex].payload;
  }

  SharedMetadata metadata = finishMetadata(bytecode.bytes);
  if (!metadata) {
    return nullptr;
  }

  StructTypeVector structTypes;
  for (TypeDef& td : env_->types) {
    if (td.isStructType() && !structTypes.append(std::move(td.structType()))) {
      return nullptr;
    }
  }

  MutableCode code =
      js_new<Code>(std::move(codeTier), *metadata, std::move(jumpTables),
                   std::move(structTypes));
  if (!code || !code->initialize(*linkData_)) {
    return nullptr;
  }

  // See Module debugCodeClaimed_ comments for why we need to make a separate
  // debug copy.

  UniqueBytes debugUnlinkedCode;
  UniqueLinkData debugLinkData;
  const ShareableBytes* debugBytecode = nullptr;
  if (env_->debugEnabled()) {
    MOZ_ASSERT(mode() == CompileMode::Once);
    MOZ_ASSERT(tier() == Tier::Debug);

    debugUnlinkedCode = js::MakeUnique<Bytes>();
    if (!debugUnlinkedCode || !debugUnlinkedCode->resize(masm_.bytesNeeded())) {
      return nullptr;
    }

    masm_.executableCopy(debugUnlinkedCode->begin());

    debugLinkData = std::move(linkData_);
    debugBytecode = &bytecode;
  }

  // All the components are finished, so create the complete Module and start
  // tier-2 compilation if requested.

  MutableModule module =
      js_new<Module>(*code, std::move(env_->imports), std::move(env_->exports),
                     std::move(dataSegments), std::move(env_->elemSegments),
                     std::move(customSections), std::move(debugUnlinkedCode),
                     std::move(debugLinkData), debugBytecode);
  if (!module) {
    return nullptr;
  }

  if (mode() == CompileMode::Tier1) {
    module->startTier2(*compileArgs_, bytecode, maybeTier2Listener);
  } else if (tier() == Tier::Serialized && maybeTier2Listener) {
    module->serialize(*linkData_, *maybeTier2Listener);
  }

  return module;
}

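// Install completed tier-2 (optimized) code into a module that is already
// running tier-1 code.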
bool ModuleGenerator::finishTier2(const Module& module) {
  MOZ_ASSERT(mode() == CompileMode::Tier2);
  MOZ_ASSERT(tier() == Tier::Optimized);
  MOZ_ASSERT(!env_->debugEnabled());

  if (cancelled_ && *cancelled_) {
    return false;
  }

  UniqueCodeTier codeTier = finishCodeTier();
  if (!codeTier) {
    return false;
  }

  if (MOZ_UNLIKELY(JitOptions.wasmDelayTier2)) {
    // Introduce an artificial delay when testing wasmDelayTier2, since we
    // want to exercise both tier1 and tier2 code in this case.
    std::this_thread::sleep_for(std::chrono::milliseconds(500));
  }

  return module.finishTier2(*linkData_, std::move(codeTier));
}

void CompileTask::runTask() { ExecuteCompileTaskFromHelperThread(this); }

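// Memory-reporting helpers: measure the heap memory held by a CompiledCode
// and a CompileTask, excluding the objects themselves.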
size_t CompiledCode::sizeOfExcludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  size_t trapSitesSize = 0;
  for (const TrapSiteVector& vec : trapSites) {
    trapSitesSize += vec.sizeOfExcludingThis(mallocSizeOf);
  }

  return bytes.sizeOfExcludingThis(mallocSizeOf) +
         codeRanges.sizeOfExcludingThis(mallocSizeOf) +
         callSites.sizeOfExcludingThis(mallocSizeOf) +
         callSiteTargets.sizeOfExcludingThis(mallocSizeOf) + trapSitesSize +
         symbolicAccesses.sizeOfExcludingThis(mallocSizeOf) +
         codeLabels.sizeOfExcludingThis(mallocSizeOf);
}

size_t CompileTask::sizeOfExcludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  return lifo.sizeOfExcludingThis(mallocSizeOf) +
         inputs.sizeOfExcludingThis(mallocSizeOf) +
         output.sizeOfExcludingThis(mallocSizeOf);
}