1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 /*
8  * [SMDOC] Garbage Collector
9  *
10  * This code implements an incremental mark-and-sweep garbage collector, with
11  * most sweeping carried out in the background on a parallel thread.
12  *
13  * Full vs. zone GC
14  * ----------------
15  *
16  * The collector can collect all zones at once, or a subset. These types of
17  * collection are referred to as a full GC and a zone GC respectively.
18  *
19  * It is possible for an incremental collection that started out as a full GC to
20  * become a zone GC if new zones are created during the course of the
21  * collection.
22  *
23  * Incremental collection
24  * ----------------------
25  *
26  * For a collection to be carried out incrementally the following conditions
27  * must be met:
28  *  - the collection must be run by calling js::GCSlice() rather than js::GC()
29  *  - the GC parameter JSGC_INCREMENTAL_GC_ENABLED must be true.
30  *
31  * The last condition is an engine-internal mechanism to ensure that incremental
32  * collection is not carried out without the correct barriers being implemented.
33  * For more information see 'Incremental marking' below.
34  *
35  * If the collection is not incremental, all foreground activity happens inside
36  * a single call to GC() or GCSlice(). However the collection is not complete
37  * until the background sweeping activity has finished.
38  *
39  * An incremental collection proceeds as a series of slices, interleaved with
40  * mutator activity, i.e. running JavaScript code. Slices are limited by a time
41  * budget. The slice finishes as soon as possible after the requested time has
42  * passed.
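 *
 * Schematically, the slice loop looks like the sketch below. This is
 * illustrative pseudo-code only: the exact entry points and arguments (the
 * reason and the 10ms budget) are approximations, not real calling code.
 *
 *   js::SliceBudget budget(js::TimeBudget(10));  // ~10ms per slice
 *   js::GCSlice(cx, reason, budget);             // first slice starts the GC
 *   while (the collection is still in progress) {
 *     // ... mutator (JS code) runs ...
 *     js::GCSlice(cx, reason, budget);           // run another slice
 *   }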
43  *
44  * Collector states
45  * ----------------
46  *
47  * The collector proceeds through the following states, the current state being
48  * held in JSRuntime::gcIncrementalState:
49  *
50  *  - Prepare    - unmarks GC things, discards JIT code and other setup
51  *  - MarkRoots  - marks the stack and other roots
52  *  - Mark       - incrementally marks reachable things
53  *  - Sweep      - sweeps zones in groups and continues marking unswept zones
54  *  - Finalize   - performs background finalization, concurrent with mutator
55  *  - Compact    - incrementally compacts by zone
56  *  - Decommit   - performs background decommit and chunk removal
57  *
58  * Roots are marked in the first MarkRoots slice; this is the start of the GC
59  * proper. The following states can take place over one or more slices.
60  *
61  * In other words an incremental collection proceeds like this:
62  *
63  * Slice 1:   Prepare:    Starts background task to unmark GC things
64  *
65  *          ... JS code runs, background unmarking finishes ...
66  *
67  * Slice 2:   MarkRoots:  Roots are pushed onto the mark stack.
68  *            Mark:       The mark stack is processed by popping an element,
69  *                        marking it, and pushing its children.
70  *
71  *          ... JS code runs ...
72  *
73  * Slice 3:   Mark:       More mark stack processing.
74  *
75  *          ... JS code runs ...
76  *
77  * Slice n-1: Mark:       More mark stack processing.
78  *
79  *          ... JS code runs ...
80  *
81  * Slice n:   Mark:       Mark stack is completely drained.
82  *            Sweep:      Select first group of zones to sweep and sweep them.
83  *
84  *          ... JS code runs ...
85  *
86  * Slice n+1: Sweep:      Mark objects in unswept zones that were newly
87  *                        identified as alive (see below). Then sweep more zone
88  *                        sweep groups.
89  *
90  *          ... JS code runs ...
91  *
92  * Slice n+2: Sweep:      Mark objects in unswept zones that were newly
93  *                        identified as alive. Then sweep more zones.
94  *
95  *          ... JS code runs ...
96  *
97  * Slice m:   Sweep:      Sweeping is finished, and background sweeping
98  *                        started on the helper thread.
99  *
100  *          ... JS code runs, remaining sweeping done on background thread ...
101  *
102  * When background sweeping finishes the GC is complete.
103  *
104  * Incremental marking
105  * -------------------
106  *
107  * Incremental collection requires close collaboration with the mutator (i.e.,
108  * JS code) to guarantee correctness.
109  *
110  *  - During an incremental GC, if a memory location (except a root) is written
111  *    to, then the value it previously held must be marked. Write barriers
112  *    ensure this.
113  *
114  *  - Any object that is allocated during incremental GC must start out marked.
115  *
116  *  - Roots are marked in the first slice and hence don't need write barriers.
117  *    Roots are things like the C stack and the VM stack.
118  *
119  * The problem that write barriers solve is that between slices the mutator can
120  * change the object graph. We must ensure that it cannot do this in such a way
121  * that makes us fail to mark a reachable object (marking an unreachable object
122  * is tolerable).
123  *
124  * We use a snapshot-at-the-beginning algorithm to do this. This means that we
125  * promise to mark at least everything that is reachable at the beginning of
126  * collection. To implement it we mark the old contents of every non-root memory
127  * location written to by the mutator while the collection is in progress, using
128  * write barriers. This is described in gc/Barrier.h.
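 *
 * As a rough sketch (hypothetical helper names, not the real code in
 * gc/Barrier.h), a pre-write barrier behaves like:
 *
 *   void PreWriteBarrier(Cell* prev) {
 *     // Called before |*slot = next| overwrites the old value |prev|.
 *     if (IsIncrementalGCInProgress() && prev && !IsMarked(prev)) {
 *       Mark(prev);  // keep the snapshot-at-the-beginning promise
 *     }
 *   }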
129  *
130  * Incremental sweeping
131  * --------------------
132  *
133  * Sweeping is difficult to do incrementally because object finalizers must be
134  * run at the start of sweeping, before any mutator code runs. The reason is
135  * that some objects use their finalizers to remove themselves from caches. If
136  * mutator code was allowed to run after the start of sweeping, it could observe
137  * the state of the cache and create a new reference to an object that was just
138  * about to be destroyed.
139  *
140  * Sweeping all finalizable objects in one go would introduce long pauses, so
 * instead sweeping is broken up into groups of zones. Zones which are not yet
142  * being swept are still marked, so the issue above does not apply.
143  *
144  * The order of sweeping is restricted by cross compartment pointers - for
145  * example say that object |a| from zone A points to object |b| in zone B and
146  * neither object was marked when we transitioned to the Sweep phase. Imagine we
147  * sweep B first and then return to the mutator. It's possible that the mutator
148  * could cause |a| to become alive through a read barrier (perhaps it was a
149  * shape that was accessed via a shape table). Then we would need to mark |b|,
150  * which |a| points to, but |b| has already been swept.
151  *
152  * So if there is such a pointer then marking of zone B must not finish before
153  * marking of zone A.  Pointers which form a cycle between zones therefore
154  * restrict those zones to being swept at the same time, and these are found
155  * using Tarjan's algorithm for finding the strongly connected components of a
156  * graph.
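 *
 * In other words (illustrative notation only):
 *
 *   a in zone A points to b in zone B  =>  B is swept in the same or a later
 *                                          sweep group than A
 *   A and B are in a pointer cycle     =>  A and B form one strongly connected
 *                                          component and share a sweep group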
157  *
158  * GC things without finalizers, and things with finalizers that are able to run
159  * in the background, are swept on the background thread. This accounts for most
160  * of the sweeping work.
161  *
162  * Reset
163  * -----
164  *
165  * During incremental collection it is possible, although unlikely, for
166  * conditions to change such that incremental collection is no longer safe. In
167  * this case, the collection is 'reset' by resetIncrementalGC(). If we are in
168  * the mark state, this just stops marking, but if we have started sweeping
169  * already, we continue non-incrementally until we have swept the current sweep
170  * group. Following a reset, a new collection is started.
171  *
172  * Compacting GC
173  * -------------
174  *
175  * Compacting GC happens at the end of a major GC as part of the last slice.
176  * There are three parts:
177  *
178  *  - Arenas are selected for compaction.
179  *  - The contents of those arenas are moved to new arenas.
180  *  - All references to moved things are updated.
181  *
182  * Collecting Atoms
183  * ----------------
184  *
185  * Atoms are collected differently from other GC things. They are contained in
186  * a special zone and things in other zones may have pointers to them that are
187  * not recorded in the cross compartment pointer map. Each zone holds a bitmap
188  * with the atoms it might be keeping alive, and atoms are only collected if
189  * they are not included in any zone's atom bitmap. See AtomMarking.cpp for how
190  * this bitmap is managed.
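 *
 * Conceptually (a sketch only; the real bitmap handling in AtomMarking.cpp
 * uses different names):
 *
 *   bool AtomCanBeCollected(JSAtom* atom) {
 *     for (each zone) {
 *       if (that zone's atom bitmap contains atom)
 *         return false;  // some zone may still be keeping it alive
 *     }
 *     return true;  // no zone claims it, so it can be finalized
 *   }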
191  */
192 
193 #include "gc/GC-inl.h"
194 
195 #include "mozilla/DebugOnly.h"
196 #include "mozilla/MacroForEach.h"
197 #include "mozilla/MemoryReporting.h"
198 #include "mozilla/Range.h"
199 #include "mozilla/ScopeExit.h"
200 #include "mozilla/TextUtils.h"
201 #include "mozilla/TimeStamp.h"
202 
203 #include <algorithm>
204 #include <initializer_list>
205 #include <iterator>
206 #include <stdlib.h>
207 #include <string.h>
208 #include <utility>
209 #if !defined(XP_WIN) && !defined(__wasi__)
210 #  include <sys/mman.h>
211 #  include <unistd.h>
212 #endif
213 
214 #include "jsapi.h"  // JS_AbortIfWrongThread
215 #include "jstypes.h"
216 
217 #include "debugger/DebugAPI.h"
218 #include "gc/ClearEdgesTracer.h"
219 #include "gc/FindSCCs.h"
220 #include "gc/FreeOp.h"
221 #include "gc/GCInternals.h"
222 #include "gc/GCLock.h"
223 #include "gc/GCProbes.h"
224 #include "gc/Memory.h"
225 #include "gc/ParallelWork.h"
226 #include "gc/Policy.h"
227 #include "gc/WeakMap.h"
228 #include "jit/ExecutableAllocator.h"
229 #include "jit/JitCode.h"
230 #include "jit/JitRealm.h"
231 #include "jit/ProcessExecutableMemory.h"
232 #include "js/HeapAPI.h"             // JS::GCCellPtr
233 #include "js/Object.h"              // JS::GetClass
234 #include "js/PropertyAndElement.h"  // JS_DefineProperty
235 #include "js/SliceBudget.h"
236 #include "util/DifferentialTesting.h"
237 #include "util/Poison.h"
238 #include "util/WindowsWrapper.h"
239 #include "vm/BigIntType.h"
240 #include "vm/EnvironmentObject.h"
241 #include "vm/GetterSetter.h"
242 #include "vm/HelperThreadState.h"
243 #include "vm/JitActivation.h"
244 #include "vm/JSAtom.h"
245 #include "vm/JSObject.h"
246 #include "vm/JSScript.h"
247 #include "vm/Printer.h"
248 #include "vm/PropMap.h"
249 #include "vm/ProxyObject.h"
250 #include "vm/Realm.h"
251 #include "vm/Shape.h"
252 #include "vm/StringType.h"
253 #include "vm/SymbolType.h"
254 #include "vm/Time.h"
255 #include "vm/TraceLogging.h"
256 #include "vm/WrapperObject.h"
257 
258 #include "gc/Heap-inl.h"
259 #include "gc/Marking-inl.h"
260 #include "gc/Nursery-inl.h"
261 #include "gc/ObjectKind-inl.h"
262 #include "gc/PrivateIterators-inl.h"
263 #include "gc/Zone-inl.h"
264 #include "vm/GeckoProfiler-inl.h"
265 #include "vm/JSContext-inl.h"
266 #include "vm/Realm-inl.h"
267 #include "vm/Stack-inl.h"
268 
269 using namespace js;
270 using namespace js::gc;
271 
272 using mozilla::MakeScopeExit;
273 using mozilla::Maybe;
274 using mozilla::Nothing;
275 using mozilla::Some;
276 using mozilla::TimeDuration;
277 using mozilla::TimeStamp;
278 
279 using JS::AutoGCRooter;
280 
281 const AllocKind gc::slotsToThingKind[] = {
282     // clang-format off
283     /*  0 */ AllocKind::OBJECT0,  AllocKind::OBJECT2,  AllocKind::OBJECT2,  AllocKind::OBJECT4,
284     /*  4 */ AllocKind::OBJECT4,  AllocKind::OBJECT8,  AllocKind::OBJECT8,  AllocKind::OBJECT8,
285     /*  8 */ AllocKind::OBJECT8,  AllocKind::OBJECT12, AllocKind::OBJECT12, AllocKind::OBJECT12,
286     /* 12 */ AllocKind::OBJECT12, AllocKind::OBJECT16, AllocKind::OBJECT16, AllocKind::OBJECT16,
287     /* 16 */ AllocKind::OBJECT16
288     // clang-format on
289 };
290 
291 static_assert(std::size(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
292               "We have defined a slot count for each kind.");
293 
294 #ifdef DEBUG
void GCRuntime::verifyAllChunks() {
296   AutoLockGC lock(this);
297   fullChunks(lock).verifyChunks();
298   availableChunks(lock).verifyChunks();
299   emptyChunks(lock).verifyChunks();
300 }
301 #endif
302 
inline bool GCRuntime::tooManyEmptyChunks(const AutoLockGC& lock) {
304   return emptyChunks(lock).count() > tunables.minEmptyChunkCount(lock);
305 }
306 
ChunkPool GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock) {
308   MOZ_ASSERT(emptyChunks(lock).verify());
309   MOZ_ASSERT(tunables.minEmptyChunkCount(lock) <=
310              tunables.maxEmptyChunkCount());
311 
312   ChunkPool expired;
313   while (tooManyEmptyChunks(lock)) {
314     TenuredChunk* chunk = emptyChunks(lock).pop();
315     prepareToFreeChunk(chunk->info);
316     expired.push(chunk);
317   }
318 
319   MOZ_ASSERT(expired.verify());
320   MOZ_ASSERT(emptyChunks(lock).verify());
321   MOZ_ASSERT(emptyChunks(lock).count() <= tunables.maxEmptyChunkCount());
322   MOZ_ASSERT(emptyChunks(lock).count() <= tunables.minEmptyChunkCount(lock));
323   return expired;
324 }
325 
static void FreeChunkPool(ChunkPool& pool) {
327   for (ChunkPool::Iter iter(pool); !iter.done();) {
328     TenuredChunk* chunk = iter.get();
329     iter.next();
330     pool.remove(chunk);
331     MOZ_ASSERT(chunk->unused());
332     UnmapPages(static_cast<void*>(chunk), ChunkSize);
333   }
334   MOZ_ASSERT(pool.count() == 0);
335 }
336 
void GCRuntime::freeEmptyChunks(const AutoLockGC& lock) {
338   FreeChunkPool(emptyChunks(lock));
339 }
340 
inline void GCRuntime::prepareToFreeChunk(TenuredChunkInfo& info) {
342   MOZ_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
343   numArenasFreeCommitted -= info.numArenasFreeCommitted;
344   stats().count(gcstats::COUNT_DESTROY_CHUNK);
345 #ifdef DEBUG
346   /*
347    * Let FreeChunkPool detect a missing prepareToFreeChunk call before it
   * frees the chunk.
349    */
350   info.numArenasFreeCommitted = 0;
351 #endif
352 }
353 
void GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock) {
355   MOZ_ASSERT(arena->allocated());
356   MOZ_ASSERT(!arena->onDelayedMarkingList());
357 
358   arena->zone->gcHeapSize.removeGCArena();
359   arena->release(lock);
360   arena->chunk()->releaseArena(this, arena, lock);
361 }
362 
GCRuntime::GCRuntime(JSRuntime* rt)
364     : rt(rt),
365       atomsZone(nullptr),
366       systemZone(nullptr),
367       heapState_(JS::HeapState::Idle),
368       stats_(this),
369       marker(rt),
370       barrierTracer(rt),
371       sweepingTracer(rt),
372       heapSize(nullptr),
373       helperThreadRatio(TuningDefaults::HelperThreadRatio),
374       maxHelperThreads(TuningDefaults::MaxHelperThreads),
375       helperThreadCount(1),
376       createBudgetCallback(nullptr),
377       rootsHash(256),
378       nextCellUniqueId_(LargestTaggedNullCellPointer +
379                         1),  // Ensure disjoint from null tagged pointers.
380       numArenasFreeCommitted(0),
381       verifyPreData(nullptr),
382       lastGCStartTime_(ReallyNow()),
383       lastGCEndTime_(ReallyNow()),
384       incrementalGCEnabled(TuningDefaults::IncrementalGCEnabled),
385       perZoneGCEnabled(TuningDefaults::PerZoneGCEnabled),
386       numActiveZoneIters(0),
387       cleanUpEverything(false),
388       grayBitsValid(true),
389       majorGCTriggerReason(JS::GCReason::NO_REASON),
390       minorGCNumber(0),
391       majorGCNumber(0),
392       number(0),
393       sliceNumber(0),
394       isFull(false),
395       incrementalState(gc::State::NotActive),
396       initialState(gc::State::NotActive),
397       useZeal(false),
398       lastMarkSlice(false),
399       safeToYield(true),
400       markOnBackgroundThreadDuringSweeping(false),
401       sweepOnBackgroundThread(false),
402 #ifdef DEBUG
403       hadShutdownGC(false),
404 #endif
405       requestSliceAfterBackgroundTask(false),
406       lifoBlocksToFree((size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
407       lifoBlocksToFreeAfterMinorGC(
408           (size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
409       sweepGroupIndex(0),
410       sweepGroups(nullptr),
411       currentSweepGroup(nullptr),
412       sweepZone(nullptr),
413       abortSweepAfterCurrentGroup(false),
414       sweepMarkResult(IncrementalProgress::NotFinished),
415       startedCompacting(false),
416       zonesCompacted(0),
417 #ifdef DEBUG
418       relocatedArenasToRelease(nullptr),
419 #endif
420 #ifdef JS_GC_ZEAL
421       markingValidator(nullptr),
422 #endif
423       defaultTimeBudgetMS_(TuningDefaults::DefaultTimeBudgetMS),
424       incrementalAllowed(true),
425       compactingEnabled(TuningDefaults::CompactingEnabled),
426       rootsRemoved(false),
427 #ifdef JS_GC_ZEAL
428       zealModeBits(0),
429       zealFrequency(0),
430       nextScheduled(0),
431       deterministicOnly(false),
432       zealSliceBudget(0),
433       selectedForMarking(rt),
434 #endif
435       fullCompartmentChecks(false),
436       gcCallbackDepth(0),
437       alwaysPreserveCode(false),
438       lowMemoryState(false),
439       lock(mutexid::GCLock),
440       allocTask(this, emptyChunks_.ref()),
441       unmarkTask(this),
442       markTask(this),
443       sweepTask(this),
444       freeTask(this),
445       decommitTask(this),
446       nursery_(this),
447       storeBuffer_(rt, nursery()) {
448   marker.setIncrementalGCEnabled(incrementalGCEnabled);
449 }
450 
451 using CharRange = mozilla::Range<const char>;
452 using CharRangeVector = Vector<CharRange, 0, SystemAllocPolicy>;
453 
static bool SplitStringBy(CharRange text, char delimiter,
455                           CharRangeVector* result) {
456   auto start = text.begin();
457   for (auto ptr = start; ptr != text.end(); ptr++) {
458     if (*ptr == delimiter) {
459       if (!result->emplaceBack(start, ptr)) {
460         return false;
461       }
462       start = ptr + 1;
463     }
464   }
465 
466   return result->emplaceBack(start, text.end());
467 }
468 
static bool ParseTimeDuration(CharRange text, TimeDuration* durationOut) {
470   const char* str = text.begin().get();
471   char* end;
472   *durationOut = TimeDuration::FromMilliseconds(strtol(str, &end, 10));
473   return str != end && end == text.end().get();
474 }
475 
static void PrintProfileHelpAndExit(const char* envName, const char* helpText) {
477   fprintf(stderr, "%s=N[,(main|all)]\n", envName);
478   fprintf(stderr, "%s", helpText);
479   exit(0);
480 }
481 
void js::gc::ReadProfileEnv(const char* envName, const char* helpText,
483                             bool* enableOut, bool* workersOut,
484                             TimeDuration* thresholdOut) {
485   *enableOut = false;
486   *workersOut = false;
487   *thresholdOut = TimeDuration();
488 
489   const char* env = getenv(envName);
490   if (!env) {
491     return;
492   }
493 
494   if (strcmp(env, "help") == 0) {
495     PrintProfileHelpAndExit(envName, helpText);
496   }
497 
498   CharRangeVector parts;
499   auto text = CharRange(env, strlen(env));
500   if (!SplitStringBy(text, ',', &parts)) {
501     MOZ_CRASH("OOM parsing environment variable");
502   }
503 
504   if (parts.length() == 0 || parts.length() > 2) {
505     PrintProfileHelpAndExit(envName, helpText);
506   }
507 
508   *enableOut = true;
509 
510   if (!ParseTimeDuration(parts[0], thresholdOut)) {
511     PrintProfileHelpAndExit(envName, helpText);
512   }
513 
514   if (parts.length() == 2) {
515     const char* threads = parts[1].begin().get();
516     if (strcmp(threads, "all") == 0) {
517       *workersOut = true;
518     } else if (strcmp(threads, "main") != 0) {
519       PrintProfileHelpAndExit(envName, helpText);
520     }
521   }
522 }
523 
bool js::gc::ShouldPrintProfile(JSRuntime* runtime, bool enable,
525                                 bool profileWorkers, TimeDuration threshold,
526                                 TimeDuration duration) {
527   return enable && (runtime->isMainRuntime() || profileWorkers) &&
528          duration >= threshold;
529 }
530 
531 #ifdef JS_GC_ZEAL
532 
void GCRuntime::getZealBits(uint32_t* zealBits, uint32_t* frequency,
534                             uint32_t* scheduled) {
535   *zealBits = zealModeBits;
536   *frequency = zealFrequency;
537   *scheduled = nextScheduled;
538 }
539 
540 const char gc::ZealModeHelpText[] =
541     "  Specifies how zealous the garbage collector should be. Some of these "
542     "modes can\n"
543     "  be set simultaneously, by passing multiple level options, e.g. \"2;4\" "
544     "will activate\n"
545     "  both modes 2 and 4. Modes can be specified by name or number.\n"
546     "  \n"
547     "  Values:\n"
548     "    0:  (None) Normal amount of collection (resets all modes)\n"
549     "    1:  (RootsChange) Collect when roots are added or removed\n"
    "    2:  (Alloc) Collect every N allocations (default: 100)\n"
551     "    4:  (VerifierPre) Verify pre write barriers between instructions\n"
552     "    6:  (YieldBeforeRootMarking) Incremental GC in two slices that yields "
553     "before root marking\n"
554     "    7:  (GenerationalGC) Collect the nursery every N nursery allocations\n"
555     "    8:  (YieldBeforeMarking) Incremental GC in two slices that yields "
556     "between\n"
557     "        the root marking and marking phases\n"
558     "    9:  (YieldBeforeSweeping) Incremental GC in two slices that yields "
559     "between\n"
560     "        the marking and sweeping phases\n"
561     "    10: (IncrementalMultipleSlices) Incremental GC in many slices\n"
562     "    11: (IncrementalMarkingValidator) Verify incremental marking\n"
563     "    12: (ElementsBarrier) Use the individual element post-write barrier\n"
564     "        regardless of elements size\n"
565     "    13: (CheckHashTablesOnMinorGC) Check internal hashtables on minor GC\n"
566     "    14: (Compact) Perform a shrinking collection every N allocations\n"
567     "    15: (CheckHeapAfterGC) Walk the heap to check its integrity after "
568     "every GC\n"
569     "    16: (CheckNursery) Check nursery integrity on minor GC\n"
570     "    17: (YieldBeforeSweepingAtoms) Incremental GC in two slices that "
571     "yields\n"
572     "        before sweeping the atoms table\n"
573     "    18: (CheckGrayMarking) Check gray marking invariants after every GC\n"
574     "    19: (YieldBeforeSweepingCaches) Incremental GC in two slices that "
575     "yields\n"
576     "        before sweeping weak caches\n"
577     "    21: (YieldBeforeSweepingObjects) Incremental GC in two slices that "
578     "yields\n"
579     "        before sweeping foreground finalized objects\n"
580     "    22: (YieldBeforeSweepingNonObjects) Incremental GC in two slices that "
581     "yields\n"
582     "        before sweeping non-object GC things\n"
583     "    23: (YieldBeforeSweepingPropMapTrees) Incremental GC in two slices "
584     "that "
585     "yields\n"
586     "        before sweeping shape trees\n"
587     "    24: (CheckWeakMapMarking) Check weak map marking invariants after "
588     "every GC\n"
589     "    25: (YieldWhileGrayMarking) Incremental GC in two slices that yields\n"
590     "        during gray marking\n";
591 
592 // The set of zeal modes that control incremental slices. These modes are
593 // mutually exclusive.
594 static const mozilla::EnumSet<ZealMode> IncrementalSliceZealModes = {
595     ZealMode::YieldBeforeRootMarking,
596     ZealMode::YieldBeforeMarking,
597     ZealMode::YieldBeforeSweeping,
598     ZealMode::IncrementalMultipleSlices,
599     ZealMode::YieldBeforeSweepingAtoms,
600     ZealMode::YieldBeforeSweepingCaches,
601     ZealMode::YieldBeforeSweepingObjects,
602     ZealMode::YieldBeforeSweepingNonObjects,
603     ZealMode::YieldBeforeSweepingPropMapTrees};
604 
void GCRuntime::setZeal(uint8_t zeal, uint32_t frequency) {
606   MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));
607 
608   if (verifyPreData) {
609     VerifyBarriers(rt, PreBarrierVerifier);
610   }
611 
612   if (zeal == 0) {
613     if (hasZealMode(ZealMode::GenerationalGC)) {
614       evictNursery(JS::GCReason::DEBUG_GC);
615       nursery().leaveZealMode();
616     }
617 
618     if (isIncrementalGCInProgress()) {
619       finishGC(JS::GCReason::DEBUG_GC);
620     }
621   }
622 
623   ZealMode zealMode = ZealMode(zeal);
624   if (zealMode == ZealMode::GenerationalGC) {
625     evictNursery(JS::GCReason::DEBUG_GC);
626     nursery().enterZealMode();
627   }
628 
629   // Some modes are mutually exclusive. If we're setting one of those, we
630   // first reset all of them.
631   if (IncrementalSliceZealModes.contains(zealMode)) {
632     for (auto mode : IncrementalSliceZealModes) {
633       clearZealMode(mode);
634     }
635   }
636 
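  // Only modes from Alloc onwards are triggered by an allocation countdown
  // (via nextScheduled); modes before that are not scheduled.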
637   bool schedule = zealMode >= ZealMode::Alloc;
638   if (zeal != 0) {
639     zealModeBits |= 1 << unsigned(zeal);
640   } else {
641     zealModeBits = 0;
642   }
643   zealFrequency = frequency;
644   nextScheduled = schedule ? frequency : 0;
645 }
646 
void GCRuntime::unsetZeal(uint8_t zeal) {
648   MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));
649   ZealMode zealMode = ZealMode(zeal);
650 
651   if (!hasZealMode(zealMode)) {
652     return;
653   }
654 
655   if (verifyPreData) {
656     VerifyBarriers(rt, PreBarrierVerifier);
657   }
658 
659   if (zealMode == ZealMode::GenerationalGC) {
660     evictNursery(JS::GCReason::DEBUG_GC);
661     nursery().leaveZealMode();
662   }
663 
664   clearZealMode(zealMode);
665 
666   if (zealModeBits == 0) {
667     if (isIncrementalGCInProgress()) {
668       finishGC(JS::GCReason::DEBUG_GC);
669     }
670 
671     zealFrequency = 0;
672     nextScheduled = 0;
673   }
674 }
675 
void GCRuntime::setNextScheduled(uint32_t count) { nextScheduled = count; }
677 
static bool ParseZealModeName(CharRange text, uint32_t* modeOut) {
679   struct ModeInfo {
680     const char* name;
681     size_t length;
682     uint32_t value;
683   };
684 
685   static const ModeInfo zealModes[] = {{"None", 0},
686 #  define ZEAL_MODE(name, value) {#  name, strlen(#  name), value},
687                                        JS_FOR_EACH_ZEAL_MODE(ZEAL_MODE)
688 #  undef ZEAL_MODE
689   };
690 
691   for (auto mode : zealModes) {
692     if (text.length() == mode.length &&
693         memcmp(text.begin().get(), mode.name, mode.length) == 0) {
694       *modeOut = mode.value;
695       return true;
696     }
697   }
698 
699   return false;
700 }
701 
static bool ParseZealModeNumericParam(CharRange text, uint32_t* paramOut) {
703   if (text.length() == 0) {
704     return false;
705   }
706 
707   for (auto c : text) {
708     if (!mozilla::IsAsciiDigit(c)) {
709       return false;
710     }
711   }
712 
713   *paramOut = atoi(text.begin().get());
714   return true;
715 }
716 
static bool PrintZealHelpAndFail() {
718   fprintf(stderr, "Format: JS_GC_ZEAL=level(;level)*[,N]\n");
719   fputs(ZealModeHelpText, stderr);
720   return false;
721 }
722 
bool GCRuntime::parseAndSetZeal(const char* str) {
724   // Set the zeal mode from a string consisting of one or more mode specifiers
725   // separated by ';', optionally followed by a ',' and the trigger frequency.
  // The mode specifiers can be a mode name or its number.
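  // For example, "10,100" and "IncrementalMultipleSlices;CheckHeapAfterGC,100"
  // are both valid: the latter selects two modes by name, with frequency 100.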
727 
728   auto text = CharRange(str, strlen(str));
729 
730   CharRangeVector parts;
731   if (!SplitStringBy(text, ',', &parts)) {
732     return false;
733   }
734 
735   if (parts.length() == 0 || parts.length() > 2) {
736     return PrintZealHelpAndFail();
737   }
738 
739   uint32_t frequency = JS_DEFAULT_ZEAL_FREQ;
740   if (parts.length() == 2 && !ParseZealModeNumericParam(parts[1], &frequency)) {
741     return PrintZealHelpAndFail();
742   }
743 
744   CharRangeVector modes;
745   if (!SplitStringBy(parts[0], ';', &modes)) {
746     return false;
747   }
748 
749   for (const auto& descr : modes) {
750     uint32_t mode;
751     if (!ParseZealModeName(descr, &mode) &&
752         !(ParseZealModeNumericParam(descr, &mode) &&
753           mode <= unsigned(ZealMode::Limit))) {
754       return PrintZealHelpAndFail();
755     }
756 
757     setZeal(mode, frequency);
758   }
759 
760   return true;
761 }
762 
const char* js::gc::AllocKindName(AllocKind kind) {
764   static const char* const names[] = {
765 #  define EXPAND_THING_NAME(allocKind, _1, _2, _3, _4, _5, _6) #  allocKind,
766       FOR_EACH_ALLOCKIND(EXPAND_THING_NAME)
767 #  undef EXPAND_THING_NAME
768   };
769   static_assert(std::size(names) == AllocKindCount,
770                 "names array should have an entry for every AllocKind");
771 
772   size_t i = size_t(kind);
773   MOZ_ASSERT(i < std::size(names));
774   return names[i];
775 }
776 
void js::gc::DumpArenaInfo() {
778   fprintf(stderr, "Arena header size: %zu\n\n", ArenaHeaderSize);
779 
780   fprintf(stderr, "GC thing kinds:\n");
781   fprintf(stderr, "%25s %8s %8s %8s\n",
782           "AllocKind:", "Size:", "Count:", "Padding:");
783   for (auto kind : AllAllocKinds()) {
784     fprintf(stderr, "%25s %8zu %8zu %8zu\n", AllocKindName(kind),
785             Arena::thingSize(kind), Arena::thingsPerArena(kind),
786             Arena::firstThingOffset(kind) - ArenaHeaderSize);
787   }
788 }
789 
790 #endif  // JS_GC_ZEAL
791 
bool GCRuntime::init(uint32_t maxbytes) {
793   MOZ_ASSERT(SystemPageSize());
794   Arena::checkLookupTables();
795 
796   {
797     AutoLockGCBgAlloc lock(this);
798 
799     MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
800 
801     const char* size = getenv("JSGC_MARK_STACK_LIMIT");
802     if (size) {
803       setMarkStackLimit(atoi(size), lock);
804     }
805 
806     if (!nursery().init(lock)) {
807       return false;
808     }
809 
810     const char* pretenureThresholdStr = getenv("JSGC_PRETENURE_THRESHOLD");
811     if (pretenureThresholdStr && pretenureThresholdStr[0]) {
812       char* last;
813       long pretenureThreshold = strtol(pretenureThresholdStr, &last, 10);
814       if (last[0] || !tunables.setParameter(JSGC_PRETENURE_THRESHOLD,
815                                             pretenureThreshold, lock)) {
816         fprintf(stderr, "Invalid value for JSGC_PRETENURE_THRESHOLD: %s\n",
817                 pretenureThresholdStr);
818       }
819     }
820   }
821 
822 #ifdef JS_GC_ZEAL
823   const char* zealSpec = getenv("JS_GC_ZEAL");
824   if (zealSpec && zealSpec[0] && !parseAndSetZeal(zealSpec)) {
825     return false;
826   }
827 #endif
828 
829   if (!marker.init() || !initSweepActions()) {
830     return false;
831   }
832 
833   gcprobes::Init(this);
834 
835   updateHelperThreadCount();
836 
837   return true;
838 }
839 
void GCRuntime::finish() {
841   MOZ_ASSERT(inPageLoadCount == 0);
842 
843   // Wait for nursery background free to end and disable it to release memory.
844   if (nursery().isEnabled()) {
845     nursery().disable();
846   }
847 
848   // Wait until the background finalization and allocation stops and the
849   // helper thread shuts down before we forcefully release any remaining GC
850   // memory.
851   sweepTask.join();
852   freeTask.join();
853   allocTask.cancelAndWait();
854   decommitTask.cancelAndWait();
855 
856 #ifdef JS_GC_ZEAL
857   // Free memory associated with GC verification.
858   finishVerifier();
859 #endif
860 
861   // Delete all remaining zones.
862   if (rt->gcInitialized) {
863     for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
864       AutoSetThreadIsSweeping threadIsSweeping(zone);
865       for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
866         for (RealmsInCompartmentIter realm(comp); !realm.done(); realm.next()) {
867           js_delete(realm.get());
868         }
869         comp->realms().clear();
870         js_delete(comp.get());
871       }
872       zone->compartments().clear();
873       js_delete(zone.get());
874     }
875   }
876 
877   zones().clear();
878 
879   FreeChunkPool(fullChunks_.ref());
880   FreeChunkPool(availableChunks_.ref());
881   FreeChunkPool(emptyChunks_.ref());
882 
883   gcprobes::Finish(this);
884 
885   nursery().printTotalProfileTimes();
886   stats().printTotalProfileTimes();
887 }
888 
889 #ifdef DEBUG
void GCRuntime::assertNoPermanentSharedThings() {
891   MOZ_ASSERT(atomsZone->cellIterUnsafe<JSAtom>(AllocKind::ATOM).done());
892   MOZ_ASSERT(
893       atomsZone->cellIterUnsafe<JSAtom>(AllocKind::FAT_INLINE_ATOM).done());
894   MOZ_ASSERT(atomsZone->cellIterUnsafe<JS::Symbol>(AllocKind::SYMBOL).done());
895 }
896 #endif
897 
void GCRuntime::freezePermanentSharedThings() {
899   // This is called just after permanent atoms and well-known symbols have been
900   // created. At this point all existing atoms and symbols are permanent. Move
901   // the arenas containing these things out of atoms zone arena lists until
902   // shutdown. This has two benefits:
903   //
904   //  - since we won't sweep them, we don't need to mark them at the start of
905   //    every GC.
906   //  - shared things are always marked so we don't have to check whether a
907   //    thing is shared when marking
908 
909   MOZ_ASSERT(atomsZone);
910   MOZ_ASSERT(zones().empty());
911 
912   atomsZone->arenas.clearFreeLists();
913   freezeAtomsZoneArenas<JSAtom>(AllocKind::ATOM, permanentAtoms.ref());
914   freezeAtomsZoneArenas<JSAtom>(AllocKind::FAT_INLINE_ATOM,
915                                 permanentFatInlineAtoms.ref());
916   freezeAtomsZoneArenas<JS::Symbol>(AllocKind::SYMBOL,
917                                     permanentWellKnownSymbols.ref());
918 }
919 
920 template <typename T>
void GCRuntime::freezeAtomsZoneArenas(AllocKind kind, ArenaList& arenaList) {
922   for (auto thing = atomsZone->cellIterUnsafe<T>(kind); !thing.done();
923        thing.next()) {
924     MOZ_ASSERT(thing->isPermanentAndMayBeShared());
925     thing->asTenured().markBlack();
926   }
927 
928   arenaList = std::move(atomsZone->arenas.arenaList(kind));
929 }
930 
void GCRuntime::restorePermanentSharedThings() {
932   // Move the arenas containing permanent atoms that were removed by
933   // freezePermanentSharedThings() back to the atoms zone arena lists so we can
934   // collect them.
935 
936   MOZ_ASSERT(heapState() == JS::HeapState::MajorCollecting);
937 
938   restoreAtomsZoneArenas(AllocKind::ATOM, permanentAtoms.ref());
939   restoreAtomsZoneArenas(AllocKind::FAT_INLINE_ATOM,
940                          permanentFatInlineAtoms.ref());
941   restoreAtomsZoneArenas(AllocKind::SYMBOL, permanentWellKnownSymbols.ref());
942 }
943 
void GCRuntime::restoreAtomsZoneArenas(AllocKind kind, ArenaList& arenaList) {
945   atomsZone->arenas.arenaList(kind).insertListWithCursorAtEnd(arenaList);
946 }
947 
bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value) {
949   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
950   waitBackgroundSweepEnd();
951   AutoLockGC lock(this);
952   return setParameter(key, value, lock);
953 }
954 
bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value,
956                              AutoLockGC& lock) {
957   switch (key) {
958     case JSGC_SLICE_TIME_BUDGET_MS:
959       defaultTimeBudgetMS_ = value;
960       break;
961     case JSGC_MARK_STACK_LIMIT:
962       if (value == 0) {
963         return false;
964       }
965       setMarkStackLimit(value, lock);
966       break;
967     case JSGC_INCREMENTAL_GC_ENABLED:
968       setIncrementalGCEnabled(value != 0);
969       break;
970     case JSGC_PER_ZONE_GC_ENABLED:
971       perZoneGCEnabled = value != 0;
972       break;
973     case JSGC_COMPACTING_ENABLED:
974       compactingEnabled = value != 0;
975       break;
976     case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
977       marker.incrementalWeakMapMarkingEnabled = value != 0;
978       break;
979     case JSGC_HELPER_THREAD_RATIO:
980       if (rt->parentRuntime) {
981         // Don't allow this to be set for worker runtimes.
982         return false;
983       }
984       if (value == 0) {
985         return false;
986       }
987       helperThreadRatio = double(value) / 100.0;
988       updateHelperThreadCount();
989       break;
990     case JSGC_MAX_HELPER_THREADS:
991       if (rt->parentRuntime) {
992         // Don't allow this to be set for worker runtimes.
993         return false;
994       }
995       if (value == 0) {
996         return false;
997       }
998       maxHelperThreads = value;
999       updateHelperThreadCount();
1000       break;
1001     default:
1002       if (!tunables.setParameter(key, value, lock)) {
1003         return false;
1004       }
1005       updateAllGCStartThresholds(lock);
1006   }
1007 
1008   return true;
1009 }
1010 
void GCRuntime::resetParameter(JSGCParamKey key) {
1012   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1013   waitBackgroundSweepEnd();
1014   AutoLockGC lock(this);
1015   resetParameter(key, lock);
1016 }
1017 
void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
1019   switch (key) {
1020     case JSGC_SLICE_TIME_BUDGET_MS:
1021       defaultTimeBudgetMS_ = TuningDefaults::DefaultTimeBudgetMS;
1022       break;
1023     case JSGC_MARK_STACK_LIMIT:
1024       setMarkStackLimit(MarkStack::DefaultCapacity, lock);
1025       break;
1026     case JSGC_INCREMENTAL_GC_ENABLED:
1027       setIncrementalGCEnabled(TuningDefaults::IncrementalGCEnabled);
1028       break;
1029     case JSGC_PER_ZONE_GC_ENABLED:
1030       perZoneGCEnabled = TuningDefaults::PerZoneGCEnabled;
1031       break;
1032     case JSGC_COMPACTING_ENABLED:
1033       compactingEnabled = TuningDefaults::CompactingEnabled;
1034       break;
1035     case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
1036       marker.incrementalWeakMapMarkingEnabled =
1037           TuningDefaults::IncrementalWeakMapMarkingEnabled;
1038       break;
1039     case JSGC_HELPER_THREAD_RATIO:
1040       if (rt->parentRuntime) {
1041         return;
1042       }
1043       helperThreadRatio = TuningDefaults::HelperThreadRatio;
1044       updateHelperThreadCount();
1045       break;
1046     case JSGC_MAX_HELPER_THREADS:
1047       if (rt->parentRuntime) {
1048         return;
1049       }
1050       maxHelperThreads = TuningDefaults::MaxHelperThreads;
1051       updateHelperThreadCount();
1052       break;
1053     default:
1054       tunables.resetParameter(key, lock);
1055       updateAllGCStartThresholds(lock);
1056   }
1057 }
1058 
uint32_t GCRuntime::getParameter(JSGCParamKey key) {
1060   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1061   AutoLockGC lock(this);
1062   return getParameter(key, lock);
1063 }
1064 
uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
1066   switch (key) {
1067     case JSGC_MAX_BYTES:
1068       return uint32_t(tunables.gcMaxBytes());
1069     case JSGC_MIN_NURSERY_BYTES:
1070       MOZ_ASSERT(tunables.gcMinNurseryBytes() < UINT32_MAX);
1071       return uint32_t(tunables.gcMinNurseryBytes());
1072     case JSGC_MAX_NURSERY_BYTES:
1073       MOZ_ASSERT(tunables.gcMaxNurseryBytes() < UINT32_MAX);
1074       return uint32_t(tunables.gcMaxNurseryBytes());
1075     case JSGC_BYTES:
1076       return uint32_t(heapSize.bytes());
1077     case JSGC_NURSERY_BYTES:
1078       return nursery().capacity();
1079     case JSGC_NUMBER:
1080       return uint32_t(number);
1081     case JSGC_MAJOR_GC_NUMBER:
1082       return uint32_t(majorGCNumber);
1083     case JSGC_MINOR_GC_NUMBER:
1084       return uint32_t(minorGCNumber);
1085     case JSGC_INCREMENTAL_GC_ENABLED:
1086       return incrementalGCEnabled;
1087     case JSGC_PER_ZONE_GC_ENABLED:
1088       return perZoneGCEnabled;
1089     case JSGC_UNUSED_CHUNKS:
1090       return uint32_t(emptyChunks(lock).count());
1091     case JSGC_TOTAL_CHUNKS:
1092       return uint32_t(fullChunks(lock).count() + availableChunks(lock).count() +
1093                       emptyChunks(lock).count());
1094     case JSGC_SLICE_TIME_BUDGET_MS:
1095       MOZ_RELEASE_ASSERT(defaultTimeBudgetMS_ >= 0);
1096       MOZ_RELEASE_ASSERT(defaultTimeBudgetMS_ <= UINT32_MAX);
1097       return uint32_t(defaultTimeBudgetMS_);
1098     case JSGC_MARK_STACK_LIMIT:
1099       return marker.maxCapacity();
1100     case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
1101       return tunables.highFrequencyThreshold().ToMilliseconds();
1102     case JSGC_SMALL_HEAP_SIZE_MAX:
1103       return tunables.smallHeapSizeMaxBytes() / 1024 / 1024;
1104     case JSGC_LARGE_HEAP_SIZE_MIN:
1105       return tunables.largeHeapSizeMinBytes() / 1024 / 1024;
1106     case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
1107       return uint32_t(tunables.highFrequencySmallHeapGrowth() * 100);
1108     case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
1109       return uint32_t(tunables.highFrequencyLargeHeapGrowth() * 100);
1110     case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
1111       return uint32_t(tunables.lowFrequencyHeapGrowth() * 100);
1112     case JSGC_ALLOCATION_THRESHOLD:
1113       return tunables.gcZoneAllocThresholdBase() / 1024 / 1024;
1114     case JSGC_SMALL_HEAP_INCREMENTAL_LIMIT:
1115       return uint32_t(tunables.smallHeapIncrementalLimit() * 100);
1116     case JSGC_LARGE_HEAP_INCREMENTAL_LIMIT:
1117       return uint32_t(tunables.largeHeapIncrementalLimit() * 100);
1118     case JSGC_MIN_EMPTY_CHUNK_COUNT:
1119       return tunables.minEmptyChunkCount(lock);
1120     case JSGC_MAX_EMPTY_CHUNK_COUNT:
1121       return tunables.maxEmptyChunkCount();
1122     case JSGC_COMPACTING_ENABLED:
1123       return compactingEnabled;
1124     case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
1125       return marker.incrementalWeakMapMarkingEnabled;
1126     case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION:
1127       return tunables.nurseryFreeThresholdForIdleCollection();
1128     case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT:
1129       return uint32_t(tunables.nurseryFreeThresholdForIdleCollectionFraction() *
1130                       100.0f);
1131     case JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS:
1132       return tunables.nurseryTimeoutForIdleCollection().ToMilliseconds();
1133     case JSGC_PRETENURE_THRESHOLD:
1134       return uint32_t(tunables.pretenureThreshold() * 100);
1135     case JSGC_PRETENURE_GROUP_THRESHOLD:
1136       return tunables.pretenureGroupThreshold();
1137     case JSGC_PRETENURE_STRING_THRESHOLD:
1138       return uint32_t(tunables.pretenureStringThreshold() * 100);
1139     case JSGC_STOP_PRETENURE_STRING_THRESHOLD:
1140       return uint32_t(tunables.stopPretenureStringThreshold() * 100);
1141     case JSGC_MIN_LAST_DITCH_GC_PERIOD:
1142       return tunables.minLastDitchGCPeriod().ToSeconds();
1143     case JSGC_ZONE_ALLOC_DELAY_KB:
1144       return tunables.zoneAllocDelayBytes() / 1024;
1145     case JSGC_MALLOC_THRESHOLD_BASE:
1146       return tunables.mallocThresholdBase() / 1024 / 1024;
1147     case JSGC_URGENT_THRESHOLD_MB:
1148       return tunables.urgentThresholdBytes() / 1024 / 1024;
1149     case JSGC_CHUNK_BYTES:
1150       return ChunkSize;
1151     case JSGC_HELPER_THREAD_RATIO:
1152       MOZ_ASSERT(helperThreadRatio > 0.0);
1153       return uint32_t(helperThreadRatio * 100.0);
1154     case JSGC_MAX_HELPER_THREADS:
1155       MOZ_ASSERT(maxHelperThreads <= UINT32_MAX);
1156       return maxHelperThreads;
1157     case JSGC_HELPER_THREAD_COUNT:
1158       return helperThreadCount;
1159     case JSGC_SYSTEM_PAGE_SIZE_KB:
1160       return SystemPageSize() / 1024;
1161     default:
1162       MOZ_CRASH("Unknown parameter key");
1163   }
1164 }
1165 
void GCRuntime::setMarkStackLimit(size_t limit, AutoLockGC& lock) {
1167   MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
1168   AutoUnlockGC unlock(lock);
1169   AutoStopVerifyingBarriers pauseVerification(rt, false);
1170   marker.setMaxCapacity(limit);
1171 }
1172 
void GCRuntime::setIncrementalGCEnabled(bool enabled) {
1174   incrementalGCEnabled = enabled;
1175   marker.setIncrementalGCEnabled(enabled);
1176 }
1177 
void GCRuntime::updateHelperThreadCount() {
1179   if (!CanUseExtraThreads()) {
1180     // startTask will run the work on the main thread if the count is 1.
1181     MOZ_ASSERT(helperThreadCount == 1);
1182     return;
1183   }
1184 
1185   // The count of helper threads used for GC tasks is process wide. Don't set it
1186   // for worker JS runtimes.
1187   if (rt->parentRuntime) {
1188     helperThreadCount = rt->parentRuntime->gc.helperThreadCount;
1189     return;
1190   }
1191 
1192   double cpuCount = GetHelperThreadCPUCount();
1193   size_t target = size_t(cpuCount * helperThreadRatio.ref());
1194   target = std::clamp(target, size_t(1), maxHelperThreads.ref());
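  // For example, with a ratio of 0.5 on an 8-CPU machine the target is 4
  // threads, clamped to the range [1, maxHelperThreads].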
1195 
1196   AutoLockHelperThreadState lock;
1197 
1198   // Attempt to create extra threads if possible. This is not supported when
1199   // using an external thread pool.
1200   (void)HelperThreadState().ensureThreadCount(target, lock);
1201 
1202   helperThreadCount = std::min(target, GetHelperThreadCount());
1203   HelperThreadState().setGCParallelThreadCount(helperThreadCount, lock);
1204 }
1205 
bool GCRuntime::addBlackRootsTracer(JSTraceDataOp traceOp, void* data) {
1207   AssertHeapIsIdle();
1208   return !!blackRootTracers.ref().append(
1209       Callback<JSTraceDataOp>(traceOp, data));
1210 }
1211 
void GCRuntime::removeBlackRootsTracer(JSTraceDataOp traceOp, void* data) {
1213   // Can be called from finalizers
1214   for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
1215     Callback<JSTraceDataOp>* e = &blackRootTracers.ref()[i];
1216     if (e->op == traceOp && e->data == data) {
1217       blackRootTracers.ref().erase(e);
1218       break;
1219     }
1220   }
1221 }
1222 
void GCRuntime::setGrayRootsTracer(JSGrayRootsTracer traceOp, void* data) {
1224   AssertHeapIsIdle();
1225   grayRootTracer.ref() = {traceOp, data};
1226 }
1227 
void GCRuntime::clearBlackAndGrayRootTracers() {
1229   MOZ_ASSERT(rt->isBeingDestroyed());
1230   blackRootTracers.ref().clear();
1231   setGrayRootsTracer(nullptr, nullptr);
1232 }
1233 
void GCRuntime::setGCCallback(JSGCCallback callback, void* data) {
1235   gcCallback.ref() = {callback, data};
1236 }
1237 
void GCRuntime::callGCCallback(JSGCStatus status, JS::GCReason reason) const {
1239   const auto& callback = gcCallback.ref();
1240   MOZ_ASSERT(callback.op);
1241   callback.op(rt->mainContextFromOwnThread(), status, reason, callback.data);
1242 }
1243 
void GCRuntime::setObjectsTenuredCallback(JSObjectsTenuredCallback callback,
1245                                           void* data) {
1246   tenuredCallback.ref() = {callback, data};
1247 }
1248 
void GCRuntime::callObjectsTenuredCallback() {
1250   JS::AutoSuppressGCAnalysis nogc;
1251   const auto& callback = tenuredCallback.ref();
1252   if (callback.op) {
1253     callback.op(rt->mainContextFromOwnThread(), callback.data);
1254   }
1255 }
1256 
bool GCRuntime::addFinalizeCallback(JSFinalizeCallback callback, void* data) {
1258   return finalizeCallbacks.ref().append(
1259       Callback<JSFinalizeCallback>(callback, data));
1260 }
1261 
1262 template <typename F>
static void EraseCallback(CallbackVector<F>& vector, F callback) {
1264   for (Callback<F>* p = vector.begin(); p != vector.end(); p++) {
1265     if (p->op == callback) {
1266       vector.erase(p);
1267       return;
1268     }
1269   }
1270 }
1271 
void GCRuntime::removeFinalizeCallback(JSFinalizeCallback callback) {
1273   EraseCallback(finalizeCallbacks.ref(), callback);
1274 }
1275 
void GCRuntime::callFinalizeCallbacks(JSFreeOp* fop,
1277                                       JSFinalizeStatus status) const {
1278   for (auto& p : finalizeCallbacks.ref()) {
1279     p.op(fop, status, p.data);
1280   }
1281 }
1282 
void GCRuntime::setHostCleanupFinalizationRegistryCallback(
1284     JSHostCleanupFinalizationRegistryCallback callback, void* data) {
1285   hostCleanupFinalizationRegistryCallback.ref() = {callback, data};
1286 }
1287 
void GCRuntime::callHostCleanupFinalizationRegistryCallback(
1289     JSFunction* doCleanup, GlobalObject* incumbentGlobal) {
1290   JS::AutoSuppressGCAnalysis nogc;
1291   const auto& callback = hostCleanupFinalizationRegistryCallback.ref();
1292   if (callback.op) {
1293     callback.op(doCleanup, incumbentGlobal, callback.data);
1294   }
1295 }
1296 
bool GCRuntime::addWeakPointerZonesCallback(JSWeakPointerZonesCallback callback,
1298                                             void* data) {
1299   return updateWeakPointerZonesCallbacks.ref().append(
1300       Callback<JSWeakPointerZonesCallback>(callback, data));
1301 }
1302 
void GCRuntime::removeWeakPointerZonesCallback(
1304     JSWeakPointerZonesCallback callback) {
1305   EraseCallback(updateWeakPointerZonesCallbacks.ref(), callback);
1306 }
1307 
void GCRuntime::callWeakPointerZonesCallbacks(JSTracer* trc) const {
1309   for (auto const& p : updateWeakPointerZonesCallbacks.ref()) {
1310     p.op(trc, p.data);
1311   }
1312 }
1313 
bool GCRuntime::addWeakPointerCompartmentCallback(
1315     JSWeakPointerCompartmentCallback callback, void* data) {
1316   return updateWeakPointerCompartmentCallbacks.ref().append(
1317       Callback<JSWeakPointerCompartmentCallback>(callback, data));
1318 }
1319 
void GCRuntime::removeWeakPointerCompartmentCallback(
1321     JSWeakPointerCompartmentCallback callback) {
1322   EraseCallback(updateWeakPointerCompartmentCallbacks.ref(), callback);
1323 }
1324 
void GCRuntime::callWeakPointerCompartmentCallbacks(
1326     JSTracer* trc, JS::Compartment* comp) const {
1327   for (auto const& p : updateWeakPointerCompartmentCallbacks.ref()) {
1328     p.op(trc, comp, p.data);
1329   }
1330 }
1331 
JS::GCSliceCallback GCRuntime::setSliceCallback(JS::GCSliceCallback callback) {
1333   return stats().setSliceCallback(callback);
1334 }
1335 
JS::GCNurseryCollectionCallback GCRuntime::setNurseryCollectionCallback(
1337     JS::GCNurseryCollectionCallback callback) {
1338   return stats().setNurseryCollectionCallback(callback);
1339 }
1340 
JS::DoCycleCollectionCallback GCRuntime::setDoCycleCollectionCallback(
1342     JS::DoCycleCollectionCallback callback) {
1343   const auto prior = gcDoCycleCollectionCallback.ref();
1344   gcDoCycleCollectionCallback.ref() = {callback, nullptr};
1345   return prior.op;
1346 }
1347 
void GCRuntime::callDoCycleCollectionCallback(JSContext* cx) {
1349   const auto& callback = gcDoCycleCollectionCallback.ref();
1350   if (callback.op) {
1351     callback.op(cx);
1352   }
1353 }
1354 
bool GCRuntime::addRoot(Value* vp, const char* name) {
1356   /*
1357    * Sometimes Firefox will hold weak references to objects and then convert
1358    * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
1359    * or ModifyBusyCount in workers). We need a read barrier to cover these
1360    * cases.
1361    */
1362   MOZ_ASSERT(vp);
1363   Value value = *vp;
1364   if (value.isGCThing()) {
1365     ValuePreWriteBarrier(value);
1366   }
1367 
1368   return rootsHash.ref().put(vp, name);
1369 }
1370 
void GCRuntime::removeRoot(Value* vp) {
1372   rootsHash.ref().remove(vp);
1373   notifyRootsRemoved();
1374 }
1375 
1376 /* Compacting GC */
1377 
bool js::gc::IsCurrentlyAnimating(const TimeStamp& lastAnimationTime,
1379                                   const TimeStamp& currentTime) {
1380   // Assume that we're currently animating if js::NotifyAnimationActivity has
1381   // been called in the last second.
1382   static const auto oneSecond = TimeDuration::FromSeconds(1);
1383   return !lastAnimationTime.IsNull() &&
1384          currentTime < (lastAnimationTime + oneSecond);
1385 }
1386 
static bool DiscardedCodeRecently(Zone* zone, const TimeStamp& currentTime) {
1388   static const auto thirtySeconds = TimeDuration::FromSeconds(30);
1389   return !zone->lastDiscardedCodeTime().IsNull() &&
1390          currentTime < (zone->lastDiscardedCodeTime() + thirtySeconds);
1391 }
1392 
bool GCRuntime::shouldCompact() {
1394   // Compact on shrinking GC if enabled.  Skip compacting in incremental GCs
1395   // if we are currently animating, unless the user is inactive or we're
1396   // responding to memory pressure.
1397 
1398   if (!isShrinkingGC() || !isCompactingGCEnabled()) {
1399     return false;
1400   }
1401 
1402   if (initialReason == JS::GCReason::USER_INACTIVE ||
1403       initialReason == JS::GCReason::MEM_PRESSURE) {
1404     return true;
1405   }
1406 
1407   return !isIncremental ||
1408          !IsCurrentlyAnimating(rt->lastAnimationTime, TimeStamp::Now());
1409 }
1410 
bool GCRuntime::isCompactingGCEnabled() const {
1412   return compactingEnabled &&
1413          rt->mainContextFromOwnThread()->compactingDisabledCount == 0;
1414 }
1415 
JS_PUBLIC_API void JS::SetCreateGCSliceBudgetCallback(
1417     JSContext* cx, JS::CreateSliceBudgetCallback cb) {
1418   cx->runtime()->gc.createBudgetCallback = cb;
1419 }
1420 
setDeadlineFromNow()1421 void TimeBudget::setDeadlineFromNow() { deadline = ReallyNow() + budget; }
1422 
SliceBudget(TimeBudget time,InterruptRequestFlag * interrupt)1423 SliceBudget::SliceBudget(TimeBudget time, InterruptRequestFlag* interrupt)
1424     : budget(TimeBudget(time)),
1425       interruptRequested(interrupt),
1426       counter(StepsPerExpensiveCheck) {
1427   budget.as<TimeBudget>().setDeadlineFromNow();
1428 }
1429 
SliceBudget(WorkBudget work)1430 SliceBudget::SliceBudget(WorkBudget work)
1431     : budget(work), interruptRequested(nullptr), counter(work.budget) {}
1432 
describe(char * buffer,size_t maxlen) const1433 int SliceBudget::describe(char* buffer, size_t maxlen) const {
1434   if (isUnlimited()) {
1435     return snprintf(buffer, maxlen, "unlimited");
1436   } else if (isWorkBudget()) {
1437     return snprintf(buffer, maxlen, "work(%" PRId64 ")", workBudget());
1438   } else {
1439     const char* interruptStr = "";
1440     if (interruptRequested) {
1441       interruptStr = interrupted ? "INTERRUPTED " : "interruptible ";
1442     }
1443     const char* extra = "";
1444     if (idle) {
1445       extra = extended ? " (started idle but extended)" : " (idle)";
1446     }
1447     return snprintf(buffer, maxlen, "%s%" PRId64 "ms%s", interruptStr,
1448                     timeBudget(), extra);
1449   }
1450 }
1451 
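// Slow path for budget checking, only reached once the fast-path step counter
// has been used up (counter <= 0). A work budget is over budget at this point.
// A time budget is over budget if an interrupt was requested or the deadline
// has passed; otherwise the counter is reset for another batch of steps.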
bool SliceBudget::checkOverBudget() {
  MOZ_ASSERT(counter <= 0);
  MOZ_ASSERT(!isUnlimited());

  if (isWorkBudget()) {
    return true;
  }

  if (interruptRequested && *interruptRequested) {
    *interruptRequested = false;
    interrupted = true;
  }

  if (interrupted) {
    return true;
  }

  if (ReallyNow() >= budget.as<TimeBudget>().deadline) {
    return true;
  }

  counter = StepsPerExpensiveCheck;
  return false;
}

void GCRuntime::requestMajorGC(JS::GCReason reason) {
  MOZ_ASSERT_IF(reason != JS::GCReason::BG_TASK_FINISHED,
                !CurrentThreadIsPerformingGC());

  if (majorGCRequested()) {
    return;
  }

  majorGCTriggerReason = reason;
  rt->mainContextFromAnyThread()->requestInterrupt(InterruptReason::GC);
}

void Nursery::requestMinorGC(JS::GCReason reason) const {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));

  if (minorGCRequested()) {
    return;
  }

  minorGCTriggerReason_ = reason;
  runtime()->mainContextFromOwnThread()->requestInterrupt(InterruptReason::GC);
}

bool GCRuntime::triggerGC(JS::GCReason reason) {
  /*
   * Don't trigger GCs if this is being called off the main thread from
   * onTooMuchMalloc().
   */
  if (!CurrentThreadCanAccessRuntime(rt)) {
    return false;
  }

  /* GC is already running. */
  if (JS::RuntimeHeapIsCollecting()) {
    return false;
  }

  JS::PrepareForFullGC(rt->mainContextFromOwnThread());
  requestMajorGC(reason);
  return true;
}

void GCRuntime::maybeTriggerGCAfterAlloc(Zone* zone) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

  TriggerResult trigger =
      checkHeapThreshold(zone, zone->gcHeapSize, zone->gcHeapThreshold);

  if (trigger.shouldTrigger) {
    // Start or continue an in progress incremental GC. We do this to try to
    // avoid performing non-incremental GCs on zones which allocate a lot of
    // data, even when incremental slices can't be triggered via scheduling in
    // the event loop.
    triggerZoneGC(zone, JS::GCReason::ALLOC_TRIGGER, trigger.usedBytes,
                  trigger.thresholdBytes);
  }
}

void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
                                      const HeapSize& heap,
                                      const HeapThreshold& threshold,
                                      JS::GCReason reason) {
  rt->gc.maybeTriggerGCAfterMalloc(Zone::from(zoneAlloc), heap, threshold,
                                   reason);
}

void GCRuntime::maybeTriggerGCAfterMalloc(Zone* zone) {
  if (maybeTriggerGCAfterMalloc(zone, zone->mallocHeapSize,
                                zone->mallocHeapThreshold,
                                JS::GCReason::TOO_MUCH_MALLOC)) {
    return;
  }

  maybeTriggerGCAfterMalloc(zone, zone->jitHeapSize, zone->jitHeapThreshold,
                            JS::GCReason::TOO_MUCH_JIT_CODE);
}

bool GCRuntime::maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap,
                                          const HeapThreshold& threshold,
                                          JS::GCReason reason) {
  // Ignore malloc during sweeping, for example when we resize hash tables.
  if (!CurrentThreadCanAccessRuntime(rt)) {
    MOZ_ASSERT(JS::RuntimeHeapIsBusy());
    return false;
  }

  if (rt->heapState() != JS::HeapState::Idle) {
    return false;
  }

  TriggerResult trigger = checkHeapThreshold(zone, heap, threshold);
  if (!trigger.shouldTrigger) {
    return false;
  }

  // Trigger a zone GC. budgetIncrementalGC() will work out whether to do an
  // incremental or non-incremental collection.
  triggerZoneGC(zone, reason, trigger.usedBytes, trigger.thresholdBytes);
  return true;
}

TriggerResult GCRuntime::checkHeapThreshold(
    Zone* zone, const HeapSize& heapSize, const HeapThreshold& heapThreshold) {
  MOZ_ASSERT_IF(heapThreshold.hasSliceThreshold(), zone->wasGCStarted());

  size_t usedBytes = heapSize.bytes();
  size_t thresholdBytes = heapThreshold.hasSliceThreshold()
                              ? heapThreshold.sliceBytes()
                              : heapThreshold.startBytes();

  // The incremental limit will be checked if we trigger a GC slice.
  MOZ_ASSERT(thresholdBytes <= heapThreshold.incrementalLimitBytes());

  return TriggerResult{usedBytes >= thresholdBytes, usedBytes, thresholdBytes};
}

bool GCRuntime::triggerZoneGC(Zone* zone, JS::GCReason reason, size_t used,
                              size_t threshold) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

  /* GC is already running. */
  if (JS::RuntimeHeapIsBusy()) {
    return false;
  }

#ifdef JS_GC_ZEAL
  if (hasZealMode(ZealMode::Alloc)) {
    MOZ_RELEASE_ASSERT(triggerGC(reason));
    return true;
  }
#endif

  if (zone->isAtomsZone()) {
    stats().recordTrigger(used, threshold);
    MOZ_RELEASE_ASSERT(triggerGC(reason));
    return true;
  }

  stats().recordTrigger(used, threshold);
  zone->scheduleGC();
  requestMajorGC(reason);
  return true;
}

void GCRuntime::maybeGC() {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

#ifdef JS_GC_ZEAL
  if (hasZealMode(ZealMode::Alloc) || hasZealMode(ZealMode::RootsChange)) {
    JS::PrepareForFullGC(rt->mainContextFromOwnThread());
    gc(JS::GCOptions::Normal, JS::GCReason::DEBUG_GC);
    return;
  }
#endif

  if (gcIfRequested()) {
    return;
  }

  if (isIncrementalGCInProgress()) {
    return;
  }

  bool scheduledZones = false;
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    if (checkEagerAllocTrigger(zone->gcHeapSize, zone->gcHeapThreshold) ||
        checkEagerAllocTrigger(zone->mallocHeapSize,
                               zone->mallocHeapThreshold)) {
      zone->scheduleGC();
      scheduledZones = true;
    }
  }

  if (scheduledZones) {
    SliceBudget budget = defaultBudget(JS::GCReason::EAGER_ALLOC_TRIGGER, 0);
    startGC(JS::GCOptions::Normal, JS::GCReason::EAGER_ALLOC_TRIGGER, budget);
  }
}

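// An eager allocation trigger fires when a heap both exceeds a minimum size
// (1 MiB) and crosses the threshold returned by HeapThreshold::
// eagerAllocTrigger() for the current GC frequency mode. The trigger is
// recorded in the GC statistics.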
bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size,
                                       const HeapThreshold& threshold) {
  double thresholdBytes =
      threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
  double usedBytes = size.bytes();
  if (usedBytes <= 1024 * 1024 || usedBytes < thresholdBytes) {
    return false;
  }

  stats().recordTrigger(usedBytes, thresholdBytes);
  return true;
}

bool GCRuntime::shouldDecommit() const {
  // If we're doing a shrinking GC we always decommit to release as much memory
  // as possible.
  if (cleanUpEverything) {
    return true;
  }

  // If we are allocating heavily enough to trigger "high frequency" GC then
  // skip decommit so that we do not compete with the mutator.
  return !schedulingState.inHighFrequencyGCMode();
}

void GCRuntime::startDecommit() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DECOMMIT);

#ifdef DEBUG
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  MOZ_ASSERT(decommitTask.isIdle());

  {
    AutoLockGC lock(this);
    MOZ_ASSERT(fullChunks(lock).verify());
    MOZ_ASSERT(availableChunks(lock).verify());
    MOZ_ASSERT(emptyChunks(lock).verify());

    // Verify that all entries in the empty chunks pool are unused.
    for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done();
         chunk.next()) {
      MOZ_ASSERT(chunk->unused());
    }
  }
#endif

  if (!shouldDecommit()) {
    return;
  }

  {
    AutoLockGC lock(this);
    if (availableChunks(lock).empty() && !tooManyEmptyChunks(lock) &&
        emptyChunks(lock).empty()) {
      return;  // Nothing to do.
    }
  }

#ifdef DEBUG
  {
    AutoLockHelperThreadState lock;
    MOZ_ASSERT(!requestSliceAfterBackgroundTask);
  }
#endif

  if (sweepOnBackgroundThread) {
    decommitTask.start();
    return;
  }

  decommitTask.runFromMainThread();
}

BackgroundDecommitTask::BackgroundDecommitTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::DECOMMIT) {}

void js::gc::BackgroundDecommitTask::run(AutoLockHelperThreadState& lock) {
  {
    AutoUnlockHelperThreadState unlock(lock);

    ChunkPool emptyChunksToFree;
    {
      AutoLockGC gcLock(gc);
      emptyChunksToFree = gc->expireEmptyChunkPool(gcLock);
    }

    FreeChunkPool(emptyChunksToFree);

    {
      AutoLockGC gcLock(gc);

      // To help minimize the total number of chunks needed over time, sort the
      // available chunks list so that we allocate into more-used chunks first.
      gc->availableChunks(gcLock).sort();

      if (DecommitEnabled()) {
        gc->decommitEmptyChunks(cancel_, gcLock);
        gc->decommitFreeArenas(cancel_, gcLock);
      }
    }
  }

  gc->maybeRequestGCAfterBackgroundTask(lock);
}

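// Whole-chunk decommit is only worthwhile for chunks that are completely
// unused but still have committed free arenas.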
static inline bool CanDecommitWholeChunk(TenuredChunk* chunk) {
  return chunk->unused() && chunk->info.numArenasFreeCommitted != 0;
}

// Called from a background thread to decommit whole empty chunks. Releases
// the GC lock while decommitting.
void GCRuntime::decommitEmptyChunks(const bool& cancel, AutoLockGC& lock) {
  Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
  for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next()) {
    if (CanDecommitWholeChunk(chunk) && !chunksToDecommit.append(chunk)) {
      onOutOfMallocMemory(lock);
      return;
    }
  }

  for (TenuredChunk* chunk : chunksToDecommit) {
    if (cancel) {
      break;
    }

    // Check whether something used the chunk while lock was released.
    if (!CanDecommitWholeChunk(chunk)) {
      continue;
    }

    // Temporarily remove the chunk while decommitting its memory so that the
    // mutator doesn't start allocating from it when we drop the lock.
    emptyChunks(lock).remove(chunk);

    {
      AutoUnlockGC unlock(lock);
      chunk->decommitAllArenas();
      MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
    }

    emptyChunks(lock).push(chunk);
  }
}

// Called from a background thread to decommit free arenas. Releases the GC
// lock.
void GCRuntime::decommitFreeArenas(const bool& cancel, AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  // Since we release the GC lock while doing the decommit syscall below,
  // it is dangerous to iterate the available list directly, as the active
  // thread could modify it concurrently. Instead, we build and pass an
  // explicit Vector containing the Chunks we want to visit.
  Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
  for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
       chunk.next()) {
    if (chunk->info.numArenasFreeCommitted != 0 &&
        !chunksToDecommit.append(chunk)) {
      onOutOfMallocMemory(lock);
      return;
    }
  }

  for (TenuredChunk* chunk : chunksToDecommit) {
    chunk->decommitFreeArenas(this, cancel, lock);
  }
}

// Do all possible decommit immediately from the current thread without
// releasing the GC lock or allocating any memory.
void GCRuntime::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());
  for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
       chunk.next()) {
    chunk->decommitFreeArenasWithoutUnlocking(lock);
  }
  MOZ_ASSERT(availableChunks(lock).verify());
}

void GCRuntime::maybeRequestGCAfterBackgroundTask(
    const AutoLockHelperThreadState& lock) {
  if (requestSliceAfterBackgroundTask) {
    // Trigger a slice so the main thread can continue the collection
    // immediately.
    requestSliceAfterBackgroundTask = false;
    requestMajorGC(JS::GCReason::BG_TASK_FINISHED);
  }
}

void GCRuntime::cancelRequestedGCAfterBackgroundTask() {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

#ifdef DEBUG
  {
    AutoLockHelperThreadState lock;
    MOZ_ASSERT(!requestSliceAfterBackgroundTask);
  }
#endif

  majorGCTriggerReason.compareExchange(JS::GCReason::BG_TASK_FINISHED,
                                       JS::GCReason::NO_REASON);
}

bool GCRuntime::isWaitingOnBackgroundTask() const {
  AutoLockHelperThreadState lock;
  return requestSliceAfterBackgroundTask;
}

void GCRuntime::queueUnusedLifoBlocksForFree(LifoAlloc* lifo) {
  MOZ_ASSERT(JS::RuntimeHeapIsBusy());
  AutoLockHelperThreadState lock;
  lifoBlocksToFree.ref().transferUnusedFrom(lifo);
}

void GCRuntime::queueAllLifoBlocksForFree(LifoAlloc* lifo) {
  MOZ_ASSERT(JS::RuntimeHeapIsBusy());
  AutoLockHelperThreadState lock;
  lifoBlocksToFree.ref().transferFrom(lifo);
}

void GCRuntime::queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo) {
  lifoBlocksToFreeAfterMinorGC.ref().transferFrom(lifo);
}

void GCRuntime::queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers) {
  AutoLockHelperThreadState lock;

  if (!buffersToFreeAfterMinorGC.ref().empty()) {
    // In the rare case that this hasn't processed the buffers from a previous
    // minor GC we have to wait here.
    MOZ_ASSERT(!freeTask.isIdle(lock));
    freeTask.joinWithLockHeld(lock);
  }

  MOZ_ASSERT(buffersToFreeAfterMinorGC.ref().empty());
  std::swap(buffersToFreeAfterMinorGC.ref(), buffers);
}

void Realm::destroy(JSFreeOp* fop) {
  JSRuntime* rt = fop->runtime();
  if (auto callback = rt->destroyRealmCallback) {
    callback(fop, this);
  }
  if (principals()) {
    JS_DropPrincipals(rt->mainContextFromOwnThread(), principals());
  }
  // Bug 1560019: Malloc memory associated with a zone but not with a specific
  // GC thing is not currently tracked.
  fop->deleteUntracked(this);
}

void Compartment::destroy(JSFreeOp* fop) {
  JSRuntime* rt = fop->runtime();
  if (auto callback = rt->destroyCompartmentCallback) {
    callback(fop, this);
  }
  // Bug 1560019: Malloc memory associated with a zone but not with a specific
  // GC thing is not currently tracked.
  fop->deleteUntracked(this);
  rt->gc.stats().sweptCompartment();
}

void Zone::destroy(JSFreeOp* fop) {
  MOZ_ASSERT(compartments().empty());
  JSRuntime* rt = fop->runtime();
  if (auto callback = rt->destroyZoneCallback) {
    callback(fop, this);
  }
  // Bug 1560019: Malloc memory associated with a zone but not with a specific
  // GC thing is not currently tracked.
  fop->deleteUntracked(this);
  fop->runtime()->gc.stats().sweptZone();
}

/*
 * It's simpler if we preserve the invariant that every zone (except the atoms
 * zone) has at least one compartment, and every compartment has at least one
 * realm. If we know we're deleting the entire zone, then sweepCompartments is
 * allowed to delete all compartments. In this case, |keepAtleastOne| is false.
 * If any cells remain alive in the zone, set |keepAtleastOne| true to prohibit
 * sweepCompartments from deleting every compartment. Instead, it preserves an
 * arbitrary compartment in the zone.
 */
void Zone::sweepCompartments(JSFreeOp* fop, bool keepAtleastOne,
                             bool destroyingRuntime) {
  MOZ_ASSERT(!compartments().empty());
  MOZ_ASSERT_IF(destroyingRuntime, !keepAtleastOne);

  Compartment** read = compartments().begin();
  Compartment** end = compartments().end();
  Compartment** write = read;
  while (read < end) {
    Compartment* comp = *read++;

    /*
     * Don't delete the last compartment and realm if keepAtleastOne is
     * still true, meaning all the other compartments were deleted.
     */
    bool keepAtleastOneRealm = read == end && keepAtleastOne;
    comp->sweepRealms(fop, keepAtleastOneRealm, destroyingRuntime);

    if (!comp->realms().empty()) {
      *write++ = comp;
      keepAtleastOne = false;
    } else {
      comp->destroy(fop);
    }
  }
  compartments().shrinkTo(write - compartments().begin());
  MOZ_ASSERT_IF(keepAtleastOne, !compartments().empty());
  MOZ_ASSERT_IF(destroyingRuntime, compartments().empty());
}

void Compartment::sweepRealms(JSFreeOp* fop, bool keepAtleastOne,
                              bool destroyingRuntime) {
  MOZ_ASSERT(!realms().empty());
  MOZ_ASSERT_IF(destroyingRuntime, !keepAtleastOne);

  Realm** read = realms().begin();
  Realm** end = realms().end();
  Realm** write = read;
  while (read < end) {
    Realm* realm = *read++;

    /*
     * Don't delete the last realm if keepAtleastOne is still true, meaning
     * all the other realms were deleted.
     */
    bool dontDelete = read == end && keepAtleastOne;
    if ((realm->marked() || dontDelete) && !destroyingRuntime) {
      *write++ = realm;
      keepAtleastOne = false;
    } else {
      realm->destroy(fop);
    }
  }
  realms().shrinkTo(write - realms().begin());
  MOZ_ASSERT_IF(keepAtleastOne, !realms().empty());
  MOZ_ASSERT_IF(destroyingRuntime, realms().empty());
}

void GCRuntime::sweepZones(JSFreeOp* fop, bool destroyingRuntime) {
  MOZ_ASSERT_IF(destroyingRuntime, numActiveZoneIters == 0);

  if (numActiveZoneIters) {
    return;
  }

  assertBackgroundSweepingFinished();

  Zone** read = zones().begin();
  Zone** end = zones().end();
  Zone** write = read;

  while (read < end) {
    Zone* zone = *read++;

    if (zone->wasGCStarted()) {
      MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
      const bool zoneIsDead =
          zone->arenas.arenaListsAreEmpty() && !zone->hasMarkedRealms();
      MOZ_ASSERT_IF(destroyingRuntime, zoneIsDead);
      if (zoneIsDead) {
        AutoSetThreadIsSweeping threadIsSweeping(zone);
        zone->arenas.checkEmptyFreeLists();
        zone->sweepCompartments(fop, false, destroyingRuntime);
        MOZ_ASSERT(zone->compartments().empty());
        MOZ_ASSERT(zone->rttValueObjects().empty());
        zone->destroy(fop);
        continue;
      }
      zone->sweepCompartments(fop, true, destroyingRuntime);
    }
    *write++ = zone;
  }
  zones().shrinkTo(write - zones().begin());
}

void ArenaLists::checkEmptyArenaList(AllocKind kind) {
  MOZ_ASSERT(arenaList(kind).isEmpty());
}

void GCRuntime::purgeRuntimeForMinorGC() {
  // If external strings become nursery allocable, remember to call
  // zone->externalStringCache().purge() (and delete this assert.)
  MOZ_ASSERT(!IsNurseryAllocable(AllocKind::EXTERNAL_STRING));

  for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
    zone->functionToStringCache().purge();
  }
}

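// Throw away caches and other transient data that can be regenerated on
// demand. This runs at the start of a major GC so that cached data does not
// keep otherwise unreachable GC things alive (see the snapshot-invariant
// comment in endPreparePhase()).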
void GCRuntime::purgeRuntime() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE);

  for (GCRealmsIter realm(rt); !realm.done(); realm.next()) {
    realm->purge();
  }

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    zone->purgeAtomCache();
    zone->externalStringCache().purge();
    zone->functionToStringCache().purge();
    zone->shapeZone().purgeShapeCaches(rt->defaultFreeOp());
  }

  JSContext* cx = rt->mainContextFromOwnThread();
  queueUnusedLifoBlocksForFree(&cx->tempLifoAlloc());
  cx->interpreterStack().purge(rt);
  cx->frontendCollectionPool().purge();

  rt->caches().purge();

  if (auto cache = rt->maybeThisRuntimeSharedImmutableStrings()) {
    cache->purge();
  }

  MOZ_ASSERT(unmarkGrayStack.empty());
  unmarkGrayStack.clearAndFree();

  // If we're the main runtime, tell helper threads to free their unused
  // memory when they are next idle.
  if (!rt->parentRuntime) {
    HelperThreadState().triggerFreeUnusedMemory();
  }
}

bool GCRuntime::shouldPreserveJITCode(Realm* realm,
                                      const TimeStamp& currentTime,
                                      JS::GCReason reason,
                                      bool canAllocateMoreCode,
                                      bool isActiveCompartment) {
  if (cleanUpEverything) {
    return false;
  }
  if (!canAllocateMoreCode) {
    return false;
  }

  if (isActiveCompartment) {
    return true;
  }
  if (alwaysPreserveCode) {
    return true;
  }
  if (realm->preserveJitCode()) {
    return true;
  }
  if (IsCurrentlyAnimating(realm->lastAnimationTime, currentTime) &&
      DiscardedCodeRecently(realm->zone(), currentTime)) {
    return true;
  }
  if (reason == JS::GCReason::DEBUG_GC) {
    return true;
  }

  return false;
}

#ifdef DEBUG
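// Debug-only tracer used to verify that every edge from a GC thing either
// stays within its compartment, goes through the cross-compartment wrapper
// map or a debugger weakmap, or (for things without a compartment) stays
// within its zone or the atoms zone.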
class CompartmentCheckTracer final : public JS::CallbackTracer {
  void onChild(JS::GCCellPtr thing) override;
  bool edgeIsInCrossCompartmentMap(JS::GCCellPtr dst);

 public:
  explicit CompartmentCheckTracer(JSRuntime* rt)
      : JS::CallbackTracer(rt, JS::TracerKind::Callback,
                           JS::WeakEdgeTraceAction::Skip),
        src(nullptr),
        zone(nullptr),
        compartment(nullptr) {}

  Cell* src;
  JS::TraceKind srcKind;
  Zone* zone;
  Compartment* compartment;
};

static bool InCrossCompartmentMap(JSRuntime* rt, JSObject* src,
                                  JS::GCCellPtr dst) {
  // Cross compartment edges are either in the cross compartment map or in a
  // debugger weakmap.

  Compartment* srccomp = src->compartment();

  if (dst.is<JSObject>()) {
    if (ObjectWrapperMap::Ptr p = srccomp->lookupWrapper(&dst.as<JSObject>())) {
      if (*p->value().unsafeGet() == src) {
        return true;
      }
    }
  }

  if (DebugAPI::edgeIsInDebuggerWeakmap(rt, src, dst)) {
    return true;
  }

  return false;
}

void CompartmentCheckTracer::onChild(JS::GCCellPtr thing) {
  Compartment* comp =
      MapGCThingTyped(thing, [](auto t) { return t->maybeCompartment(); });
  if (comp && compartment) {
    if (!runtime()->mainContextFromOwnThread()->disableCompartmentCheckTracer) {
      MOZ_ASSERT(comp == compartment || edgeIsInCrossCompartmentMap(thing));
    }
  } else {
    TenuredCell* tenured = &thing.asCell()->asTenured();
    Zone* thingZone = tenured->zoneFromAnyThread();
    MOZ_ASSERT(thingZone == zone || thingZone->isAtomsZone());
  }
}

bool CompartmentCheckTracer::edgeIsInCrossCompartmentMap(JS::GCCellPtr dst) {
  return srcKind == JS::TraceKind::Object &&
         InCrossCompartmentMap(runtime(), static_cast<JSObject*>(src), dst);
}

void GCRuntime::checkForCompartmentMismatches() {
  JSContext* cx = rt->mainContextFromOwnThread();
  if (cx->disableStrictProxyCheckingCount) {
    return;
  }

  CompartmentCheckTracer trc(rt);
  AutoAssertEmptyNursery empty(cx);
  for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
    trc.zone = zone;
    for (auto thingKind : AllAllocKinds()) {
      for (auto i = zone->cellIterUnsafe<TenuredCell>(thingKind, empty);
           !i.done(); i.next()) {
        trc.src = i.getCell();
        trc.srcKind = MapAllocToTraceKind(thingKind);
        trc.compartment = MapGCThingTyped(
            trc.src, trc.srcKind, [](auto t) { return t->maybeCompartment(); });
        JS::TraceChildren(&trc, JS::GCCellPtr(trc.src, trc.srcKind));
      }
    }
  }
}
#endif

static bool ShouldCleanUpEverything(JS::GCOptions options) {
  // During shutdown, we must clean everything up, for the sake of leak
  // detection. When a runtime has no contexts, or we're doing a GC before a
  // shutdown CC, those are strong indications that we're shutting down.
  return options == JS::GCOptions::Shutdown || options == JS::GCOptions::Shrink;
}

static bool ShouldSweepOnBackgroundThread(JS::GCReason reason) {
  return reason != JS::GCReason::DESTROY_RUNTIME && CanUseExtraThreads();
}

void GCRuntime::startCollection(JS::GCReason reason) {
  checkGCStateNotInUse();
  MOZ_ASSERT_IF(
      isShuttingDown(),
      isShutdownGC() ||
          reason == JS::GCReason::XPCONNECT_SHUTDOWN /* Bug 1650075 */);

  initialReason = reason;
  cleanUpEverything = ShouldCleanUpEverything(gcOptions());
  sweepOnBackgroundThread = ShouldSweepOnBackgroundThread(reason);
  isCompacting = shouldCompact();
  rootsRemoved = false;
  lastGCStartTime_ = ReallyNow();

#ifdef DEBUG
  if (isShutdownGC()) {
    hadShutdownGC = true;
  }

  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    zone->gcSweepGroupIndex = 0;
  }
#endif
}

static void RelazifyFunctions(Zone* zone, AllocKind kind) {
  MOZ_ASSERT(kind == AllocKind::FUNCTION ||
             kind == AllocKind::FUNCTION_EXTENDED);

  JSRuntime* rt = zone->runtimeFromMainThread();
  AutoAssertEmptyNursery empty(rt->mainContextFromOwnThread());

  for (auto i = zone->cellIterUnsafe<JSObject>(kind, empty); !i.done();
       i.next()) {
    JSFunction* fun = &i->as<JSFunction>();
    // When iterating over the GC-heap, we may encounter function objects that
    // are incomplete (missing a BaseScript when we expect one). We must check
    // for this case before we can call JSFunction::hasBytecode().
    if (fun->isIncomplete()) {
      continue;
    }
    if (fun->hasBytecode()) {
      fun->maybeRelazify(rt);
    }
  }
}

static bool ShouldCollectZone(Zone* zone, JS::GCReason reason) {
  // If we are repeating a GC because we noticed dead compartments haven't
  // been collected, then only collect zones containing those compartments.
  if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
      if (comp->gcState.scheduledForDestruction) {
        return true;
      }
    }

    return false;
  }

  // Otherwise we only collect scheduled zones.
  return zone->isGCScheduled();
}

bool GCRuntime::prepareZonesForCollection(JS::GCReason reason,
                                          bool* isFullOut) {
#ifdef DEBUG
  /* Assert that zone state is as we expect */
  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    MOZ_ASSERT(!zone->isCollecting());
    MOZ_ASSERT_IF(!zone->isAtomsZone(), !zone->compartments().empty());
    for (auto i : AllAllocKinds()) {
      MOZ_ASSERT(zone->arenas.collectingArenaList(i).isEmpty());
    }
  }
#endif

  *isFullOut = true;
  bool any = false;

  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    /* Set up which zones will be collected. */
    bool shouldCollect = ShouldCollectZone(zone, reason);
    if (shouldCollect) {
      any = true;
      zone->changeGCState(Zone::NoGC, Zone::Prepare);
    } else {
      *isFullOut = false;
    }

    zone->setWasCollected(shouldCollect);
  }

  /* Check that at least one zone is scheduled for collection. */
  return any;
}

void GCRuntime::discardJITCodeForGC() {
  size_t nurserySiteResetCount = 0;
  size_t pretenuredSiteResetCount = 0;

  js::CancelOffThreadIonCompile(rt, JS::Zone::Prepare);
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_DISCARD_CODE);

    // We may need to reset allocation sites and discard JIT code to recover if
    // we find object lifetimes have changed.
    PretenuringZone& pz = zone->pretenuring;
    bool resetNurserySites = pz.shouldResetNurseryAllocSites();
    bool resetPretenuredSites = pz.shouldResetPretenuredAllocSites();

    if (!zone->isPreservingCode()) {
      Zone::DiscardOptions options;
      options.discardBaselineCode = true;
      options.discardJitScripts = true;
      options.resetNurseryAllocSites = resetNurserySites;
      options.resetPretenuredAllocSites = resetPretenuredSites;
      zone->discardJitCode(rt->defaultFreeOp(), options);

    } else if (resetNurserySites || resetPretenuredSites) {
      zone->resetAllocSitesAndInvalidate(resetNurserySites,
                                         resetPretenuredSites);
    }

    if (resetNurserySites) {
      nurserySiteResetCount++;
    }
    if (resetPretenuredSites) {
      pretenuredSiteResetCount++;
    }
  }

  if (nursery().reportPretenuring()) {
    if (nurserySiteResetCount) {
      fprintf(
          stderr,
          "GC reset nursery alloc sites and invalidated code in %zu zones\n",
          nurserySiteResetCount);
    }
    if (pretenuredSiteResetCount) {
      fprintf(
          stderr,
          "GC reset pretenured alloc sites and invalidated code in %zu zones\n",
          pretenuredSiteResetCount);
    }
  }
}

void GCRuntime::relazifyFunctionsForShrinkingGC() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::RELAZIFY_FUNCTIONS);
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    RelazifyFunctions(zone, AllocKind::FUNCTION);
    RelazifyFunctions(zone, AllocKind::FUNCTION_EXTENDED);
  }
}

void GCRuntime::purgePropMapTablesForShrinkingGC() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_PROP_MAP_TABLES);
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    if (!canRelocateZone(zone) || zone->keepPropMapTables()) {
      continue;
    }

    // Note: CompactPropMaps never have a table.
    for (auto map = zone->cellIterUnsafe<NormalPropMap>(); !map.done();
         map.next()) {
      if (map->asLinked()->hasTable()) {
        map->asLinked()->purgeTable(rt->defaultFreeOp());
      }
    }
    for (auto map = zone->cellIterUnsafe<DictionaryPropMap>(); !map.done();
         map.next()) {
      if (map->asLinked()->hasTable()) {
        map->asLinked()->purgeTable(rt->defaultFreeOp());
      }
    }
  }
}

// The debugger keeps track of the URLs for the sources of each realm's scripts.
// These URLs are purged on shrinking GCs.
void GCRuntime::purgeSourceURLsForShrinkingGC() {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_SOURCE_URLS);
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    // URLs are not tracked for realms in the system zone.
    if (!canRelocateZone(zone) || zone->isSystemZone()) {
      continue;
    }
    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
      for (RealmsInCompartmentIter realm(comp); !realm.done(); realm.next()) {
        GlobalObject* global = realm.get()->unsafeUnbarrieredMaybeGlobal();
        if (global) {
          global->clearSourceURLSHolder();
        }
      }
    }
  }
}

void GCRuntime::unmarkWeakMaps() {
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    /* Unmark all weak maps in the zones being collected. */
    WeakMapBase::unmarkZone(zone);
  }
}

bool GCRuntime::beginPreparePhase(JS::GCReason reason, AutoGCSession& session) {
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PREPARE);

  if (!prepareZonesForCollection(reason, &isFull.ref())) {
    return false;
  }

  if (reason == JS::GCReason::DESTROY_RUNTIME) {
    restorePermanentSharedThings();
  }

  /*
   * Start a parallel task to clear all mark state for the zones we are
   * collecting. This is linear in the size of the heap we are collecting and so
   * can be slow. This happens concurrently with the mutator and GC proper does
   * not start until this is complete.
   */
  unmarkTask.initZones();
  unmarkTask.start();

  /*
   * Process any queued source compressions during the start of a major
   * GC.
   *
   * Bug 1650075: When we start passing GCOptions::Shutdown for
   * GCReason::XPCONNECT_SHUTDOWN GCs we can remove the extra check.
   */
  if (!isShutdownGC() && reason != JS::GCReason::XPCONNECT_SHUTDOWN) {
    StartHandlingCompressionsOnGC(rt);
  }

  return true;
}

BackgroundUnmarkTask::BackgroundUnmarkTask(GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::UNMARK) {}

void BackgroundUnmarkTask::initZones() {
  MOZ_ASSERT(isIdle());
  MOZ_ASSERT(zones.empty());
  MOZ_ASSERT(!isCancelled());

  // We can't safely iterate the zones vector from another thread so we copy the
  // zones to be collected into another vector.
  AutoEnterOOMUnsafeRegion oomUnsafe;
  for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
    if (!zones.append(zone.get())) {
      oomUnsafe.crash("BackgroundUnmarkTask::initZones");
    }

    zone->arenas.clearFreeLists();
    zone->arenas.moveArenasToCollectingLists();
  }
}

void BackgroundUnmarkTask::run(AutoLockHelperThreadState& helperThreadLock) {
  AutoUnlockHelperThreadState unlock(helperThreadLock);

  AutoTraceLog log(TraceLoggerForCurrentThread(), TraceLogger_GCUnmarking);

  for (Zone* zone : zones) {
    for (auto kind : AllAllocKinds()) {
      ArenaList& arenas = zone->arenas.collectingArenaList(kind);
      for (ArenaListIter arena(arenas.head()); !arena.done(); arena.next()) {
        arena->unmarkAll();
        if (isCancelled()) {
          break;
        }
      }
    }
  }

  zones.clear();
}

void GCRuntime::endPreparePhase(JS::GCReason reason) {
  MOZ_ASSERT(unmarkTask.isIdle());

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    /*
     * In an incremental GC, clear the arena free lists to ensure that
     * subsequent allocations refill them and end up marking new cells black.
     * See arenaAllocatedDuringGC().
     */
    zone->arenas.clearFreeLists();

    zone->markedStrings = 0;
    zone->finalizedStrings = 0;

    zone->setPreservingCode(false);

#ifdef JS_GC_ZEAL
    if (hasZealMode(ZealMode::YieldBeforeRootMarking)) {
      for (auto kind : AllAllocKinds()) {
        for (ArenaIter arena(zone, kind); !arena.done(); arena.next()) {
          arena->checkNoMarkedCells();
        }
      }
    }
#endif
  }

  // Discard JIT code more aggressively if the process is approaching its
  // executable code limit.
  bool canAllocateMoreCode = jit::CanLikelyAllocateMoreExecutableMemory();
  auto currentTime = ReallyNow();

  Compartment* activeCompartment = nullptr;
  jit::JitActivationIterator activation(rt->mainContextFromOwnThread());
  if (!activation.done()) {
    activeCompartment = activation->compartment();
  }

  for (CompartmentsIter c(rt); !c.done(); c.next()) {
    c->gcState.scheduledForDestruction = false;
    c->gcState.maybeAlive = false;
    c->gcState.hasEnteredRealm = false;
    bool isActiveCompartment = c == activeCompartment;
    for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
      if (r->shouldTraceGlobal() || !r->zone()->isGCScheduled()) {
        c->gcState.maybeAlive = true;
      }
      if (shouldPreserveJITCode(r, currentTime, reason, canAllocateMoreCode,
                                isActiveCompartment)) {
        r->zone()->setPreservingCode(true);
      }
      if (r->hasBeenEnteredIgnoringJit()) {
        c->gcState.hasEnteredRealm = true;
      }
    }
  }

  /*
   * Perform remaining preparation work that must take place in the first true
   * GC slice.
   */

  {
    gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::PREPARE);

    AutoLockHelperThreadState helperLock;

    /* Clear mark state for WeakMaps in parallel with other work. */
    AutoRunParallelTask unmarkWeakMaps(this, &GCRuntime::unmarkWeakMaps,
                                       gcstats::PhaseKind::UNMARK_WEAKMAPS,
                                       helperLock);

    AutoUnlockHelperThreadState unlock(helperLock);

    // Discard JIT code. For incremental collections, the sweep phase will
    // also discard JIT code.
    discardJITCodeForGC();
    startBackgroundFreeAfterMinorGC();

    /*
     * Relazify functions after discarding JIT code (we can't relazify
     * functions with JIT code) and before the actual mark phase, so that
     * the current GC can collect the JSScripts we're unlinking here.  We do
     * this only when we're performing a shrinking GC, as too much
     * relazification can cause performance issues when we have to reparse
     * the same functions over and over.
     */
    if (isShrinkingGC()) {
      relazifyFunctionsForShrinkingGC();
      purgePropMapTablesForShrinkingGC();
      purgeSourceURLsForShrinkingGC();
    }

    /*
     * We must purge the runtime at the beginning of an incremental GC. The
     * danger if we purge later is that the snapshot invariant of
     * incremental GC will be broken, as follows. If some object is
     * reachable only through some cache (say the dtoaCache) then it will
     * not be part of the snapshot.  If we purge after root marking, then
     * the mutator could obtain a pointer to the object and start using
     * it. This object might never be marked, so a GC hazard would exist.
     */
    purgeRuntime();

    if (isShutdownGC()) {
      /* Clear any engine roots that may hold external data live. */
      for (GCZonesIter zone(this); !zone.done(); zone.next()) {
        zone->clearRootsForShutdownGC();
      }

#ifdef DEBUG
      marker.markQueue.clear();
      marker.queuePos = 0;
#endif
    }
  }

#ifdef DEBUG
  if (fullCompartmentChecks) {
    checkForCompartmentMismatches();
  }
#endif
}

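// Clears each collected compartment's hasMarkedCells flag on construction;
// on destruction, any compartment that had a cell marked during root marking
// is flagged as maybeAlive.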
AutoUpdateLiveCompartments::AutoUpdateLiveCompartments(GCRuntime* gc) : gc(gc) {
  for (GCCompartmentsIter c(gc->rt); !c.done(); c.next()) {
    c->gcState.hasMarkedCells = false;
  }
}

AutoUpdateLiveCompartments::~AutoUpdateLiveCompartments() {
  for (GCCompartmentsIter c(gc->rt); !c.done(); c.next()) {
    if (c->gcState.hasMarkedCells) {
      c->gcState.maybeAlive = true;
    }
  }
}

void GCRuntime::beginMarkPhase(AutoGCSession& session) {
  /*
   * Mark phase.
   */
  gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);

  // This is the slice we actually start collecting. The number can be used to
  // check whether a major GC has started so we must not increment it until we
  // get here.
  incMajorGcNumber();

  marker.start();
  marker.clearMarkCount();
  MOZ_ASSERT(marker.isDrained());

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    // Incremental marking barriers are enabled at this point.
    zone->changeGCState(Zone::Prepare, Zone::MarkBlackOnly);

    // Merge arenas allocated during the prepare phase, then move all arenas to
    // the collecting arena lists.
    zone->arenas.mergeArenasFromCollectingLists();
    zone->arenas.moveArenasToCollectingLists();
  }

  if (rt->isBeingDestroyed()) {
    checkNoRuntimeRoots(session);
  } else {
    AutoUpdateLiveCompartments updateLive(this);
    traceRuntimeForMajorGC(&marker, session);
  }

  updateMemoryCountersOnGCStart();
  stats().measureInitialHeapSize();
}

void GCRuntime::findDeadCompartments() {
  gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::FIND_DEAD_COMPARTMENTS);

  /*
   * This code ensures that if a compartment is "dead", then it will be
   * collected in this GC. A compartment is considered dead if its maybeAlive
   * flag is false. The maybeAlive flag is set if:
   *
   *   (1) the compartment has been entered (set in beginMarkPhase() above)
   *   (2) the compartment's zone is not being collected (set in
   *       beginMarkPhase() above)
   *   (3) an object in the compartment was marked during root marking, either
   *       as a black root or a gray root. This is arranged by
   *       SetCompartmentHasMarkedCells and AutoUpdateLiveCompartments.
   *   (4) the compartment has incoming cross-compartment edges from another
   *       compartment that has maybeAlive set (set by this method).
   *
   * If maybeAlive is false, then we set the scheduledForDestruction flag.
   * At the end of the GC, we look for compartments where
   * scheduledForDestruction is true. These are compartments that were somehow
   * "revived" during the incremental GC. If any are found, we do a special,
   * non-incremental GC of those compartments to try to collect them.
   *
   * Compartments can be revived for a variety of reasons. One reason is bug
   * 811587, where a reflector that was dead can be revived by DOM code that
   * still refers to the underlying DOM node.
   *
   * Read barriers and allocations can also cause revival. This might happen
   * during a function like JS_TransplantObject, which iterates over all
   * compartments, live or dead, and operates on their objects. See bug 803376
   * for details on this problem. To avoid the problem, we try to avoid
   * allocation and read barriers during JS_TransplantObject and the like.
   */

  // Propagate the maybeAlive flag via cross-compartment edges.

  Vector<Compartment*, 0, js::SystemAllocPolicy> workList;

  for (CompartmentsIter comp(rt); !comp.done(); comp.next()) {
    if (comp->gcState.maybeAlive) {
      if (!workList.append(comp)) {
        return;
      }
    }
  }

  while (!workList.empty()) {
    Compartment* comp = workList.popCopy();
    for (Compartment::WrappedObjectCompartmentEnum e(comp); !e.empty();
         e.popFront()) {
      Compartment* dest = e.front();
      if (!dest->gcState.maybeAlive) {
        dest->gcState.maybeAlive = true;
        if (!workList.append(dest)) {
          return;
        }
      }
    }
  }

  // Set scheduledForDestruction based on maybeAlive.

  for (GCCompartmentsIter comp(rt); !comp.done(); comp.next()) {
    MOZ_ASSERT(!comp->gcState.scheduledForDestruction);
    if (!comp->gcState.maybeAlive) {
      comp->gcState.scheduledForDestruction = true;
    }
  }
}

void GCRuntime::updateMemoryCountersOnGCStart() {
  heapSize.updateOnGCStart();

  // Update memory counters for the zones we are collecting.
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    zone->updateMemoryCountersOnGCStart();
  }
}

IncrementalProgress GCRuntime::markUntilBudgetExhausted(
    SliceBudget& sliceBudget, GCMarker::ShouldReportMarkTime reportTime) {
  // Run a marking slice and return whether the stack is now empty.

  AutoMajorGCProfilerEntry s(this);

#ifdef DEBUG
  AutoSetThreadIsMarking threadIsMarking;
#endif  // DEBUG

  if (marker.processMarkQueue() == GCMarker::QueueYielded) {
    return NotFinished;
  }

  return marker.markUntilBudgetExhausted(sliceBudget, reportTime) ? Finished
                                                                  : NotFinished;
}

void GCRuntime::drainMarkStack() {
  auto unlimited = SliceBudget::unlimited();
  MOZ_RELEASE_ASSERT(marker.markUntilBudgetExhausted(unlimited));
}

void GCRuntime::finishCollection() {
  assertBackgroundSweepingFinished();

  MOZ_ASSERT(marker.isDrained());
  marker.stop();

  maybeStopPretenuring();

  {
    AutoLockGC lock(this);
    updateGCThresholdsAfterCollection(lock);
  }

  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    zone->changeGCState(Zone::Finished, Zone::NoGC);
    zone->notifyObservingDebuggers();
  }

#ifdef JS_GC_ZEAL
  clearSelectedForMarking();
#endif

  auto currentTime = ReallyNow();
  schedulingState.updateHighFrequencyMode(lastGCEndTime_, currentTime,
                                          tunables);
  lastGCEndTime_ = currentTime;

  checkGCStateNotInUse();
}

void GCRuntime::checkGCStateNotInUse() {
#ifdef DEBUG
  MOZ_ASSERT(!marker.isActive());
  MOZ_ASSERT(marker.isDrained());
  MOZ_ASSERT(!lastMarkSlice);

  for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
    if (zone->wasCollected()) {
      zone->arenas.checkGCStateNotInUse();
    }
    MOZ_ASSERT(!zone->wasGCStarted());
    MOZ_ASSERT(!zone->needsIncrementalBarrier());
    MOZ_ASSERT(!zone->isOnList());
  }

  MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
  MOZ_ASSERT(cellsToAssertNotGray.ref().empty());

  AutoLockHelperThreadState lock;
  MOZ_ASSERT(!requestSliceAfterBackgroundTask);
  MOZ_ASSERT(unmarkTask.isIdle(lock));
  MOZ_ASSERT(markTask.isIdle(lock));
  MOZ_ASSERT(sweepTask.isIdle(lock));
  MOZ_ASSERT(decommitTask.isIdle(lock));
#endif
}

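// For zones where nursery string allocation was disabled due to pretenuring,
// check what fraction of tenured strings died in this collection. If that
// fraction exceeds the configured threshold, discard JIT code compiled under
// the pretenuring assumption and start allocating strings in the nursery
// again.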
maybeStopPretenuring()2825 void GCRuntime::maybeStopPretenuring() {
2826   nursery().maybeStopPretenuring(this);
2827 
2828   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
2829     if (zone->allocNurseryStrings) {
2830       continue;
2831     }
2832 
2833     // Count the number of strings before the major GC.
2834     size_t numStrings = zone->markedStrings + zone->finalizedStrings;
2835     double rate = double(zone->finalizedStrings) / double(numStrings);
2836     if (rate > tunables.stopPretenureStringThreshold()) {
2837       CancelOffThreadIonCompile(zone);
2838       bool preserving = zone->isPreservingCode();
2839       zone->setPreservingCode(false);
2840       zone->discardJitCode(rt->defaultFreeOp());
2841       zone->setPreservingCode(preserving);
2842       for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
2843         if (jit::JitRealm* jitRealm = r->jitRealm()) {
2844           jitRealm->discardStubs();
2845           jitRealm->setStringsCanBeInNursery(true);
2846         }
2847       }
2848 
2849       zone->markedStrings = 0;
2850       zone->finalizedStrings = 0;
2851       zone->allocNurseryStrings = true;
2852     }
2853   }
2854 }
2855 
updateGCThresholdsAfterCollection(const AutoLockGC & lock)2856 void GCRuntime::updateGCThresholdsAfterCollection(const AutoLockGC& lock) {
2857   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
2858     zone->clearGCSliceThresholds();
2859     zone->updateGCStartThresholds(*this, lock);
2860   }
2861 }
2862 
updateAllGCStartThresholds(const AutoLockGC & lock)2863 void GCRuntime::updateAllGCStartThresholds(const AutoLockGC& lock) {
2864   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
2865     zone->updateGCStartThresholds(*this, lock);
2866   }
2867 }
2868 
GCHeapStateToLabel(JS::HeapState heapState)2869 static const char* GCHeapStateToLabel(JS::HeapState heapState) {
2870   switch (heapState) {
2871     case JS::HeapState::MinorCollecting:
2872       return "js::Nursery::collect";
2873     case JS::HeapState::MajorCollecting:
2874       return "js::GCRuntime::collect";
2875     default:
2876       MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
2877   }
2878   MOZ_ASSERT_UNREACHABLE("Should have exhausted every JS::HeapState variant!");
2879   return nullptr;
2880 }
2881 
GCHeapStateToProfilingCategory(JS::HeapState heapState)2882 static JS::ProfilingCategoryPair GCHeapStateToProfilingCategory(
2883     JS::HeapState heapState) {
2884   return heapState == JS::HeapState::MinorCollecting
2885              ? JS::ProfilingCategoryPair::GCCC_MinorGC
2886              : JS::ProfilingCategoryPair::GCCC_MajorGC;
2887 }
2888 
2889 /* Start a new heap session. */
AutoHeapSession(GCRuntime * gc,JS::HeapState heapState)2890 AutoHeapSession::AutoHeapSession(GCRuntime* gc, JS::HeapState heapState)
2891     : gc(gc), prevState(gc->heapState_) {
2892   MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
2893   MOZ_ASSERT(prevState == JS::HeapState::Idle ||
2894              (prevState == JS::HeapState::MajorCollecting &&
2895               heapState == JS::HeapState::MinorCollecting));
2896   MOZ_ASSERT(heapState != JS::HeapState::Idle);
2897 
2898   gc->heapState_ = heapState;
2899 
2900   if (heapState == JS::HeapState::MinorCollecting ||
2901       heapState == JS::HeapState::MajorCollecting) {
2902     profilingStackFrame.emplace(gc->rt->mainContextFromOwnThread(),
2903                                 GCHeapStateToLabel(heapState),
2904                                 GCHeapStateToProfilingCategory(heapState));
2905   }
2906 }
2907 
2908 AutoHeapSession::~AutoHeapSession() {
2909   MOZ_ASSERT(JS::RuntimeHeapIsBusy());
2910   gc->heapState_ = prevState;
2911 }
2912 
2913 static const char* MajorGCStateToLabel(State state) {
2914   switch (state) {
2915     case State::Mark:
2916       return "js::GCRuntime::markUntilBudgetExhausted";
2917     case State::Sweep:
2918       return "js::GCRuntime::performSweepActions";
2919     case State::Compact:
2920       return "js::GCRuntime::compactPhase";
2921     default:
2922       MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
2923   }
2924 
2925   MOZ_ASSERT_UNREACHABLE("Should have exhausted every State variant!");
2926   return nullptr;
2927 }
2928 
2929 static JS::ProfilingCategoryPair MajorGCStateToProfilingCategory(State state) {
2930   switch (state) {
2931     case State::Mark:
2932       return JS::ProfilingCategoryPair::GCCC_MajorGC_Mark;
2933     case State::Sweep:
2934       return JS::ProfilingCategoryPair::GCCC_MajorGC_Sweep;
2935     case State::Compact:
2936       return JS::ProfilingCategoryPair::GCCC_MajorGC_Compact;
2937     default:
2938       MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
2939   }
2940 }
2941 
2942 AutoMajorGCProfilerEntry::AutoMajorGCProfilerEntry(GCRuntime* gc)
2943     : AutoGeckoProfilerEntry(gc->rt->mainContextFromAnyThread(),
2944                              MajorGCStateToLabel(gc->state()),
2945                              MajorGCStateToProfilingCategory(gc->state())) {
2946   MOZ_ASSERT(gc->heapState() == JS::HeapState::MajorCollecting);
2947 }
2948 
2949 GCRuntime::IncrementalResult GCRuntime::resetIncrementalGC(
2950     GCAbortReason reason) {
2951   // Drop as much work as possible from an ongoing incremental GC so
2952   // we can start a new GC after it has finished.
2953   if (incrementalState == State::NotActive) {
2954     return IncrementalResult::Ok;
2955   }
2956 
2957   AutoGCSession session(this, JS::HeapState::MajorCollecting);
2958 
2959   switch (incrementalState) {
2960     case State::NotActive:
2961     case State::MarkRoots:
2962     case State::Finish:
2963       MOZ_CRASH("Unexpected GC state in resetIncrementalGC");
2964       break;
2965 
2966     case State::Prepare:
2967       unmarkTask.cancelAndWait();
2968 
2969       for (GCZonesIter zone(this); !zone.done(); zone.next()) {
2970         zone->changeGCState(Zone::Prepare, Zone::NoGC);
2971         zone->clearGCSliceThresholds();
2972         zone->arenas.clearFreeLists();
2973         zone->arenas.mergeArenasFromCollectingLists();
2974       }
2975 
2976       incrementalState = State::NotActive;
2977       checkGCStateNotInUse();
2978       break;
2979 
2980     case State::Mark: {
2981       // Cancel any ongoing marking.
2982       marker.reset();
2983 
2984       for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
2985         resetGrayList(c);
2986       }
2987 
2988       for (GCZonesIter zone(this); !zone.done(); zone.next()) {
2989         zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
2990         zone->clearGCSliceThresholds();
2991         zone->arenas.unmarkPreMarkedFreeCells();
2992         zone->arenas.mergeArenasFromCollectingLists();
2993       }
2994 
2995       {
2996         AutoLockHelperThreadState lock;
2997         lifoBlocksToFree.ref().freeAll();
2998       }
2999 
3000       lastMarkSlice = false;
3001       incrementalState = State::Finish;
3002 
3003       MOZ_ASSERT(!marker.shouldCheckCompartments());
3004 
3005       break;
3006     }
3007 
3008     case State::Sweep: {
3009       // Finish sweeping the current sweep group, then abort.
3010       for (CompartmentsIter c(rt); !c.done(); c.next()) {
3011         c->gcState.scheduledForDestruction = false;
3012       }
3013 
3014       abortSweepAfterCurrentGroup = true;
3015       isCompacting = false;
3016 
3017       break;
3018     }
3019 
3020     case State::Finalize: {
3021       isCompacting = false;
3022       break;
3023     }
3024 
3025     case State::Compact: {
3026       // Skip any remaining zones that would have been compacted.
3027       MOZ_ASSERT(isCompacting);
3028       startedCompacting = true;
3029       zonesToMaybeCompact.ref().clear();
3030       break;
3031     }
3032 
3033     case State::Decommit: {
3034       break;
3035     }
3036   }
3037 
3038   stats().reset(reason);
3039 
3040   return IncrementalResult::ResetIncremental;
3041 }
3042 
3043 AutoDisableBarriers::AutoDisableBarriers(GCRuntime* gc) : gc(gc) {
3044   /*
3045    * Clear needsIncrementalBarrier early so we don't do any write barriers
3046    * during sweeping.
3047    */
3048   for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
3049     if (zone->isGCMarking()) {
3050       MOZ_ASSERT(zone->needsIncrementalBarrier());
3051       zone->setNeedsIncrementalBarrier(false);
3052     }
3053     MOZ_ASSERT(!zone->needsIncrementalBarrier());
3054   }
3055 }
3056 
3057 AutoDisableBarriers::~AutoDisableBarriers() {
3058   for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
3059     MOZ_ASSERT(!zone->needsIncrementalBarrier());
3060     if (zone->isGCMarking()) {
3061       zone->setNeedsIncrementalBarrier(true);
3062     }
3063   }
3064 }
3065 
3066 static bool NeedToCollectNursery(GCRuntime* gc) {
3067   return !gc->nursery().isEmpty() || !gc->storeBuffer().isEmpty();
3068 }
3069 
3070 #ifdef DEBUG
3071 static const char* DescribeBudget(const SliceBudget& budget) {
3072   MOZ_ASSERT(TlsContext.get()->isMainThreadContext());
3073   constexpr size_t length = 32;
3074   static char buffer[length];
3075   budget.describe(buffer, length);
3076   return buffer;
3077 }
3078 #endif
3079 
3080 static bool ShouldPauseMutatorWhileWaiting(const SliceBudget& budget,
3081                                            JS::GCReason reason,
3082                                            bool budgetWasIncreased) {
3083   // When we're nearing the incremental limit at which we will finish the
3084   // collection synchronously, pause the main thread if there is only background
3085   // GC work happening. This allows the GC to catch up and avoid hitting the
3086   // limit.
3087   return budget.isTimeBudget() &&
3088          (reason == JS::GCReason::ALLOC_TRIGGER ||
3089           reason == JS::GCReason::TOO_MUCH_MALLOC) &&
3090          budgetWasIncreased;
3091 }
3092 
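// Advance the current collection by one slice, stepping incrementalState
// through the collector states until the budget is exhausted or the
// collection finishes.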
3093 void GCRuntime::incrementalSlice(SliceBudget& budget, JS::GCReason reason,
3094                                  bool budgetWasIncreased) {
3095   MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
3096 
3097   AutoSetThreadIsPerformingGC performingGC;
3098 
3099   AutoGCSession session(this, JS::HeapState::MajorCollecting);
3100 
3101   bool destroyingRuntime = (reason == JS::GCReason::DESTROY_RUNTIME);
3102 
3103   initialState = incrementalState;
3104   isIncremental = !budget.isUnlimited();
3105 
3106 #ifdef JS_GC_ZEAL
3107   // Do the incremental collection type specified by zeal mode if the collection
3108   // was triggered by runDebugGC() and incremental GC has not been cancelled by
3109   // resetIncrementalGC().
3110   useZeal = isIncremental && reason == JS::GCReason::DEBUG_GC;
3111 #endif
3112 
3113 #ifdef DEBUG
3114   stats().log("Incremental: %d, lastMarkSlice: %d, useZeal: %d, budget: %s",
3115               bool(isIncremental), bool(lastMarkSlice), bool(useZeal),
3116               DescribeBudget(budget));
3117 #endif
3118 
3119   if (useZeal && hasIncrementalTwoSliceZealMode()) {
3120     // Yields between slices occur at predetermined points in these modes; the
3121     // budget is not used. |isIncremental| is still true.
3122     stats().log("Using unlimited budget for two-slice zeal mode");
3123     budget = SliceBudget::unlimited();
3124   }
3125 
3126   bool shouldPauseMutator =
3127       ShouldPauseMutatorWhileWaiting(budget, reason, budgetWasIncreased);
3128 
3129   switch (incrementalState) {
3130     case State::NotActive:
3131       startCollection(reason);
3132 
3133       incrementalState = State::Prepare;
3134       if (!beginPreparePhase(reason, session)) {
3135         incrementalState = State::NotActive;
3136         break;
3137       }
3138 
3139       if (useZeal && hasZealMode(ZealMode::YieldBeforeRootMarking)) {
3140         break;
3141       }
3142 
3143       [[fallthrough]];
3144 
3145     case State::Prepare:
3146       if (waitForBackgroundTask(unmarkTask, budget, shouldPauseMutator,
3147                                 DontTriggerSliceWhenFinished) == NotFinished) {
3148         break;
3149       }
3150 
3151       incrementalState = State::MarkRoots;
3152       [[fallthrough]];
3153 
3154     case State::MarkRoots:
3155       if (NeedToCollectNursery(this)) {
3156         collectNurseryFromMajorGC(reason);
3157       }
3158 
3159       endPreparePhase(reason);
3160       beginMarkPhase(session);
3161       incrementalState = State::Mark;
3162 
3163       if (useZeal && hasZealMode(ZealMode::YieldBeforeMarking) &&
3164           isIncremental) {
3165         break;
3166       }
3167 
3168       [[fallthrough]];
3169 
3170     case State::Mark:
3171       if (mightSweepInThisSlice(budget.isUnlimited())) {
3172         // Trace wrapper rooters before marking if we might start sweeping in
3173         // this slice.
3174         rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);
3175       }
3176 
3177       {
3178         gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
3179         if (markUntilBudgetExhausted(budget) == NotFinished) {
3180           break;
3181         }
3182       }
3183 
3184       MOZ_ASSERT(marker.isDrained());
3185 
3186       /*
3187        * There are a number of reasons why we break out of collection here,
3188        * either to end the slice or to run a new iteration of the loop in
3189        * GCRuntime::collect().
3190        */
3191 
3192       /*
3193        * In incremental GCs where we have already performed more than one
3194        * slice we yield after marking with the aim of starting the sweep in
3195        * the next slice, since the first slice of sweeping can be expensive.
3196        *
3197        * This is modified by the various zeal modes.  We don't yield in
3198        * YieldBeforeMarking mode and we always yield in YieldBeforeSweeping
3199        * mode.
3200        *
3201        * We will need to mark anything new on the stack when we resume, so
3202        * we stay in Mark state.
3203        */
3204       if (isIncremental && !lastMarkSlice) {
3205         if ((initialState == State::Mark &&
3206              !(useZeal && hasZealMode(ZealMode::YieldBeforeMarking))) ||
3207             (useZeal && hasZealMode(ZealMode::YieldBeforeSweeping))) {
3208           lastMarkSlice = true;
3209           stats().log("Yielding before starting sweeping");
3210           break;
3211         }
3212       }
3213 
3214       incrementalState = State::Sweep;
3215       lastMarkSlice = false;
3216 
3217       beginSweepPhase(reason, session);
3218 
3219       [[fallthrough]];
3220 
3221     case State::Sweep:
3222       if (storeBuffer().mayHavePointersToDeadCells()) {
3223         collectNurseryFromMajorGC(reason);
3224       }
3225 
3226       if (initialState == State::Sweep) {
3227         rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);
3228       }
3229 
3230       if (performSweepActions(budget) == NotFinished) {
3231         break;
3232       }
3233 
3234       endSweepPhase(destroyingRuntime);
3235 
3236       incrementalState = State::Finalize;
3237 
3238       [[fallthrough]];
3239 
3240     case State::Finalize:
3241       if (waitForBackgroundTask(sweepTask, budget, shouldPauseMutator,
3242                                 TriggerSliceWhenFinished) == NotFinished) {
3243         break;
3244       }
3245 
3246       assertBackgroundSweepingFinished();
3247 
3248       {
3249         // Sweep the zones list now that background finalization is finished to
3250         // remove and free dead zones, compartments and realms.
3251         gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP);
3252         gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::DESTROY);
3253         JSFreeOp fop(rt);
3254         sweepZones(&fop, destroyingRuntime);
3255       }
3256 
3257       MOZ_ASSERT(!startedCompacting);
3258       incrementalState = State::Compact;
3259 
3260       // Always yield before compacting since it is not incremental.
3261       if (isCompacting && !budget.isUnlimited()) {
3262         break;
3263       }
3264 
3265       [[fallthrough]];
3266 
3267     case State::Compact:
3268       if (isCompacting) {
3269         if (NeedToCollectNursery(this)) {
3270           collectNurseryFromMajorGC(reason);
3271         }
3272 
3273         storeBuffer().checkEmpty();
3274         if (!startedCompacting) {
3275           beginCompactPhase();
3276         }
3277 
3278         if (compactPhase(reason, budget, session) == NotFinished) {
3279           break;
3280         }
3281 
3282         endCompactPhase();
3283       }
3284 
3285       startDecommit();
3286       incrementalState = State::Decommit;
3287 
3288       [[fallthrough]];
3289 
3290     case State::Decommit:
3291       if (waitForBackgroundTask(decommitTask, budget, shouldPauseMutator,
3292                                 TriggerSliceWhenFinished) == NotFinished) {
3293         break;
3294       }
3295 
3296       incrementalState = State::Finish;
3297 
3298       [[fallthrough]];
3299 
3300     case State::Finish:
3301       finishCollection();
3302       incrementalState = State::NotActive;
3303       break;
3304   }
3305 
3306   MOZ_ASSERT(safeToYield);
3307   MOZ_ASSERT(marker.markColor() == MarkColor::Black);
3308 }
3309 
3310 void GCRuntime::collectNurseryFromMajorGC(JS::GCReason reason) {
3311   collectNursery(gcOptions(), reason,
3312                  gcstats::PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC);
3313 }
3314 
3315 bool GCRuntime::hasForegroundWork() const {
3316   switch (incrementalState) {
3317     case State::NotActive:
3318       // Incremental GC is not running and no work is pending.
3319       return false;
3320     case State::Prepare:
3321       // We yield in the Prepare state after starting unmarking.
3322       return !unmarkTask.wasStarted();
3323     case State::Finalize:
3324       // We yield in the Finalize state to wait for background sweeping.
3325       return !isBackgroundSweeping();
3326     case State::Decommit:
3327       // We yield in the Decommit state to wait for background decommit.
3328       return !decommitTask.wasStarted();
3329     default:
3330       // In all other states there is still work to do.
3331       return true;
3332   }
3333 }
3334 
3335 IncrementalProgress GCRuntime::waitForBackgroundTask(
3336     GCParallelTask& task, const SliceBudget& budget, bool shouldPauseMutator,
3337     ShouldTriggerSliceWhenFinished triggerSlice) {
3338   // Wait here in non-incremental collections, or if we want to pause the
3339   // mutator to let the GC catch up.
3340   if (budget.isUnlimited() || shouldPauseMutator) {
3341     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
3342     Maybe<TimeStamp> deadline;
3343     if (budget.isTimeBudget()) {
3344       deadline.emplace(budget.deadline());
3345     }
3346     task.join(deadline);
3347   }
3348 
3349   // In incremental collections, yield if the task has not finished and
3350   // optionally request a slice to notify us when this happens.
3351   if (!budget.isUnlimited()) {
3352     AutoLockHelperThreadState lock;
3353     if (task.wasStarted(lock)) {
3354       if (triggerSlice) {
3355         requestSliceAfterBackgroundTask = true;
3356       }
3357       return NotFinished;
3358     }
3359 
3360     task.joinWithLockHeld(lock);
3361   }
3362 
3363   MOZ_ASSERT(task.isIdle());
3364 
3365   if (triggerSlice) {
3366     cancelRequestedGCAfterBackgroundTask();
3367   }
3368 
3369   return Finished;
3370 }
3371 
3372 GCAbortReason gc::IsIncrementalGCUnsafe(JSRuntime* rt) {
3373   MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
3374 
3375   if (!rt->gc.isIncrementalGCAllowed()) {
3376     return GCAbortReason::IncrementalDisabled;
3377   }
3378 
3379   return GCAbortReason::None;
3380 }
3381 
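// Debug-only check: crash with diagnostic output if a zone that hit an
// incremental limit was not scheduled for collection.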
3382 inline void GCRuntime::checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
3383                                             const char* trigger) {
3384 #ifdef DEBUG
3385   if (zone->isGCScheduled()) {
3386     return;
3387   }
3388 
3389   fprintf(stderr,
3390           "checkZoneIsScheduled: Zone %p not scheduled as expected in %s GC "
3391           "for %s trigger\n",
3392           zone, JS::ExplainGCReason(reason), trigger);
3393   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
3394     fprintf(stderr, "  Zone %p:%s%s\n", zone.get(),
3395             zone->isAtomsZone() ? " atoms" : "",
3396             zone->isGCScheduled() ? " scheduled" : "");
3397   }
3398   fflush(stderr);
3399   MOZ_CRASH("Zone not scheduled");
3400 #endif
3401 }
3402 
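// Decide whether this collection can remain incremental. If not, make the
// budget unlimited, record the abort reason in the stats, and reset any
// in-progress incremental collection where necessary.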
3403 GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
3404     bool nonincrementalByAPI, JS::GCReason reason, SliceBudget& budget) {
3405   if (nonincrementalByAPI) {
3406     stats().nonincremental(GCAbortReason::NonIncrementalRequested);
3407     budget = SliceBudget::unlimited();
3408 
3409     // Reset any in-progress incremental GC if this was triggered via the
3410     // API. This isn't required for correctness, but sometimes during tests
3411     // the caller expects this GC to collect certain objects, and we need
3412     // to make sure to collect everything possible.
3413     if (reason != JS::GCReason::ALLOC_TRIGGER) {
3414       return resetIncrementalGC(GCAbortReason::NonIncrementalRequested);
3415     }
3416 
3417     return IncrementalResult::Ok;
3418   }
3419 
3420   if (reason == JS::GCReason::ABORT_GC) {
3421     budget = SliceBudget::unlimited();
3422     stats().nonincremental(GCAbortReason::AbortRequested);
3423     return resetIncrementalGC(GCAbortReason::AbortRequested);
3424   }
3425 
3426   if (!budget.isUnlimited()) {
3427     GCAbortReason unsafeReason = IsIncrementalGCUnsafe(rt);
3428     if (unsafeReason == GCAbortReason::None) {
3429       if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
3430         unsafeReason = GCAbortReason::CompartmentRevived;
3431       } else if (!incrementalGCEnabled) {
3432         unsafeReason = GCAbortReason::ModeChange;
3433       }
3434     }
3435 
3436     if (unsafeReason != GCAbortReason::None) {
3437       budget = SliceBudget::unlimited();
3438       stats().nonincremental(unsafeReason);
3439       return resetIncrementalGC(unsafeReason);
3440     }
3441   }
3442 
3443   GCAbortReason resetReason = GCAbortReason::None;
3444   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
3445     if (zone->gcHeapSize.bytes() >=
3446         zone->gcHeapThreshold.incrementalLimitBytes()) {
3447       checkZoneIsScheduled(zone, reason, "GC bytes");
3448       budget = SliceBudget::unlimited();
3449       stats().nonincremental(GCAbortReason::GCBytesTrigger);
3450       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
3451         resetReason = GCAbortReason::GCBytesTrigger;
3452       }
3453     }
3454 
3455     if (zone->mallocHeapSize.bytes() >=
3456         zone->mallocHeapThreshold.incrementalLimitBytes()) {
3457       checkZoneIsScheduled(zone, reason, "malloc bytes");
3458       budget = SliceBudget::unlimited();
3459       stats().nonincremental(GCAbortReason::MallocBytesTrigger);
3460       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
3461         resetReason = GCAbortReason::MallocBytesTrigger;
3462       }
3463     }
3464 
3465     if (zone->jitHeapSize.bytes() >=
3466         zone->jitHeapThreshold.incrementalLimitBytes()) {
3467       checkZoneIsScheduled(zone, reason, "JIT code bytes");
3468       budget = SliceBudget::unlimited();
3469       stats().nonincremental(GCAbortReason::JitCodeBytesTrigger);
3470       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
3471         resetReason = GCAbortReason::JitCodeBytesTrigger;
3472       }
3473     }
3474 
3475     if (isIncrementalGCInProgress() &&
3476         zone->isGCScheduled() != zone->wasGCStarted()) {
3477       budget = SliceBudget::unlimited();
3478       resetReason = GCAbortReason::ZoneChange;
3479     }
3480   }
3481 
3482   if (resetReason != GCAbortReason::None) {
3483     return resetIncrementalGC(resetReason);
3484   }
3485 
3486   return IncrementalResult::Ok;
3487 }
3488 
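// Increase the slice budget for long-running or urgent collections. Returns
// true if the budget was increased.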
3489 bool GCRuntime::maybeIncreaseSliceBudget(SliceBudget& budget) {
3490   if (js::SupportDifferentialTesting()) {
3491     return false;
3492   }
3493 
3494   if (!budget.isTimeBudget() || !isIncrementalGCInProgress()) {
3495     return false;
3496   }
3497 
3498   bool wasIncreasedForLongCollections =
3499       maybeIncreaseSliceBudgetForLongCollections(budget);
3500   bool wasIncreasedForUrgentCollections =
3501       maybeIncreaseSliceBudgetForUrgentCollections(budget);
3502 
3503   return wasIncreasedForLongCollections || wasIncreasedForUrgentCollections;
3504 }
3505 
3506 static void ExtendBudget(SliceBudget& budget, double newDuration) {
3507   bool idleTriggered = budget.idle;
3508   budget = SliceBudget(TimeBudget(newDuration), nullptr);  // Uninterruptible.
3509   budget.idle = idleTriggered;
3510   budget.extended = true;
3511 }
3512 
3513 bool GCRuntime::maybeIncreaseSliceBudgetForLongCollections(
3514     SliceBudget& budget) {
3515   // For long-running collections, enforce a minimum time budget that increases
3516   // linearly with time up to a maximum.
3517 
3518   // All times are in milliseconds.
3519   struct BudgetAtTime {
3520     double time;
3521     double budget;
3522   };
3523   const BudgetAtTime MinBudgetStart{1500, 0.0};
3524   const BudgetAtTime MinBudgetEnd{2500, 100.0};
3525 
3526   double totalTime = (ReallyNow() - lastGCStartTime()).ToMilliseconds();
3527 
3528   double minBudget =
3529       LinearInterpolate(totalTime, MinBudgetStart.time, MinBudgetStart.budget,
3530                         MinBudgetEnd.time, MinBudgetEnd.budget);
3531 
3532   if (budget.timeBudget() >= minBudget) {
3533     return false;
3534   }
3535 
3536   ExtendBudget(budget, minBudget);
3537   return true;
3538 }
3539 
3540 bool GCRuntime::maybeIncreaseSliceBudgetForUrgentCollections(
3541     SliceBudget& budget) {
3542   // Enforce a minimum time budget based on how close we are to the incremental
3543   // limit.
3544 
3545   size_t minBytesRemaining = SIZE_MAX;
3546   for (AllZonesIter zone(this); !zone.done(); zone.next()) {
3547     if (!zone->wasGCStarted()) {
3548       continue;
3549     }
3550     size_t gcBytesRemaining =
3551         zone->gcHeapThreshold.incrementalBytesRemaining(zone->gcHeapSize);
3552     minBytesRemaining = std::min(minBytesRemaining, gcBytesRemaining);
3553     size_t mallocBytesRemaining =
3554         zone->mallocHeapThreshold.incrementalBytesRemaining(
3555             zone->mallocHeapSize);
3556     minBytesRemaining = std::min(minBytesRemaining, mallocBytesRemaining);
3557   }
3558 
3559   if (minBytesRemaining < tunables.urgentThresholdBytes() &&
3560       minBytesRemaining != 0) {
3561     // Increase budget based on the reciprocal of the fraction remaining.
3562     double fractionRemaining =
3563         double(minBytesRemaining) / double(tunables.urgentThresholdBytes());
3564     double minBudget = double(defaultSliceBudgetMS()) / fractionRemaining;
3565     if (budget.timeBudget() < minBudget) {
3566       ExtendBudget(budget, minBudget);
3567       return true;
3568     }
3569   }
3570 
3571   return false;
3572 }
3573 
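// Schedule zones for collection: all zones if per-zone GC is disabled, zones
// already being collected by an incremental GC, and zones that have reached
// an eager allocation trigger.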
3574 static void ScheduleZones(GCRuntime* gc) {
3575   for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
3576     if (!gc->isPerZoneGCEnabled()) {
3577       zone->scheduleGC();
3578     }
3579 
3580     // To avoid resets, continue to collect any zones that were being
3581     // collected in a previous slice.
3582     if (gc->isIncrementalGCInProgress() && zone->wasGCStarted()) {
3583       zone->scheduleGC();
3584     }
3585 
3586     // This is a heuristic to reduce the total number of collections.
3587     bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
3588     if (zone->gcHeapSize.bytes() >=
3589             zone->gcHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
3590         zone->mallocHeapSize.bytes() >=
3591             zone->mallocHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
3592         zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.startBytes()) {
3593       zone->scheduleGC();
3594     }
3595   }
3596 }
3597 
3598 static void UnscheduleZones(GCRuntime* gc) {
3599   for (ZonesIter zone(gc->rt, WithAtoms); !zone.done(); zone.next()) {
3600     zone->unscheduleGC();
3601   }
3602 }
3603 
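// RAII class that calls the JSGC_BEGIN and JSGC_END callbacks around a
// collection. The callbacks are skipped for interior slices of an
// incremental GC.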
3604 class js::gc::AutoCallGCCallbacks {
3605   GCRuntime& gc_;
3606   JS::GCReason reason_;
3607 
3608  public:
3609   explicit AutoCallGCCallbacks(GCRuntime& gc, JS::GCReason reason)
3610       : gc_(gc), reason_(reason) {
3611     gc_.maybeCallGCCallback(JSGC_BEGIN, reason);
3612   }
3613   ~AutoCallGCCallbacks() { gc_.maybeCallGCCallback(JSGC_END, reason_); }
3614 };
3615 
3616 void GCRuntime::maybeCallGCCallback(JSGCStatus status, JS::GCReason reason) {
3617   if (!gcCallback.ref().op) {
3618     return;
3619   }
3620 
3621   if (isIncrementalGCInProgress()) {
3622     return;
3623   }
3624 
3625   if (gcCallbackDepth == 0) {
3626     // Save scheduled zone information in case the callback clears it.
3627     for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
3628       zone->gcScheduledSaved_ = zone->gcScheduled_;
3629     }
3630   }
3631 
3632   // Save and clear GC options in case the callback reenters GC.
3633   JS::GCOptions options = gcOptions();
3634   maybeGcOptions = Nothing();
3635 
3636   gcCallbackDepth++;
3637 
3638   callGCCallback(status, reason);
3639 
3640   MOZ_ASSERT(gcCallbackDepth != 0);
3641   gcCallbackDepth--;
3642 
3643   // Restore the original GC options.
3644   maybeGcOptions = Some(options);
3645 
3646   if (gcCallbackDepth == 0) {
3647     // Ensure any zone that was originally scheduled stays scheduled.
3648     for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
3649       zone->gcScheduled_ = zone->gcScheduled_ || zone->gcScheduledSaved_;
3650     }
3651   }
3652 }
3653 
3654 /*
3655  * We disable inlining to ensure that the bottom of the stack with possible GC
3656  * roots recorded in MarkRuntime excludes any pointers we use during the marking
3657  * implementation.
3658  */
3659 MOZ_NEVER_INLINE GCRuntime::IncrementalResult GCRuntime::gcCycle(
3660     bool nonincrementalByAPI, const SliceBudget& budgetArg,
3661     JS::GCReason reason) {
3662   // Assert if this is a GC unsafe region.
3663   rt->mainContextFromOwnThread()->verifyIsSafeToGC();
3664 
3665   // It's ok if threads other than the main thread have suppressGC set, as
3666   // they are operating on zones which will not be collected from here.
3667   MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
3668 
3669   // This reason is used internally. See below.
3670   MOZ_ASSERT(reason != JS::GCReason::RESET);
3671 
3672   // Background finalization and decommit are finished by definition before we
3673   // can start a new major GC.  Background allocation may still be running, but
3674   // that's OK because chunk pools are protected by the GC lock.
3675   if (!isIncrementalGCInProgress()) {
3676     assertBackgroundSweepingFinished();
3677     MOZ_ASSERT(decommitTask.isIdle());
3678   }
3679 
3680   // Note that GC callbacks are allowed to re-enter GC.
3681   AutoCallGCCallbacks callCallbacks(*this, reason);
3682 
3683   // Increase slice budget for long running collections before it is recorded by
3684   // AutoGCSlice.
3685   SliceBudget budget(budgetArg);
3686   bool budgetWasIncreased = maybeIncreaseSliceBudget(budget);
3687 
3688   ScheduleZones(this);
3689   gcstats::AutoGCSlice agc(stats(), scanZonesBeforeGC(), gcOptions(), budget,
3690                            reason, budgetWasIncreased);
3691 
3692   IncrementalResult result =
3693       budgetIncrementalGC(nonincrementalByAPI, reason, budget);
3694   if (result == IncrementalResult::ResetIncremental) {
3695     if (incrementalState == State::NotActive) {
3696       // The collection was reset and has finished.
3697       return result;
3698     }
3699 
3700     // The collection was reset but we must finish up some remaining work.
3701     reason = JS::GCReason::RESET;
3702   }
3703 
3704   majorGCTriggerReason = JS::GCReason::NO_REASON;
3705   MOZ_ASSERT(!stats().hasTrigger());
3706 
3707   incGcNumber();
3708   incGcSliceNumber();
3709 
3710   gcprobes::MajorGCStart();
3711   incrementalSlice(budget, reason, budgetWasIncreased);
3712   gcprobes::MajorGCEnd();
3713 
3714   MOZ_ASSERT_IF(result == IncrementalResult::ResetIncremental,
3715                 !isIncrementalGCInProgress());
3716   return result;
3717 }
3718 
3719 inline bool GCRuntime::mightSweepInThisSlice(bool nonIncremental) {
3720   MOZ_ASSERT(incrementalState < State::Sweep);
3721   return nonIncremental || lastMarkSlice || hasIncrementalTwoSliceZealMode();
3722 }
3723 
3724 #ifdef JS_GC_ZEAL
3725 static bool IsDeterministicGCReason(JS::GCReason reason) {
3726   switch (reason) {
3727     case JS::GCReason::API:
3728     case JS::GCReason::DESTROY_RUNTIME:
3729     case JS::GCReason::LAST_DITCH:
3730     case JS::GCReason::TOO_MUCH_MALLOC:
3731     case JS::GCReason::TOO_MUCH_WASM_MEMORY:
3732     case JS::GCReason::TOO_MUCH_JIT_CODE:
3733     case JS::GCReason::ALLOC_TRIGGER:
3734     case JS::GCReason::DEBUG_GC:
3735     case JS::GCReason::CC_FORCED:
3736     case JS::GCReason::SHUTDOWN_CC:
3737     case JS::GCReason::ABORT_GC:
3738     case JS::GCReason::DISABLE_GENERATIONAL_GC:
3739     case JS::GCReason::FINISH_GC:
3740     case JS::GCReason::PREPARE_FOR_TRACING:
3741       return true;
3742 
3743     default:
3744       return false;
3745   }
3746 }
3747 #endif
3748 
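// Count all zones and compartments, and those scheduled for collection, for
// the GC statistics.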
3749 gcstats::ZoneGCStats GCRuntime::scanZonesBeforeGC() {
3750   gcstats::ZoneGCStats zoneStats;
3751   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
3752     zoneStats.zoneCount++;
3753     zoneStats.compartmentCount += zone->compartments().length();
3754     if (zone->isGCScheduled()) {
3755       zoneStats.collectedZoneCount++;
3756       zoneStats.collectedCompartmentCount += zone->compartments().length();
3757     }
3758   }
3759 
3760   return zoneStats;
3761 }
3762 
3763 // The GC can only clean up scheduledForDestruction realms that were marked live
3764 // by a barrier (e.g. by RemapWrappers from a navigation event). It is also
3765 // common to have realms held live because they are part of a cycle in gecko,
3766 // e.g. involving the HTMLDocument wrapper. In this case, we need to run the
3767 // CycleCollector in order to remove these edges before the realm can be freed.
3768 void GCRuntime::maybeDoCycleCollection() {
3769   const static float ExcessiveGrayRealms = 0.8f;
3770   const static size_t LimitGrayRealms = 200;
3771 
3772   size_t realmsTotal = 0;
3773   size_t realmsGray = 0;
3774   for (RealmsIter realm(rt); !realm.done(); realm.next()) {
3775     ++realmsTotal;
3776     GlobalObject* global = realm->unsafeUnbarrieredMaybeGlobal();
3777     if (global && global->isMarkedGray()) {
3778       ++realmsGray;
3779     }
3780   }
3781   float grayFraction = float(realmsGray) / float(realmsTotal);
3782   if (grayFraction > ExcessiveGrayRealms || realmsGray > LimitGrayRealms) {
3783     callDoCycleCollectionCallback(rt->mainContextFromOwnThread());
3784   }
3785 }
3786 
3787 void GCRuntime::checkCanCallAPI() {
3788   MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
3789 
3790   /* If we attempt to invoke the GC while we are running in the GC, assert. */
3791   MOZ_RELEASE_ASSERT(!JS::RuntimeHeapIsBusy());
3792 }
3793 
3794 bool GCRuntime::checkIfGCAllowedInCurrentState(JS::GCReason reason) {
3795   if (rt->mainContextFromOwnThread()->suppressGC) {
3796     return false;
3797   }
3798 
3799   // Only allow shutdown GCs when we're destroying the runtime. This keeps
3800   // the GC callback from triggering a nested GC and resetting global state.
3801   if (rt->isBeingDestroyed() && !isShutdownGC()) {
3802     return false;
3803   }
3804 
3805 #ifdef JS_GC_ZEAL
3806   if (deterministicOnly && !IsDeterministicGCReason(reason)) {
3807     return false;
3808   }
3809 #endif
3810 
3811   return true;
3812 }
3813 
3814 bool GCRuntime::shouldRepeatForDeadZone(JS::GCReason reason) {
3815   MOZ_ASSERT_IF(reason == JS::GCReason::COMPARTMENT_REVIVED, !isIncremental);
3816   MOZ_ASSERT(!isIncrementalGCInProgress());
3817 
3818   if (!isIncremental) {
3819     return false;
3820   }
3821 
3822   for (CompartmentsIter c(rt); !c.done(); c.next()) {
3823     if (c->gcState.scheduledForDestruction) {
3824       return true;
3825     }
3826   }
3827 
3828   return false;
3829 }
3830 
3831 struct MOZ_RAII AutoSetZoneSliceThresholds {
3832   explicit AutoSetZoneSliceThresholds(GCRuntime* gc) : gc(gc) {
3833     // On entry, zones that are already collecting should have a slice threshold
3834     // set.
3835     for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
3836       MOZ_ASSERT(zone->wasGCStarted() ==
3837                  zone->gcHeapThreshold.hasSliceThreshold());
3838       MOZ_ASSERT(zone->wasGCStarted() ==
3839                  zone->mallocHeapThreshold.hasSliceThreshold());
3840     }
3841   }
3842 
3843   ~AutoSetZoneSliceThresholds() {
3844     // On exit, update the thresholds for all collecting zones.
3845     bool waitingOnBGTask = gc->isWaitingOnBackgroundTask();
3846     for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
3847       if (zone->wasGCStarted()) {
3848         zone->setGCSliceThresholds(*gc, waitingOnBGTask);
3849       } else {
3850         MOZ_ASSERT(!zone->gcHeapThreshold.hasSliceThreshold());
3851         MOZ_ASSERT(!zone->mallocHeapThreshold.hasSliceThreshold());
3852       }
3853     }
3854   }
3855 
3856   GCRuntime* gc;
3857 };
3858 
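// Common entry point for running a collection or a collection slice. Repeats
// the GC cycle when a follow-up collection is required, for example after a
// reset or when roots are removed during a shutdown GC.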
3859 void GCRuntime::collect(bool nonincrementalByAPI, const SliceBudget& budget,
3860                         JS::GCReason reason) {
3861   mozilla::TimeStamp startTime = TimeStamp::Now();
3862   auto timer = MakeScopeExit([&] {
3863     if (Realm* realm = rt->mainContextFromOwnThread()->realm()) {
3864       realm->timers.gcTime += TimeStamp::Now() - startTime;
3865     }
3866   });
3867 
3868   auto clearGCOptions = MakeScopeExit([&] {
3869     if (!isIncrementalGCInProgress()) {
3870       maybeGcOptions = Nothing();
3871     }
3872   });
3873 
3874   MOZ_ASSERT(reason != JS::GCReason::NO_REASON);
3875 
3876   // Checks run for each request, even if we do not actually GC.
3877   checkCanCallAPI();
3878 
3879   // Check if we are allowed to GC at this time before proceeding.
3880   if (!checkIfGCAllowedInCurrentState(reason)) {
3881     return;
3882   }
3883 
3884   stats().log("GC starting in state %s", StateName(incrementalState));
3885 
3886   AutoTraceLog logGC(TraceLoggerForCurrentThread(), TraceLogger_GC);
3887   AutoStopVerifyingBarriers av(rt, isShutdownGC());
3888   AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
3889   AutoSetZoneSliceThresholds sliceThresholds(this);
3890 
3891   schedulingState.updateHighFrequencyModeForReason(reason);
3892 
3893   bool repeat;
3894   do {
3895     IncrementalResult cycleResult =
3896         gcCycle(nonincrementalByAPI, budget, reason);
3897 
3898     if (reason == JS::GCReason::ABORT_GC) {
3899       MOZ_ASSERT(!isIncrementalGCInProgress());
3900       stats().log("GC aborted by request");
3901       break;
3902     }
3903 
3904     /*
3905      * Sometimes when we finish a GC we need to immediately start a new one.
3906      * This happens in the following cases:
3907      *  - when we reset the current GC
3908      *  - when finalizers drop roots during shutdown
3909      *  - when zones that we thought were dead at the start of GC are
3910      *    not collected (see the large comment in beginMarkPhase)
3911      */
3912     repeat = false;
3913     if (!isIncrementalGCInProgress()) {
3914       if (cycleResult == ResetIncremental) {
3915         repeat = true;
3916       } else if (rootsRemoved && isShutdownGC()) {
3917         /* Need to re-schedule all zones for GC. */
3918         JS::PrepareForFullGC(rt->mainContextFromOwnThread());
3919         repeat = true;
3920         reason = JS::GCReason::ROOTS_REMOVED;
3921       } else if (shouldRepeatForDeadZone(reason)) {
3922         repeat = true;
3923         reason = JS::GCReason::COMPARTMENT_REVIVED;
3924       }
3925     }
3926   } while (repeat);
3927 
3928   if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
3929     maybeDoCycleCollection();
3930   }
3931 
3932 #ifdef JS_GC_ZEAL
3933   if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
3934     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::TRACE_HEAP);
3935     CheckHeapAfterGC(rt);
3936   }
3937   if (hasZealMode(ZealMode::CheckGrayMarking) && !isIncrementalGCInProgress()) {
3938     MOZ_RELEASE_ASSERT(CheckGrayMarkingState(rt));
3939   }
3940 #endif
3941   stats().log("GC ending in state %s", StateName(incrementalState));
3942 
3943   UnscheduleZones(this);
3944 }
3945 
3946 SliceBudget GCRuntime::defaultBudget(JS::GCReason reason, int64_t millis) {
3947   // millis == 0 means use internal GC scheduling logic to come up with
3948   // a duration for the slice budget. This may end up still being zero
3949   // based on preferences.
3950   if (millis == 0) {
3951     millis = defaultSliceBudgetMS();
3952   }
3953 
3954   // If the embedding has registered a callback for creating SliceBudgets,
3955   // then use it.
3956   if (createBudgetCallback) {
3957     return createBudgetCallback(reason, millis);
3958   }
3959 
3960   // Otherwise, the preference can request an unlimited duration slice.
3961   if (millis == 0) {
3962     return SliceBudget::unlimited();
3963   }
3964 
3965   return SliceBudget(TimeBudget(millis));
3966 }
3967 
3968 void GCRuntime::gc(JS::GCOptions options, JS::GCReason reason) {
3969   if (!isIncrementalGCInProgress()) {
3970     setGCOptions(options);
3971   }
3972 
3973   collect(true, SliceBudget::unlimited(), reason);
3974 }
3975 
3976 void GCRuntime::startGC(JS::GCOptions options, JS::GCReason reason,
3977                         const js::SliceBudget& budget) {
3978   MOZ_ASSERT(!isIncrementalGCInProgress());
3979   setGCOptions(options);
3980 
3981   if (!JS::IsIncrementalGCEnabled(rt->mainContextFromOwnThread())) {
3982     collect(true, SliceBudget::unlimited(), reason);
3983     return;
3984   }
3985 
3986   collect(false, budget, reason);
3987 }
3988 
3989 void GCRuntime::setGCOptions(JS::GCOptions options) {
3990   MOZ_ASSERT(maybeGcOptions == Nothing());
3991   maybeGcOptions = Some(options);
3992 }
3993 
3994 void GCRuntime::gcSlice(JS::GCReason reason, const js::SliceBudget& budget) {
3995   MOZ_ASSERT(isIncrementalGCInProgress());
3996   collect(false, budget, reason);
3997 }
3998 
3999 void GCRuntime::finishGC(JS::GCReason reason) {
4000   MOZ_ASSERT(isIncrementalGCInProgress());
4001 
4002   // If we're not collecting because we're out of memory then skip the
4003   // compacting phase if we need to finish an ongoing incremental GC
4004   // non-incrementally to avoid janking the browser.
4005   if (!IsOOMReason(initialReason)) {
4006     if (incrementalState == State::Compact) {
4007       abortGC();
4008       return;
4009     }
4010 
4011     isCompacting = false;
4012   }
4013 
4014   collect(false, SliceBudget::unlimited(), reason);
4015 }
4016 
4017 void GCRuntime::abortGC() {
4018   MOZ_ASSERT(isIncrementalGCInProgress());
4019   checkCanCallAPI();
4020   MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
4021 
4022   collect(false, SliceBudget::unlimited(), JS::GCReason::ABORT_GC);
4023 }
4024 
4025 static bool ZonesSelected(GCRuntime* gc) {
4026   for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
4027     if (zone->isGCScheduled()) {
4028       return true;
4029     }
4030   }
4031   return false;
4032 }
4033 
4034 void GCRuntime::startDebugGC(JS::GCOptions options, const SliceBudget& budget) {
4035   MOZ_ASSERT(!isIncrementalGCInProgress());
4036   setGCOptions(options);
4037 
4038   if (!ZonesSelected(this)) {
4039     JS::PrepareForFullGC(rt->mainContextFromOwnThread());
4040   }
4041 
4042   collect(false, budget, JS::GCReason::DEBUG_GC);
4043 }
4044 
4045 void GCRuntime::debugGCSlice(const SliceBudget& budget) {
4046   MOZ_ASSERT(isIncrementalGCInProgress());
4047 
4048   if (!ZonesSelected(this)) {
4049     JS::PrepareForIncrementalGC(rt->mainContextFromOwnThread());
4050   }
4051 
4052   collect(false, budget, JS::GCReason::DEBUG_GC);
4053 }
4054 
4055 /* Schedule a full GC unless a zone will already be collected. */
4056 void js::PrepareForDebugGC(JSRuntime* rt) {
4057   if (!ZonesSelected(&rt->gc)) {
4058     JS::PrepareForFullGC(rt->mainContextFromOwnThread());
4059   }
4060 }
4061 
4062 void GCRuntime::onOutOfMallocMemory() {
4063   // Stop allocating new chunks.
4064   allocTask.cancelAndWait();
4065 
4066   // Make sure we release anything queued for release.
4067   decommitTask.join();
4068   nursery().joinDecommitTask();
4069 
4070   // Wait for background free of nursery huge slots to finish.
4071   sweepTask.join();
4072 
4073   AutoLockGC lock(this);
4074   onOutOfMallocMemory(lock);
4075 }
4076 
4077 void GCRuntime::onOutOfMallocMemory(const AutoLockGC& lock) {
4078 #ifdef DEBUG
4079   // Release any relocated arenas we may be holding on to, without releasing
4080   // the GC lock.
4081   releaseHeldRelocatedArenasWithoutUnlocking(lock);
4082 #endif
4083 
4084   // Throw away any excess chunks we have lying around.
4085   freeEmptyChunks(lock);
4086 
4087   // Immediately decommit as many arenas as possible in the hopes that this
4088   // might let the OS scrape together enough pages to satisfy the failing
4089   // malloc request.
4090   if (DecommitEnabled()) {
4091     decommitFreeArenasWithoutUnlocking(lock);
4092   }
4093 }
4094 
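// Run a minor (nursery) collection, then check whether the resulting tenured
// allocations should trigger a major GC in any zone.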
4095 void GCRuntime::minorGC(JS::GCReason reason, gcstats::PhaseKind phase) {
4096   MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
4097 
4098   MOZ_ASSERT_IF(reason == JS::GCReason::EVICT_NURSERY,
4099                 !rt->mainContextFromOwnThread()->suppressGC);
4100   if (rt->mainContextFromOwnThread()->suppressGC) {
4101     return;
4102   }
4103 
4104   incGcNumber();
4105 
4106   collectNursery(JS::GCOptions::Normal, reason, phase);
4107 
4108 #ifdef JS_GC_ZEAL
4109   if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
4110     gcstats::AutoPhase ap(stats(), phase);
4111     CheckHeapAfterGC(rt);
4112   }
4113 #endif
4114 
4115   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
4116     maybeTriggerGCAfterAlloc(zone);
4117     maybeTriggerGCAfterMalloc(zone);
4118   }
4119 }
4120 
4121 void GCRuntime::collectNursery(JS::GCOptions options, JS::GCReason reason,
4122                                gcstats::PhaseKind phase) {
4123   AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
4124 
4125   // Note that we aren't collecting the updated alloc counts from any helper
4126   // threads.  We should be but I'm not sure where to add that
4127   // synchronisation.
4128   uint32_t numAllocs =
4129       rt->mainContextFromOwnThread()->getAndResetAllocsThisZoneSinceMinorGC();
4130   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
4131     numAllocs += zone->getAndResetTenuredAllocsSinceMinorGC();
4132   }
4133   stats().setAllocsSinceMinorGCTenured(numAllocs);
4134 
4135   gcstats::AutoPhase ap(stats(), phase);
4136 
4137   nursery().clearMinorGCRequest();
4138   TraceLoggerThread* logger = TraceLoggerForCurrentThread();
4139   AutoTraceLog logMinorGC(logger, TraceLogger_MinorGC);
4140   nursery().collect(options, reason);
4141   MOZ_ASSERT(nursery().isEmpty());
4142 
4143   startBackgroundFreeAfterMinorGC();
4144 }
4145 
4146 void GCRuntime::startBackgroundFreeAfterMinorGC() {
4147   MOZ_ASSERT(nursery().isEmpty());
4148 
4149   {
4150     AutoLockHelperThreadState lock;
4151 
4152     lifoBlocksToFree.ref().transferFrom(&lifoBlocksToFreeAfterMinorGC.ref());
4153 
4154     if (lifoBlocksToFree.ref().isEmpty() &&
4155         buffersToFreeAfterMinorGC.ref().empty()) {
4156       return;
4157     }
4158   }
4159 
4160   startBackgroundFree();
4161 }
4162 
4163 bool GCRuntime::gcIfRequested() {
4164   // This method returns whether a major GC was performed.
4165 
4166   if (nursery().minorGCRequested()) {
4167     minorGC(nursery().minorGCTriggerReason());
4168   }
4169 
4170   if (majorGCRequested()) {
4171     SliceBudget budget = defaultBudget(majorGCTriggerReason, 0);
4172     if (!isIncrementalGCInProgress()) {
4173       startGC(JS::GCOptions::Normal, majorGCTriggerReason, budget);
4174     } else {
4175       gcSlice(majorGCTriggerReason, budget);
4176     }
4177     return true;
4178   }
4179 
4180   return false;
4181 }
4182 
4183 void js::gc::FinishGC(JSContext* cx, JS::GCReason reason) {
4184   // Calling this when GC is suppressed won't have any effect.
4185   MOZ_ASSERT(!cx->suppressGC);
4186 
4187   // GC callbacks may run arbitrary code, including JS. Check this regardless of
4188   // whether we GC for this invocation.
4189   MOZ_ASSERT(cx->isNurseryAllocAllowed());
4190 
4191   if (JS::IsIncrementalGCInProgress(cx)) {
4192     JS::PrepareForIncrementalGC(cx);
4193     JS::FinishIncrementalGC(cx, reason);
4194   }
4195 }
4196 
4197 void js::gc::WaitForBackgroundTasks(JSContext* cx) {
4198   cx->runtime()->gc.waitForBackgroundTasks();
4199 }
4200 
4201 void GCRuntime::waitForBackgroundTasks() {
4202   MOZ_ASSERT(!isIncrementalGCInProgress());
4203   MOZ_ASSERT(sweepTask.isIdle());
4204   MOZ_ASSERT(decommitTask.isIdle());
4205   MOZ_ASSERT(markTask.isIdle());
4206 
4207   allocTask.join();
4208   freeTask.join();
4209   nursery().joinDecommitTask();
4210 }
4211 
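// Create a new realm in the compartment and zone requested by |options|,
// creating a new compartment and/or zone if necessary.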
4212 Realm* js::NewRealm(JSContext* cx, JSPrincipals* principals,
4213                     const JS::RealmOptions& options) {
4214   JSRuntime* rt = cx->runtime();
4215   JS_AbortIfWrongThread(cx);
4216 
4217   UniquePtr<Zone> zoneHolder;
4218   UniquePtr<Compartment> compHolder;
4219 
4220   Compartment* comp = nullptr;
4221   Zone* zone = nullptr;
4222   JS::CompartmentSpecifier compSpec =
4223       options.creationOptions().compartmentSpecifier();
4224   switch (compSpec) {
4225     case JS::CompartmentSpecifier::NewCompartmentInSystemZone:
4226       // systemZone might be null here, in which case we'll make a zone and
4227       // set this field below.
4228       zone = rt->gc.systemZone;
4229       break;
4230     case JS::CompartmentSpecifier::NewCompartmentInExistingZone:
4231       zone = options.creationOptions().zone();
4232       MOZ_ASSERT(zone);
4233       break;
4234     case JS::CompartmentSpecifier::ExistingCompartment:
4235       comp = options.creationOptions().compartment();
4236       zone = comp->zone();
4237       break;
4238     case JS::CompartmentSpecifier::NewCompartmentAndZone:
4239       break;
4240   }
4241 
4242   if (!zone) {
4243     Zone::Kind kind = Zone::NormalZone;
4244     const JSPrincipals* trusted = rt->trustedPrincipals();
4245     if (compSpec == JS::CompartmentSpecifier::NewCompartmentInSystemZone ||
4246         (principals && principals == trusted)) {
4247       kind = Zone::SystemZone;
4248     }
4249 
4250     zoneHolder = MakeUnique<Zone>(cx->runtime(), kind);
4251     if (!zoneHolder || !zoneHolder->init()) {
4252       ReportOutOfMemory(cx);
4253       return nullptr;
4254     }
4255 
4256     zone = zoneHolder.get();
4257   }
4258 
4259   bool invisibleToDebugger = options.creationOptions().invisibleToDebugger();
4260   if (comp) {
4261     // Debugger visibility is per-compartment, not per-realm, so make sure the
4262     // new realm's visibility matches its compartment's.
4263     MOZ_ASSERT(comp->invisibleToDebugger() == invisibleToDebugger);
4264   } else {
4265     compHolder = cx->make_unique<JS::Compartment>(zone, invisibleToDebugger);
4266     if (!compHolder) {
4267       return nullptr;
4268     }
4269 
4270     comp = compHolder.get();
4271   }
4272 
4273   UniquePtr<Realm> realm(cx->new_<Realm>(comp, options));
4274   if (!realm || !realm->init(cx, principals)) {
4275     return nullptr;
4276   }
4277 
4278   // Make sure we don't put system and non-system realms in the same
4279   // compartment.
4280   if (!compHolder) {
4281     MOZ_RELEASE_ASSERT(realm->isSystem() == IsSystemCompartment(comp));
4282   }
4283 
4284   AutoLockGC lock(rt);
4285 
4286   // Reserve space in the Vectors before we start mutating them.
4287   if (!comp->realms().reserve(comp->realms().length() + 1) ||
4288       (compHolder &&
4289        !zone->compartments().reserve(zone->compartments().length() + 1)) ||
4290       (zoneHolder && !rt->gc.zones().reserve(rt->gc.zones().length() + 1))) {
4291     ReportOutOfMemory(cx);
4292     return nullptr;
4293   }
4294 
4295   // After this everything must be infallible.
4296 
4297   comp->realms().infallibleAppend(realm.get());
4298 
4299   if (compHolder) {
4300     zone->compartments().infallibleAppend(compHolder.release());
4301   }
4302 
4303   if (zoneHolder) {
4304     rt->gc.zones().infallibleAppend(zoneHolder.release());
4305 
4306     // Lazily set the runtime's system zone.
4307     if (compSpec == JS::CompartmentSpecifier::NewCompartmentInSystemZone) {
4308       MOZ_RELEASE_ASSERT(!rt->gc.systemZone);
4309       MOZ_ASSERT(zone->isSystemZone());
4310       rt->gc.systemZone = zone;
4311     }
4312   }
4313 
4314   return realm.release();
4315 }
4316 
4317 void GCRuntime::runDebugGC() {
4318 #ifdef JS_GC_ZEAL
4319   if (rt->mainContextFromOwnThread()->suppressGC) {
4320     return;
4321   }
4322 
4323   if (hasZealMode(ZealMode::GenerationalGC)) {
4324     return minorGC(JS::GCReason::DEBUG_GC);
4325   }
4326 
4327   PrepareForDebugGC(rt);
4328 
4329   auto budget = SliceBudget::unlimited();
4330   if (hasZealMode(ZealMode::IncrementalMultipleSlices)) {
4331     /*
4332      * Start with a small slice limit and double it every slice. This
4333      * ensures that we get multiple slices, and collection runs to
4334      * completion.
4335      */
4336     if (!isIncrementalGCInProgress()) {
4337       zealSliceBudget = zealFrequency / 2;
4338     } else {
4339       zealSliceBudget *= 2;
4340     }
4341     budget = SliceBudget(WorkBudget(zealSliceBudget));
4342 
4343     js::gc::State initialState = incrementalState;
4344     if (!isIncrementalGCInProgress()) {
4345       setGCOptions(JS::GCOptions::Shrink);
4346     }
4347     collect(false, budget, JS::GCReason::DEBUG_GC);
4348 
4349     /* Reset the slice size when we get to the sweep or compact phases. */
4350     if ((initialState == State::Mark && incrementalState == State::Sweep) ||
4351         (initialState == State::Sweep && incrementalState == State::Compact)) {
4352       zealSliceBudget = zealFrequency / 2;
4353     }
4354   } else if (hasIncrementalTwoSliceZealMode()) {
4355     // These modes trigger incremental GC that happens in two slices and the
4356     // supplied budget is ignored by incrementalSlice.
4357     budget = SliceBudget(WorkBudget(1));
4358 
4359     if (!isIncrementalGCInProgress()) {
4360       setGCOptions(JS::GCOptions::Normal);
4361     }
4362     collect(false, budget, JS::GCReason::DEBUG_GC);
4363   } else if (hasZealMode(ZealMode::Compact)) {
4364     gc(JS::GCOptions::Shrink, JS::GCReason::DEBUG_GC);
4365   } else {
4366     gc(JS::GCOptions::Normal, JS::GCReason::DEBUG_GC);
4367   }
4368 
4369 #endif
4370 }
4371 
4372 void GCRuntime::setFullCompartmentChecks(bool enabled) {
4373   MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
4374   fullCompartmentChecks = enabled;
4375 }
4376 
4377 void GCRuntime::notifyRootsRemoved() {
4378   rootsRemoved = true;
4379 
4380 #ifdef JS_GC_ZEAL
4381   /* Schedule a GC to happen "soon". */
4382   if (hasZealMode(ZealMode::RootsChange)) {
4383     nextScheduled = 1;
4384   }
4385 #endif
4386 }
4387 
4388 #ifdef JS_GC_ZEAL
4389 bool GCRuntime::selectForMarking(JSObject* object) {
4390   MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
4391   return selectedForMarking.ref().get().append(object);
4392 }
4393 
4394 void GCRuntime::clearSelectedForMarking() {
4395   selectedForMarking.ref().get().clearAndFree();
4396 }
4397 
4398 void GCRuntime::setDeterministic(bool enabled) {
4399   MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
4400   deterministicOnly = enabled;
4401 }
4402 #endif
4403 
4404 #ifdef DEBUG
4405 
4406 AutoAssertNoNurseryAlloc::AutoAssertNoNurseryAlloc() {
4407   TlsContext.get()->disallowNurseryAlloc();
4408 }
4409 
4410 AutoAssertNoNurseryAlloc::~AutoAssertNoNurseryAlloc() {
4411   TlsContext.get()->allowNurseryAlloc();
4412 }
4413 
4414 #endif  // DEBUG
4415 
4416 #ifdef JSGC_HASH_TABLE_CHECKS
4417 void GCRuntime::checkHashTablesAfterMovingGC() {
4418   /*
4419    * Check that internal hash tables no longer have any pointers to things
4420    * that have been moved.
4421    */
4422   rt->geckoProfiler().checkStringsMapAfterMovingGC();
4423   for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
4424     zone->checkUniqueIdTableAfterMovingGC();
4425     zone->shapeZone().checkTablesAfterMovingGC();
4426     zone->checkAllCrossCompartmentWrappersAfterMovingGC();
4427     zone->checkScriptMapsAfterMovingGC();
4428 
4429     // Note: CompactPropMaps never have a table.
4430     JS::AutoCheckCannotGC nogc;
4431     for (auto map = zone->cellIterUnsafe<NormalPropMap>(); !map.done();
4432          map.next()) {
4433       if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
4434         table->checkAfterMovingGC();
4435       }
4436     }
4437     for (auto map = zone->cellIterUnsafe<DictionaryPropMap>(); !map.done();
4438          map.next()) {
4439       if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
4440         table->checkAfterMovingGC();
4441       }
4442     }
4443   }
4444 
4445   for (CompartmentsIter c(this); !c.done(); c.next()) {
4446     for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
4447       r->dtoaCache.checkCacheAfterMovingGC();
4448       if (r->debugEnvs()) {
4449         r->debugEnvs()->checkHashTablesAfterMovingGC();
4450       }
4451     }
4452   }
4453 }
4454 #endif
4455 
#ifdef DEBUG
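// Debug-only: return whether |target| is one of this runtime's zones, by
// linear search over all zones.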
bool GCRuntime::hasZone(Zone* target) {
  for (AllZonesIter zone(this); !zone.done(); zone.next()) {
    if (zone == target) {
      return true;
    }
  }
  return false;
}
#endif

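// AutoAssertEmptyNursery asserts that the nursery is empty on entry.
// AutoEmptyNursery additionally evicts the nursery first (with statistics
// phase recording suspended around the eviction) and then performs the same
// check.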
void AutoAssertEmptyNursery::checkCondition(JSContext* cx) {
  if (!noAlloc) {
    noAlloc.emplace();
  }
  this->cx = cx;
  MOZ_ASSERT(cx->nursery().isEmpty());
}

AutoEmptyNursery::AutoEmptyNursery(JSContext* cx) : AutoAssertEmptyNursery() {
  MOZ_ASSERT(!cx->suppressGC);
  cx->runtime()->gc.stats().suspendPhases();
  cx->runtime()->gc.evictNursery(JS::GCReason::EVICT_NURSERY);
  cx->runtime()->gc.stats().resumePhases();
  checkCondition(cx);
}

#ifdef DEBUG

namespace js {

// We don't want jsfriendapi.h to depend on GenericPrinter,
// so this function is declared directly in the cpp.

extern JS_PUBLIC_API void DumpString(JSString* str, js::GenericPrinter& out);

}  // namespace js

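// Dump a human-readable description of this cell to |out|, dispatching on its
// trace kind; unrecognized kinds fall back to printing the kind name and the
// cell's address.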
void js::gc::Cell::dump(js::GenericPrinter& out) const {
  switch (getTraceKind()) {
    case JS::TraceKind::Object:
      reinterpret_cast<const JSObject*>(this)->dump(out);
      break;

    case JS::TraceKind::String:
      js::DumpString(reinterpret_cast<JSString*>(const_cast<Cell*>(this)), out);
      break;

    case JS::TraceKind::Shape:
      reinterpret_cast<const Shape*>(this)->dump(out);
      break;

    default:
      out.printf("%s(%p)\n", JS::GCTraceKindToAscii(getTraceKind()),
                 (void*)this);
  }
}

// For use in a debugger.
void js::gc::Cell::dump() const {
  js::Fprinter out(stderr);
  dump(out);
}
#endif

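// Return whether it is safe to consult |cell|'s gray mark bit: the cell must
// be tenured, the current thread must be able to access its runtime, the
// runtime's gray bits must be valid, and the cell's zone must not be having
// its mark bits cleared concurrently.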
static inline bool CanCheckGrayBits(const Cell* cell) {
  MOZ_ASSERT(cell);
  if (!cell->isTenured()) {
    return false;
  }

  auto tc = &cell->asTenured();
  auto rt = tc->runtimeFromAnyThread();
  if (!CurrentThreadCanAccessRuntime(rt) || !rt->gc.areGrayBitsValid()) {
    return false;
  }

  // If the zone's mark bits are being cleared concurrently we can't depend on
  // the contents.
  return !tc->zone()->isGCPreparing();
}

JS_PUBLIC_API bool js::gc::detail::CellIsMarkedGrayIfKnown(const Cell* cell) {
  // We ignore the gray marking state of cells and return false in the
  // following cases:
  //
  // 1) When OOM has caused us to clear the gcGrayBitsValid_ flag.
  //
  // 2) When we are in an incremental GC and examine a cell that is in a zone
  // that is not being collected. Gray targets of CCWs that are marked black
  // by a barrier will eventually be marked black in the next GC slice.
  //
  // 3) When we are not on the runtime's main thread. Helper threads might
  // call this while parsing, and they are not allowed to inspect the
  // runtime's incremental state. The objects being operated on cannot be
  // collected and will not be marked any color.

  if (!CanCheckGrayBits(cell)) {
    return false;
  }

  auto tc = &cell->asTenured();
  auto rt = tc->runtimeFromMainThread();
  if (rt->gc.isIncrementalGCInProgress() && !tc->zone()->wasGCStarted()) {
    return false;
  }

  return detail::CellIsMarkedGray(tc);
}

#ifdef DEBUG

JS_PUBLIC_API void js::gc::detail::AssertCellIsNotGray(const Cell* cell) {
  // Check that a cell is not marked gray.
  //
  // Since this is a debug-only check, take account of the eventual mark state
  // of cells that will be marked black by the next GC slice in an incremental
  // GC. For performance reasons we don't do this in CellIsMarkedGrayIfKnown.

  if (!CanCheckGrayBits(cell)) {
    return;
  }

  // TODO: I'd like to AssertHeapIsIdle() here, but this ends up getting
  // called during GC and while iterating the heap for memory reporting.
  MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());

  auto tc = &cell->asTenured();
  if (tc->zone()->isGCMarkingBlackAndGray()) {
    // We are doing gray marking in the cell's zone. Even if the cell is
    // currently marked gray it may eventually be marked black. Delay checking
    // non-black cells until we finish gray marking.

    if (!tc->isMarkedBlack()) {
      JSRuntime* rt = tc->zone()->runtimeFromMainThread();
      AutoEnterOOMUnsafeRegion oomUnsafe;
      if (!rt->gc.cellsToAssertNotGray.ref().append(cell)) {
        oomUnsafe.crash("Can't append to delayed gray checks list");
      }
    }
    return;
  }

  MOZ_ASSERT(!tc->isMarkedGray());
}

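// Debug-only: report whether |obj| is currently marked black.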
extern JS_PUBLIC_API bool js::gc::detail::ObjectIsMarkedBlack(
    const JSObject* obj) {
  return obj->isMarkedBlack();
}

#endif

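// ClearEdgesTracer clears every edge it visits: onEdge() fires the pre-write
// barrier for the old target and then returns nullptr so the edge is set to
// null. It does not handle store buffer entries for nursery things, so it must
// not be used on edges that point into the nursery.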
js::gc::ClearEdgesTracer::ClearEdgesTracer(JSRuntime* rt)
    : GenericTracerImpl(rt, JS::TracerKind::ClearEdges,
                        JS::WeakMapTraceAction::TraceKeysAndValues) {}

js::gc::ClearEdgesTracer::ClearEdgesTracer()
    : ClearEdgesTracer(TlsContext.get()->runtime()) {}

template <typename T>
T* js::gc::ClearEdgesTracer::onEdge(T* thing) {
  // We don't handle removing pointers to nursery edges from the store buffer
  // with this tracer. Check that this doesn't happen.
  MOZ_ASSERT(!IsInsideNursery(thing));

  // Fire the pre-barrier since we're removing an edge from the graph.
  InternalBarrierMethods<T*>::preBarrier(thing);

  // Return nullptr to clear the edge.
  return nullptr;
}

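// Track whether a page load is in progress, as reported via performance hints.
// Page-load hints are counted so that nested notifications balance out; when
// the overall state changes, GC trigger thresholds for the atoms zone are
// recomputed under the GC lock and an allocation-triggered GC is re-evaluated.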
void GCRuntime::setPerformanceHint(PerformanceHint hint) {
  bool wasInPageLoad = inPageLoadCount != 0;

  if (hint == PerformanceHint::InPageLoad) {
    inPageLoadCount++;
  } else {
    MOZ_ASSERT(inPageLoadCount);
    inPageLoadCount--;
  }

  bool inPageLoad = inPageLoadCount != 0;
  if (inPageLoad == wasInPageLoad) {
    return;
  }

  AutoLockGC lock(this);
  schedulingState.inPageLoad = inPageLoad;
  atomsZone->updateGCStartThresholds(*this, lock);
  maybeTriggerGCAfterAlloc(atomsZone);
}