1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 /*
8  * [SMDOC] Garbage Collector
9  *
10  * This code implements an incremental mark-and-sweep garbage collector, with
11  * most sweeping carried out in the background on a parallel thread.
12  *
13  * Full vs. zone GC
14  * ----------------
15  *
16  * The collector can collect all zones at once, or a subset. These types of
17  * collection are referred to as a full GC and a zone GC respectively.
18  *
19  * It is possible for an incremental collection that started out as a full GC to
20  * become a zone GC if new zones are created during the course of the
21  * collection.
22  *
23  * Incremental collection
24  * ----------------------
25  *
26  * For a collection to be carried out incrementally the following conditions
27  * must be met:
28  *  - the collection must be run by calling js::GCSlice() rather than js::GC()
29  *  - the GC parameter JSGC_INCREMENTAL_GC_ENABLED must be true.
30  *
31  * The last condition is an engine-internal mechanism to ensure that incremental
32  * collection is not carried out without the correct barriers being implemented.
33  * For more information see 'Incremental marking' below.
34  *
35  * If the collection is not incremental, all foreground activity happens inside
36  * a single call to GC() or GCSlice(). However the collection is not complete
37  * until the background sweeping activity has finished.
38  *
39  * An incremental collection proceeds as a series of slices, interleaved with
40  * mutator activity, i.e. running JavaScript code. Slices are limited by a time
41  * budget. The slice finishes as soon as possible after the requested time has
42  * passed.
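 *
 * As a rough illustration (not the exact API; see the declarations of
 * js::GCSlice and js::SliceBudget for the real signatures, and note that
 * collectionInProgress() and runSomeJS() below are hypothetical helpers), an
 * embedding-driven slice loop looks like:
 *
 *   js::SliceBudget budget{js::TimeBudget(10)};  // aim for ~10 ms slices
 *   js::GCSlice(cx, reason, budget);             // first slice starts the GC
 *   while (collectionInProgress()) {
 *     runSomeJS();                               // mutator runs between slices
 *     js::GCSlice(cx, reason, budget);           // later slices resume the GC
 *   }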
43  *
44  * Collector states
45  * ----------------
46  *
47  * The collector proceeds through the following states, the current state being
48  * held in JSRuntime::gcIncrementalState:
49  *
50  *  - Prepare    - unmarks GC things, discards JIT code and performs other setup
51  *  - MarkRoots  - marks the stack and other roots
52  *  - Mark       - incrementally marks reachable things
53  *  - Sweep      - sweeps zones in groups and continues marking unswept zones
54  *  - Finalize   - performs background finalization, concurrent with mutator
55  *  - Compact    - incrementally compacts by zone
56  *  - Decommit   - performs background decommit and chunk removal
57  *
58  * Roots are marked in the first MarkRoots slice; this is the start of the GC
59  * proper. The following states can take place over one or more slices.
60  *
61  * In other words an incremental collection proceeds like this:
62  *
63  * Slice 1:   Prepare:    Starts background task to unmark GC things
64  *
65  *          ... JS code runs, background unmarking finishes ...
66  *
67  * Slice 2:   MarkRoots:  Roots are pushed onto the mark stack.
68  *            Mark:       The mark stack is processed by popping an element,
69  *                        marking it, and pushing its children.
70  *
71  *          ... JS code runs ...
72  *
73  * Slice 3:   Mark:       More mark stack processing.
74  *
75  *          ... JS code runs ...
76  *
77  * Slice n-1: Mark:       More mark stack processing.
78  *
79  *          ... JS code runs ...
80  *
81  * Slice n:   Mark:       Mark stack is completely drained.
82  *            Sweep:      Select first group of zones to sweep and sweep them.
83  *
84  *          ... JS code runs ...
85  *
86  * Slice n+1: Sweep:      Mark objects in unswept zones that were newly
87  *                        identified as alive (see below). Then sweep more zone
88  *                        sweep groups.
89  *
90  *          ... JS code runs ...
91  *
92  * Slice n+2: Sweep:      Mark objects in unswept zones that were newly
93  *                        identified as alive. Then sweep more zones.
94  *
95  *          ... JS code runs ...
96  *
97  * Slice m:   Sweep:      Sweeping is finished, and background sweeping
98  *                        started on the helper thread.
99  *
100  *          ... JS code runs, remaining sweeping done on background thread ...
101  *
102  * When background sweeping finishes the GC is complete.
103  *
104  * Incremental marking
105  * -------------------
106  *
107  * Incremental collection requires close collaboration with the mutator (i.e.,
108  * JS code) to guarantee correctness.
109  *
110  *  - During an incremental GC, if a memory location (except a root) is written
111  *    to, then the value it previously held must be marked. Write barriers
112  *    ensure this.
113  *
114  *  - Any object that is allocated during incremental GC must start out marked.
115  *
116  *  - Roots are marked in the first slice and hence don't need write barriers.
117  *    Roots are things like the C stack and the VM stack.
118  *
119  * The problem that write barriers solve is that between slices the mutator can
120  * change the object graph. We must ensure that it cannot do this in such a way
121  * that makes us fail to mark a reachable object (marking an unreachable object
122  * is tolerable).
123  *
124  * We use a snapshot-at-the-beginning algorithm to do this. This means that we
125  * promise to mark at least everything that is reachable at the beginning of
126  * collection. To implement it we mark the old contents of every non-root memory
127  * location written to by the mutator while the collection is in progress, using
128  * write barriers. This is described in gc/Barrier.h.
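 *
 * A minimal sketch of the idea (the real barriers are generated from the
 * templates in gc/Barrier.h; the helper names and raw field below are
 * illustrative only):
 *
 *   void setField(JSObject** field, JSObject* newValue) {
 *     if (zoneNeedsIncrementalBarrier()) {  // only during incremental marking
 *       preBarrier(*field);  // mark the value the field held when the
 *                            // collection started, preserving the snapshot
 *     }
 *     *field = newValue;
 *   }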
129  *
130  * Incremental sweeping
131  * --------------------
132  *
133  * Sweeping is difficult to do incrementally because object finalizers must be
134  * run at the start of sweeping, before any mutator code runs. The reason is
135  * that some objects use their finalizers to remove themselves from caches. If
136  * mutator code was allowed to run after the start of sweeping, it could observe
137  * the state of the cache and create a new reference to an object that was just
138  * about to be destroyed.
139  *
140  * Sweeping all finalizable objects in one go would introduce long pauses, so
141  * instead sweeping is broken up into groups of zones. Zones which are not yet
142  * being swept are still marked, so the issue above does not apply.
143  *
144  * The order of sweeping is restricted by cross compartment pointers - for
145  * example say that object |a| from zone A points to object |b| in zone B and
146  * neither object was marked when we transitioned to the Sweep phase. Imagine we
147  * sweep B first and then return to the mutator. It's possible that the mutator
148  * could cause |a| to become alive through a read barrier (perhaps it was a
149  * shape that was accessed via a shape table). Then we would need to mark |b|,
150  * which |a| points to, but |b| has already been swept.
151  *
152  * So if there is such a pointer then marking of zone B must not finish before
153  * marking of zone A.  Pointers which form a cycle between zones therefore
154  * restrict those zones to being swept at the same time, and these are found
155  * using Tarjan's algorithm for finding the strongly connected components of a
156  * graph.
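 *
 * For example (illustrative only):
 *
 *   Zone A --> Zone B     A points into B but not vice versa: B may be swept
 *                         in the same sweep group as A or in a later one, but
 *                         never in an earlier one.
 *
 *   Zone C <--> Zone D    C and D point into each other, so they form one
 *                         strongly connected component and always end up in
 *                         the same sweep group.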
157  *
158  * GC things without finalizers, and things with finalizers that are able to run
159  * in the background, are swept on the background thread. This accounts for most
160  * of the sweeping work.
161  *
162  * Reset
163  * -----
164  *
165  * During incremental collection it is possible, although unlikely, for
166  * conditions to change such that incremental collection is no longer safe. In
167  * this case, the collection is 'reset' by resetIncrementalGC(). If we are in
168  * the mark state, this just stops marking, but if we have started sweeping
169  * already, we continue non-incrementally until we have swept the current sweep
170  * group. Following a reset, a new collection is started.
171  *
172  * Compacting GC
173  * -------------
174  *
175  * Compacting GC happens at the end of a major GC as part of the last slice.
176  * There are three parts:
177  *
178  *  - Arenas are selected for compaction.
179  *  - The contents of those arenas are moved to new arenas.
180  *  - All references to moved things are updated.
181  *
182  * Collecting Atoms
183  * ----------------
184  *
185  * Atoms are collected differently from other GC things. They are contained in
186  * a special zone and things in other zones may have pointers to them that are
187  * not recorded in the cross compartment pointer map. Each zone holds a bitmap
188  * with the atoms it might be keeping alive, and atoms are only collected if
189  * they are not included in any zone's atom bitmap. See AtomMarking.cpp for how
190  * this bitmap is managed.
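 *
 * Conceptually (a sketch only; the zone iterator and bitmap accessor named
 * here are hypothetical, see AtomMarking.cpp for the real implementation), an
 * atom survives the collection if any zone's bitmap refers to it:
 *
 *   bool atomIsLive(JSAtom* atom) {
 *     for (Zone* zone : allCollectableZones()) {
 *       if (zone->markedAtomsBitmap().contains(atom)) {
 *         return true;
 *       }
 *     }
 *     return false;
 *   }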
191  */
192 
193 #include "gc/GC-inl.h"
194 
195 #include "mozilla/DebugOnly.h"
196 #include "mozilla/MacroForEach.h"
197 #include "mozilla/MemoryReporting.h"
198 #include "mozilla/Range.h"
199 #include "mozilla/ScopeExit.h"
200 #include "mozilla/TextUtils.h"
201 #include "mozilla/TimeStamp.h"
202 
203 #include <algorithm>
204 #include <initializer_list>
205 #include <iterator>
206 #include <stdlib.h>
207 #include <string.h>
208 #include <utility>
209 #if !defined(XP_WIN) && !defined(__wasi__)
210 #  include <sys/mman.h>
211 #  include <unistd.h>
212 #endif
213 
214 #include "jstypes.h"
215 
216 #include "builtin/FinalizationRegistryObject.h"
217 #include "builtin/WeakRefObject.h"
218 #include "debugger/DebugAPI.h"
219 #include "gc/ClearEdgesTracer.h"
220 #include "gc/FindSCCs.h"
221 #include "gc/FreeOp.h"
222 #include "gc/GCInternals.h"
223 #include "gc/GCLock.h"
224 #include "gc/GCProbes.h"
225 #include "gc/Memory.h"
226 #include "gc/ParallelWork.h"
227 #include "gc/Policy.h"
228 #include "gc/WeakMap.h"
229 #include "jit/BaselineJIT.h"
230 #include "jit/JitCode.h"
231 #include "jit/JitcodeMap.h"
232 #include "jit/JitRealm.h"
233 #include "jit/JitRuntime.h"
234 #include "jit/JitZone.h"
235 #include "jit/MacroAssembler.h"  // js::jit::CodeAlignment
236 #include "js/HeapAPI.h"          // JS::GCCellPtr
237 #include "js/Object.h"           // JS::GetClass
238 #include "js/SliceBudget.h"
239 #include "proxy/DeadObjectProxy.h"
240 #include "util/DifferentialTesting.h"
241 #include "util/Poison.h"
242 #include "util/Windows.h"
243 #include "vm/BigIntType.h"
244 #include "vm/GeckoProfiler.h"
245 #include "vm/GetterSetter.h"
246 #include "vm/HelperThreadState.h"
247 #include "vm/JSAtom.h"
248 #include "vm/JSContext.h"
249 #include "vm/JSObject.h"
250 #include "vm/JSScript.h"
251 #include "vm/Printer.h"
252 #include "vm/PropMap.h"
253 #include "vm/ProxyObject.h"
254 #include "vm/Realm.h"
255 #include "vm/Shape.h"
256 #include "vm/StringType.h"
257 #include "vm/SymbolType.h"
258 #include "vm/Time.h"
259 #include "vm/TraceLogging.h"
260 #include "vm/WrapperObject.h"
261 #include "wasm/TypedObject.h"
262 
263 #include "gc/Heap-inl.h"
264 #include "gc/Marking-inl.h"
265 #include "gc/Nursery-inl.h"
266 #include "gc/PrivateIterators-inl.h"
267 #include "gc/Zone-inl.h"
268 #include "vm/GeckoProfiler-inl.h"
269 #include "vm/JSObject-inl.h"
270 #include "vm/JSScript-inl.h"
271 #include "vm/PropMap-inl.h"
272 #include "vm/Stack-inl.h"
273 #include "vm/StringType-inl.h"
274 
275 using namespace js;
276 using namespace js::gc;
277 
278 using mozilla::Maybe;
279 using mozilla::Nothing;
280 using mozilla::Some;
281 using mozilla::TimeDuration;
282 using mozilla::TimeStamp;
283 
284 using JS::AutoGCRooter;
285 
286 /* Increase the IGC marking slice time if we are in highFrequencyGC mode. */
287 static constexpr int IGC_MARK_SLICE_MULTIPLIER = 2;
288 
289 const AllocKind gc::slotsToThingKind[] = {
290     // clang-format off
291     /*  0 */ AllocKind::OBJECT0,  AllocKind::OBJECT2,  AllocKind::OBJECT2,  AllocKind::OBJECT4,
292     /*  4 */ AllocKind::OBJECT4,  AllocKind::OBJECT8,  AllocKind::OBJECT8,  AllocKind::OBJECT8,
293     /*  8 */ AllocKind::OBJECT8,  AllocKind::OBJECT12, AllocKind::OBJECT12, AllocKind::OBJECT12,
294     /* 12 */ AllocKind::OBJECT12, AllocKind::OBJECT16, AllocKind::OBJECT16, AllocKind::OBJECT16,
295     /* 16 */ AllocKind::OBJECT16
296     // clang-format on
297 };
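
// For example, slotsToThingKind[5] is AllocKind::OBJECT8: an object that needs
// five fixed slots is allocated from the smallest object kind that has at
// least that many slots.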
298 
299 // Check that reserved bits of a Cell are compatible with our typical allocators
300 // since most derived classes will store a pointer in the first word.
301 static const size_t MinFirstWordAlignment = 1u << CellFlagBitsReservedForGC;
302 static_assert(js::detail::LIFO_ALLOC_ALIGN >= MinFirstWordAlignment,
303               "CellFlagBitsReservedForGC should support LifoAlloc");
304 static_assert(CellAlignBytes >= MinFirstWordAlignment,
305               "CellFlagBitsReservedForGC should support gc::Cell");
306 static_assert(js::jit::CodeAlignment >= MinFirstWordAlignment,
307               "CellFlagBitsReservedForGC should support JIT code");
308 static_assert(js::gc::JSClassAlignBytes >= MinFirstWordAlignment,
309               "CellFlagBitsReservedForGC should support JSClass pointers");
310 static_assert(js::ScopeDataAlignBytes >= MinFirstWordAlignment,
311               "CellFlagBitsReservedForGC should support scope data pointers");
312 
313 static_assert(std::size(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
314               "We have defined a slot count for each kind.");
315 
316 #define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType, bgFinal,       \
317                          nursery, compact)                                     \
318   static_assert(sizeof(sizedType) >= SortedArenaList::MinThingSize,            \
319                 #sizedType " is smaller than SortedArenaList::MinThingSize!"); \
320   static_assert(sizeof(sizedType) >= sizeof(FreeSpan),                         \
321                 #sizedType " is smaller than FreeSpan");                       \
322   static_assert(sizeof(sizedType) % CellAlignBytes == 0,                       \
323                 "Size of " #sizedType " is not a multiple of CellAlignBytes"); \
324   static_assert(sizeof(sizedType) >= MinCellSize,                              \
325                 "Size of " #sizedType " is smaller than the minimum size");
326 FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
327 #undef CHECK_THING_SIZE
328 
329 template <typename T>
330 struct ArenaLayout {
331   static constexpr size_t thingSize() { return sizeof(T); }
332   static constexpr size_t thingsPerArena() {
333     return (ArenaSize - ArenaHeaderSize) / thingSize();
334   }
335   static constexpr size_t firstThingOffset() {
336     return ArenaSize - thingSize() * thingsPerArena();
337   }
338 };
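
// Worked example with illustrative numbers: ArenaSize is 4 KiB, so supposing
// ArenaHeaderSize were 32 bytes, a 24-byte thing kind would get
//   thingsPerArena()   == (4096 - 32) / 24 == 169
//   firstThingOffset() == 4096 - 24 * 169  == 40
// leaving 8 bytes of padding after the header rather than splitting a thing
// across the arena boundary.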
339 
340 const uint8_t Arena::ThingSizes[] = {
341 #define EXPAND_THING_SIZE(_1, _2, _3, sizedType, _4, _5, _6) \
342   ArenaLayout<sizedType>::thingSize(),
343     FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
344 #undef EXPAND_THING_SIZE
345 };
346 
347 const uint8_t Arena::FirstThingOffsets[] = {
348 #define EXPAND_FIRST_THING_OFFSET(_1, _2, _3, sizedType, _4, _5, _6) \
349   ArenaLayout<sizedType>::firstThingOffset(),
350     FOR_EACH_ALLOCKIND(EXPAND_FIRST_THING_OFFSET)
351 #undef EXPAND_FIRST_THING_OFFSET
352 };
353 
354 const uint8_t Arena::ThingsPerArena[] = {
355 #define EXPAND_THINGS_PER_ARENA(_1, _2, _3, sizedType, _4, _5, _6) \
356   ArenaLayout<sizedType>::thingsPerArena(),
357     FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
358 #undef EXPAND_THINGS_PER_ARENA
359 };
360 
361 FreeSpan FreeLists::emptySentinel;
362 
363 struct js::gc::FinalizePhase {
364   gcstats::PhaseKind statsPhase;
365   AllocKinds kinds;
366 };
367 
368 /*
369  * Finalization order for objects swept incrementally on the main thread.
370  */
371 static constexpr FinalizePhase ForegroundObjectFinalizePhase = {
372     gcstats::PhaseKind::SWEEP_OBJECT,
373     {AllocKind::OBJECT0, AllocKind::OBJECT2, AllocKind::OBJECT4,
374      AllocKind::OBJECT8, AllocKind::OBJECT12, AllocKind::OBJECT16}};
375 
376 /*
377  * Finalization order for GC things swept incrementally on the main thread.
378  */
379 static constexpr FinalizePhase ForegroundNonObjectFinalizePhase = {
380     gcstats::PhaseKind::SWEEP_SCRIPT, {AllocKind::SCRIPT, AllocKind::JITCODE}};
381 
382 /*
383  * Finalization order for GC things swept on the background thread.
384  */
385 static constexpr FinalizePhase BackgroundFinalizePhases[] = {
386     {gcstats::PhaseKind::SWEEP_OBJECT,
387      {AllocKind::FUNCTION, AllocKind::FUNCTION_EXTENDED,
388       AllocKind::OBJECT0_BACKGROUND, AllocKind::OBJECT2_BACKGROUND,
389       AllocKind::ARRAYBUFFER4, AllocKind::OBJECT4_BACKGROUND,
390       AllocKind::ARRAYBUFFER8, AllocKind::OBJECT8_BACKGROUND,
391       AllocKind::ARRAYBUFFER12, AllocKind::OBJECT12_BACKGROUND,
392       AllocKind::ARRAYBUFFER16, AllocKind::OBJECT16_BACKGROUND}},
393     {gcstats::PhaseKind::SWEEP_SCOPE,
394      {
395          AllocKind::SCOPE,
396      }},
397     {gcstats::PhaseKind::SWEEP_REGEXP_SHARED,
398      {
399          AllocKind::REGEXP_SHARED,
400      }},
401     {gcstats::PhaseKind::SWEEP_STRING,
402      {AllocKind::FAT_INLINE_STRING, AllocKind::STRING,
403       AllocKind::EXTERNAL_STRING, AllocKind::FAT_INLINE_ATOM, AllocKind::ATOM,
404       AllocKind::SYMBOL, AllocKind::BIGINT}},
405     {gcstats::PhaseKind::SWEEP_SHAPE,
406      {AllocKind::SHAPE, AllocKind::BASE_SHAPE, AllocKind::GETTER_SETTER,
407       AllocKind::COMPACT_PROP_MAP, AllocKind::NORMAL_PROP_MAP,
408       AllocKind::DICT_PROP_MAP}}};
409 
410 void Arena::unmarkAll() {
411   MarkBitmapWord* arenaBits = chunk()->markBits.arenaBits(this);
412   for (size_t i = 0; i < ArenaBitmapWords; i++) {
413     arenaBits[i] = 0;
414   }
415 }
416 
417 void Arena::unmarkPreMarkedFreeCells() {
418   for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
419     MOZ_ASSERT(cell->isMarkedBlack());
420     cell->unmark();
421   }
422 }
423 
424 #ifdef DEBUG
425 
426 void Arena::checkNoMarkedFreeCells() {
427   for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
428     MOZ_ASSERT(!cell->isMarkedAny());
429   }
430 }
431 
432 void Arena::checkAllCellsMarkedBlack() {
433   for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
434     MOZ_ASSERT(cell->isMarkedBlack());
435   }
436 }
437 
438 #endif
439 
440 #if defined(DEBUG) || defined(JS_GC_ZEAL)
441 void Arena::checkNoMarkedCells() {
442   for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
443     MOZ_ASSERT(!cell->isMarkedAny());
444   }
445 }
446 #endif
447 
448 /* static */
449 void Arena::staticAsserts() {
450   static_assert(size_t(AllocKind::LIMIT) <= 255,
451                 "All AllocKinds and AllocKind::LIMIT must fit in a uint8_t.");
452   static_assert(std::size(ThingSizes) == AllocKindCount,
453                 "We haven't defined all thing sizes.");
454   static_assert(std::size(FirstThingOffsets) == AllocKindCount,
455                 "We haven't defined all offsets.");
456   static_assert(std::size(ThingsPerArena) == AllocKindCount,
457                 "We haven't defined all counts.");
458 }
459 
460 /* static */
461 inline void Arena::checkLookupTables() {
462 #ifdef DEBUG
463   for (size_t i = 0; i < AllocKindCount; i++) {
464     MOZ_ASSERT(
465         FirstThingOffsets[i] + ThingsPerArena[i] * ThingSizes[i] == ArenaSize,
466         "Inconsistent arena lookup table data");
467   }
468 #endif
469 }
470 
471 template <typename T>
472 inline size_t Arena::finalize(JSFreeOp* fop, AllocKind thingKind,
473                               size_t thingSize) {
474   /* Enforce requirements on size of T. */
475   MOZ_ASSERT(thingSize % CellAlignBytes == 0);
476   MOZ_ASSERT(thingSize >= MinCellSize);
477   MOZ_ASSERT(thingSize <= 255);
478 
479   MOZ_ASSERT(allocated());
480   MOZ_ASSERT(thingKind == getAllocKind());
481   MOZ_ASSERT(thingSize == getThingSize());
482   MOZ_ASSERT(!onDelayedMarkingList_);
483 
484   uint_fast16_t firstThing = firstThingOffset(thingKind);
485   uint_fast16_t firstThingOrSuccessorOfLastMarkedThing = firstThing;
486   uint_fast16_t lastThing = ArenaSize - thingSize;
487 
488   FreeSpan newListHead;
489   FreeSpan* newListTail = &newListHead;
490   size_t nmarked = 0, nfinalized = 0;
491 
492   for (ArenaCellIterUnderFinalize cell(this); !cell.done(); cell.next()) {
493     T* t = cell.as<T>();
494     if (t->asTenured().isMarkedAny()) {
495       uint_fast16_t thing = uintptr_t(t) & ArenaMask;
496       if (thing != firstThingOrSuccessorOfLastMarkedThing) {
497         // We just finished passing over one or more free things,
498         // so record a new FreeSpan.
499         newListTail->initBounds(firstThingOrSuccessorOfLastMarkedThing,
500                                 thing - thingSize, this);
501         newListTail = newListTail->nextSpanUnchecked(this);
502       }
503       firstThingOrSuccessorOfLastMarkedThing = thing + thingSize;
504       nmarked++;
505     } else {
506       t->finalize(fop);
507       AlwaysPoison(t, JS_SWEPT_TENURED_PATTERN, thingSize,
508                    MemCheckKind::MakeUndefined);
509       gcprobes::TenuredFinalize(t);
510       nfinalized++;
511     }
512   }
513 
514   if constexpr (std::is_same_v<T, JSObject>) {
515     if (isNewlyCreated) {
516       zone->pretenuring.updateCellCountsInNewlyCreatedArenas(
517           nmarked + nfinalized, nmarked);
518     }
519   }
520   isNewlyCreated = 0;
521 
522   if (thingKind == AllocKind::STRING ||
523       thingKind == AllocKind::FAT_INLINE_STRING) {
524     zone->markedStrings += nmarked;
525     zone->finalizedStrings += nfinalized;
526   }
527 
528   if (nmarked == 0) {
529     // Do nothing. The caller will update the arena appropriately.
530     MOZ_ASSERT(newListTail == &newListHead);
531     DebugOnlyPoison(data, JS_SWEPT_TENURED_PATTERN, sizeof(data),
532                     MemCheckKind::MakeUndefined);
533     return nmarked;
534   }
535 
536   MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing != firstThing);
537   uint_fast16_t lastMarkedThing =
538       firstThingOrSuccessorOfLastMarkedThing - thingSize;
539   if (lastThing == lastMarkedThing) {
540     // If the last thing was marked, we will have already set the bounds of
541     // the final span, and we just need to terminate the list.
542     newListTail->initAsEmpty();
543   } else {
544     // Otherwise, end the list with a span that covers the final stretch of free
545     // things.
546     newListTail->initFinal(firstThingOrSuccessorOfLastMarkedThing, lastThing,
547                            this);
548   }
549 
550   firstFreeSpan = newListHead;
551 #ifdef DEBUG
552   size_t nfree = numFreeThings(thingSize);
553   MOZ_ASSERT(nfree + nmarked == thingsPerArena(thingKind));
554 #endif
555   return nmarked;
556 }
557 
558 // Finalize arenas from the src list, inserting the surviving arenas into the
559 // appropriate destination size bins and recycling arenas that end up
560 // completely empty as fully unused.
561 template <typename T>
562 static inline bool FinalizeTypedArenas(JSFreeOp* fop, Arena** src,
563                                        SortedArenaList& dest,
564                                        AllocKind thingKind,
565                                        SliceBudget& budget) {
566   AutoSetThreadIsFinalizing setThreadUse;
567 
568   size_t thingSize = Arena::thingSize(thingKind);
569   size_t thingsPerArena = Arena::thingsPerArena(thingKind);
570 
571   while (Arena* arena = *src) {
572     Arena* next = arena->next;
573     MOZ_ASSERT_IF(next, next->zone == arena->zone);
574     *src = next;
575 
576     size_t nmarked = arena->finalize<T>(fop, thingKind, thingSize);
577     size_t nfree = thingsPerArena - nmarked;
578 
579     if (nmarked) {
580       dest.insertAt(arena, nfree);
581     } else {
582       arena->chunk()->recycleArena(arena, dest, thingsPerArena);
583     }
584 
585     budget.step(thingsPerArena);
586     if (budget.isOverBudget()) {
587       return false;
588     }
589   }
590 
591   return true;
592 }
593 
594 /*
595  * Finalize the list of arenas.
596  */
597 static bool FinalizeArenas(JSFreeOp* fop, Arena** src, SortedArenaList& dest,
598                            AllocKind thingKind, SliceBudget& budget) {
599   switch (thingKind) {
600 #define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
601                     compact)                                                 \
602   case AllocKind::allocKind:                                                 \
603     return FinalizeTypedArenas<type>(fop, src, dest, thingKind, budget);
604     FOR_EACH_ALLOCKIND(EXPAND_CASE)
605 #undef EXPAND_CASE
606 
607     default:
608       MOZ_CRASH("Invalid alloc kind");
609   }
610 }
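
// For instance, the switch above generates one case per alloc kind; the case
// for AllocKind::OBJECT0 expands to:
//
//   case AllocKind::OBJECT0:
//     return FinalizeTypedArenas<JSObject>(fop, src, dest, thingKind, budget);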
611 
612 TenuredChunk* ChunkPool::pop() {
613   MOZ_ASSERT(bool(head_) == bool(count_));
614   if (!count_) {
615     return nullptr;
616   }
617   return remove(head_);
618 }
619 
620 void ChunkPool::push(TenuredChunk* chunk) {
621   MOZ_ASSERT(!chunk->info.next);
622   MOZ_ASSERT(!chunk->info.prev);
623 
624   chunk->info.next = head_;
625   if (head_) {
626     head_->info.prev = chunk;
627   }
628   head_ = chunk;
629   ++count_;
630 }
631 
632 TenuredChunk* ChunkPool::remove(TenuredChunk* chunk) {
633   MOZ_ASSERT(count_ > 0);
634   MOZ_ASSERT(contains(chunk));
635 
636   if (head_ == chunk) {
637     head_ = chunk->info.next;
638   }
639   if (chunk->info.prev) {
640     chunk->info.prev->info.next = chunk->info.next;
641   }
642   if (chunk->info.next) {
643     chunk->info.next->info.prev = chunk->info.prev;
644   }
645   chunk->info.next = chunk->info.prev = nullptr;
646   --count_;
647 
648   return chunk;
649 }
650 
651 // We could keep the chunk pool sorted, but that's likely to be more expensive.
652 // This sort is O(n log n), whereas keeping the list sorted as we go would
653 // likely cost O(m*n), with m the number of operations (likely higher than n).
654 void ChunkPool::sort() {
655   // Only sort if the list isn't already sorted.
656   if (!isSorted()) {
657     head_ = mergeSort(head(), count());
658 
659     // Fixup prev pointers.
660     TenuredChunk* prev = nullptr;
661     for (TenuredChunk* cur = head_; cur; cur = cur->info.next) {
662       cur->info.prev = prev;
663       prev = cur;
664     }
665   }
666 
667   MOZ_ASSERT(verify());
668   MOZ_ASSERT(isSorted());
669 }
670 
671 TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
672   MOZ_ASSERT(bool(list) == bool(count));
673 
674   if (count < 2) {
675     return list;
676   }
677 
678   size_t half = count / 2;
679 
680   // Split.
681   TenuredChunk* front = list;
682   TenuredChunk* back;
683   {
684     TenuredChunk* cur = list;
685     for (size_t i = 0; i < half - 1; i++) {
686       MOZ_ASSERT(cur);
687       cur = cur->info.next;
688     }
689     back = cur->info.next;
690     cur->info.next = nullptr;
691   }
692 
693   front = mergeSort(front, half);
694   back = mergeSort(back, count - half);
695 
696   // Merge
697   list = nullptr;
698   TenuredChunk** cur = &list;
699   while (front || back) {
700     if (!front) {
701       *cur = back;
702       break;
703     }
704     if (!back) {
705       *cur = front;
706       break;
707     }
708 
709     // Note that the sort is stable due to the <= here. Nothing currently
710     // depends on this, but something could.
711     if (front->info.numArenasFree <= back->info.numArenasFree) {
712       *cur = front;
713       front = front->info.next;
714       cur = &(*cur)->info.next;
715     } else {
716       *cur = back;
717       back = back->info.next;
718       cur = &(*cur)->info.next;
719     }
720   }
721 
722   return list;
723 }
724 
725 bool ChunkPool::isSorted() const {
726   uint32_t last = 1;
727   for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
728     if (cursor->info.numArenasFree < last) {
729       return false;
730     }
731     last = cursor->info.numArenasFree;
732   }
733   return true;
734 }
735 
736 #ifdef DEBUG
737 
738 bool ChunkPool::contains(TenuredChunk* chunk) const {
739   verify();
740   for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
741     if (cursor == chunk) {
742       return true;
743     }
744   }
745   return false;
746 }
747 
748 bool ChunkPool::verify() const {
749   MOZ_ASSERT(bool(head_) == bool(count_));
750   uint32_t count = 0;
751   for (TenuredChunk* cursor = head_; cursor;
752        cursor = cursor->info.next, ++count) {
753     MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
754     MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
755   }
756   MOZ_ASSERT(count_ == count);
757   return true;
758 }
759 
760 void GCRuntime::verifyAllChunks() {
761   AutoLockGC lock(this);
762   fullChunks(lock).verifyChunks();
763   availableChunks(lock).verifyChunks();
764   emptyChunks(lock).verifyChunks();
765 }
766 
767 void ChunkPool::verifyChunks() const {
768   for (TenuredChunk* chunk = head_; chunk; chunk = chunk->info.next) {
769     chunk->verify();
770   }
771 }
772 
773 void TenuredChunk::verify() const {
774   size_t freeCount = 0;
775   size_t freeCommittedCount = 0;
776   for (size_t i = 0; i < ArenasPerChunk; ++i) {
777     if (decommittedPages[pageIndex(i)]) {
778       // Free but not committed.
779       freeCount++;
780       continue;
781     }
782 
783     if (!arenas[i].allocated()) {
784       // Free and committed.
785       freeCount++;
786       freeCommittedCount++;
787     }
788   }
789 
790   MOZ_ASSERT(freeCount == info.numArenasFree);
791   MOZ_ASSERT(freeCommittedCount == info.numArenasFreeCommitted);
792 
793   size_t freeListCount = 0;
794   for (Arena* arena = info.freeArenasHead; arena; arena = arena->next) {
795     freeListCount++;
796   }
797 
798   MOZ_ASSERT(freeListCount == info.numArenasFreeCommitted);
799 }
800 
801 #endif
802 
803 void ChunkPool::Iter::next() {
804   MOZ_ASSERT(!done());
805   current_ = current_->info.next;
806 }
807 
808 inline bool GCRuntime::tooManyEmptyChunks(const AutoLockGC& lock) {
809   return emptyChunks(lock).count() > tunables.minEmptyChunkCount(lock);
810 }
811 
812 ChunkPool GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock) {
813   MOZ_ASSERT(emptyChunks(lock).verify());
814   MOZ_ASSERT(tunables.minEmptyChunkCount(lock) <=
815              tunables.maxEmptyChunkCount());
816 
817   ChunkPool expired;
818   while (tooManyEmptyChunks(lock)) {
819     TenuredChunk* chunk = emptyChunks(lock).pop();
820     prepareToFreeChunk(chunk->info);
821     expired.push(chunk);
822   }
823 
824   MOZ_ASSERT(expired.verify());
825   MOZ_ASSERT(emptyChunks(lock).verify());
826   MOZ_ASSERT(emptyChunks(lock).count() <= tunables.maxEmptyChunkCount());
827   MOZ_ASSERT(emptyChunks(lock).count() <= tunables.minEmptyChunkCount(lock));
828   return expired;
829 }
830 
831 static void FreeChunkPool(ChunkPool& pool) {
832   for (ChunkPool::Iter iter(pool); !iter.done();) {
833     TenuredChunk* chunk = iter.get();
834     iter.next();
835     pool.remove(chunk);
836     MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
837     UnmapPages(static_cast<void*>(chunk), ChunkSize);
838   }
839   MOZ_ASSERT(pool.count() == 0);
840 }
841 
842 void GCRuntime::freeEmptyChunks(const AutoLockGC& lock) {
843   FreeChunkPool(emptyChunks(lock));
844 }
845 
846 inline void GCRuntime::prepareToFreeChunk(TenuredChunkInfo& info) {
847   MOZ_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
848   numArenasFreeCommitted -= info.numArenasFreeCommitted;
849   stats().count(gcstats::COUNT_DESTROY_CHUNK);
850 #ifdef DEBUG
851   /*
852    * Let FreeChunkPool detect a missing prepareToFreeChunk call before it
853    * frees the chunk.
854    */
855   info.numArenasFreeCommitted = 0;
856 #endif
857 }
858 
859 inline void GCRuntime::updateOnArenaFree() { ++numArenasFreeCommitted; }
860 
861 bool TenuredChunk::isPageFree(size_t pageIndex) const {
862   if (decommittedPages[pageIndex]) {
863     return true;
864   }
865 
866   size_t arenaIndex = pageIndex * ArenasPerPage;
867   for (size_t i = 0; i < ArenasPerPage; i++) {
868     if (arenas[arenaIndex + i].allocated()) {
869       return false;
870     }
871   }
872 
873   return true;
874 }
875 
876 bool TenuredChunk::isPageFree(const Arena* arena) const {
877   MOZ_ASSERT(arena);
878   // arena must come from the freeArenasHead list.
879   MOZ_ASSERT(!arena->allocated());
880   size_t count = 1;
881   size_t expectedPage = pageIndex(arena);
882 
883   Arena* nextArena = arena->next;
884   while (nextArena && (pageIndex(nextArena) == expectedPage)) {
885     count++;
886     if (count == ArenasPerPage) {
887       break;
888     }
889     nextArena = nextArena->next;
890   }
891 
892   return count == ArenasPerPage;
893 }
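
// For example (illustrative sizes): with 16 KiB system pages and 4 KiB arenas,
// ArenasPerPage is 4, so an arena and the next three entries of the
// address-ordered free list must all fall on the same page for that page to
// count as free.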
894 
895 void TenuredChunk::addArenaToFreeList(GCRuntime* gc, Arena* arena) {
896   MOZ_ASSERT(!arena->allocated());
897   arena->next = info.freeArenasHead;
898   info.freeArenasHead = arena;
899   ++info.numArenasFreeCommitted;
900   ++info.numArenasFree;
901   gc->updateOnArenaFree();
902 }
903 
904 void TenuredChunk::addArenasInPageToFreeList(GCRuntime* gc, size_t pageIndex) {
905   MOZ_ASSERT(isPageFree(pageIndex));
906 
907   size_t arenaIndex = pageIndex * ArenasPerPage;
908   for (size_t i = 0; i < ArenasPerPage; i++) {
909     Arena* a = &arenas[arenaIndex + i];
910     MOZ_ASSERT(!a->allocated());
911     a->next = info.freeArenasHead;
912     info.freeArenasHead = a;
913     // These arenas are already free; we don't need to update numArenasFree.
914     ++info.numArenasFreeCommitted;
915     gc->updateOnArenaFree();
916   }
917 }
918 
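// Rebuild freeArenasHead so that the free arenas appear in address order. The
// decommit code below relies on this ordering (see "Because the free list is
// sorted" in decommitFreeArenas).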
919 void TenuredChunk::rebuildFreeArenasList() {
920   if (info.numArenasFreeCommitted == 0) {
921     MOZ_ASSERT(!info.freeArenasHead);
922     return;
923   }
924 
925   mozilla::BitSet<ArenasPerChunk, uint32_t> freeArenas;
926   freeArenas.ResetAll();
927 
928   Arena* arena = info.freeArenasHead;
929   while (arena) {
930     freeArenas[arenaIndex(arena->address())] = true;
931     arena = arena->next;
932   }
933 
934   info.freeArenasHead = nullptr;
935   Arena** freeCursor = &info.freeArenasHead;
936 
937   for (size_t i = 0; i < PagesPerChunk; i++) {
938     for (size_t j = 0; j < ArenasPerPage; j++) {
939       size_t arenaIndex = i * ArenasPerPage + j;
940       if (freeArenas[arenaIndex]) {
941         *freeCursor = &arenas[arenaIndex];
942         freeCursor = &arenas[arenaIndex].next;
943       }
944     }
945   }
946 
947   *freeCursor = nullptr;
948 }
949 
950 void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
951                                       AutoLockGC& lock) {
952   MOZ_ASSERT(DecommitEnabled());
953 
954   // We don't traverse all arenas in the chunk, to avoid accessing arenas
955   // that are mprotect'ed during compaction in debug builds. Instead, we
956   // traverse the freeArenasHead list.
957   Arena** freeCursor = &info.freeArenasHead;
958   while (*freeCursor && !cancel) {
959     if ((ArenasPerPage > 1) && !isPageFree(*freeCursor)) {
960       freeCursor = &(*freeCursor)->next;
961       continue;
962     }
963 
964     // Find the next free arena after this page.
965     Arena* nextArena = *freeCursor;
966     for (size_t i = 0; i < ArenasPerPage; i++) {
967       nextArena = nextArena->next;
968       MOZ_ASSERT_IF(i != ArenasPerPage - 1, isPageFree(pageIndex(nextArena)));
969     }
970 
971     size_t pIndex = pageIndex(*freeCursor);
972 
973     // Remove the free arenas from the list.
974     *freeCursor = nextArena;
975 
976     info.numArenasFreeCommitted -= ArenasPerPage;
977     // When we unlock below, another thread might acquire the lock and
978     // allocate. It could then see numArenasFree > 0 even though no free
979     // arena is actually available (because we only mark the decommit bit
980     // after the MarkPagesUnusedSoft call), so we subtract from numArenasFree
981     // before unlocking and add it back once we reacquire the lock.
982     info.numArenasFree -= ArenasPerPage;
983     updateChunkListAfterAlloc(gc, lock);
984 
985     bool ok = decommitOneFreePage(gc, pIndex, lock);
986 
987     info.numArenasFree += ArenasPerPage;
988     updateChunkListAfterFree(gc, ArenasPerPage, lock);
989 
990     if (!ok) {
991       break;
992     }
993 
994     // When we unlock inside decommit, freeArenasHead might be updated by
995     // other threads doing allocations.
996     // Because the free list is sorted, we check whether freeArenasHead has
997     // moved past the page we just decommitted; if so, we reset freeCursor to
998     // the updated freeArenasHead, otherwise we just continue with the next
999     // free arena in the list.
1000     //
1001     // If freeArenasHead is nullptr, we treat its index as the largest
1002     // possible so that freeCursor is likewise reset to freeArenasHead.
1003     size_t latestIndex =
1004         info.freeArenasHead ? pageIndex(info.freeArenasHead) : PagesPerChunk;
1005     if (latestIndex > pIndex) {
1006       freeCursor = &info.freeArenasHead;
1007     }
1008   }
1009 }
1010 
1011 void TenuredChunk::markArenasInPageDecommitted(size_t pageIndex) {
1012   // The arenas within this page are already free, and numArenasFreeCommitted is
1013   // subtracted in decommitFreeArenas.
1014   decommittedPages[pageIndex] = true;
1015 }
1016 
1017 void TenuredChunk::recycleArena(Arena* arena, SortedArenaList& dest,
1018                                 size_t thingsPerArena) {
1019   arena->setAsFullyUnused();
1020   dest.insertAt(arena, thingsPerArena);
1021 }
1022 
1023 void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
1024                                 const AutoLockGC& lock) {
1025   addArenaToFreeList(gc, arena);
1026   updateChunkListAfterFree(gc, 1, lock);
1027 }
1028 
1029 bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
1030                                        AutoLockGC& lock) {
1031   MOZ_ASSERT(DecommitEnabled());
1032 
1033 #ifdef DEBUG
1034   size_t index = pageIndex * ArenasPerPage;
1035   for (size_t i = 0; i < ArenasPerPage; i++) {
1036     MOZ_ASSERT(!arenas[index + i].allocated());
1037   }
1038 #endif
1039 
1040   bool ok;
1041   {
1042     AutoUnlockGC unlock(lock);
1043     ok = MarkPagesUnusedSoft(pageAddress(pageIndex), PageSize);
1044   }
1045 
1046   if (ok) {
1047     markArenasInPageDecommitted(pageIndex);
1048   } else {
1049     addArenasInPageToFreeList(gc, pageIndex);
1050   }
1051 
1052   return ok;
1053 }
1054 
1055 void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
1056   MOZ_ASSERT(DecommitEnabled());
1057 
1058   info.freeArenasHead = nullptr;
1059   Arena** freeCursor = &info.freeArenasHead;
1060 
1061   for (size_t i = 0; i < PagesPerChunk; i++) {
1062     if (decommittedPages[i]) {
1063       continue;
1064     }
1065 
1066     if (!isPageFree(i) || js::oom::ShouldFailWithOOM() ||
1067         !MarkPagesUnusedSoft(pageAddress(i), SystemPageSize())) {
1068       // Find the free arenas and add them to freeArenasHead.
1069       for (size_t j = 0; j < ArenasPerPage; j++) {
1070         size_t arenaIndex = i * ArenasPerPage + j;
1071         if (!arenas[arenaIndex].allocated()) {
1072           *freeCursor = &arenas[arenaIndex];
1073           freeCursor = &arenas[arenaIndex].next;
1074         }
1075       }
1076       continue;
1077     }
1078 
1079     decommittedPages[i] = true;
1080     MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);
1081     info.numArenasFreeCommitted -= ArenasPerPage;
1082   }
1083 
1084   *freeCursor = nullptr;
1085 
1086 #ifdef DEBUG
1087   verify();
1088 #endif
1089 }
1090 
1091 void TenuredChunk::updateChunkListAfterAlloc(GCRuntime* gc,
1092                                              const AutoLockGC& lock) {
1093   if (MOZ_UNLIKELY(!hasAvailableArenas())) {
1094     gc->availableChunks(lock).remove(this);
1095     gc->fullChunks(lock).push(this);
1096   }
1097 }
1098 
1099 void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
1100                                             const AutoLockGC& lock) {
1101   if (info.numArenasFree == numArenasFree) {
1102     gc->fullChunks(lock).remove(this);
1103     gc->availableChunks(lock).push(this);
1104   } else if (!unused()) {
1105     MOZ_ASSERT(gc->availableChunks(lock).contains(this));
1106   } else {
1107     MOZ_ASSERT(unused());
1108     gc->availableChunks(lock).remove(this);
1109     decommitAllArenas();
1110     MOZ_ASSERT(info.numArenasFreeCommitted == 0);
1111     gc->recycleChunk(this, lock);
1112   }
1113 }
1114 
1115 void GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock) {
1116   MOZ_ASSERT(arena->allocated());
1117   MOZ_ASSERT(!arena->onDelayedMarkingList());
1118 
1119   arena->zone->gcHeapSize.removeGCArena();
1120   arena->release(lock);
1121   arena->chunk()->releaseArena(this, arena, lock);
1122 }
1123 
1124 GCRuntime::GCRuntime(JSRuntime* rt)
1125     : rt(rt),
1126       systemZone(nullptr),
1127       atomsZone(nullptr),
1128       heapState_(JS::HeapState::Idle),
1129       stats_(this),
1130       marker(rt),
1131       barrierTracer(rt),
1132       heapSize(nullptr),
1133       helperThreadRatio(TuningDefaults::HelperThreadRatio),
1134       maxHelperThreads(TuningDefaults::MaxHelperThreads),
1135       helperThreadCount(1),
1136       rootsHash(256),
1137       nextCellUniqueId_(LargestTaggedNullCellPointer +
1138                         1),  // Ensure disjoint from null tagged pointers.
1139       numArenasFreeCommitted(0),
1140       verifyPreData(nullptr),
1141       lastGCStartTime_(ReallyNow()),
1142       lastGCEndTime_(ReallyNow()),
1143       incrementalGCEnabled(TuningDefaults::IncrementalGCEnabled),
1144       perZoneGCEnabled(TuningDefaults::PerZoneGCEnabled),
1145       numActiveZoneIters(0),
1146       cleanUpEverything(false),
1147       grayBufferState(GCRuntime::GrayBufferState::Unused),
1148       grayBitsValid(false),
1149       majorGCTriggerReason(JS::GCReason::NO_REASON),
1150       fullGCForAtomsRequested_(false),
1151       minorGCNumber(0),
1152       majorGCNumber(0),
1153       number(0),
1154       sliceNumber(0),
1155       isFull(false),
1156       incrementalState(gc::State::NotActive),
1157       initialState(gc::State::NotActive),
1158       useZeal(false),
1159       lastMarkSlice(false),
1160       safeToYield(true),
1161       markOnBackgroundThreadDuringSweeping(false),
1162       sweepOnBackgroundThread(false),
1163       requestSliceAfterBackgroundTask(false),
1164       lifoBlocksToFree((size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
1165       lifoBlocksToFreeAfterMinorGC(
1166           (size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
1167       sweepGroupIndex(0),
1168       sweepGroups(nullptr),
1169       currentSweepGroup(nullptr),
1170       sweepZone(nullptr),
1171       hasMarkedGrayRoots(false),
1172       abortSweepAfterCurrentGroup(false),
1173       sweepMarkResult(IncrementalProgress::NotFinished),
1174       startedCompacting(false),
1175       zonesCompacted(0),
1176 #ifdef DEBUG
1177       relocatedArenasToRelease(nullptr),
1178 #endif
1179 #ifdef JS_GC_ZEAL
1180       markingValidator(nullptr),
1181 #endif
1182       defaultTimeBudgetMS_(TuningDefaults::DefaultTimeBudgetMS),
1183       incrementalAllowed(true),
1184       compactingEnabled(TuningDefaults::CompactingEnabled),
1185       rootsRemoved(false),
1186 #ifdef JS_GC_ZEAL
1187       zealModeBits(0),
1188       zealFrequency(0),
1189       nextScheduled(0),
1190       deterministicOnly(false),
1191       zealSliceBudget(0),
1192       selectedForMarking(rt),
1193 #endif
1194       fullCompartmentChecks(false),
1195       gcCallbackDepth(0),
1196       alwaysPreserveCode(false),
1197       lowMemoryState(false),
1198       lock(mutexid::GCLock),
1199       allocTask(this, emptyChunks_.ref()),
1200       unmarkTask(this),
1201       markTask(this),
1202       sweepTask(this),
1203       freeTask(this),
1204       decommitTask(this),
1205       nursery_(this),
1206       storeBuffer_(rt, nursery()) {
1207   marker.setIncrementalGCEnabled(incrementalGCEnabled);
1208 }
1209 
1210 using CharRange = mozilla::Range<const char>;
1211 using CharRangeVector = Vector<CharRange, 0, SystemAllocPolicy>;
1212 
1213 static bool SplitStringBy(CharRange text, char delimiter,
1214                           CharRangeVector* result) {
1215   auto start = text.begin();
1216   for (auto ptr = start; ptr != text.end(); ptr++) {
1217     if (*ptr == delimiter) {
1218       if (!result->emplaceBack(start, ptr)) {
1219         return false;
1220       }
1221       start = ptr + 1;
1222     }
1223   }
1224 
1225   return result->emplaceBack(start, text.end());
1226 }
1227 
1228 static bool ParseTimeDuration(CharRange text, TimeDuration* durationOut) {
1229   const char* str = text.begin().get();
1230   char* end;
1231   *durationOut = TimeDuration::FromMilliseconds(strtol(str, &end, 10));
1232   return str != end && end == text.end().get();
1233 }
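
// For example, splitting "10,all" on ',' with SplitStringBy yields the parts
// "10" and "all", and ParseTimeDuration turns the first part into a 10 ms
// TimeDuration.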
1234 
1235 static void PrintProfileHelpAndExit(const char* envName, const char* helpText) {
1236   fprintf(stderr, "%s=N[,(main|all)]\n", envName);
1237   fprintf(stderr, "%s", helpText);
1238   exit(0);
1239 }
1240 
1241 void js::gc::ReadProfileEnv(const char* envName, const char* helpText,
1242                             bool* enableOut, bool* workersOut,
1243                             TimeDuration* thresholdOut) {
1244   *enableOut = false;
1245   *workersOut = false;
1246   *thresholdOut = TimeDuration();
1247 
1248   const char* env = getenv(envName);
1249   if (!env) {
1250     return;
1251   }
1252 
1253   if (strcmp(env, "help") == 0) {
1254     PrintProfileHelpAndExit(envName, helpText);
1255   }
1256 
1257   CharRangeVector parts;
1258   auto text = CharRange(env, strlen(env));
1259   if (!SplitStringBy(text, ',', &parts)) {
1260     MOZ_CRASH("OOM parsing environment variable");
1261   }
1262 
1263   if (parts.length() == 0 || parts.length() > 2) {
1264     PrintProfileHelpAndExit(envName, helpText);
1265   }
1266 
1267   *enableOut = true;
1268 
1269   if (!ParseTimeDuration(parts[0], thresholdOut)) {
1270     PrintProfileHelpAndExit(envName, helpText);
1271   }
1272 
1273   if (parts.length() == 2) {
1274     const char* threads = parts[1].begin().get();
1275     if (strcmp(threads, "all") == 0) {
1276       *workersOut = true;
1277     } else if (strcmp(threads, "main") != 0) {
1278       PrintProfileHelpAndExit(envName, helpText);
1279     }
1280   }
1281 }
1282 
1283 bool js::gc::ShouldPrintProfile(JSRuntime* runtime, bool enable,
1284                                 bool profileWorkers, TimeDuration threshold,
1285                                 TimeDuration duration) {
1286   return enable && (runtime->isMainRuntime() || profileWorkers) &&
1287          duration >= threshold;
1288 }
1289 
1290 #ifdef JS_GC_ZEAL
1291 
1292 void GCRuntime::getZealBits(uint32_t* zealBits, uint32_t* frequency,
1293                             uint32_t* scheduled) {
1294   *zealBits = zealModeBits;
1295   *frequency = zealFrequency;
1296   *scheduled = nextScheduled;
1297 }
1298 
1299 const char gc::ZealModeHelpText[] =
1300     "  Specifies how zealous the garbage collector should be. Some of these "
1301     "modes can\n"
1302     "  be set simultaneously, by passing multiple level options, e.g. \"2;4\" "
1303     "will activate\n"
1304     "  both modes 2 and 4. Modes can be specified by name or number.\n"
1305     "  \n"
1306     "  Values:\n"
1307     "    0:  (None) Normal amount of collection (resets all modes)\n"
1308     "    1:  (RootsChange) Collect when roots are added or removed\n"
1309     "    2:  (Alloc) Collect after every N allocations (default: 100)\n"
1310     "    4:  (VerifierPre) Verify pre write barriers between instructions\n"
1311     "    6:  (YieldBeforeRootMarking) Incremental GC in two slices that yields "
1312     "before root marking\n"
1313     "    7:  (GenerationalGC) Collect the nursery every N nursery allocations\n"
1314     "    8:  (YieldBeforeMarking) Incremental GC in two slices that yields "
1315     "between\n"
1316     "        the root marking and marking phases\n"
1317     "    9:  (YieldBeforeSweeping) Incremental GC in two slices that yields "
1318     "between\n"
1319     "        the marking and sweeping phases\n"
1320     "    10: (IncrementalMultipleSlices) Incremental GC in many slices\n"
1321     "    11: (IncrementalMarkingValidator) Verify incremental marking\n"
1322     "    12: (ElementsBarrier) Use the individual element post-write barrier\n"
1323     "        regardless of elements size\n"
1324     "    13: (CheckHashTablesOnMinorGC) Check internal hashtables on minor GC\n"
1325     "    14: (Compact) Perform a shrinking collection every N allocations\n"
1326     "    15: (CheckHeapAfterGC) Walk the heap to check its integrity after "
1327     "every GC\n"
1328     "    16: (CheckNursery) Check nursery integrity on minor GC\n"
1329     "    17: (YieldBeforeSweepingAtoms) Incremental GC in two slices that "
1330     "yields\n"
1331     "        before sweeping the atoms table\n"
1332     "    18: (CheckGrayMarking) Check gray marking invariants after every GC\n"
1333     "    19: (YieldBeforeSweepingCaches) Incremental GC in two slices that "
1334     "yields\n"
1335     "        before sweeping weak caches\n"
1336     "    21: (YieldBeforeSweepingObjects) Incremental GC in two slices that "
1337     "yields\n"
1338     "        before sweeping foreground finalized objects\n"
1339     "    22: (YieldBeforeSweepingNonObjects) Incremental GC in two slices that "
1340     "yields\n"
1341     "        before sweeping non-object GC things\n"
1342     "    23: (YieldBeforeSweepingPropMapTrees) Incremental GC in two slices "
1343     "that "
1344     "yields\n"
1345     "        before sweeping shape trees\n"
1346     "    24: (CheckWeakMapMarking) Check weak map marking invariants after "
1347     "every GC\n"
1348     "    25: (YieldWhileGrayMarking) Incremental GC in two slices that yields\n"
1349     "        during gray marking\n";
1350 
1351 // The set of zeal modes that control incremental slices. These modes are
1352 // mutually exclusive.
1353 static const mozilla::EnumSet<ZealMode> IncrementalSliceZealModes = {
1354     ZealMode::YieldBeforeRootMarking,
1355     ZealMode::YieldBeforeMarking,
1356     ZealMode::YieldBeforeSweeping,
1357     ZealMode::IncrementalMultipleSlices,
1358     ZealMode::YieldBeforeSweepingAtoms,
1359     ZealMode::YieldBeforeSweepingCaches,
1360     ZealMode::YieldBeforeSweepingObjects,
1361     ZealMode::YieldBeforeSweepingNonObjects,
1362     ZealMode::YieldBeforeSweepingPropMapTrees};
1363 
1364 void GCRuntime::setZeal(uint8_t zeal, uint32_t frequency) {
1365   MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));
1366 
1367   if (verifyPreData) {
1368     VerifyBarriers(rt, PreBarrierVerifier);
1369   }
1370 
1371   if (zeal == 0) {
1372     if (hasZealMode(ZealMode::GenerationalGC)) {
1373       evictNursery(JS::GCReason::DEBUG_GC);
1374       nursery().leaveZealMode();
1375     }
1376 
1377     if (isIncrementalGCInProgress()) {
1378       finishGC(JS::GCReason::DEBUG_GC);
1379     }
1380   }
1381 
1382   ZealMode zealMode = ZealMode(zeal);
1383   if (zealMode == ZealMode::GenerationalGC) {
1384     evictNursery(JS::GCReason::DEBUG_GC);
1385     nursery().enterZealMode();
1386   }
1387 
1388   // Some modes are mutually exclusive. If we're setting one of those, we
1389   // first reset all of them.
1390   if (IncrementalSliceZealModes.contains(zealMode)) {
1391     for (auto mode : IncrementalSliceZealModes) {
1392       clearZealMode(mode);
1393     }
1394   }
1395 
1396   bool schedule = zealMode >= ZealMode::Alloc;
1397   if (zeal != 0) {
1398     zealModeBits |= 1 << unsigned(zeal);
1399   } else {
1400     zealModeBits = 0;
1401   }
1402   zealFrequency = frequency;
1403   nextScheduled = schedule ? frequency : 0;
1404 }
1405 
1406 void GCRuntime::unsetZeal(uint8_t zeal) {
1407   MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));
1408   ZealMode zealMode = ZealMode(zeal);
1409 
1410   if (!hasZealMode(zealMode)) {
1411     return;
1412   }
1413 
1414   if (verifyPreData) {
1415     VerifyBarriers(rt, PreBarrierVerifier);
1416   }
1417 
1418   if (zealMode == ZealMode::GenerationalGC) {
1419     evictNursery(JS::GCReason::DEBUG_GC);
1420     nursery().leaveZealMode();
1421   }
1422 
1423   clearZealMode(zealMode);
1424 
1425   if (zealModeBits == 0) {
1426     if (isIncrementalGCInProgress()) {
1427       finishGC(JS::GCReason::DEBUG_GC);
1428     }
1429 
1430     zealFrequency = 0;
1431     nextScheduled = 0;
1432   }
1433 }
1434 
1435 void GCRuntime::setNextScheduled(uint32_t count) { nextScheduled = count; }
1436 
1437 static bool ParseZealModeName(CharRange text, uint32_t* modeOut) {
1438   struct ModeInfo {
1439     const char* name;
1440     size_t length;
1441     uint32_t value;
1442   };
1443 
1444   static const ModeInfo zealModes[] = {{"None", 0},
1445 #  define ZEAL_MODE(name, value) {#  name, strlen(#  name), value},
1446                                        JS_FOR_EACH_ZEAL_MODE(ZEAL_MODE)
1447 #  undef ZEAL_MODE
1448   };
1449 
1450   for (auto mode : zealModes) {
1451     if (text.length() == mode.length &&
1452         memcmp(text.begin().get(), mode.name, mode.length) == 0) {
1453       *modeOut = mode.value;
1454       return true;
1455     }
1456   }
1457 
1458   return false;
1459 }
1460 
1461 static bool ParseZealModeNumericParam(CharRange text, uint32_t* paramOut) {
1462   if (text.length() == 0) {
1463     return false;
1464   }
1465 
1466   for (auto c : text) {
1467     if (!mozilla::IsAsciiDigit(c)) {
1468       return false;
1469     }
1470   }
1471 
1472   *paramOut = atoi(text.begin().get());
1473   return true;
1474 }
1475 
1476 static bool PrintZealHelpAndFail() {
1477   fprintf(stderr, "Format: JS_GC_ZEAL=level(;level)*[,N]\n");
1478   fputs(ZealModeHelpText, stderr);
1479   return false;
1480 }
1481 
1482 bool GCRuntime::parseAndSetZeal(const char* str) {
1483   // Set the zeal mode from a string consisting of one or more mode specifiers
1484   // separated by ';', optionally followed by a ',' and the trigger frequency.
1485   // The mode specifiers can be a mode name or its number.
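  //
  // For example, "IncrementalMultipleSlices;Alloc,100" enables zeal modes 10
  // and 2 with a trigger frequency of 100, and "14" enables the Compact mode
  // with the default frequency.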
1486 
1487   auto text = CharRange(str, strlen(str));
1488 
1489   CharRangeVector parts;
1490   if (!SplitStringBy(text, ',', &parts)) {
1491     return false;
1492   }
1493 
1494   if (parts.length() == 0 || parts.length() > 2) {
1495     return PrintZealHelpAndFail();
1496   }
1497 
1498   uint32_t frequency = JS_DEFAULT_ZEAL_FREQ;
1499   if (parts.length() == 2 && !ParseZealModeNumericParam(parts[1], &frequency)) {
1500     return PrintZealHelpAndFail();
1501   }
1502 
1503   CharRangeVector modes;
1504   if (!SplitStringBy(parts[0], ';', &modes)) {
1505     return false;
1506   }
1507 
1508   for (const auto& descr : modes) {
1509     uint32_t mode;
1510     if (!ParseZealModeName(descr, &mode) &&
1511         !(ParseZealModeNumericParam(descr, &mode) &&
1512           mode <= unsigned(ZealMode::Limit))) {
1513       return PrintZealHelpAndFail();
1514     }
1515 
1516     setZeal(mode, frequency);
1517   }
1518 
1519   return true;
1520 }
1521 
1522 const char* js::gc::AllocKindName(AllocKind kind) {
1523   static const char* const names[] = {
1524 #  define EXPAND_THING_NAME(allocKind, _1, _2, _3, _4, _5, _6) #allocKind,
1525       FOR_EACH_ALLOCKIND(EXPAND_THING_NAME)
1526 #  undef EXPAND_THING_NAME
1527   };
1528   static_assert(std::size(names) == AllocKindCount,
1529                 "names array should have an entry for every AllocKind");
1530 
1531   size_t i = size_t(kind);
1532   MOZ_ASSERT(i < std::size(names));
1533   return names[i];
1534 }
1535 
1536 void js::gc::DumpArenaInfo() {
1537   fprintf(stderr, "Arena header size: %zu\n\n", ArenaHeaderSize);
1538 
1539   fprintf(stderr, "GC thing kinds:\n");
1540   fprintf(stderr, "%25s %8s %8s %8s\n",
1541           "AllocKind:", "Size:", "Count:", "Padding:");
1542   for (auto kind : AllAllocKinds()) {
1543     fprintf(stderr, "%25s %8zu %8zu %8zu\n", AllocKindName(kind),
1544             Arena::thingSize(kind), Arena::thingsPerArena(kind),
1545             Arena::firstThingOffset(kind) - ArenaHeaderSize);
1546   }
1547 }
1548 
1549 #endif  // JS_GC_ZEAL
1550 
1551 bool GCRuntime::init(uint32_t maxbytes) {
1552   MOZ_ASSERT(SystemPageSize());
1553   Arena::checkLookupTables();
1554 
1555   {
1556     AutoLockGCBgAlloc lock(this);
1557 
1558     MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
1559 
1560     const char* size = getenv("JSGC_MARK_STACK_LIMIT");
1561     if (size) {
1562       setMarkStackLimit(atoi(size), lock);
1563     }
1564 
1565     if (!nursery().init(lock)) {
1566       return false;
1567     }
1568 
1569     const char* pretenureThresholdStr = getenv("JSGC_PRETENURE_THRESHOLD");
1570     if (pretenureThresholdStr && pretenureThresholdStr[0]) {
1571       char* last;
1572       long pretenureThreshold = strtol(pretenureThresholdStr, &last, 10);
1573       if (last[0] || !tunables.setParameter(JSGC_PRETENURE_THRESHOLD,
1574                                             pretenureThreshold, lock)) {
1575         fprintf(stderr, "Invalid value for JSGC_PRETENURE_THRESHOLD: %s\n",
1576                 pretenureThresholdStr);
1577       }
1578     }
1579   }
1580 
1581 #ifdef JS_GC_ZEAL
1582   const char* zealSpec = getenv("JS_GC_ZEAL");
1583   if (zealSpec && zealSpec[0] && !parseAndSetZeal(zealSpec)) {
1584     return false;
1585   }
1586 #endif
1587 
1588   if (!marker.init() || !initSweepActions()) {
1589     return false;
1590   }
1591 
1592   gcprobes::Init(this);
1593 
1594   updateHelperThreadCount();
1595 
1596   return true;
1597 }
1598 
1599 void GCRuntime::freezeSelfHostingZone() {
1600   MOZ_ASSERT(!selfHostingZoneFrozen);
1601   MOZ_ASSERT(!isIncrementalGCInProgress());
1602 
1603   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
1604     MOZ_ASSERT(!zone->isGCScheduled());
1605     if (zone->isSelfHostingZone()) {
1606       zone->scheduleGC();
1607     }
1608   }
1609 
1610   gc(JS::GCOptions::Shrink, JS::GCReason::INIT_SELF_HOSTING);
1611   selfHostingZoneFrozen = true;
1612 }
1613 
1614 void GCRuntime::finish() {
1615   MOZ_ASSERT(inPageLoadCount == 0);
1616 
1617   // Wait for nursery background free to end and disable it to release memory.
1618   if (nursery().isEnabled()) {
1619     nursery().disable();
1620   }
1621 
1622   // Wait until the background finalization and allocation stops and the
1623   // helper thread shuts down before we forcefully release any remaining GC
1624   // memory.
1625   sweepTask.join();
1626   freeTask.join();
1627   allocTask.cancelAndWait();
1628   decommitTask.cancelAndWait();
1629 
1630 #ifdef JS_GC_ZEAL
1631   // Free memory associated with GC verification.
1632   finishVerifier();
1633 #endif
1634 
1635   // Delete all remaining zones.
1636   if (rt->gcInitialized) {
1637     for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
1638       AutoSetThreadIsSweeping threadIsSweeping(zone);
1639       for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
1640         for (RealmsInCompartmentIter realm(comp); !realm.done(); realm.next()) {
1641           js_delete(realm.get());
1642         }
1643         comp->realms().clear();
1644         js_delete(comp.get());
1645       }
1646       zone->compartments().clear();
1647       js_delete(zone.get());
1648     }
1649   }
1650 
1651   zones().clear();
1652 
1653   FreeChunkPool(fullChunks_.ref());
1654   FreeChunkPool(availableChunks_.ref());
1655   FreeChunkPool(emptyChunks_.ref());
1656 
1657   gcprobes::Finish(this);
1658 
1659   nursery().printTotalProfileTimes();
1660   stats().printTotalProfileTimes();
1661 }
1662 
1663 bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value) {
1664   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1665   waitBackgroundSweepEnd();
1666   AutoLockGC lock(this);
1667   return setParameter(key, value, lock);
1668 }
1669 
1670 bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value,
1671                              AutoLockGC& lock) {
1672   switch (key) {
1673     case JSGC_SLICE_TIME_BUDGET_MS:
1674       defaultTimeBudgetMS_ = value;
1675       break;
1676     case JSGC_MARK_STACK_LIMIT:
1677       if (value == 0) {
1678         return false;
1679       }
1680       setMarkStackLimit(value, lock);
1681       break;
1682     case JSGC_INCREMENTAL_GC_ENABLED:
1683       setIncrementalGCEnabled(value != 0);
1684       break;
1685     case JSGC_PER_ZONE_GC_ENABLED:
1686       perZoneGCEnabled = value != 0;
1687       break;
1688     case JSGC_COMPACTING_ENABLED:
1689       compactingEnabled = value != 0;
1690       break;
1691     case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
1692       marker.incrementalWeakMapMarkingEnabled = value != 0;
1693       break;
1694     case JSGC_HELPER_THREAD_RATIO:
1695       if (rt->parentRuntime) {
1696         // Don't allow this to be set for worker runtimes.
1697         return false;
1698       }
1699       if (value == 0) {
1700         return false;
1701       }
1702       helperThreadRatio = double(value) / 100.0;
1703       updateHelperThreadCount();
1704       break;
1705     case JSGC_MAX_HELPER_THREADS:
1706       if (rt->parentRuntime) {
1707         // Don't allow this to be set for worker runtimes.
1708         return false;
1709       }
1710       if (value == 0) {
1711         return false;
1712       }
1713       maxHelperThreads = value;
1714       updateHelperThreadCount();
1715       break;
1716     default:
1717       if (!tunables.setParameter(key, value, lock)) {
1718         return false;
1719       }
1720       updateAllGCStartThresholds(lock);
1721   }
1722 
1723   return true;
1724 }
1725 
1726 void GCRuntime::resetParameter(JSGCParamKey key) {
1727   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1728   waitBackgroundSweepEnd();
1729   AutoLockGC lock(this);
1730   resetParameter(key, lock);
1731 }
1732 
1733 void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
1734   switch (key) {
1735     case JSGC_SLICE_TIME_BUDGET_MS:
1736       defaultTimeBudgetMS_ = TuningDefaults::DefaultTimeBudgetMS;
1737       break;
1738     case JSGC_MARK_STACK_LIMIT:
1739       setMarkStackLimit(MarkStack::DefaultCapacity, lock);
1740       break;
1741     case JSGC_INCREMENTAL_GC_ENABLED:
1742       setIncrementalGCEnabled(TuningDefaults::IncrementalGCEnabled);
1743       break;
1744     case JSGC_PER_ZONE_GC_ENABLED:
1745       perZoneGCEnabled = TuningDefaults::PerZoneGCEnabled;
1746       break;
1747     case JSGC_COMPACTING_ENABLED:
1748       compactingEnabled = TuningDefaults::CompactingEnabled;
1749       break;
1750     case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
1751       marker.incrementalWeakMapMarkingEnabled =
1752           TuningDefaults::IncrementalWeakMapMarkingEnabled;
1753       break;
1754     case JSGC_HELPER_THREAD_RATIO:
1755       if (rt->parentRuntime) {
1756         return;
1757       }
1758       helperThreadRatio = TuningDefaults::HelperThreadRatio;
1759       updateHelperThreadCount();
1760       break;
1761     case JSGC_MAX_HELPER_THREADS:
1762       if (rt->parentRuntime) {
1763         return;
1764       }
1765       maxHelperThreads = TuningDefaults::MaxHelperThreads;
1766       updateHelperThreadCount();
1767       break;
1768     default:
1769       tunables.resetParameter(key, lock);
1770       updateAllGCStartThresholds(lock);
1771   }
1772 }
1773 
1774 uint32_t GCRuntime::getParameter(JSGCParamKey key) {
1775   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1776   AutoLockGC lock(this);
1777   return getParameter(key, lock);
1778 }
1779 
1780 uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
1781   switch (key) {
1782     case JSGC_MAX_BYTES:
1783       return uint32_t(tunables.gcMaxBytes());
1784     case JSGC_MIN_NURSERY_BYTES:
1785       MOZ_ASSERT(tunables.gcMinNurseryBytes() < UINT32_MAX);
1786       return uint32_t(tunables.gcMinNurseryBytes());
1787     case JSGC_MAX_NURSERY_BYTES:
1788       MOZ_ASSERT(tunables.gcMaxNurseryBytes() < UINT32_MAX);
1789       return uint32_t(tunables.gcMaxNurseryBytes());
1790     case JSGC_BYTES:
1791       return uint32_t(heapSize.bytes());
1792     case JSGC_NURSERY_BYTES:
1793       return nursery().capacity();
1794     case JSGC_NUMBER:
1795       return uint32_t(number);
1796     case JSGC_MAJOR_GC_NUMBER:
1797       return uint32_t(majorGCNumber);
1798     case JSGC_MINOR_GC_NUMBER:
1799       return uint32_t(minorGCNumber);
1800     case JSGC_INCREMENTAL_GC_ENABLED:
1801       return incrementalGCEnabled;
1802     case JSGC_PER_ZONE_GC_ENABLED:
1803       return perZoneGCEnabled;
1804     case JSGC_UNUSED_CHUNKS:
1805       return uint32_t(emptyChunks(lock).count());
1806     case JSGC_TOTAL_CHUNKS:
1807       return uint32_t(fullChunks(lock).count() + availableChunks(lock).count() +
1808                       emptyChunks(lock).count());
1809     case JSGC_SLICE_TIME_BUDGET_MS:
1810       MOZ_RELEASE_ASSERT(defaultTimeBudgetMS_ >= 0);
1811       MOZ_RELEASE_ASSERT(defaultTimeBudgetMS_ <= UINT32_MAX);
1812       return uint32_t(defaultTimeBudgetMS_);
1813     case JSGC_MARK_STACK_LIMIT:
1814       return marker.maxCapacity();
1815     case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
1816       return tunables.highFrequencyThreshold().ToMilliseconds();
1817     case JSGC_SMALL_HEAP_SIZE_MAX:
1818       return tunables.smallHeapSizeMaxBytes() / 1024 / 1024;
1819     case JSGC_LARGE_HEAP_SIZE_MIN:
1820       return tunables.largeHeapSizeMinBytes() / 1024 / 1024;
1821     case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
1822       return uint32_t(tunables.highFrequencySmallHeapGrowth() * 100);
1823     case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
1824       return uint32_t(tunables.highFrequencyLargeHeapGrowth() * 100);
1825     case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
1826       return uint32_t(tunables.lowFrequencyHeapGrowth() * 100);
1827     case JSGC_ALLOCATION_THRESHOLD:
1828       return tunables.gcZoneAllocThresholdBase() / 1024 / 1024;
1829     case JSGC_SMALL_HEAP_INCREMENTAL_LIMIT:
1830       return uint32_t(tunables.smallHeapIncrementalLimit() * 100);
1831     case JSGC_LARGE_HEAP_INCREMENTAL_LIMIT:
1832       return uint32_t(tunables.largeHeapIncrementalLimit() * 100);
1833     case JSGC_MIN_EMPTY_CHUNK_COUNT:
1834       return tunables.minEmptyChunkCount(lock);
1835     case JSGC_MAX_EMPTY_CHUNK_COUNT:
1836       return tunables.maxEmptyChunkCount();
1837     case JSGC_COMPACTING_ENABLED:
1838       return compactingEnabled;
1839     case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
1840       return marker.incrementalWeakMapMarkingEnabled;
1841     case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION:
1842       return tunables.nurseryFreeThresholdForIdleCollection();
1843     case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT:
1844       return uint32_t(tunables.nurseryFreeThresholdForIdleCollectionFraction() *
1845                       100.0f);
1846     case JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS:
1847       return tunables.nurseryTimeoutForIdleCollection().ToMilliseconds();
1848     case JSGC_PRETENURE_THRESHOLD:
1849       return uint32_t(tunables.pretenureThreshold() * 100);
1850     case JSGC_PRETENURE_GROUP_THRESHOLD:
1851       return tunables.pretenureGroupThreshold();
1852     case JSGC_PRETENURE_STRING_THRESHOLD:
1853       return uint32_t(tunables.pretenureStringThreshold() * 100);
1854     case JSGC_STOP_PRETENURE_STRING_THRESHOLD:
1855       return uint32_t(tunables.stopPretenureStringThreshold() * 100);
1856     case JSGC_MIN_LAST_DITCH_GC_PERIOD:
1857       return tunables.minLastDitchGCPeriod().ToSeconds();
1858     case JSGC_ZONE_ALLOC_DELAY_KB:
1859       return tunables.zoneAllocDelayBytes() / 1024;
1860     case JSGC_MALLOC_THRESHOLD_BASE:
1861       return tunables.mallocThresholdBase() / 1024 / 1024;
1862     case JSGC_MALLOC_GROWTH_FACTOR:
1863       return uint32_t(tunables.mallocGrowthFactor() * 100);
1864     case JSGC_CHUNK_BYTES:
1865       return ChunkSize;
1866     case JSGC_HELPER_THREAD_RATIO:
1867       MOZ_ASSERT(helperThreadRatio > 0.0);
1868       return uint32_t(helperThreadRatio * 100.0);
1869     case JSGC_MAX_HELPER_THREADS:
1870       MOZ_ASSERT(maxHelperThreads <= UINT32_MAX);
1871       return maxHelperThreads;
1872     case JSGC_HELPER_THREAD_COUNT:
1873       return helperThreadCount;
1874     case JSGC_SYSTEM_PAGE_SIZE_KB:
1875       return SystemPageSize() / 1024;
1876     default:
1877       MOZ_CRASH("Unknown parameter key");
1878   }
1879 }
1880 
1881 void GCRuntime::setMarkStackLimit(size_t limit, AutoLockGC& lock) {
1882   MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
1883   AutoUnlockGC unlock(lock);
1884   AutoStopVerifyingBarriers pauseVerification(rt, false);
1885   marker.setMaxCapacity(limit);
1886 }
1887 
1888 void GCRuntime::setIncrementalGCEnabled(bool enabled) {
1889   incrementalGCEnabled = enabled;
1890   marker.setIncrementalGCEnabled(enabled);
1891 }
1892 
1893 void GCRuntime::updateHelperThreadCount() {
1894   if (!CanUseExtraThreads()) {
1895     // startTask will run the work on the main thread if the count is 1.
1896     MOZ_ASSERT(helperThreadCount == 1);
1897     return;
1898   }
1899 
1900   // The count of helper threads used for GC tasks is process wide. Don't set it
1901   // for worker JS runtimes.
1902   if (rt->parentRuntime) {
1903     helperThreadCount = rt->parentRuntime->gc.helperThreadCount;
1904     return;
1905   }
1906 
1907   double cpuCount = GetHelperThreadCPUCount();
1908   size_t target = size_t(cpuCount * helperThreadRatio.ref());
1909   target = std::clamp(target, size_t(1), maxHelperThreads.ref());
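  // For example (illustrative numbers, not the configured defaults): a ratio
  // of 0.5 on an 8-core machine yields a target of 4 helper threads, clamped
  // to the range [1, maxHelperThreads].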
1910 
1911   AutoLockHelperThreadState lock;
1912 
1913   // Attempt to create extra threads if possible. This is not supported when
1914   // using an external thread pool.
1915   (void)HelperThreadState().ensureThreadCount(target, lock);
1916 
1917   helperThreadCount = std::min(target, GetHelperThreadCount());
1918   HelperThreadState().setGCParallelThreadCount(helperThreadCount, lock);
1919 }
1920 
1921 bool GCRuntime::addBlackRootsTracer(JSTraceDataOp traceOp, void* data) {
1922   AssertHeapIsIdle();
1923   return !!blackRootTracers.ref().append(
1924       Callback<JSTraceDataOp>(traceOp, data));
1925 }
1926 
1927 void GCRuntime::removeBlackRootsTracer(JSTraceDataOp traceOp, void* data) {
1928   // Can be called from finalizers
1929   for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
1930     Callback<JSTraceDataOp>* e = &blackRootTracers.ref()[i];
1931     if (e->op == traceOp && e->data == data) {
1932       blackRootTracers.ref().erase(e);
1933       break;
1934     }
1935   }
1936 }
1937 
1938 void GCRuntime::setGrayRootsTracer(JSTraceDataOp traceOp, void* data) {
1939   AssertHeapIsIdle();
1940   grayRootTracer.ref() = {traceOp, data};
1941 }
1942 
1943 void GCRuntime::clearBlackAndGrayRootTracers() {
1944   MOZ_ASSERT(rt->isBeingDestroyed());
1945   blackRootTracers.ref().clear();
1946   setGrayRootsTracer(nullptr, nullptr);
1947 }
1948 
1949 void GCRuntime::setGCCallback(JSGCCallback callback, void* data) {
1950   gcCallback.ref() = {callback, data};
1951 }
1952 
1953 void GCRuntime::callGCCallback(JSGCStatus status, JS::GCReason reason) const {
1954   const auto& callback = gcCallback.ref();
1955   MOZ_ASSERT(callback.op);
1956   callback.op(rt->mainContextFromOwnThread(), status, reason, callback.data);
1957 }
1958 
1959 void GCRuntime::setObjectsTenuredCallback(JSObjectsTenuredCallback callback,
1960                                           void* data) {
1961   tenuredCallback.ref() = {callback, data};
1962 }
1963 
1964 void GCRuntime::callObjectsTenuredCallback() {
1965   JS::AutoSuppressGCAnalysis nogc;
1966   const auto& callback = tenuredCallback.ref();
1967   if (callback.op) {
1968     callback.op(rt->mainContextFromOwnThread(), callback.data);
1969   }
1970 }
1971 
1972 bool GCRuntime::addFinalizeCallback(JSFinalizeCallback callback, void* data) {
1973   return finalizeCallbacks.ref().append(
1974       Callback<JSFinalizeCallback>(callback, data));
1975 }
1976 
1977 template <typename F>
1978 static void EraseCallback(CallbackVector<F>& vector, F callback) {
1979   for (Callback<F>* p = vector.begin(); p != vector.end(); p++) {
1980     if (p->op == callback) {
1981       vector.erase(p);
1982       return;
1983     }
1984   }
1985 }
1986 
1987 void GCRuntime::removeFinalizeCallback(JSFinalizeCallback callback) {
1988   EraseCallback(finalizeCallbacks.ref(), callback);
1989 }
1990 
1991 void GCRuntime::callFinalizeCallbacks(JSFreeOp* fop,
1992                                       JSFinalizeStatus status) const {
1993   for (auto& p : finalizeCallbacks.ref()) {
1994     p.op(fop, status, p.data);
1995   }
1996 }
1997 
1998 void GCRuntime::setHostCleanupFinalizationRegistryCallback(
1999     JSHostCleanupFinalizationRegistryCallback callback, void* data) {
2000   hostCleanupFinalizationRegistryCallback.ref() = {callback, data};
2001 }
2002 
2003 void GCRuntime::callHostCleanupFinalizationRegistryCallback(
2004     JSFunction* doCleanup, GlobalObject* incumbentGlobal) {
2005   JS::AutoSuppressGCAnalysis nogc;
2006   const auto& callback = hostCleanupFinalizationRegistryCallback.ref();
2007   if (callback.op) {
2008     callback.op(doCleanup, incumbentGlobal, callback.data);
2009   }
2010 }
2011 
2012 bool GCRuntime::addWeakPointerZonesCallback(JSWeakPointerZonesCallback callback,
2013                                             void* data) {
2014   return updateWeakPointerZonesCallbacks.ref().append(
2015       Callback<JSWeakPointerZonesCallback>(callback, data));
2016 }
2017 
2018 void GCRuntime::removeWeakPointerZonesCallback(
2019     JSWeakPointerZonesCallback callback) {
2020   EraseCallback(updateWeakPointerZonesCallbacks.ref(), callback);
2021 }
2022 
2023 void GCRuntime::callWeakPointerZonesCallbacks() const {
2024   JSContext* cx = rt->mainContextFromOwnThread();
2025   for (auto const& p : updateWeakPointerZonesCallbacks.ref()) {
2026     p.op(cx, p.data);
2027   }
2028 }
2029 
2030 bool GCRuntime::addWeakPointerCompartmentCallback(
2031     JSWeakPointerCompartmentCallback callback, void* data) {
2032   return updateWeakPointerCompartmentCallbacks.ref().append(
2033       Callback<JSWeakPointerCompartmentCallback>(callback, data));
2034 }
2035 
2036 void GCRuntime::removeWeakPointerCompartmentCallback(
2037     JSWeakPointerCompartmentCallback callback) {
2038   EraseCallback(updateWeakPointerCompartmentCallbacks.ref(), callback);
2039 }
2040 
2041 void GCRuntime::callWeakPointerCompartmentCallbacks(
2042     JS::Compartment* comp) const {
2043   JSContext* cx = rt->mainContextFromOwnThread();
2044   for (auto const& p : updateWeakPointerCompartmentCallbacks.ref()) {
2045     p.op(cx, comp, p.data);
2046   }
2047 }
2048 
2049 JS::GCSliceCallback GCRuntime::setSliceCallback(JS::GCSliceCallback callback) {
2050   return stats().setSliceCallback(callback);
2051 }
2052 
2053 JS::GCNurseryCollectionCallback GCRuntime::setNurseryCollectionCallback(
2054     JS::GCNurseryCollectionCallback callback) {
2055   return stats().setNurseryCollectionCallback(callback);
2056 }
2057 
2058 JS::DoCycleCollectionCallback GCRuntime::setDoCycleCollectionCallback(
2059     JS::DoCycleCollectionCallback callback) {
2060   const auto prior = gcDoCycleCollectionCallback.ref();
2061   gcDoCycleCollectionCallback.ref() = {callback, nullptr};
2062   return prior.op;
2063 }
2064 
2065 void GCRuntime::callDoCycleCollectionCallback(JSContext* cx) {
2066   const auto& callback = gcDoCycleCollectionCallback.ref();
2067   if (callback.op) {
2068     callback.op(cx);
2069   }
2070 }
2071 
2072 bool GCRuntime::addRoot(Value* vp, const char* name) {
2073   /*
2074    * Sometimes Firefox will hold weak references to objects and then convert
2075    * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
2076    * or ModifyBusyCount in workers). We need a read barrier to cover these
2077    * cases.
2078    */
2079   MOZ_ASSERT(vp);
2080   Value value = *vp;
2081   if (value.isGCThing()) {
2082     ValuePreWriteBarrier(value);
2083   }
2084 
2085   return rootsHash.ref().put(vp, name);
2086 }
2087 
2088 void GCRuntime::removeRoot(Value* vp) {
2089   rootsHash.ref().remove(vp);
2090   notifyRootsRemoved();
2091 }
2092 
2093 extern JS_PUBLIC_API bool js::AddRawValueRoot(JSContext* cx, Value* vp,
2094                                               const char* name) {
2095   MOZ_ASSERT(vp);
2096   MOZ_ASSERT(name);
2097   bool ok = cx->runtime()->gc.addRoot(vp, name);
2098   if (!ok) {
2099     JS_ReportOutOfMemory(cx);
2100   }
2101   return ok;
2102 }
2103 
2104 extern JS_PUBLIC_API void js::RemoveRawValueRoot(JSContext* cx, Value* vp) {
2105   cx->runtime()->gc.removeRoot(vp);
2106 }
2107 
2108 /* Compacting GC */
2109 
2110 bool js::gc::IsCurrentlyAnimating(const TimeStamp& lastAnimationTime,
2111                                   const TimeStamp& currentTime) {
2112   // Assume that we're currently animating if js::NotifyAnimationActivity has
2113   // been called in the last second.
2114   static const auto oneSecond = TimeDuration::FromSeconds(1);
2115   return !lastAnimationTime.IsNull() &&
2116          currentTime < (lastAnimationTime + oneSecond);
2117 }
2118 
2119 bool GCRuntime::shouldCompact() {
2120   // Compact on shrinking GC if enabled.  Skip compacting in incremental GCs
2121   // if we are currently animating, unless the user is inactive or we're
2122   // responding to memory pressure.
2123 
2124   if (gcOptions != JS::GCOptions::Shrink || !isCompactingGCEnabled()) {
2125     return false;
2126   }
2127 
2128   if (initialReason == JS::GCReason::USER_INACTIVE ||
2129       initialReason == JS::GCReason::MEM_PRESSURE) {
2130     return true;
2131   }
2132 
2133   return !isIncremental ||
2134          !IsCurrentlyAnimating(rt->lastAnimationTime, TimeStamp::Now());
2135 }
2136 
2137 bool GCRuntime::isCompactingGCEnabled() const {
2138   return compactingEnabled &&
2139          rt->mainContextFromOwnThread()->compactingDisabledCount == 0;
2140 }
2141 
2142 AutoDisableCompactingGC::AutoDisableCompactingGC(JSContext* cx) : cx(cx) {
2143   ++cx->compactingDisabledCount;
2144   if (cx->runtime()->gc.isIncrementalGCInProgress() &&
2145       cx->runtime()->gc.isCompactingGc()) {
2146     FinishGC(cx);
2147   }
2148 }
2149 
2150 AutoDisableCompactingGC::~AutoDisableCompactingGC() {
2151   MOZ_ASSERT(cx->compactingDisabledCount > 0);
2152   --cx->compactingDisabledCount;
2153 }
2154 
2155 bool GCRuntime::canRelocateZone(Zone* zone) const {
2156   if (zone->isAtomsZone()) {
2157     return false;
2158   }
2159 
2160   if (zone->isSelfHostingZone() && selfHostingZoneFrozen) {
2161     return false;
2162   }
2163 
2164   return true;
2165 }
2166 
2167 #ifdef DEBUG
2168 void js::gc::ArenaList::dump() {
2169   fprintf(stderr, "ArenaList %p:", this);
2170   if (cursorp_ == &head_) {
2171     fprintf(stderr, " *");
2172   }
2173   for (Arena* arena = head(); arena; arena = arena->next) {
2174     fprintf(stderr, " %p", arena);
2175     if (cursorp_ == &arena->next) {
2176       fprintf(stderr, " *");
2177     }
2178   }
2179   fprintf(stderr, "\n");
2180 }
2181 #endif
2182 
2183 Arena* ArenaList::removeRemainingArenas(Arena** arenap) {
2184   // This is only ever called to remove arenas that are after the cursor, so
2185   // we don't need to update it.
2186 #ifdef DEBUG
2187   for (Arena* arena = *arenap; arena; arena = arena->next) {
2188     MOZ_ASSERT(cursorp_ != &arena->next);
2189   }
2190 #endif
2191   Arena* remainingArenas = *arenap;
2192   *arenap = nullptr;
2193   check();
2194   return remainingArenas;
2195 }
2196 
2197 static bool ShouldRelocateAllArenas(JS::GCReason reason) {
2198   return reason == JS::GCReason::DEBUG_GC;
2199 }
2200 
2201 /*
2202  * Choose which arenas to relocate all cells from. Return an arena cursor that
2203  * can be passed to removeRemainingArenas().
2204  */
2205 Arena** ArenaList::pickArenasToRelocate(size_t& arenaTotalOut,
2206                                         size_t& relocTotalOut) {
2207   // Relocate the greatest number of arenas such that the number of used cells
2208   // in relocated arenas is less than or equal to the number of free cells in
2209   // unrelocated arenas. In other words we only relocate cells we can move
2210   // into existing arenas, and we choose the least full arenas to relocate.
2211   //
2212   // This is made easier by the fact that the arena list has been sorted in
2213   // descending order of number of used cells, so we will always relocate a
2214   // tail of the arena list. All we need to do is find the point at which to
2215   // start relocating.
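  //
  // Illustrative example (made-up numbers): with non-full arenas holding
  // [9, 7, 4, 2] used cells out of 10 each (free counts [1, 3, 6, 8]), the
  // loop below keeps the first three arenas and relocates only the last one,
  // because its 2 used cells fit into the 1 + 3 + 6 = 10 free cells of the
  // arenas that stay put.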
2216 
2217   check();
2218 
2219   if (isCursorAtEnd()) {
2220     return nullptr;
2221   }
2222 
2223   Arena** arenap = cursorp_;      // Next arena to consider for relocation.
2224   size_t previousFreeCells = 0;   // Count of free cells before arenap.
2225   size_t followingUsedCells = 0;  // Count of used cells after arenap.
2226   size_t fullArenaCount = 0;      // Number of full arenas (not relocated).
2227   size_t nonFullArenaCount =
2228       0;  // Number of non-full arenas (considered for relocation).
2229   size_t arenaIndex = 0;  // Index of the next arena to consider.
2230 
2231   for (Arena* arena = head_; arena != *cursorp_; arena = arena->next) {
2232     fullArenaCount++;
2233   }
2234 
2235   for (Arena* arena = *cursorp_; arena; arena = arena->next) {
2236     followingUsedCells += arena->countUsedCells();
2237     nonFullArenaCount++;
2238   }
2239 
2240   mozilla::DebugOnly<size_t> lastFreeCells(0);
2241   size_t cellsPerArena = Arena::thingsPerArena((*arenap)->getAllocKind());
2242 
2243   while (*arenap) {
2244     Arena* arena = *arenap;
2245     if (followingUsedCells <= previousFreeCells) {
2246       break;
2247     }
2248 
2249     size_t freeCells = arena->countFreeCells();
2250     size_t usedCells = cellsPerArena - freeCells;
2251     followingUsedCells -= usedCells;
2252 #ifdef DEBUG
2253     MOZ_ASSERT(freeCells >= lastFreeCells);
2254     lastFreeCells = freeCells;
2255 #endif
2256     previousFreeCells += freeCells;
2257     arenap = &arena->next;
2258     arenaIndex++;
2259   }
2260 
2261   size_t relocCount = nonFullArenaCount - arenaIndex;
2262   MOZ_ASSERT(relocCount < nonFullArenaCount);
2263   MOZ_ASSERT((relocCount == 0) == (!*arenap));
2264   arenaTotalOut += fullArenaCount + nonFullArenaCount;
2265   relocTotalOut += relocCount;
2266 
2267   return arenap;
2268 }
2269 
2270 #ifdef DEBUG
2271 inline bool PtrIsInRange(const void* ptr, const void* start, size_t length) {
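  // The unsigned subtraction below wraps around when |ptr| is below |start|,
  // producing a huge value that fails the comparison, so this checks
  // start <= ptr < start + length in a single compare.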
2272   return uintptr_t(ptr) - uintptr_t(start) < length;
2273 }
2274 #endif
2275 
2276 static void RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind,
2277                          size_t thingSize) {
2278   JS::AutoSuppressGCAnalysis nogc(TlsContext.get());
2279 
2280   // Allocate a new cell.
2281   MOZ_ASSERT(zone == src->zone());
2282   TenuredCell* dst = AllocateCellInGC(zone, thingKind);
2283 
2284   // Copy source cell contents to destination.
2285   memcpy(dst, src, thingSize);
2286 
2287   // Move any uid attached to the object.
2288   src->zone()->transferUniqueId(dst, src);
2289 
2290   if (IsObjectAllocKind(thingKind)) {
2291     auto* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
2292     auto* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
2293 
2294     if (srcObj->is<NativeObject>()) {
2295       NativeObject* srcNative = &srcObj->as<NativeObject>();
2296       NativeObject* dstNative = &dstObj->as<NativeObject>();
2297 
2298       // Fixup the pointer to inline object elements if necessary.
2299       if (srcNative->hasFixedElements()) {
2300         uint32_t numShifted =
2301             srcNative->getElementsHeader()->numShiftedElements();
2302         dstNative->setFixedElements(numShifted);
2303       }
2304     } else if (srcObj->is<ProxyObject>()) {
2305       if (srcObj->as<ProxyObject>().usingInlineValueArray()) {
2306         dstObj->as<ProxyObject>().setInlineValueArray();
2307       }
2308     }
2309 
2310     // Call object moved hook if present.
2311     if (JSObjectMovedOp op = srcObj->getClass()->extObjectMovedOp()) {
2312       op(dstObj, srcObj);
2313     }
2314 
2315     MOZ_ASSERT_IF(
2316         dstObj->is<NativeObject>(),
2317         !PtrIsInRange(
2318             (const Value*)dstObj->as<NativeObject>().getDenseElements(), src,
2319             thingSize));
2320   }
2321 
2322   // Copy the mark bits.
2323   dst->copyMarkBitsFrom(src);
2324 
2325   // Poison the source cell contents except for the forwarding flag and pointer
2326   // which will be stored in the first word. We can't do this for native object
2327   // with fixed elements because this would overwrite the element flags and
2328   // these are needed when updating COW elements referred to by other objects.
2329 #ifdef DEBUG
2330   JSObject* srcObj = IsObjectAllocKind(thingKind)
2331                          ? static_cast<JSObject*>(static_cast<Cell*>(src))
2332                          : nullptr;
2333   if (!srcObj || !srcObj->is<NativeObject>() ||
2334       !srcObj->as<NativeObject>().hasFixedElements()) {
2335     AlwaysPoison(reinterpret_cast<uint8_t*>(src) + sizeof(uintptr_t),
2336                  JS_MOVED_TENURED_PATTERN, thingSize - sizeof(uintptr_t),
2337                  MemCheckKind::MakeNoAccess);
2338   }
2339 #endif
2340 
2341   // Mark source cell as forwarded and leave a pointer to the destination.
2342   RelocationOverlay::forwardCell(src, dst);
2343 }
2344 
2345 static void RelocateArena(Arena* arena, SliceBudget& sliceBudget) {
2346   MOZ_ASSERT(arena->allocated());
2347   MOZ_ASSERT(!arena->onDelayedMarkingList());
2348   MOZ_ASSERT(arena->bufferedCells()->isEmpty());
2349 
2350   Zone* zone = arena->zone;
2351 
2352   AllocKind thingKind = arena->getAllocKind();
2353   size_t thingSize = arena->getThingSize();
2354 
2355   for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
2356     RelocateCell(zone, cell, thingKind, thingSize);
2357     sliceBudget.step();
2358   }
2359 
2360 #ifdef DEBUG
2361   for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
2362     TenuredCell* src = cell;
2363     MOZ_ASSERT(src->isForwarded());
2364     TenuredCell* dest = Forwarded(src);
2365     MOZ_ASSERT(src->isMarkedBlack() == dest->isMarkedBlack());
2366     MOZ_ASSERT(src->isMarkedGray() == dest->isMarkedGray());
2367   }
2368 #endif
2369 }
2370 
2371 /*
2372  * Relocate all arenas identified by pickArenasToRelocate: for each arena,
2373  * relocate each cell within it, then add it to a list of relocated arenas.
2374  */
2375 Arena* ArenaList::relocateArenas(Arena* toRelocate, Arena* relocated,
2376                                  SliceBudget& sliceBudget,
2377                                  gcstats::Statistics& stats) {
2378   check();
2379 
2380   while (Arena* arena = toRelocate) {
2381     toRelocate = arena->next;
2382     RelocateArena(arena, sliceBudget);
2383     // Prepend to list of relocated arenas
2384     arena->next = relocated;
2385     relocated = arena;
2386     stats.count(gcstats::COUNT_ARENA_RELOCATED);
2387   }
2388 
2389   check();
2390 
2391   return relocated;
2392 }
2393 
2394 // Skip compacting zones unless we can free a certain proportion of their GC
2395 // heap memory.
2396 static const float MIN_ZONE_RECLAIM_PERCENT = 2.0;
2397 
2398 static bool ShouldRelocateZone(size_t arenaCount, size_t relocCount,
2399                                JS::GCReason reason) {
2400   if (relocCount == 0) {
2401     return false;
2402   }
2403 
2404   if (IsOOMReason(reason)) {
2405     return true;
2406   }
2407 
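  // For example (illustrative numbers): a zone with 1000 arenas must have at
  // least 20 relocatable arenas (2%) before compacting it is worthwhile.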
2408   return (relocCount * 100.0f) / arenaCount >= MIN_ZONE_RECLAIM_PERCENT;
2409 }
2410 
2411 static AllocKinds CompactingAllocKinds() {
2412   AllocKinds result;
2413   for (AllocKind kind : AllAllocKinds()) {
2414     if (IsCompactingKind(kind)) {
2415       result += kind;
2416     }
2417   }
2418   return result;
2419 }
2420 
2421 bool ArenaLists::relocateArenas(Arena*& relocatedListOut, JS::GCReason reason,
2422                                 SliceBudget& sliceBudget,
2423                                 gcstats::Statistics& stats) {
2424   // This is only called from the main thread while we are doing a GC, so
2425   // there is no need to lock.
2426   MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
2427   MOZ_ASSERT(runtime()->gc.isHeapCompacting());
2428   MOZ_ASSERT(!runtime()->gc.isBackgroundSweeping());
2429 
2430   // Relocate all compatible kinds
2431   AllocKinds allocKindsToRelocate = CompactingAllocKinds();
2432 
2433   // Clear all the free lists.
2434   clearFreeLists();
2435 
2436   if (ShouldRelocateAllArenas(reason)) {
2437     zone_->prepareForCompacting();
2438     for (auto kind : allocKindsToRelocate) {
2439       ArenaList& al = arenaList(kind);
2440       Arena* allArenas = al.head();
2441       al.clear();
2442       relocatedListOut =
2443           al.relocateArenas(allArenas, relocatedListOut, sliceBudget, stats);
2444     }
2445   } else {
2446     size_t arenaCount = 0;
2447     size_t relocCount = 0;
2448     AllAllocKindArray<Arena**> toRelocate;
2449 
2450     for (auto kind : allocKindsToRelocate) {
2451       toRelocate[kind] =
2452           arenaList(kind).pickArenasToRelocate(arenaCount, relocCount);
2453     }
2454 
2455     if (!ShouldRelocateZone(arenaCount, relocCount, reason)) {
2456       return false;
2457     }
2458 
2459     zone_->prepareForCompacting();
2460     for (auto kind : allocKindsToRelocate) {
2461       if (toRelocate[kind]) {
2462         ArenaList& al = arenaList(kind);
2463         Arena* arenas = al.removeRemainingArenas(toRelocate[kind]);
2464         relocatedListOut =
2465             al.relocateArenas(arenas, relocatedListOut, sliceBudget, stats);
2466       }
2467     }
2468   }
2469 
2470   return true;
2471 }
2472 
2473 bool GCRuntime::relocateArenas(Zone* zone, JS::GCReason reason,
2474                                Arena*& relocatedListOut,
2475                                SliceBudget& sliceBudget) {
2476   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT_MOVE);
2477 
2478   MOZ_ASSERT(!zone->isPreservingCode());
2479   MOZ_ASSERT(canRelocateZone(zone));
2480 
2481   js::CancelOffThreadIonCompile(rt, JS::Zone::Compact);
2482 
2483   if (!zone->arenas.relocateArenas(relocatedListOut, reason, sliceBudget,
2484                                    stats())) {
2485     return false;
2486   }
2487 
2488 #ifdef DEBUG
2489   // Check that we did as much compaction as we should have. There
2490   // should always be less than one arena's worth of free cells.
2491   for (auto kind : CompactingAllocKinds()) {
2492     ArenaList& al = zone->arenas.arenaList(kind);
2493     size_t freeCells = 0;
2494     for (Arena* arena = al.arenaAfterCursor(); arena; arena = arena->next) {
2495       freeCells += arena->countFreeCells();
2496     }
2497     MOZ_ASSERT(freeCells < Arena::thingsPerArena(kind));
2498   }
2499 #endif
2500 
2501   return true;
2502 }
2503 
2504 template <typename T>
2505 inline T* MovingTracer::onEdge(T* thing) {
2506   if (thing->runtimeFromAnyThread() == runtime() && IsForwarded(thing)) {
2507     thing = Forwarded(thing);
2508   }
2509 
2510   return thing;
2511 }
2512 
2513 JSObject* MovingTracer::onObjectEdge(JSObject* obj) { return onEdge(obj); }
2514 Shape* MovingTracer::onShapeEdge(Shape* shape) { return onEdge(shape); }
2515 JSString* MovingTracer::onStringEdge(JSString* string) {
2516   return onEdge(string);
2517 }
2518 js::BaseScript* MovingTracer::onScriptEdge(js::BaseScript* script) {
2519   return onEdge(script);
2520 }
2521 BaseShape* MovingTracer::onBaseShapeEdge(BaseShape* base) {
2522   return onEdge(base);
2523 }
2524 GetterSetter* MovingTracer::onGetterSetterEdge(GetterSetter* gs) {
2525   return onEdge(gs);
2526 }
2527 PropMap* MovingTracer::onPropMapEdge(js::PropMap* map) { return onEdge(map); }
2528 Scope* MovingTracer::onScopeEdge(Scope* scope) { return onEdge(scope); }
2529 RegExpShared* MovingTracer::onRegExpSharedEdge(RegExpShared* shared) {
2530   return onEdge(shared);
2531 }
2532 BigInt* MovingTracer::onBigIntEdge(BigInt* bi) { return onEdge(bi); }
2533 JS::Symbol* MovingTracer::onSymbolEdge(JS::Symbol* sym) {
2534   MOZ_ASSERT(!sym->isForwarded());
2535   return sym;
2536 }
2537 jit::JitCode* MovingTracer::onJitCodeEdge(jit::JitCode* jit) {
2538   MOZ_ASSERT(!jit->isForwarded());
2539   return jit;
2540 }
2541 
2542 void Zone::prepareForCompacting() {
2543   JSFreeOp* fop = runtimeFromMainThread()->defaultFreeOp();
2544   discardJitCode(fop);
2545 }
2546 
2547 void GCRuntime::sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone) {
2548   MOZ_ASSERT(zone->isCollecting());
2549   sweepFinalizationRegistries(zone);
2550   zone->weakRefMap().sweep(&storeBuffer());
2551 
2552   {
2553     zone->sweepWeakMaps();
2554     for (auto* cache : zone->weakCaches()) {
2555       cache->sweep(nullptr);
2556     }
2557   }
2558 
2559   if (jit::JitZone* jitZone = zone->jitZone()) {
2560     jitZone->traceWeak(trc);
2561   }
2562 
2563   for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
2564     r->traceWeakRegExps(trc);
2565     r->traceWeakSavedStacks(trc);
2566     r->traceWeakVarNames(trc);
2567     r->traceWeakObjects(trc);
2568     r->traceWeakSelfHostingScriptSource(trc);
2569     r->sweepDebugEnvironments();
2570     r->traceWeakEdgesInJitRealm(trc);
2571     r->traceWeakObjectRealm(trc);
2572     r->traceWeakTemplateObjects(trc);
2573   }
2574 }
2575 
2576 template <typename T>
2577 static inline void UpdateCellPointers(MovingTracer* trc, T* cell) {
2578   // We only update unmoved GC things or the new copy of moved GC things, never
2579   // the old copy. If this happened it could clear the forwarded flag which
2580   // could lead to pointers to the old copy not being updated.
2581   MOZ_ASSERT(!cell->isForwarded());
2582 
2583   cell->fixupAfterMovingGC();
2584   cell->traceChildren(trc);
2585 }
2586 
2587 template <typename T>
2588 static void UpdateArenaPointersTyped(MovingTracer* trc, Arena* arena) {
2589   for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
2590     UpdateCellPointers(trc, cell.as<T>());
2591   }
2592 }
2593 
2594 static bool CanUpdateKindInBackground(AllocKind kind) {
2595   // We try to update as many GC things in parallel as we can, but there are
2596   // kinds for which this might not be safe:
2597   //  - we assume JSObjects that are foreground finalized are not safe to
2598   //    update in parallel
2599   //  - updating a SharedPropMap touches child maps in
2600   //    SharedPropMap::fixupAfterMovingGC
2601   return js::gc::IsBackgroundFinalized(kind) && !IsShapeAllocKind(kind) &&
2602          kind != AllocKind::BASE_SHAPE;
2603 }
2604 
2605 /*
2606  * Update the internal pointers for all cells in an arena.
2607  */
2608 static void UpdateArenaPointers(MovingTracer* trc, Arena* arena) {
2609   AllocKind kind = arena->getAllocKind();
2610 
2611   MOZ_ASSERT_IF(!CanUpdateKindInBackground(kind),
2612                 CurrentThreadCanAccessRuntime(trc->runtime()));
2613 
2614   switch (kind) {
2615 #define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
2616                     compact)                                                 \
2617   case AllocKind::allocKind:                                                 \
2618     UpdateArenaPointersTyped<type>(trc, arena);                              \
2619     return;
2620     FOR_EACH_ALLOCKIND(EXPAND_CASE)
2621 #undef EXPAND_CASE
2622 
2623     default:
2624       MOZ_CRASH("Invalid alloc kind for UpdateArenaPointers");
2625   }
2626 }
2627 
2628 struct ArenaListSegment {
2629   Arena* begin;
2630   Arena* end;
2631 };
2632 
2633 /*
2634  * Update the internal pointers for all arenas in a segment of an arena list.
2635  *
2636  * Returns the number of steps to count against the slice budget.
2637  */
2638 static size_t UpdateArenaListSegmentPointers(GCRuntime* gc,
2639                                              const ArenaListSegment& arenas) {
2640   MOZ_ASSERT(arenas.begin);
2641   MovingTracer trc(gc->rt);
2642   size_t count = 0;
2643   for (Arena* arena = arenas.begin; arena != arenas.end; arena = arena->next) {
2644     UpdateArenaPointers(&trc, arena);
2645     count++;
2646   }
2647   return count * 256;
2648 }
2649 
2650 class ArenasToUpdate {
2651   // Maximum number of arenas to update in one block.
2652 #ifdef DEBUG
2653   static const unsigned MaxArenasToProcess = 16;
2654 #else
2655   static const unsigned MaxArenasToProcess = 256;
2656 #endif
2657 
2658  public:
2659   explicit ArenasToUpdate(Zone* zone);
2660   ArenasToUpdate(Zone* zone, const AllocKinds& kinds);
2661 
2662   bool done() const { return !segmentBegin; }
2663 
2664   ArenaListSegment get() const {
2665     MOZ_ASSERT(!done());
2666     return {segmentBegin, segmentEnd};
2667   }
2668 
2669   void next();
2670 
2671  private:
2672   Maybe<AllocKinds> kinds;            // Selects which thing kinds to update.
2673   Zone* zone;                         // Zone to process.
2674   AllocKind kind = AllocKind::FIRST;  // Current alloc kind to process.
2675   Arena* segmentBegin = nullptr;
2676   Arena* segmentEnd = nullptr;
2677 
2678   static AllocKind nextAllocKind(AllocKind i) {
2679     return AllocKind(uint8_t(i) + 1);
2680   }
2681 
2682   void settle();
2683   void findSegmentEnd();
2684 };
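// Typical use (see GCRuntime::updateCellPointers below): walk a zone's arenas
// in fixed-size segments, e.g.
//
//   for (ArenasToUpdate arenas(zone, kinds); !arenas.done(); arenas.next()) {
//     UpdateArenaListSegmentPointers(gc, arenas.get());
//   }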
2685 
2686 ArenasToUpdate::ArenasToUpdate(Zone* zone) : zone(zone) { settle(); }
2687 
2688 ArenasToUpdate::ArenasToUpdate(Zone* zone, const AllocKinds& kinds)
2689     : kinds(Some(kinds)), zone(zone) {
2690   settle();
2691 }
2692 
2693 void ArenasToUpdate::settle() {
2694   // Called when we have set |kind| to a new kind. Sets |segmentBegin| to the
2695   // next arena, or leaves it null if there are no more arenas to update.
2696 
2697   MOZ_ASSERT(!segmentBegin);
2698 
2699   for (; kind < AllocKind::LIMIT; kind = nextAllocKind(kind)) {
2700     if (kinds && !kinds.ref().contains(kind)) {
2701       continue;
2702     }
2703 
2704     Arena* arena = zone->arenas.getFirstArena(kind);
2705     if (arena) {
2706       segmentBegin = arena;
2707       findSegmentEnd();
2708       break;
2709     }
2710   }
2711 }
2712 
2713 void ArenasToUpdate::findSegmentEnd() {
2714   // Take up to MaxArenasToProcess arenas from the list starting at
2715   // |segmentBegin| and set |segmentEnd|.
2716   Arena* arena = segmentBegin;
2717   for (size_t i = 0; arena && i < MaxArenasToProcess; i++) {
2718     arena = arena->next;
2719   }
2720   segmentEnd = arena;
2721 }
2722 
2723 void ArenasToUpdate::next() {
2724   MOZ_ASSERT(!done());
2725 
2726   segmentBegin = segmentEnd;
2727   if (segmentBegin) {
2728     findSegmentEnd();
2729     return;
2730   }
2731 
2732   kind = nextAllocKind(kind);
2733   settle();
2734 }
2735 
2736 static AllocKinds ForegroundUpdateKinds(AllocKinds kinds) {
2737   AllocKinds result;
2738   for (AllocKind kind : kinds) {
2739     if (!CanUpdateKindInBackground(kind)) {
2740       result += kind;
2741     }
2742   }
2743   return result;
2744 }
2745 
2746 void GCRuntime::updateRttValueObjects(MovingTracer* trc, Zone* zone) {
2747   // We need to update each type descriptor object and any objects stored in
2748   // its reserved slots, since some of these contain array objects that also
2749   // need to be updated. Do not update any non-reserved slots, since they might
2750   // point back to unprocessed descriptor objects.
2751 
2752   zone->rttValueObjects().sweep(nullptr);
2753 
2754   for (auto r = zone->rttValueObjects().all(); !r.empty(); r.popFront()) {
2755     RttValue* obj = &MaybeForwardedObjectAs<RttValue>(r.front());
2756     UpdateCellPointers(trc, obj);
2757     for (size_t i = 0; i < RttValue::SlotCount; i++) {
2758       Value value = obj->getSlot(i);
2759       if (value.isObject()) {
2760         UpdateCellPointers(trc, &value.toObject());
2761       }
2762     }
2763   }
2764 }
2765 
2766 void GCRuntime::updateCellPointers(Zone* zone, AllocKinds kinds) {
2767   AllocKinds fgKinds = ForegroundUpdateKinds(kinds);
2768   AllocKinds bgKinds = kinds - fgKinds;
2769 
2770   ArenasToUpdate fgArenas(zone, fgKinds);
2771   ArenasToUpdate bgArenas(zone, bgKinds);
2772 
2773   AutoLockHelperThreadState lock;
2774 
2775   AutoRunParallelWork bgTasks(this, UpdateArenaListSegmentPointers,
2776                               gcstats::PhaseKind::COMPACT_UPDATE_CELLS,
2777                               bgArenas, SliceBudget::unlimited(), lock);
2778 
2779   AutoUnlockHelperThreadState unlock(lock);
2780 
2781   for (; !fgArenas.done(); fgArenas.next()) {
2782     UpdateArenaListSegmentPointers(this, fgArenas.get());
2783   }
2784 }
2785 
2786 // After cells have been relocated any pointers to a cell's old locations must
2787 // be updated to point to the new location.  This happens by iterating through
2788 // all cells in the heap and tracing their children (non-recursively) to update
2789 // them.
2790 //
2791 // This is complicated by the fact that updating a GC thing sometimes depends on
2792 // making use of other GC things.  After a moving GC these things may not be in
2793 // a valid state since they may contain pointers which have not been updated
2794 // yet.
2795 //
2796 // The main dependencies are:
2797 //
2798 //   - Updating a JSObject makes use of its shape
2799 //   - Updating a typed object makes use of its type descriptor object
2800 //
2801 // This means we require at least three phases for update:
2802 //
2803 //  1) shapes
2804 //  2) typed object type descriptor objects
2805 //  3) all other objects
2806 //
2807 // Also, there can be data races calling IsForwarded() on the new location of a
2808 // cell whose first word is being updated in parallel on another thread. The
2809 // easiest way to avoid this is not to store a GC pointer in the first word of a
2810 // cell. Otherwise this can be avoided by updating different kinds of cell in
2811 // different phases.
2812 //
2813 // Since we want to minimize the number of phases, arrange kinds into three
2814 // arbitrary phases.
2815 
2816 static constexpr AllocKinds UpdatePhaseOne{AllocKind::SCRIPT,
2817                                            AllocKind::BASE_SHAPE,
2818                                            AllocKind::SHAPE,
2819                                            AllocKind::STRING,
2820                                            AllocKind::JITCODE,
2821                                            AllocKind::REGEXP_SHARED,
2822                                            AllocKind::SCOPE,
2823                                            AllocKind::GETTER_SETTER,
2824                                            AllocKind::COMPACT_PROP_MAP,
2825                                            AllocKind::NORMAL_PROP_MAP,
2826                                            AllocKind::DICT_PROP_MAP};
2827 
2828 // UpdatePhaseTwo is typed object descriptor objects.
2829 
2830 static constexpr AllocKinds UpdatePhaseThree{AllocKind::FUNCTION,
2831                                              AllocKind::FUNCTION_EXTENDED,
2832                                              AllocKind::OBJECT0,
2833                                              AllocKind::OBJECT0_BACKGROUND,
2834                                              AllocKind::OBJECT2,
2835                                              AllocKind::OBJECT2_BACKGROUND,
2836                                              AllocKind::ARRAYBUFFER4,
2837                                              AllocKind::OBJECT4,
2838                                              AllocKind::OBJECT4_BACKGROUND,
2839                                              AllocKind::ARRAYBUFFER8,
2840                                              AllocKind::OBJECT8,
2841                                              AllocKind::OBJECT8_BACKGROUND,
2842                                              AllocKind::ARRAYBUFFER12,
2843                                              AllocKind::OBJECT12,
2844                                              AllocKind::OBJECT12_BACKGROUND,
2845                                              AllocKind::ARRAYBUFFER16,
2846                                              AllocKind::OBJECT16,
2847                                              AllocKind::OBJECT16_BACKGROUND};
2848 
2849 void GCRuntime::updateAllCellPointers(MovingTracer* trc, Zone* zone) {
2850   updateCellPointers(zone, UpdatePhaseOne);
2851 
2852   // UpdatePhaseTwo: Update RttValues before all other objects as typed
2853   // objects access these objects when we trace them.
2854   updateRttValueObjects(trc, zone);
2855 
2856   updateCellPointers(zone, UpdatePhaseThree);
2857 }
2858 
2859 /*
2860  * Update pointers to relocated cells in a single zone by doing a traversal of
2861  * that zone's arenas and calling per-zone sweep hooks.
2862  *
2863  * The latter is necessary to update weak references which are not marked as
2864  * part of the traversal.
2865  */
2866 void GCRuntime::updateZonePointersToRelocatedCells(Zone* zone) {
2867   MOZ_ASSERT(!rt->isBeingDestroyed());
2868   MOZ_ASSERT(zone->isGCCompacting());
2869 
2870   AutoTouchingGrayThings tgt;
2871 
2872   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT_UPDATE);
2873   MovingTracer trc(rt);
2874 
2875   zone->fixupAfterMovingGC();
2876   zone->fixupScriptMapsAfterMovingGC(&trc);
2877 
2878   // Fixup compartment global pointers as these get accessed during marking.
2879   for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
2880     comp->fixupAfterMovingGC(&trc);
2881   }
2882 
2883   zone->externalStringCache().purge();
2884   zone->functionToStringCache().purge();
2885   zone->shapeZone().purgeShapeCaches(rt->defaultFreeOp());
2886   rt->caches().stringToAtomCache.purge();
2887 
2888   // Iterate through all cells that can contain relocatable pointers to update
2889   // them. Since updating each cell is independent we try to parallelize this
2890   // as much as possible.
2891   updateAllCellPointers(&trc, zone);
2892 
2893   // Mark roots to update them.
2894   {
2895     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
2896 
2897     WeakMapBase::traceZone(zone, &trc);
2898   }
2899 
2900   // Sweep everything to fix up weak pointers.
2901   sweepZoneAfterCompacting(&trc, zone);
2902 
2903   // Call callbacks to get the rest of the system to fixup other untraced
2904   // pointers.
2905   for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
2906     callWeakPointerCompartmentCallbacks(comp);
2907   }
2908 }
2909 
2910 /*
2911  * Update runtime-wide pointers to relocated cells.
2912  */
2913 void GCRuntime::updateRuntimePointersToRelocatedCells(AutoGCSession& session) {
2914   MOZ_ASSERT(!rt->isBeingDestroyed());
2915 
2916   gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::COMPACT_UPDATE);
2917   MovingTracer trc(rt);
2918 
2919   Zone::fixupAllCrossCompartmentWrappersAfterMovingGC(&trc);
2920 
2921   rt->geckoProfiler().fixupStringsMapAfterMovingGC();
2922 
2923   // Mark roots to update them.
2924 
2925   traceRuntimeForMajorGC(&trc, session);
2926 
2927   {
2928     gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_ROOTS);
2929     DebugAPI::traceAllForMovingGC(&trc);
2930     DebugAPI::traceCrossCompartmentEdges(&trc);
2931 
2932     // Mark all gray roots. We call the trace callback to get the current set.
2933     traceEmbeddingGrayRoots(&trc);
2934     Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
2935         &trc, Compartment::GrayEdges);
2936   }
2937 
2938   // Sweep everything to fix up weak pointers.
2939   DebugAPI::sweepAll(rt->defaultFreeOp());
2940   jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
2941   for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
2942     cache->sweep(nullptr);
2943   }
2944 
2945   // Type inference may put more blocks here to free.
2946   {
2947     AutoLockHelperThreadState lock;
2948     lifoBlocksToFree.ref().freeAll();
2949   }
2950 
2951   // Call callbacks to get the rest of the system to fixup other untraced
2952   // pointers.
2953   callWeakPointerZonesCallbacks();
2954 }
2955 
2956 void GCRuntime::clearRelocatedArenas(Arena* arenaList, JS::GCReason reason) {
2957   AutoLockGC lock(this);
2958   clearRelocatedArenasWithoutUnlocking(arenaList, reason, lock);
2959 }
2960 
2961 void GCRuntime::clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
2962                                                      JS::GCReason reason,
2963                                                      const AutoLockGC& lock) {
2964   // Clear the relocated arenas, now containing only forwarding pointers
2965   while (arenaList) {
2966     Arena* arena = arenaList;
2967     arenaList = arenaList->next;
2968 
2969     // Clear the mark bits
2970     arena->unmarkAll();
2971 
2972     // Mark arena as empty
2973     arena->setAsFullyUnused();
2974 
2975 #ifdef DEBUG
2976     // The cell contents have been partially marked no-access in RelocateCell,
2977     // so we need to mark the region as undefined again so that we can poison it.
2978     SetMemCheckKind(reinterpret_cast<void*>(arena->thingsStart()),
2979                     arena->getThingsSpan(), MemCheckKind::MakeUndefined);
2980 #endif
2981 
2982     AlwaysPoison(reinterpret_cast<void*>(arena->thingsStart()),
2983                  JS_MOVED_TENURED_PATTERN, arena->getThingsSpan(),
2984                  MemCheckKind::MakeNoAccess);
2985 
2986     // Don't count arenas as being freed by the GC if we purposely moved
2987     // everything to new arenas, as that will already have allocated a similar
2988     // number of arenas. This only happens for collections triggered by GC zeal.
2989     bool allArenasRelocated = ShouldRelocateAllArenas(reason);
2990     arena->zone->gcHeapSize.removeBytes(ArenaSize, !allArenasRelocated);
2991 
2992     // Release the arena but don't return it to the chunk yet.
2993     arena->release(lock);
2994   }
2995 }
2996 
2997 #ifdef DEBUG
2998 
2999 // In debug mode we don't always release relocated arenas straight away.
3000 // Sometimes we protect them instead and hold onto them until the next GC sweep
3001 // phase, to catch any pointers to them that didn't get forwarded.
3002 
3003 static inline bool CanProtectArenas() {
3004   // On some systems the page size is larger than the size of an arena so we
3005   // can't change the mapping permissions per arena.
3006   return SystemPageSize() <= ArenaSize;
3007 }
3008 
3009 static inline bool ShouldProtectRelocatedArenas(JS::GCReason reason) {
3010   // For zeal mode collections we don't release the relocated arenas
3011   // immediately. Instead we protect them and keep them around until the next
3012   // collection so we can catch any stray accesses to them.
3013   return reason == JS::GCReason::DEBUG_GC && CanProtectArenas();
3014 }
3015 
3016 void GCRuntime::protectOrReleaseRelocatedArenas(Arena* arenaList,
3017                                                 JS::GCReason reason) {
3018   if (ShouldProtectRelocatedArenas(reason)) {
3019     protectAndHoldArenas(arenaList);
3020     return;
3021   }
3022 
3023   releaseRelocatedArenas(arenaList);
3024 }
3025 
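// Make the relocated arenas inaccessible by protecting their pages, and link
// them onto relocatedArenasToRelease so they can be unprotected and released
// at the next collection.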
3026 void GCRuntime::protectAndHoldArenas(Arena* arenaList) {
3027   for (Arena* arena = arenaList; arena;) {
3028     MOZ_ASSERT(!arena->allocated());
3029     Arena* next = arena->next;
3030     if (!next) {
3031       // Prepend to hold list before we protect the memory.
3032       AutoLockGC lock(this);
3033       arena->next = relocatedArenasToRelease;
3034       relocatedArenasToRelease = arenaList;
3035     }
3036     ProtectPages(arena, ArenaSize);
3037     arena = next;
3038   }
3039 }
3040 
3041 void GCRuntime::unprotectHeldRelocatedArenas(const AutoLockGC& lock) {
3042   for (Arena* arena = relocatedArenasToRelease; arena; arena = arena->next) {
3043     UnprotectPages(arena, ArenaSize);
3044     MOZ_ASSERT(!arena->allocated());
3045   }
3046 }
3047 
3048 void GCRuntime::releaseHeldRelocatedArenas() {
3049   AutoLockGC lock(this);
3050   unprotectHeldRelocatedArenas(lock);
3051   Arena* arenas = relocatedArenasToRelease;
3052   relocatedArenasToRelease = nullptr;
3053   releaseRelocatedArenasWithoutUnlocking(arenas, lock);
3054 }
3055 
3056 void GCRuntime::releaseHeldRelocatedArenasWithoutUnlocking(
3057     const AutoLockGC& lock) {
3058   unprotectHeldRelocatedArenas(lock);
3059   releaseRelocatedArenasWithoutUnlocking(relocatedArenasToRelease, lock);
3060   relocatedArenasToRelease = nullptr;
3061 }
3062 
3063 #endif
3064 
3065 void GCRuntime::releaseRelocatedArenas(Arena* arenaList) {
3066   AutoLockGC lock(this);
3067   releaseRelocatedArenasWithoutUnlocking(arenaList, lock);
3068 }
3069 
3070 void GCRuntime::releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
3071                                                        const AutoLockGC& lock) {
3072   // Release relocated arenas previously cleared with clearRelocatedArenas().
3073   while (arenaList) {
3074     Arena* arena = arenaList;
3075     arenaList = arenaList->next;
3076 
3077     // We already updated the memory accounting so just call
3078     // Chunk::releaseArena.
3079     arena->chunk()->releaseArena(this, arena, lock);
3080   }
3081 }
3082 
3083 FreeLists::FreeLists() {
3084   for (auto i : AllAllocKinds()) {
3085     freeLists_[i] = &emptySentinel;
3086   }
3087 }
3088 
3089 ArenaLists::ArenaLists(Zone* zone)
3090     : zone_(zone),
3091       freeLists_(zone),
3092       arenaLists_(zone),
3093       newArenasInMarkPhase_(zone),
3094       arenasToSweep_(),
3095       incrementalSweptArenaKind(zone, AllocKind::LIMIT),
3096       incrementalSweptArenas(zone),
3097       gcCompactPropMapArenasToUpdate(zone, nullptr),
3098       gcNormalPropMapArenasToUpdate(zone, nullptr),
3099       savedEmptyArenas(zone, nullptr) {
3100   for (auto i : AllAllocKinds()) {
3101     concurrentUse(i) = ConcurrentUse::None;
3102     arenasToSweep(i) = nullptr;
3103   }
3104 }
3105 
3106 void ReleaseArenas(JSRuntime* rt, Arena* arena, const AutoLockGC& lock) {
3107   Arena* next;
3108   for (; arena; arena = next) {
3109     next = arena->next;
3110     rt->gc.releaseArena(arena, lock);
3111   }
3112 }
3113 
3114 void ReleaseArenaList(JSRuntime* rt, ArenaList& arenaList,
3115                       const AutoLockGC& lock) {
3116   ReleaseArenas(rt, arenaList.head(), lock);
3117   arenaList.clear();
3118 }
3119 
3120 ArenaLists::~ArenaLists() {
3121   AutoLockGC lock(runtime());
3122 
3123   for (auto i : AllAllocKinds()) {
3124     /*
3125      * We can only call this during the shutdown after the last GC when
3126      * the background finalization is disabled.
3127      */
3128     MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
3129     ReleaseArenaList(runtime(), arenaList(i), lock);
3130   }
3131   ReleaseArenaList(runtime(), incrementalSweptArenas.ref(), lock);
3132 
3133   ReleaseArenas(runtime(), savedEmptyArenas, lock);
3134 }
3135 
3136 void ArenaLists::queueForForegroundSweep(JSFreeOp* fop,
3137                                          const FinalizePhase& phase) {
3138   gcstats::AutoPhase ap(fop->runtime()->gc.stats(), phase.statsPhase);
3139   for (auto kind : phase.kinds) {
3140     queueForForegroundSweep(kind);
3141   }
3142 }
3143 
3144 void ArenaLists::queueForForegroundSweep(AllocKind thingKind) {
3145   MOZ_ASSERT(!IsBackgroundFinalized(thingKind));
3146   MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);
3147   MOZ_ASSERT(!arenasToSweep(thingKind));
3148 
3149   arenasToSweep(thingKind) = arenaList(thingKind).head();
3150   arenaList(thingKind).clear();
3151 }
3152 
3153 void ArenaLists::queueForBackgroundSweep(JSFreeOp* fop,
3154                                          const FinalizePhase& phase) {
3155   gcstats::AutoPhase ap(fop->runtime()->gc.stats(), phase.statsPhase);
3156   for (auto kind : phase.kinds) {
3157     queueForBackgroundSweep(kind);
3158   }
3159 }
3160 
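// Move all arenas of |thingKind| onto the to-sweep list and mark the kind as
// being background finalized. If there is nothing to sweep, just merge in any
// arenas that were allocated during the mark phase.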
3161 inline void ArenaLists::queueForBackgroundSweep(AllocKind thingKind) {
3162   MOZ_ASSERT(IsBackgroundFinalized(thingKind));
3163   MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);
3164 
3165   ArenaList* al = &arenaList(thingKind);
3166   arenasToSweep(thingKind) = al->head();
3167   arenaList(thingKind).clear();
3168 
3169   if (arenasToSweep(thingKind)) {
3170     concurrentUse(thingKind) = ConcurrentUse::BackgroundFinalize;
3171   } else {
3172     arenaList(thingKind) = std::move(newArenasInMarkPhase(thingKind));
3173   }
3174 }
3175 
3176 /*static*/
3177 void ArenaLists::backgroundFinalize(JSFreeOp* fop, Arena* listHead,
3178                                     Arena** empty) {
3179   MOZ_ASSERT(listHead);
3180   MOZ_ASSERT(empty);
3181 
3182   AllocKind thingKind = listHead->getAllocKind();
3183   Zone* zone = listHead->zone;
3184 
3185   size_t thingsPerArena = Arena::thingsPerArena(thingKind);
3186   SortedArenaList finalizedSorted(thingsPerArena);
3187 
3188   auto unlimited = SliceBudget::unlimited();
3189   FinalizeArenas(fop, &listHead, finalizedSorted, thingKind, unlimited);
3190   MOZ_ASSERT(!listHead);
3191 
3192   finalizedSorted.extractEmpty(empty);
3193 
3194   // When arenas are queued for background finalization, all arenas are moved to
3195   // arenasToSweep, leaving the arena list empty. However, new arenas may be
3196   // allocated before background finalization finishes; now that finalization is
3197   // complete, we want to merge these lists back together.
3198   ArenaLists* lists = &zone->arenas;
3199   ArenaList& al = lists->arenaList(thingKind);
3200 
3201   // Flatten |finalizedSorted| into a regular ArenaList.
3202   ArenaList finalized = finalizedSorted.toArenaList();
3203 
3204   // We must take the GC lock to be able to safely modify the ArenaList;
3205   // however, this does not by itself make the changes visible to all threads,
3206   // as not all threads take the GC lock to read the ArenaLists.
3207   // That safety is provided by the ReleaseAcquire memory ordering of the
3208   // background finalize state, which we explicitly set as the final step.
3209   {
3210     AutoLockGC lock(lists->runtimeFromAnyThread());
3211     MOZ_ASSERT(lists->concurrentUse(thingKind) ==
3212                ConcurrentUse::BackgroundFinalize);
3213 
3214     // Join |al| and |finalized| into a single list.
3215     ArenaList allocatedDuringSweep = std::move(al);
3216     al = std::move(finalized);
3217     al.insertListWithCursorAtEnd(lists->newArenasInMarkPhase(thingKind));
3218     al.insertListWithCursorAtEnd(allocatedDuringSweep);
3219 
3220     lists->newArenasInMarkPhase(thingKind).clear();
3221     lists->arenasToSweep(thingKind) = nullptr;
3222   }
3223 
3224   lists->concurrentUse(thingKind) = ConcurrentUse::None;
3225 }
3226 
3227 Arena* ArenaLists::takeSweptEmptyArenas() {
3228   Arena* arenas = savedEmptyArenas;
3229   savedEmptyArenas = nullptr;
3230   return arenas;
3231 }
3232 
3233 void ArenaLists::queueForegroundThingsForSweep() {
3234   gcCompactPropMapArenasToUpdate = arenasToSweep(AllocKind::COMPACT_PROP_MAP);
3235   gcNormalPropMapArenasToUpdate = arenasToSweep(AllocKind::NORMAL_PROP_MAP);
3236 }
3237 
3238 void ArenaLists::checkGCStateNotInUse() {
3239   // Called before and after collection to check the state is as expected.
3240 #ifdef DEBUG
3241   checkSweepStateNotInUse();
3242   for (auto i : AllAllocKinds()) {
3243     MOZ_ASSERT(newArenasInMarkPhase(i).isEmpty());
3244   }
3245 #endif
3246 }
3247 
3248 void ArenaLists::checkSweepStateNotInUse() {
3249 #ifdef DEBUG
3250   checkNoArenasToUpdate();
3251   MOZ_ASSERT(incrementalSweptArenaKind == AllocKind::LIMIT);
3252   MOZ_ASSERT(incrementalSweptArenas.ref().isEmpty());
3253   MOZ_ASSERT(!savedEmptyArenas);
3254   for (auto i : AllAllocKinds()) {
3255     MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
3256     MOZ_ASSERT(!arenasToSweep(i));
3257   }
3258 #endif
3259 }
3260 
3261 void ArenaLists::checkNoArenasToUpdate() {
3262   MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
3263   MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
3264 }
3265 
3266 void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
3267 #ifdef DEBUG
3268   switch (kind) {
3269     case AllocKind::COMPACT_PROP_MAP:
3270       MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
3271       break;
3272     case AllocKind::NORMAL_PROP_MAP:
3273       MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
3274       break;
3275     default:
3276       break;
3277   }
3278 #endif
3279 }
3280 
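// A time budget is stored as an absolute deadline. To keep the budget check
// cheap, the clock is only consulted once every |stepsPerTimeCheck| steps.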
3281 SliceBudget::SliceBudget(TimeBudget time, int64_t stepsPerTimeCheckArg)
3282     : budget(TimeBudget(time)),
3283       stepsPerTimeCheck(stepsPerTimeCheckArg),
3284       counter(stepsPerTimeCheckArg) {
3285   budget.as<TimeBudget>().deadline =
3286       ReallyNow() + TimeDuration::FromMilliseconds(timeBudget());
3287 }
3288 
3289 SliceBudget::SliceBudget(WorkBudget work)
3290     : budget(work), counter(work.budget) {}
3291 
3292 int SliceBudget::describe(char* buffer, size_t maxlen) const {
3293   if (isUnlimited()) {
3294     return snprintf(buffer, maxlen, "unlimited");
3295   } else if (isWorkBudget()) {
3296     return snprintf(buffer, maxlen, "work(%" PRId64 ")", workBudget());
3297   } else {
3298     return snprintf(buffer, maxlen, "%" PRId64 "ms", timeBudget());
3299   }
3300 }
3301 
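// Called when the step counter reaches zero. Work budgets are exhausted at
// this point; time budgets re-check the deadline and, if it hasn't passed,
// reset the counter for another batch of steps.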
3302 bool SliceBudget::checkOverBudget() {
3303   MOZ_ASSERT(counter <= 0);
3304   MOZ_ASSERT(!isUnlimited());
3305 
3306   if (isWorkBudget()) {
3307     return true;
3308   }
3309 
3310   if (ReallyNow() >= budget.as<TimeBudget>().deadline) {
3311     return true;
3312   }
3313 
3314   counter = stepsPerTimeCheck;
3315   return false;
3316 }
3317 
3318 void GCRuntime::requestMajorGC(JS::GCReason reason) {
3319   MOZ_ASSERT_IF(reason != JS::GCReason::BG_TASK_FINISHED,
3320                 !CurrentThreadIsPerformingGC());
3321 
3322   if (majorGCRequested()) {
3323     return;
3324   }
3325 
3326   majorGCTriggerReason = reason;
3327   rt->mainContextFromAnyThread()->requestInterrupt(InterruptReason::GC);
3328 }
3329 
3330 void Nursery::requestMinorGC(JS::GCReason reason) const {
3331   MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
3332 
3333   if (minorGCRequested()) {
3334     return;
3335   }
3336 
3337   minorGCTriggerReason_ = reason;
3338   runtime()->mainContextFromOwnThread()->requestInterrupt(InterruptReason::GC);
3339 }
3340 
3341 bool GCRuntime::triggerGC(JS::GCReason reason) {
3342   /*
3343    * Don't trigger GCs if this is being called off the main thread from
3344    * onTooMuchMalloc().
3345    */
3346   if (!CurrentThreadCanAccessRuntime(rt)) {
3347     return false;
3348   }
3349 
3350   /* GC is already running. */
3351   if (JS::RuntimeHeapIsCollecting()) {
3352     return false;
3353   }
3354 
3355   JS::PrepareForFullGC(rt->mainContextFromOwnThread());
3356   requestMajorGC(reason);
3357   return true;
3358 }
3359 
3360 void GCRuntime::maybeTriggerGCAfterAlloc(Zone* zone) {
3361   if (!CurrentThreadCanAccessRuntime(rt)) {
3362     // Zones in use by a helper thread can't be collected.
3363     MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone());
3364     return;
3365   }
3366 
3367   MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
3368 
3369   TriggerResult trigger =
3370       checkHeapThreshold(zone, zone->gcHeapSize, zone->gcHeapThreshold);
3371 
3372   if (trigger.shouldTrigger) {
3373     // Start or continue an in progress incremental GC. We do this to try to
3374     // avoid performing non-incremental GCs on zones which allocate a lot of
3375     // data, even when incremental slices can't be triggered via scheduling in
3376     // the event loop.
3377     triggerZoneGC(zone, JS::GCReason::ALLOC_TRIGGER, trigger.usedBytes,
3378                   trigger.thresholdBytes);
3379   }
3380 }
3381 
3382 void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
3383                                       const HeapSize& heap,
3384                                       const HeapThreshold& threshold,
3385                                       JS::GCReason reason) {
3386   rt->gc.maybeTriggerGCAfterMalloc(Zone::from(zoneAlloc), heap, threshold,
3387                                    reason);
3388 }
3389 
3390 void GCRuntime::maybeTriggerGCAfterMalloc(Zone* zone) {
3391   if (maybeTriggerGCAfterMalloc(zone, zone->mallocHeapSize,
3392                                 zone->mallocHeapThreshold,
3393                                 JS::GCReason::TOO_MUCH_MALLOC)) {
3394     return;
3395   }
3396 
3397   maybeTriggerGCAfterMalloc(zone, zone->jitHeapSize, zone->jitHeapThreshold,
3398                             JS::GCReason::TOO_MUCH_JIT_CODE);
3399 }
3400 
3401 bool GCRuntime::maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap,
3402                                           const HeapThreshold& threshold,
3403                                           JS::GCReason reason) {
3404   if (!CurrentThreadCanAccessRuntime(rt)) {
3405     // Zones in use by a helper thread can't be collected. Also ignore malloc
3406     // during sweeping, for example when we resize hash tables.
3407     MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone() ||
3408                JS::RuntimeHeapIsBusy());
3409     return false;
3410   }
3411 
3412   if (rt->heapState() != JS::HeapState::Idle) {
3413     return false;
3414   }
3415 
3416   TriggerResult trigger = checkHeapThreshold(zone, heap, threshold);
3417   if (!trigger.shouldTrigger) {
3418     return false;
3419   }
3420 
3421   // Trigger a zone GC. budgetIncrementalGC() will work out whether to do an
3422   // incremental or non-incremental collection.
3423   triggerZoneGC(zone, reason, trigger.usedBytes, trigger.thresholdBytes);
3424   return true;
3425 }
3426 
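// Compare the heap size against the slice threshold if a collection is
// already under way for this zone, otherwise against the start threshold.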
3427 TriggerResult GCRuntime::checkHeapThreshold(
3428     Zone* zone, const HeapSize& heapSize, const HeapThreshold& heapThreshold) {
3429   MOZ_ASSERT_IF(heapThreshold.hasSliceThreshold(), zone->wasGCStarted());
3430 
3431   size_t usedBytes = heapSize.bytes();
3432   size_t thresholdBytes = heapThreshold.hasSliceThreshold()
3433                               ? heapThreshold.sliceBytes()
3434                               : heapThreshold.startBytes();
3435 
3436   // The incremental limit will be checked if we trigger a GC slice.
3437   MOZ_ASSERT(thresholdBytes <= heapThreshold.incrementalLimitBytes());
3438 
3439   return TriggerResult{usedBytes >= thresholdBytes, usedBytes, thresholdBytes};
3440 }
3441 
3442 bool GCRuntime::triggerZoneGC(Zone* zone, JS::GCReason reason, size_t used,
3443                               size_t threshold) {
3444   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3445 
3446   /* GC is already running. */
3447   if (JS::RuntimeHeapIsBusy()) {
3448     return false;
3449   }
3450 
3451 #ifdef JS_GC_ZEAL
3452   if (hasZealMode(ZealMode::Alloc)) {
3453     MOZ_RELEASE_ASSERT(triggerGC(reason));
3454     return true;
3455   }
3456 #endif
3457 
3458   if (zone->isAtomsZone()) {
3459     /* We can't do a zone GC of just the atoms zone. */
3460     if (rt->hasHelperThreadZones()) {
3461       /* We can't collect atoms while off-thread parsing is allocating. */
3462       fullGCForAtomsRequested_ = true;
3463       return false;
3464     }
3465     stats().recordTrigger(used, threshold);
3466     MOZ_RELEASE_ASSERT(triggerGC(reason));
3467     return true;
3468   }
3469 
3470   stats().recordTrigger(used, threshold);
3471   zone->scheduleGC();
3472   requestMajorGC(reason);
3473   return true;
3474 }
3475 
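// Run a collection if one has been requested, otherwise schedule any zones
// that have passed their eager allocation triggers and start a GC for them.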
3476 void GCRuntime::maybeGC() {
3477   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3478 
3479 #ifdef JS_GC_ZEAL
3480   if (hasZealMode(ZealMode::Alloc) || hasZealMode(ZealMode::RootsChange)) {
3481     JS::PrepareForFullGC(rt->mainContextFromOwnThread());
3482     gc(JS::GCOptions::Normal, JS::GCReason::DEBUG_GC);
3483     return;
3484   }
3485 #endif
3486 
3487   if (gcIfRequested()) {
3488     return;
3489   }
3490 
3491   if (isIncrementalGCInProgress()) {
3492     return;
3493   }
3494 
3495   bool scheduledZones = false;
3496   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
3497     if (checkEagerAllocTrigger(zone->gcHeapSize, zone->gcHeapThreshold) ||
3498         checkEagerAllocTrigger(zone->mallocHeapSize,
3499                                zone->mallocHeapThreshold)) {
3500       zone->scheduleGC();
3501       scheduledZones = true;
3502     }
3503   }
3504 
3505   if (scheduledZones) {
3506     startGC(JS::GCOptions::Normal, JS::GCReason::EAGER_ALLOC_TRIGGER);
3507   }
3508 }
3509 
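// Report whether a zone's heap has grown past its eager allocation trigger.
// Heaps of 1 MiB or less never trigger an eager collection.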
3510 bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size,
3511                                        const HeapThreshold& threshold) {
3512   double thresholdBytes =
3513       threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
3514   double usedBytes = size.bytes();
3515   if (usedBytes <= 1024 * 1024 || usedBytes < thresholdBytes) {
3516     return false;
3517   }
3518 
3519   stats().recordTrigger(usedBytes, thresholdBytes);
3520   return true;
3521 }
3522 
3523 void GCRuntime::triggerFullGCForAtoms(JSContext* cx) {
3524   MOZ_ASSERT(fullGCForAtomsRequested_);
3525   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3526   MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
3527   MOZ_ASSERT(cx->canCollectAtoms());
3528   fullGCForAtomsRequested_ = false;
3529   MOZ_RELEASE_ASSERT(triggerGC(JS::GCReason::DELAYED_ATOMS_GC));
3530 }
3531 
3532 void GCRuntime::startDecommit() {
3533   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DECOMMIT);
3534 
3535 #ifdef DEBUG
3536   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3537   MOZ_ASSERT(decommitTask.isIdle());
3538 
3539   {
3540     AutoLockGC lock(this);
3541     MOZ_ASSERT(fullChunks(lock).verify());
3542     MOZ_ASSERT(availableChunks(lock).verify());
3543     MOZ_ASSERT(emptyChunks(lock).verify());
3544 
3545     // Verify that all entries in the empty chunks pool are already decommitted.
3546     for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done();
3547          chunk.next()) {
3548       MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
3549     }
3550   }
3551 #endif
3552 
3553   // If we are allocating heavily enough to trigger "high frequency" GC, then
3554   // skip decommit so that we do not compete with the mutator. However if we're
3555   // doing a shrinking GC we always decommit to release as much memory as
3556   // possible.
3557   if (schedulingState.inHighFrequencyGCMode() && !cleanUpEverything) {
3558     return;
3559   }
3560 
3561   {
3562     AutoLockGC lock(this);
3563     if (availableChunks(lock).empty() && !tooManyEmptyChunks(lock)) {
3564       return;  // Nothing to do.
3565     }
3566   }
3567 
3568 #ifdef DEBUG
3569   {
3570     AutoLockHelperThreadState lock;
3571     MOZ_ASSERT(!requestSliceAfterBackgroundTask);
3572   }
3573 #endif
3574 
3575   if (sweepOnBackgroundThread) {
3576     decommitTask.start();
3577     return;
3578   }
3579 
3580   decommitTask.runFromMainThread();
3581 }
3582 
3583 void js::gc::BackgroundDecommitTask::run(AutoLockHelperThreadState& lock) {
3584   {
3585     AutoUnlockHelperThreadState unlock(lock);
3586 
3587     ChunkPool emptyChunksToFree;
3588     {
3589       AutoLockGC gcLock(gc);
3590 
3591       // To help minimize the total number of chunks needed over time, sort the
3592       // available chunks list so that we allocate into more-used chunks first.
3593       gc->availableChunks(gcLock).sort();
3594 
3595       if (DecommitEnabled()) {
3596         gc->decommitFreeArenas(cancel_, gcLock);
3597       }
3598 
3599       emptyChunksToFree = gc->expireEmptyChunkPool(gcLock);
3600     }
3601 
3602     FreeChunkPool(emptyChunksToFree);
3603   }
3604 
3605   gc->maybeRequestGCAfterBackgroundTask(lock);
3606 }
3607 
3608 // Called from a background thread to decommit free arenas. Releases the GC
3609 // lock.
3610 void GCRuntime::decommitFreeArenas(const bool& cancel, AutoLockGC& lock) {
3611   MOZ_ASSERT(DecommitEnabled());
3612 
3613   // Since we release the GC lock while doing the decommit syscall below,
3614   // it is dangerous to iterate the available list directly, as the active
3615   // thread could modify it concurrently. Instead, we build and pass an
3616   // explicit Vector containing the Chunks we want to visit.
3617   Vector<TenuredChunk*, 0, SystemAllocPolicy> chunksToDecommit;
3618   for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
3619        chunk.next()) {
3620     if (chunk->info.numArenasFreeCommitted != 0 &&
3621         !chunksToDecommit.append(chunk)) {
3622       onOutOfMallocMemory(lock);
3623       return;
3624     }
3625   }
3626 
3627   for (TenuredChunk* chunk : chunksToDecommit) {
3628     chunk->rebuildFreeArenasList();
3629     chunk->decommitFreeArenas(this, cancel, lock);
3630   }
3631 }
3632 
3633 // Do all possible decommit immediately from the current thread without
3634 // releasing the GC lock or allocating any memory.
3635 void GCRuntime::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
3636   MOZ_ASSERT(DecommitEnabled());
3637   for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done();
3638        chunk.next()) {
3639     chunk->decommitFreeArenasWithoutUnlocking(lock);
3640   }
3641   MOZ_ASSERT(availableChunks(lock).verify());
3642 }
3643 
3644 void GCRuntime::maybeRequestGCAfterBackgroundTask(
3645     const AutoLockHelperThreadState& lock) {
3646   if (requestSliceAfterBackgroundTask) {
3647     // Trigger a slice so the main thread can continue the collection
3648     // immediately.
3649     requestSliceAfterBackgroundTask = false;
3650     requestMajorGC(JS::GCReason::BG_TASK_FINISHED);
3651   }
3652 }
3653 
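// Clear a pending BG_TASK_FINISHED trigger, if any. Triggers with any other
// reason are left in place.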
3654 void GCRuntime::cancelRequestedGCAfterBackgroundTask() {
3655   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3656 
3657 #ifdef DEBUG
3658   {
3659     AutoLockHelperThreadState lock;
3660     MOZ_ASSERT(!requestSliceAfterBackgroundTask);
3661   }
3662 #endif
3663 
3664   majorGCTriggerReason.compareExchange(JS::GCReason::BG_TASK_FINISHED,
3665                                        JS::GCReason::NO_REASON);
3666 }
3667 
3668 void GCRuntime::sweepBackgroundThings(ZoneList& zones) {
3669   if (zones.isEmpty()) {
3670     return;
3671   }
3672 
3673   JSFreeOp fop(nullptr);
3674 
3675   // Sweep zones in order. The atoms zone must be finalized last as other
3676   // zones may have direct pointers into it.
3677   while (!zones.isEmpty()) {
3678     Zone* zone = zones.removeFront();
3679     MOZ_ASSERT(zone->isGCFinished());
3680 
3681     Arena* emptyArenas = zone->arenas.takeSweptEmptyArenas();
3682 
3683     AutoSetThreadIsSweeping threadIsSweeping(zone);
3684 
3685     // We must finalize thing kinds in the order specified by
3686     // BackgroundFinalizePhases.
3687     for (auto phase : BackgroundFinalizePhases) {
3688       for (auto kind : phase.kinds) {
3689         Arena* arenas = zone->arenas.arenasToSweep(kind);
3690         MOZ_RELEASE_ASSERT(uintptr_t(arenas) != uintptr_t(-1));
3691         if (arenas) {
3692           ArenaLists::backgroundFinalize(&fop, arenas, &emptyArenas);
3693         }
3694       }
3695     }
3696 
3697     // Release any arenas that are now empty.
3698     //
3699     // Empty arenas are only released after everything has been finalized so
3700     // that it's still possible to get a thing's zone after the thing has been
3701     // finalized. The HeapPtr destructor depends on this, and this allows
3702     // HeapPtrs between things of different alloc kind regardless of
3703     // finalization order.
3704     //
3705     // Periodically drop and reacquire the GC lock to avoid blocking the main
3706     // thread from allocating chunks.
3707     static const size_t LockReleasePeriod = 32;
3708 
3709     while (emptyArenas) {
3710       AutoLockGC lock(this);
3711       for (size_t i = 0; i < LockReleasePeriod && emptyArenas; i++) {
3712         Arena* arena = emptyArenas;
3713         emptyArenas = emptyArenas->next;
3714         releaseArena(arena, lock);
3715       }
3716     }
3717   }
3718 }
3719 
3720 void GCRuntime::assertBackgroundSweepingFinished() {
3721 #ifdef DEBUG
3722   {
3723     AutoLockHelperThreadState lock;
3724     MOZ_ASSERT(backgroundSweepZones.ref().isEmpty());
3725   }
3726 
3727   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
3728     for (auto i : AllAllocKinds()) {
3729       MOZ_ASSERT(!zone->arenas.arenasToSweep(i));
3730       MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(i));
3731     }
3732   }
3733 #endif
3734 }
3735 
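// Queue zones for background sweeping and start the sweep task, or run the
// sweeping synchronously on the main thread if background sweeping is
// disabled.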
3736 void GCRuntime::queueZonesAndStartBackgroundSweep(ZoneList& zones) {
3737   {
3738     AutoLockHelperThreadState lock;
3739     MOZ_ASSERT(!requestSliceAfterBackgroundTask);
3740     backgroundSweepZones.ref().transferFrom(zones);
3741     if (sweepOnBackgroundThread) {
3742       sweepTask.startOrRunIfIdle(lock);
3743     }
3744   }
3745   if (!sweepOnBackgroundThread) {
3746     sweepTask.join();
3747     sweepTask.runFromMainThread();
3748   }
3749 }
3750 
3751 void BackgroundSweepTask::run(AutoLockHelperThreadState& lock) {
3752   AutoTraceLog logSweeping(TraceLoggerForCurrentThread(),
3753                            TraceLogger_GCSweeping);
3754 
3755   gc->sweepFromBackgroundThread(lock);
3756 }
3757 
3758 void GCRuntime::sweepFromBackgroundThread(AutoLockHelperThreadState& lock) {
3759   do {
3760     ZoneList zones;
3761     zones.transferFrom(backgroundSweepZones.ref());
3762 
3763     AutoUnlockHelperThreadState unlock(lock);
3764     sweepBackgroundThings(zones);
3765 
3766     // The main thread may call queueZonesAndStartBackgroundSweep() while this
3767     // is running so we must check there is no more work after releasing the
3768     // lock.
3769   } while (!backgroundSweepZones.ref().isEmpty());
3770 
3771   maybeRequestGCAfterBackgroundTask(lock);
3772 }
3773 
3774 void GCRuntime::waitBackgroundSweepEnd() {
3775   sweepTask.join();
3776   if (state() != State::Sweep) {
3777     assertBackgroundSweepingFinished();
3778   }
3779 }
3780 
3781 void GCRuntime::queueUnusedLifoBlocksForFree(LifoAlloc* lifo) {
3782   MOZ_ASSERT(JS::RuntimeHeapIsBusy());
3783   AutoLockHelperThreadState lock;
3784   lifoBlocksToFree.ref().transferUnusedFrom(lifo);
3785 }
3786 
3787 void GCRuntime::queueAllLifoBlocksForFree(LifoAlloc* lifo) {
3788   MOZ_ASSERT(JS::RuntimeHeapIsBusy());
3789   AutoLockHelperThreadState lock;
3790   lifoBlocksToFree.ref().transferFrom(lifo);
3791 }
3792 
3793 void GCRuntime::queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo) {
3794   lifoBlocksToFreeAfterMinorGC.ref().transferFrom(lifo);
3795 }
3796 
3797 void GCRuntime::queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers) {
3798   AutoLockHelperThreadState lock;
3799 
3800   if (!buffersToFreeAfterMinorGC.ref().empty()) {
3801     // In the rare case that this hasn't processed the buffers from a previous
3802     // minor GC we have to wait here.
3803     MOZ_ASSERT(!freeTask.isIdle(lock));
3804     freeTask.joinWithLockHeld(lock);
3805   }
3806 
3807   MOZ_ASSERT(buffersToFreeAfterMinorGC.ref().empty());
3808   std::swap(buffersToFreeAfterMinorGC.ref(), buffers);
3809 }
3810 
3811 void GCRuntime::startBackgroundFree() {
3812   AutoLockHelperThreadState lock;
3813   freeTask.startOrRunIfIdle(lock);
3814 }
3815 
3816 void BackgroundFreeTask::run(AutoLockHelperThreadState& lock) {
3817   AutoTraceLog logFreeing(TraceLoggerForCurrentThread(), TraceLogger_GCFree);
3818 
3819   gc->freeFromBackgroundThread(lock);
3820 }
3821 
3822 void GCRuntime::freeFromBackgroundThread(AutoLockHelperThreadState& lock) {
3823   do {
3824     LifoAlloc lifoBlocks(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
3825     lifoBlocks.transferFrom(&lifoBlocksToFree.ref());
3826 
3827     Nursery::BufferSet buffers;
3828     std::swap(buffers, buffersToFreeAfterMinorGC.ref());
3829 
3830     AutoUnlockHelperThreadState unlock(lock);
3831 
3832     lifoBlocks.freeAll();
3833 
3834     JSFreeOp* fop = TlsContext.get()->defaultFreeOp();
3835     for (Nursery::BufferSet::Range r = buffers.all(); !r.empty();
3836          r.popFront()) {
3837       // Malloc memory associated with nursery objects is not tracked as these
3838       // are assumed to be short lived.
3839       fop->freeUntracked(r.front());
3840     }
3841   } while (!lifoBlocksToFree.ref().isEmpty() ||
3842            !buffersToFreeAfterMinorGC.ref().empty());
3843 }
3844 
3845 void GCRuntime::waitBackgroundFreeEnd() { freeTask.join(); }
3846 
3847 /* static */
3848 bool UniqueIdGCPolicy::needsSweep(Cell** cellp, uint64_t*) {
3849   Cell* cell = *cellp;
3850   return MapGCThingTyped(cell, cell->getTraceKind(), [](auto t) {
3851     mozilla::DebugOnly<const Cell*> prior = t;
3852     bool result = IsAboutToBeFinalizedUnbarriered(&t);
3853     // Sweep should not have to deal with moved pointers, since moving GC
3854     // handles updating the UID table manually.
3855     MOZ_ASSERT(t == prior);
3856     return result;
3857   });
3858 }
3859 
3860 void JS::Zone::sweepUniqueIds() { uniqueIds().sweep(); }
3861 
3862 void Realm::destroy(JSFreeOp* fop) {
3863   JSRuntime* rt = fop->runtime();
3864   if (auto callback = rt->destroyRealmCallback) {
3865     callback(fop, this);
3866   }
3867   if (principals()) {
3868     JS_DropPrincipals(rt->mainContextFromOwnThread(), principals());
3869   }
3870   // Bug 1560019: Malloc memory associated with a zone but not with a specific
3871   // GC thing is not currently tracked.
3872   fop->deleteUntracked(this);
3873 }
3874 
3875 void Compartment::destroy(JSFreeOp* fop) {
3876   JSRuntime* rt = fop->runtime();
3877   if (auto callback = rt->destroyCompartmentCallback) {
3878     callback(fop, this);
3879   }
3880   // Bug 1560019: Malloc memory associated with a zone but not with a specific
3881   // GC thing is not currently tracked.
3882   fop->deleteUntracked(this);
3883   rt->gc.stats().sweptCompartment();
3884 }
3885 
3886 void Zone::destroy(JSFreeOp* fop) {
3887   MOZ_ASSERT(compartments().empty());
3888   JSRuntime* rt = fop->runtime();
3889   if (auto callback = rt->destroyZoneCallback) {
3890     callback(fop, this);
3891   }
3892   // Bug 1560019: Malloc memory associated with a zone but not with a specific
3893   // GC thing is not currently tracked.
3894   fop->deleteUntracked(this);
3895   fop->runtime()->gc.stats().sweptZone();
3896 }
3897 
3898 /*
3899  * It's simpler if we preserve the invariant that every zone (except the atoms
3900  * zone) has at least one compartment, and every compartment has at least one
3901  * realm. If we know we're deleting the entire zone, then sweepCompartments is
3902  * allowed to delete all compartments. In this case, |keepAtleastOne| is false.
3903  * If any cells remain alive in the zone, set |keepAtleastOne| true to prohibit
3904  * sweepCompartments from deleting every compartment. Instead, it preserves an
3905  * arbitrary compartment in the zone.
3906  */
3907 void Zone::sweepCompartments(JSFreeOp* fop, bool keepAtleastOne,
3908                              bool destroyingRuntime) {
3909   MOZ_ASSERT(!compartments().empty());
3910   MOZ_ASSERT_IF(destroyingRuntime, !keepAtleastOne);
3911 
3912   Compartment** read = compartments().begin();
3913   Compartment** end = compartments().end();
3914   Compartment** write = read;
3915   while (read < end) {
3916     Compartment* comp = *read++;
3917 
3918     /*
3919      * Don't delete the last compartment and realm if keepAtleastOne is
3920      * still true, meaning all the other compartments were deleted.
3921      */
3922     bool keepAtleastOneRealm = read == end && keepAtleastOne;
3923     comp->sweepRealms(fop, keepAtleastOneRealm, destroyingRuntime);
3924 
3925     if (!comp->realms().empty()) {
3926       *write++ = comp;
3927       keepAtleastOne = false;
3928     } else {
3929       comp->destroy(fop);
3930     }
3931   }
3932   compartments().shrinkTo(write - compartments().begin());
3933   MOZ_ASSERT_IF(keepAtleastOne, !compartments().empty());
3934   MOZ_ASSERT_IF(destroyingRuntime, compartments().empty());
3935 }
3936 
3937 void Compartment::sweepRealms(JSFreeOp* fop, bool keepAtleastOne,
3938                               bool destroyingRuntime) {
3939   MOZ_ASSERT(!realms().empty());
3940   MOZ_ASSERT_IF(destroyingRuntime, !keepAtleastOne);
3941 
3942   Realm** read = realms().begin();
3943   Realm** end = realms().end();
3944   Realm** write = read;
3945   while (read < end) {
3946     Realm* realm = *read++;
3947 
3948     /*
3949      * Don't delete the last realm if keepAtleastOne is still true, meaning
3950      * all the other realms were deleted.
3951      */
3952     bool dontDelete = read == end && keepAtleastOne;
3953     if ((realm->marked() || dontDelete) && !destroyingRuntime) {
3954       *write++ = realm;
3955       keepAtleastOne = false;
3956     } else {
3957       realm->destroy(fop);
3958     }
3959   }
3960   realms().shrinkTo(write - realms().begin());
3961   MOZ_ASSERT_IF(keepAtleastOne, !realms().empty());
3962   MOZ_ASSERT_IF(destroyingRuntime, realms().empty());
3963 }
3964 
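// Remove a zone that no longer has any compartments from the runtime's zone
// list and destroy it.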
3965 void GCRuntime::deleteEmptyZone(Zone* zone) {
3966   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3967   MOZ_ASSERT(zone->compartments().empty());
3968   for (auto& i : zones()) {
3969     if (i == zone) {
3970       zones().erase(&i);
3971       zone->destroy(rt->defaultFreeOp());
3972       return;
3973     }
3974   }
3975   MOZ_CRASH("Zone not found");
3976 }
3977 
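// Sweep the runtime's zone list, destroying collected zones that are now
// completely empty and compacting the list in place.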
3978 void GCRuntime::sweepZones(JSFreeOp* fop, bool destroyingRuntime) {
3979   MOZ_ASSERT_IF(destroyingRuntime, numActiveZoneIters == 0);
3980 
3981   if (numActiveZoneIters) {
3982     return;
3983   }
3984 
3985   assertBackgroundSweepingFinished();
3986 
3987   Zone** read = zones().begin();
3988   Zone** end = zones().end();
3989   Zone** write = read;
3990 
3991   while (read < end) {
3992     Zone* zone = *read++;
3993 
3994     if (zone->wasGCStarted()) {
3995       MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
3996       const bool zoneIsDead =
3997           zone->arenas.arenaListsAreEmpty() && !zone->hasMarkedRealms();
3998       MOZ_ASSERT_IF(destroyingRuntime, zoneIsDead);
3999       if (zoneIsDead) {
4000         AutoSetThreadIsSweeping threadIsSweeping(zone);
4001         zone->arenas.checkEmptyFreeLists();
4002         zone->sweepCompartments(fop, false, destroyingRuntime);
4003         MOZ_ASSERT(zone->compartments().empty());
4004         MOZ_ASSERT(zone->rttValueObjects().empty());
4005         zone->destroy(fop);
4006         continue;
4007       }
4008       zone->sweepCompartments(fop, true, destroyingRuntime);
4009     }
4010     *write++ = zone;
4011   }
4012   zones().shrinkTo(write - zones().begin());
4013 }
4014 
4015 void ArenaLists::checkEmptyArenaList(AllocKind kind) {
4016   MOZ_ASSERT(arenaList(kind).isEmpty());
4017 }
4018 
4019 class MOZ_RAII AutoRunParallelTask : public GCParallelTask {
4020   // This class takes a pointer to a member function of GCRuntime.
4021   using TaskFunc = JS_MEMBER_FN_PTR_TYPE(GCRuntime, void);
4022 
4023   TaskFunc func_;
4024   gcstats::PhaseKind phase_;
4025   AutoLockHelperThreadState& lock_;
4026 
4027  public:
4028   AutoRunParallelTask(GCRuntime* gc, TaskFunc func, gcstats::PhaseKind phase,
4029                       AutoLockHelperThreadState& lock)
4030       : GCParallelTask(gc), func_(func), phase_(phase), lock_(lock) {
4031     gc->startTask(*this, phase_, lock_);
4032   }
4033 
4034   ~AutoRunParallelTask() { gc->joinTask(*this, phase_, lock_); }
4035 
4036   void run(AutoLockHelperThreadState& lock) override {
4037     AutoUnlockHelperThreadState unlock(lock);
4038 
4039     // The hazard analysis can't tell what the call to func_ will do but it's
4040     // not allowed to GC.
4041     JS::AutoSuppressGCAnalysis nogc;
4042 
4043     // Call pointer to member function on |gc|.
4044     JS_CALL_MEMBER_FN_PTR(gc, func_);
4045   }
4046 };
4047 
4048 void GCRuntime::purgeRuntimeForMinorGC() {
4049   // If external strings become nursery allocable, remember to call
4050   // zone->externalStringCache().purge() (and delete this assert.)
4051   MOZ_ASSERT(!IsNurseryAllocable(AllocKind::EXTERNAL_STRING));
4052 
4053   for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
4054     zone->functionToStringCache().purge();
4055   }
4056 
4057   rt->caches().purgeForMinorGC(rt);
4058 }
4059 
4060 void GCRuntime::purgeRuntime() {
4061   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE);
4062 
4063   for (GCRealmsIter realm(rt); !realm.done(); realm.next()) {
4064     realm->purge();
4065   }
4066 
4067   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4068     zone->purgeAtomCache();
4069     zone->externalStringCache().purge();
4070     zone->functionToStringCache().purge();
4071     zone->shapeZone().purgeShapeCaches(rt->defaultFreeOp());
4072   }
4073 
4074   JSContext* cx = rt->mainContextFromOwnThread();
4075   queueUnusedLifoBlocksForFree(&cx->tempLifoAlloc());
4076   cx->interpreterStack().purge(rt);
4077   cx->frontendCollectionPool().purge();
4078 
4079   rt->caches().purge();
4080 
4081   if (auto cache = rt->maybeThisRuntimeSharedImmutableStrings()) {
4082     cache->purge();
4083   }
4084 
4085   MOZ_ASSERT(unmarkGrayStack.empty());
4086   unmarkGrayStack.clearAndFree();
4087 
4088   // If we're the main runtime, tell helper threads to free their unused
4089   // memory when they are next idle.
4090   if (!rt->parentRuntime) {
4091     HelperThreadState().triggerFreeUnusedMemory();
4092   }
4093 }
4094 
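// Decide whether a realm's JIT code should survive this collection. Code is
// always discarded when cleaning up everything or when no more code can be
// allocated; otherwise it is preserved for active compartments, realms that
// request preservation, realms that animated recently, and zeal-triggered GCs.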
4095 bool GCRuntime::shouldPreserveJITCode(Realm* realm,
4096                                       const TimeStamp& currentTime,
4097                                       JS::GCReason reason,
4098                                       bool canAllocateMoreCode,
4099                                       bool isActiveCompartment) {
4100   if (cleanUpEverything) {
4101     return false;
4102   }
4103   if (!canAllocateMoreCode) {
4104     return false;
4105   }
4106 
4107   if (isActiveCompartment) {
4108     return true;
4109   }
4110   if (alwaysPreserveCode) {
4111     return true;
4112   }
4113   if (realm->preserveJitCode()) {
4114     return true;
4115   }
4116   if (IsCurrentlyAnimating(realm->lastAnimationTime, currentTime)) {
4117     return true;
4118   }
4119   if (reason == JS::GCReason::DEBUG_GC) {
4120     return true;
4121   }
4122 
4123   return false;
4124 }
4125 
4126 #ifdef DEBUG
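// Debug-only tracer that checks each traced edge either stays within the
// source cell's compartment (or is a known cross-compartment edge), or, for
// cells without a compartment, points into the expected zone or the atoms
// zone.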
4127 class CompartmentCheckTracer final : public JS::CallbackTracer {
4128   void onChild(const JS::GCCellPtr& thing) override;
4129 
4130  public:
4131   explicit CompartmentCheckTracer(JSRuntime* rt)
4132       : JS::CallbackTracer(rt, JS::TracerKind::Callback,
4133                            JS::WeakEdgeTraceAction::Skip),
4134         src(nullptr),
4135         zone(nullptr),
4136         compartment(nullptr) {}
4137 
4138   Cell* src;
4139   JS::TraceKind srcKind;
4140   Zone* zone;
4141   Compartment* compartment;
4142 };
4143 
4144 static bool InCrossCompartmentMap(JSRuntime* rt, JSObject* src,
4145                                   JS::GCCellPtr dst) {
4146   // Cross compartment edges are either in the cross compartment map or in a
4147   // debugger weakmap.
4148 
4149   Compartment* srccomp = src->compartment();
4150 
4151   if (dst.is<JSObject>()) {
4152     if (ObjectWrapperMap::Ptr p = srccomp->lookupWrapper(&dst.as<JSObject>())) {
4153       if (*p->value().unsafeGet() == src) {
4154         return true;
4155       }
4156     }
4157   }
4158 
4159   if (DebugAPI::edgeIsInDebuggerWeakmap(rt, src, dst)) {
4160     return true;
4161   }
4162 
4163   return false;
4164 }
4165 
4166 void CompartmentCheckTracer::onChild(const JS::GCCellPtr& thing) {
4167   Compartment* comp =
4168       MapGCThingTyped(thing, [](auto t) { return t->maybeCompartment(); });
4169   if (comp && compartment) {
4170     MOZ_ASSERT(
4171         comp == compartment ||
4172         runtime()->mainContextFromOwnThread()->disableCompartmentCheckTracer ||
4173         (srcKind == JS::TraceKind::Object &&
4174          InCrossCompartmentMap(runtime(), static_cast<JSObject*>(src), thing)));
4175   } else {
4176     TenuredCell* tenured = &thing.asCell()->asTenured();
4177     Zone* thingZone = tenured->zoneFromAnyThread();
4178     MOZ_ASSERT(thingZone == zone || thingZone->isAtomsZone());
4179   }
4180 }
4181 
4182 void GCRuntime::checkForCompartmentMismatches() {
4183   JSContext* cx = rt->mainContextFromOwnThread();
4184   if (cx->disableStrictProxyCheckingCount) {
4185     return;
4186   }
4187 
4188   CompartmentCheckTracer trc(rt);
4189   AutoAssertEmptyNursery empty(cx);
4190   for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
4191     trc.zone = zone;
4192     for (auto thingKind : AllAllocKinds()) {
4193       for (auto i = zone->cellIterUnsafe<TenuredCell>(thingKind, empty);
4194            !i.done(); i.next()) {
4195         trc.src = i.getCell();
4196         trc.srcKind = MapAllocToTraceKind(thingKind);
4197         trc.compartment = MapGCThingTyped(
4198             trc.src, trc.srcKind, [](auto t) { return t->maybeCompartment(); });
4199         JS::TraceChildren(&trc, JS::GCCellPtr(trc.src, trc.srcKind));
4200       }
4201     }
4202   }
4203 }
4204 #endif
4205 
4206 static void RelazifyFunctions(Zone* zone, AllocKind kind) {
4207   MOZ_ASSERT(kind == AllocKind::FUNCTION ||
4208              kind == AllocKind::FUNCTION_EXTENDED);
4209 
4210   JSRuntime* rt = zone->runtimeFromMainThread();
4211   AutoAssertEmptyNursery empty(rt->mainContextFromOwnThread());
4212 
4213   for (auto i = zone->cellIterUnsafe<JSObject>(kind, empty); !i.done();
4214        i.next()) {
4215     JSFunction* fun = &i->as<JSFunction>();
4216     // When iterating over the GC-heap, we may encounter function objects that
4217     // are incomplete (missing a BaseScript when we expect one). We must check
4218     // for this case before we can call JSFunction::hasBytecode().
4219     if (fun->isIncomplete()) {
4220       continue;
4221     }
4222     if (fun->hasBytecode()) {
4223       fun->maybeRelazify(rt);
4224     }
4225   }
4226 }
4227 
4228 static bool ShouldCollectZone(Zone* zone, JS::GCReason reason) {
4229   // If we are repeating a GC because we noticed dead compartments haven't
4230   // been collected, then only collect zones containing those compartments.
4231   if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
4232     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
4233       if (comp->gcState.scheduledForDestruction) {
4234         return true;
4235       }
4236     }
4237 
4238     return false;
4239   }
4240 
4241   // Otherwise we only collect scheduled zones.
4242   if (!zone->isGCScheduled()) {
4243     return false;
4244   }
4245 
4246   // If canCollectAtoms() is false then parsing is currently happening on
4247   // another thread, in which case we don't have information about which atoms
4248   // are roots, so we must skip collecting atoms.
4249   //
4250   // Note that this only affects the first slice of an incremental GC since
4251   // root marking is completed before we return to the mutator.
4252   //
4253   // Off-thread parsing is inhibited after the start of GC which prevents
4254   // races between creating atoms during parsing and sweeping atoms on the
4255   // main thread.
4256   //
4257   // Otherwise, we always schedule a GC in the atoms zone so that atoms which
4258   // the other collected zones are using are marked, and we can update the
4259   // set of atoms in use by the other collected zones at the end of the GC.
4260   if (zone->isAtomsZone()) {
4261     return TlsContext.get()->canCollectAtoms();
4262   }
4263 
4264   return zone->canCollect();
4265 }
4266 
4267 bool GCRuntime::prepareZonesForCollection(JS::GCReason reason,
4268                                           bool* isFullOut) {
4269 #ifdef DEBUG
4270   /* Assert that zone state is as we expect */
4271   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
4272     MOZ_ASSERT(!zone->isCollecting());
4273     MOZ_ASSERT_IF(!zone->isAtomsZone(), !zone->compartments().empty());
4274     for (auto i : AllAllocKinds()) {
4275       MOZ_ASSERT(!zone->arenas.arenasToSweep(i));
4276     }
4277   }
4278 #endif
4279 
4280   *isFullOut = true;
4281   bool any = false;
4282 
4283   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
4284     /* Set up which zones will be collected. */
4285     bool shouldCollect = ShouldCollectZone(zone, reason);
4286     if (shouldCollect) {
4287       MOZ_ASSERT(zone->canCollect());
4288       any = true;
4289       zone->changeGCState(Zone::NoGC, Zone::Prepare);
4290     } else if (zone->canCollect()) {
4291       *isFullOut = false;
4292     }
4293 
4294     zone->setWasCollected(shouldCollect);
4295   }
4296 
4297   /*
4298    * Check that we do collect the atoms zone if we triggered a GC for that
4299    * purpose.
4300    */
4301   MOZ_ASSERT_IF(reason == JS::GCReason::DELAYED_ATOMS_GC,
4302                 atomsZone->isGCPreparing());
4303 
4304   /* Check that at least one zone is scheduled for collection. */
4305   return any;
4306 }
4307 
4308 void GCRuntime::discardJITCodeForGC() {
4309   size_t nurserySiteResetCount = 0;
4310   size_t pretenuredSiteResetCount = 0;
4311 
4312   js::CancelOffThreadIonCompile(rt, JS::Zone::Prepare);
4313   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4314     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_DISCARD_CODE);
4315 
4316     // We may need to reset allocation sites and discard JIT code to recover if
4317     // we find object lifetimes have changed.
4318     PretenuringZone& pz = zone->pretenuring;
4319     bool resetNurserySites = pz.shouldResetNurseryAllocSites();
4320     bool resetPretenuredSites = pz.shouldResetPretenuredAllocSites();
4321 
4322     if (!zone->isPreservingCode()) {
4323       Zone::DiscardOptions options;
4324       options.discardBaselineCode = true;
4325       options.discardJitScripts = true;
4326       options.resetNurseryAllocSites = resetNurserySites;
4327       options.resetPretenuredAllocSites = resetPretenuredSites;
4328       zone->discardJitCode(rt->defaultFreeOp(), options);
4329 
4330     } else if (resetNurserySites || resetPretenuredSites) {
4331       zone->resetAllocSitesAndInvalidate(resetNurserySites,
4332                                          resetPretenuredSites);
4333     }
4334 
4335     if (resetNurserySites) {
4336       nurserySiteResetCount++;
4337     }
4338     if (resetPretenuredSites) {
4339       pretenuredSiteResetCount++;
4340     }
4341   }
4342 
4343   if (nursery().reportPretenuring()) {
4344     if (nurserySiteResetCount) {
4345       fprintf(
4346           stderr,
4347           "GC reset nursery alloc sites and invalidated code in %zu zones\n",
4348           nurserySiteResetCount);
4349     }
4350     if (pretenuredSiteResetCount) {
4351       fprintf(
4352           stderr,
4353           "GC reset pretenured alloc sites and invalidated code in %zu zones\n",
4354           pretenuredSiteResetCount);
4355     }
4356   }
4357 }
4358 
4359 void GCRuntime::relazifyFunctionsForShrinkingGC() {
4360   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::RELAZIFY_FUNCTIONS);
4361   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4362     if (zone->isSelfHostingZone()) {
4363       continue;
4364     }
4365     RelazifyFunctions(zone, AllocKind::FUNCTION);
4366     RelazifyFunctions(zone, AllocKind::FUNCTION_EXTENDED);
4367   }
4368 }
4369 
4370 void GCRuntime::purgePropMapTablesForShrinkingGC() {
4371   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_PROP_MAP_TABLES);
4372   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4373     if (!canRelocateZone(zone) || zone->keepPropMapTables()) {
4374       continue;
4375     }
4376 
4377     // Note: CompactPropMaps never have a table.
4378     for (auto map = zone->cellIterUnsafe<NormalPropMap>(); !map.done();
4379          map.next()) {
4380       if (map->asLinked()->hasTable()) {
4381         map->asLinked()->purgeTable(rt->defaultFreeOp());
4382       }
4383     }
4384     for (auto map = zone->cellIterUnsafe<DictionaryPropMap>(); !map.done();
4385          map.next()) {
4386       if (map->asLinked()->hasTable()) {
4387         map->asLinked()->purgeTable(rt->defaultFreeOp());
4388       }
4389     }
4390   }
4391 }
4392 
4393 // The debugger keeps track of the URLs for the sources of each realm's scripts.
4394 // These URLs are purged on shrinking GCs.
4395 void GCRuntime::purgeSourceURLsForShrinkingGC() {
4396   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_SOURCE_URLS);
4397   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4398     // URLs are not tracked for realms in the system zone.
4399     if (!canRelocateZone(zone) || zone->isSystemZone()) {
4400       continue;
4401     }
4402     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
4403       for (RealmsInCompartmentIter realm(comp); !realm.done(); realm.next()) {
4404         GlobalObject* global = realm.get()->unsafeUnbarrieredMaybeGlobal();
4405         if (global) {
4406           global->clearSourceURLSHolder();
4407         }
4408       }
4409     }
4410   }
4411 }
4412 
4413 void GCRuntime::unmarkWeakMaps() {
4414   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4415     /* Unmark all weak maps in the zones being collected. */
4416     WeakMapBase::unmarkZone(zone);
4417   }
4418 }
4419 
4420 bool GCRuntime::beginPreparePhase(JS::GCReason reason, AutoGCSession& session) {
4421   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PREPARE);
4422 
4423   if (!prepareZonesForCollection(reason, &isFull.ref())) {
4424     return false;
4425   }
4426 
4427   /* Check it's safe to access the atoms zone if we are collecting it. */
4428   if (atomsZone->isCollecting()) {
4429     session.maybeCheckAtomsAccess.emplace(rt);
4430   }
4431 
4432   /*
4433    * Start a parallel task to clear all mark state for the zones we are
4434    * collecting. This is linear in the size of the heap we are collecting and so
4435    * can be slow. This happens concurrently with the mutator and GC proper does
4436    * not start until this is complete.
4437    */
4438   setParallelUnmarkEnabled(true);
4439   unmarkTask.initZones();
4440   unmarkTask.start();
4441 
4442   /*
4443    * Process any queued source compressions during the start of a major
4444    * GC.
4445    */
4446   if (!IsShutdownReason(reason) && reason != JS::GCReason::ROOTS_REMOVED &&
4447       reason != JS::GCReason::XPCONNECT_SHUTDOWN) {
4448     StartHandlingCompressionsOnGC(rt);
4449   }
4450 
4451   return true;
4452 }
4453 
4454 void BackgroundUnmarkTask::initZones() {
4455   MOZ_ASSERT(isIdle());
4456   MOZ_ASSERT(zones.empty());
4457   MOZ_ASSERT(!isCancelled());
4458 
4459   // We can't safely iterate the zones vector from another thread so we copy the
4460   // zones to be collected into another vector.
4461   AutoEnterOOMUnsafeRegion oomUnsafe;
4462   for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
4463     if (!zones.append(zone.get())) {
4464       oomUnsafe.crash("BackgroundUnmarkTask::initZones");
4465     }
4466   }
4467 }
4468 
4469 void BackgroundUnmarkTask::run(AutoLockHelperThreadState& helperThreadLock) {
4470   AutoUnlockHelperThreadState unlock(helperThreadLock);
4471 
4472   AutoTraceLog log(TraceLoggerForCurrentThread(), TraceLogger_GCUnmarking);
4473 
4474   // We need to hold the GC lock while traversing the arena lists.
4475   AutoLockGC gcLock(gc);
4476 
4477   unmarkZones(gcLock);
4478   zones.clear();
4479 }
4480 
4481 void BackgroundUnmarkTask::unmarkZones(AutoLockGC& lock) {
4482   for (Zone* zone : zones) {
4483     for (auto kind : AllAllocKinds()) {
4484       for (ArenaIter arena(zone, kind); !arena.done(); arena.next()) {
4485         AutoUnlockGC unlock(lock);
4486         arena->unmarkAll();
4487         if (isCancelled()) {
4488           return;
4489         }
4490       }
4491     }
4492   }
4493 }
4494 
4495 void GCRuntime::endPreparePhase(JS::GCReason reason) {
4496   MOZ_ASSERT(unmarkTask.isIdle());
4497   setParallelUnmarkEnabled(false);
4498 
4499   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4500     /*
4501      * In an incremental GC, clear the arena free lists to ensure that subsequent
4502      * allocations refill them and end up marking new cells black. See
4503      * arenaAllocatedDuringGC().
4504      */
4505     zone->arenas.clearFreeLists();
4506 
4507     zone->arenas.checkGCStateNotInUse();
4508 
4509     zone->markedStrings = 0;
4510     zone->finalizedStrings = 0;
4511 
4512     zone->setPreservingCode(false);
4513 
4514 #ifdef JS_GC_ZEAL
4515     if (hasZealMode(ZealMode::YieldBeforeRootMarking)) {
4516       for (auto kind : AllAllocKinds()) {
4517         for (ArenaIter arena(zone, kind); !arena.done(); arena.next()) {
4518           arena->checkNoMarkedCells();
4519         }
4520       }
4521     }
4522 #endif
4523   }
4524 
4525   // Discard JIT code more aggressively if the process is approaching its
4526   // executable code limit.
4527   bool canAllocateMoreCode = jit::CanLikelyAllocateMoreExecutableMemory();
4528   auto currentTime = ReallyNow();
4529 
4530   Compartment* activeCompartment = nullptr;
4531   jit::JitActivationIterator activation(rt->mainContextFromOwnThread());
4532   if (!activation.done()) {
4533     activeCompartment = activation->compartment();
4534   }
4535 
4536   for (CompartmentsIter c(rt); !c.done(); c.next()) {
4537     c->gcState.scheduledForDestruction = false;
4538     c->gcState.maybeAlive = false;
4539     c->gcState.hasEnteredRealm = false;
4540     bool isActiveCompartment = c == activeCompartment;
4541     for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
4542       if (r->shouldTraceGlobal() || !r->zone()->isGCScheduled()) {
4543         c->gcState.maybeAlive = true;
4544       }
4545       if (shouldPreserveJITCode(r, currentTime, reason, canAllocateMoreCode,
4546                                 isActiveCompartment)) {
4547         r->zone()->setPreservingCode(true);
4548       }
4549       if (r->hasBeenEnteredIgnoringJit()) {
4550         c->gcState.hasEnteredRealm = true;
4551       }
4552     }
4553   }
4554 
4555   /*
4556    * Perform remaining preparation work that must take place in the first true
4557    * GC slice.
4558    */
4559 
4560   {
4561     gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::PREPARE);
4562 
4563     AutoLockHelperThreadState helperLock;
4564 
4565     /* Clear mark state for WeakMaps in parallel with other work. */
4566     AutoRunParallelTask unmarkWeakMaps(this, &GCRuntime::unmarkWeakMaps,
4567                                        gcstats::PhaseKind::UNMARK_WEAKMAPS,
4568                                        helperLock);
4569 
4570     /*
4571      * Buffer gray roots for incremental collections. This is linear in the
4572      * number of roots which can be in the tens of thousands. Do this in
4573      * parallel with the rest of this block.
4574      */
4575     Maybe<AutoRunParallelTask> bufferGrayRootsTask;
4576     if (isIncremental) {
4577       bufferGrayRootsTask.emplace(this, &GCRuntime::bufferGrayRoots,
4578                                   gcstats::PhaseKind::BUFFER_GRAY_ROOTS,
4579                                   helperLock);
4580     }
4581     AutoUnlockHelperThreadState unlock(helperLock);
4582 
4583     // Discard JIT code. For incremental collections, the sweep phase will
4584     // also discard JIT code.
4585     discardJITCodeForGC();
4586     startBackgroundFreeAfterMinorGC();
4587 
4588     /*
4589      * Relazify functions after discarding JIT code (we can't relazify
4590      * functions with JIT code) and before the actual mark phase, so that
4591      * the current GC can collect the JSScripts we're unlinking here.  We do
4592      * this only when we're performing a shrinking GC, as too much
4593      * relazification can cause performance issues when we have to reparse
4594      * the same functions over and over.
4595      */
4596     if (gcOptions == JS::GCOptions::Shrink) {
4597       relazifyFunctionsForShrinkingGC();
4598       purgePropMapTablesForShrinkingGC();
4599       purgeSourceURLsForShrinkingGC();
4600     }
4601 
4602     /*
4603      * We must purge the runtime at the beginning of an incremental GC. The
4604      * danger if we purge later is that the snapshot invariant of
4605      * incremental GC will be broken, as follows. If some object is
4606      * reachable only through some cache (say the dtoaCache) then it will
4607      * not be part of the snapshot.  If we purge after root marking, then
4608      * the mutator could obtain a pointer to the object and start using
4609      * it. This object might never be marked, so a GC hazard would exist.
4610      */
4611     purgeRuntime();
4612 
4613     if (IsShutdownReason(reason)) {
4614       /* Clear any engine roots that may hold external data live. */
4615       for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4616         zone->clearRootsForShutdownGC();
4617       }
4618     }
4619   }
4620 
4621 #ifdef DEBUG
4622   if (fullCompartmentChecks) {
4623     checkForCompartmentMismatches();
4624   }
4625 #endif
4626 }
4627 
4628 void GCRuntime::beginMarkPhase(AutoGCSession& session) {
4629   /*
4630    * Mark phase.
4631    */
4632   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
4633 
4634   // This is the slice we actually start collecting. The number can be used to
4635   // check whether a major GC has started so we must not increment it until we
4636   // get here.
4637   incMajorGcNumber();
4638 
4639   marker.start();
4640   marker.clearMarkCount();
4641   MOZ_ASSERT(marker.isDrained());
4642 
4643   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4644     // Incremental marking barriers are enabled at this point.
4645     zone->changeGCState(Zone::Prepare, Zone::MarkBlackOnly);
4646   }
4647 
4648   if (rt->isBeingDestroyed()) {
4649     checkNoRuntimeRoots(session);
4650   } else {
4651     traceRuntimeForMajorGC(&marker, session);
4652   }
4653 
4654   if (isIncremental) {
4655     findDeadCompartments();
4656   }
4657 
4658   updateMemoryCountersOnGCStart();
4659   stats().measureInitialHeapSize();
4660 }
4661 
4662 void GCRuntime::findDeadCompartments() {
4663   gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK_ROOTS);
4664   gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_COMPARTMENTS);
4665 
4666   /*
4667    * This code ensures that if a compartment is "dead", then it will be
4668    * collected in this GC. A compartment is considered dead if its maybeAlive
4669    * flag is false. The maybeAlive flag is set if:
4670    *
4671    *   (1) the compartment has been entered (set in beginMarkPhase() above)
4672    *   (2) the compartment's zone is not being collected (set in
4673    *       beginMarkPhase() above)
4674    *   (3) an object in the compartment was marked during root marking, either
4675    *       as a black root or a gray root (set in RootMarking.cpp), or
4676    *   (4) the compartment has incoming cross-compartment edges from another
4677    *       compartment that has maybeAlive set (set by this method).
4678    *
4679    * If the maybeAlive is false, then we set the scheduledForDestruction flag.
4680    * At the end of the GC, we look for compartments where
4681    * scheduledForDestruction is true. These are compartments that were somehow
4682    * "revived" during the incremental GC. If any are found, we do a special,
4683    * non-incremental GC of those compartments to try to collect them.
4684    *
4685    * Compartments can be revived for a variety of reasons. One reason is bug
4686    * 811587, where a reflector that was dead can be revived by DOM code that
4687    * still refers to the underlying DOM node.
4688    *
4689    * Read barriers and allocations can also cause revival. This might happen
4690    * during a function like JS_TransplantObject, which iterates over all
4691    * compartments, live or dead, and operates on their objects. See bug 803376
4692    * for details on this problem. To avoid the problem, we try to avoid
4693    * allocation and read barriers during JS_TransplantObject and the like.
4694    */
4695 
4696   // Propagate the maybeAlive flag via cross-compartment edges.
4697 
4698   Vector<Compartment*, 0, js::SystemAllocPolicy> workList;
4699 
4700   for (CompartmentsIter comp(rt); !comp.done(); comp.next()) {
4701     if (comp->gcState.maybeAlive) {
4702       if (!workList.append(comp)) {
4703         return;
4704       }
4705     }
4706   }
4707 
4708   while (!workList.empty()) {
4709     Compartment* comp = workList.popCopy();
4710     for (Compartment::WrappedObjectCompartmentEnum e(comp); !e.empty();
4711          e.popFront()) {
4712       Compartment* dest = e.front();
4713       if (!dest->gcState.maybeAlive) {
4714         dest->gcState.maybeAlive = true;
4715         if (!workList.append(dest)) {
4716           return;
4717         }
4718       }
4719     }
4720   }
4721 
4722   // Set scheduledForDestruction based on maybeAlive.
4723 
4724   for (GCCompartmentsIter comp(rt); !comp.done(); comp.next()) {
4725     MOZ_ASSERT(!comp->gcState.scheduledForDestruction);
4726     if (!comp->gcState.maybeAlive) {
4727       comp->gcState.scheduledForDestruction = true;
4728     }
4729   }
4730 }
4731 
4732 void GCRuntime::updateMemoryCountersOnGCStart() {
4733   heapSize.updateOnGCStart();
4734 
4735   // Update memory counters for the zones we are collecting.
4736   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4737     zone->updateMemoryCountersOnGCStart();
4738   }
4739 }
4740 
4741 template <class ZoneIterT>
4742 IncrementalProgress GCRuntime::markWeakReferences(
4743     SliceBudget& incrementalBudget) {
4744   gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_MARK_WEAK);
4745 
4746   auto unlimited = SliceBudget::unlimited();
4747   SliceBudget& budget =
4748       marker.incrementalWeakMapMarkingEnabled ? incrementalBudget : unlimited;
4749 
4750   // We may have already entered weak marking mode.
4751   if (!marker.isWeakMarking() && marker.enterWeakMarkingMode()) {
4752     // Do not rely on the information about not-yet-marked weak keys that have
4753     // been collected by barriers. Clear out the gcEphemeronEdges entries and
4754     // rebuild the full table. Note that this is a cross-zone operation; delegate
4755     // zone entries will be populated by map zone traversals, so everything
4756     // needs to be cleared first, then populated.
4757     if (!marker.incrementalWeakMapMarkingEnabled) {
4758       for (ZoneIterT zone(this); !zone.done(); zone.next()) {
4759         AutoEnterOOMUnsafeRegion oomUnsafe;
4760         if (!zone->gcEphemeronEdges().clear()) {
4761           oomUnsafe.crash("clearing weak keys when entering weak marking mode");
4762         }
4763       }
4764     }
4765 
4766     for (ZoneIterT zone(this); !zone.done(); zone.next()) {
4767       if (zone->enterWeakMarkingMode(&marker, budget) == NotFinished) {
4768         MOZ_ASSERT(marker.incrementalWeakMapMarkingEnabled);
4769         marker.leaveWeakMarkingMode();
4770         return NotFinished;
4771       }
4772     }
4773   }
4774 
4775   // This is not strictly necessary; if we yield here, we could run the mutator
4776   // in weak marking mode and unmark gray would end up doing the key lookups.
4777   // But it seems better to not slow down barriers. Re-entering weak marking
4778   // mode will be fast since already-processed markables have been removed.
4779   auto leaveOnExit =
4780       mozilla::MakeScopeExit([&] { marker.leaveWeakMarkingMode(); });
4781 
4782   bool markedAny = true;
4783   while (markedAny) {
4784     if (!marker.markUntilBudgetExhausted(budget)) {
4785       MOZ_ASSERT(marker.incrementalWeakMapMarkingEnabled);
4786       return NotFinished;
4787     }
4788 
4789     markedAny = false;
4790 
4791     if (!marker.isWeakMarking()) {
4792       for (ZoneIterT zone(this); !zone.done(); zone.next()) {
4793         markedAny |= WeakMapBase::markZoneIteratively(zone, &marker);
4794       }
4795     }
4796 
4797     markedAny |= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker);
4798   }
4799   MOZ_ASSERT(marker.isDrained());
4800 
4801   return Finished;
4802 }
4803 
4804 IncrementalProgress GCRuntime::markWeakReferencesInCurrentGroup(
4805     SliceBudget& budget) {
4806   return markWeakReferences<SweepGroupZonesIter>(budget);
4807 }
4808 
4809 template <class ZoneIterT>
4810 void GCRuntime::markGrayRoots(gcstats::PhaseKind phase) {
4811   MOZ_ASSERT(marker.markColor() == MarkColor::Gray);
4812 
4813   gcstats::AutoPhase ap(stats(), phase);
4814   if (hasValidGrayRootsBuffer()) {
4815     for (ZoneIterT zone(this); !zone.done(); zone.next()) {
4816       markBufferedGrayRoots(zone);
4817     }
4818   } else {
4819     MOZ_ASSERT(!isIncremental);
4820     traceEmbeddingGrayRoots(&marker);
4821     Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
4822         &marker, Compartment::GrayEdges);
4823   }
4824 }
4825 
4826 IncrementalProgress GCRuntime::markAllWeakReferences() {
4827   SliceBudget budget = SliceBudget::unlimited();
4828   return markWeakReferences<GCZonesIter>(budget);
4829 }
4830 
4831 void GCRuntime::markAllGrayReferences(gcstats::PhaseKind phase) {
4832   markGrayRoots<GCZonesIter>(phase);
4833   drainMarkStack();
4834 }
4835 
4836 void GCRuntime::dropStringWrappers() {
4837   /*
4838    * String "wrappers" are dropped on GC because their presence would require
4839    * us to sweep the wrappers in all compartments every time we sweep a
4840    * compartment group.
4841    */
4842   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
4843     zone->dropStringWrappersOnGC();
4844   }
4845 }
4846 
4847 /*
4848  * Group zones that must be swept at the same time.
4849  *
4850  * From the point of view of the mutator, groups of zones transition atomically
4851  * from marking to sweeping. If compartment A has an edge to an unmarked object
4852  * in compartment B, then we must not start sweeping A in a later slice than we
4853  * start sweeping B. That's because a write barrier in A could lead to the
4854  * unmarked object in B becoming marked. However, if we had already swept that
4855  * object, we would be in trouble.
4856  *
4857  * If we consider these dependencies as a graph, then all the compartments in
4858  * any strongly-connected component of this graph must start sweeping in the
4859  * same slice.
4860  *
4861  * Tarjan's algorithm is used to calculate the components.
4862  */
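/*
 * A minimal worked example (zone names here are hypothetical): suppose zones
 * A, B and C are all being collected, A contains a wrapper to an unmarked
 * object in B, and B contains a wrapper to an unmarked object in A. The
 * resulting edges A -> B and B -> A put A and B in the same strongly-connected
 * component, so they form one sweep group and must start sweeping in the same
 * slice. C has no such edges and may be placed in its own group and swept in a
 * later slice.
 */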
4863 
4864 bool Compartment::findSweepGroupEdges() {
4865   Zone* source = zone();
4866   for (WrappedObjectCompartmentEnum e(this); !e.empty(); e.popFront()) {
4867     Compartment* targetComp = e.front();
4868     Zone* target = targetComp->zone();
4869 
4870     if (!target->isGCMarking() || source->hasSweepGroupEdgeTo(target)) {
4871       continue;
4872     }
4873 
4874     for (ObjectWrapperEnum e(this, targetComp); !e.empty(); e.popFront()) {
4875       JSObject* key = e.front().mutableKey();
4876       MOZ_ASSERT(key->zone() == target);
4877 
4878       // Add an edge to the wrapped object's zone to ensure that the wrapper
4879       // zone is not still being marked when we start sweeping the wrapped zone.
4880       // As an optimization, if the wrapped object is already marked black there
4881       // is no danger of later marking and we can skip this.
4882       if (key->isMarkedBlack()) {
4883         continue;
4884       }
4885 
4886       if (!source->addSweepGroupEdgeTo(target)) {
4887         return false;
4888       }
4889 
4890       // We don't need to consider any more wrappers for this target
4891       // compartment since we already added an edge.
4892       break;
4893     }
4894   }
4895 
4896   return true;
4897 }
4898 
4899 bool Zone::findSweepGroupEdges(Zone* atomsZone) {
4900   // Any zone may have a pointer to an atom in the atoms zone, and these aren't
4901   // in the cross compartment map.
4902   if (atomsZone->wasGCStarted() && !addSweepGroupEdgeTo(atomsZone)) {
4903     return false;
4904   }
4905 
4906   for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
4907     if (!comp->findSweepGroupEdges()) {
4908       return false;
4909     }
4910   }
4911 
4912   return WeakMapBase::findSweepGroupEdgesForZone(this);
4913 }
4914 
4915 static bool AddEdgesForMarkQueue(GCMarker& marker) {
4916 #ifdef DEBUG
4917   // For testing only.
4918   //
4919   // Add edges between all objects mentioned in the test mark queue, since
4920   // otherwise they will get marked in a different order than their sweep
4921   // groups. Note that this is only done at the beginning of an incremental
4922   // collection, so it is possible for objects to be added later that do not
4923   // follow the sweep group ordering. These objects will wait until their sweep
4924   // group comes up, or will be skipped if their sweep group is already past.
4925   JS::Zone* prevZone = nullptr;
4926   for (size_t i = 0; i < marker.markQueue.length(); i++) {
4927     Value val = marker.markQueue[i].get().unbarrieredGet();
4928     if (!val.isObject()) {
4929       continue;
4930     }
4931     JSObject* obj = &val.toObject();
4932     JS::Zone* zone = obj->zone();
4933     if (!zone->isGCMarking()) {
4934       continue;
4935     }
4936     if (prevZone && prevZone != zone) {
4937       if (!prevZone->addSweepGroupEdgeTo(zone)) {
4938         return false;
4939       }
4940     }
4941     prevZone = zone;
4942   }
4943 #endif
4944   return true;
4945 }
4946 
4947 bool GCRuntime::findSweepGroupEdges() {
4948   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4949     if (!zone->findSweepGroupEdges(atomsZone)) {
4950       return false;
4951     }
4952   }
4953 
4954   if (!AddEdgesForMarkQueue(marker)) {
4955     return false;
4956   }
4957 
4958   return DebugAPI::findSweepGroupEdges(rt);
4959 }
4960 
4961 void GCRuntime::groupZonesForSweeping(JS::GCReason reason) {
4962 #ifdef DEBUG
4963   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
4964     MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
4965   }
4966 #endif
4967 
4968   JSContext* cx = rt->mainContextFromOwnThread();
4969   ZoneComponentFinder finder(cx);
4970   if (!isIncremental || !findSweepGroupEdges()) {
4971     finder.useOneComponent();
4972   }
4973 
4974   // Use one component for two-slice zeal modes.
4975   if (useZeal && hasIncrementalTwoSliceZealMode()) {
4976     finder.useOneComponent();
4977   }
4978 
4979   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4980     MOZ_ASSERT(zone->isGCMarking());
4981     finder.addNode(zone);
4982   }
4983   sweepGroups = finder.getResultsList();
4984   currentSweepGroup = sweepGroups;
4985   sweepGroupIndex = 1;
4986 
4987   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
4988     zone->clearSweepGroupEdges();
4989   }
4990 
4991 #ifdef DEBUG
4992   unsigned idx = sweepGroupIndex;
4993   for (Zone* head = currentSweepGroup; head; head = head->nextGroup()) {
4994     for (Zone* zone = head; zone; zone = zone->nextNodeInGroup()) {
4995       MOZ_ASSERT(zone->isGCMarking());
4996       zone->gcSweepGroupIndex = idx;
4997     }
4998     idx++;
4999   }
5000 
5001   MOZ_ASSERT_IF(!isIncremental, !currentSweepGroup->nextGroup());
5002   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
5003     MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
5004   }
5005 #endif
5006 }
5007 
5008 static void ResetGrayList(Compartment* comp);
5009 
5010 void GCRuntime::getNextSweepGroup() {
5011   currentSweepGroup = currentSweepGroup->nextGroup();
5012   ++sweepGroupIndex;
5013   if (!currentSweepGroup) {
5014     abortSweepAfterCurrentGroup = false;
5015     return;
5016   }
5017 
5018   MOZ_ASSERT_IF(abortSweepAfterCurrentGroup, !isIncremental);
5019   if (!isIncremental) {
5020     ZoneComponentFinder::mergeGroups(currentSweepGroup);
5021   }
5022 
5023   for (Zone* zone = currentSweepGroup; zone; zone = zone->nextNodeInGroup()) {
5024     MOZ_ASSERT(zone->isGCMarkingBlackOnly());
5025     MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
5026   }
5027 
5028   if (abortSweepAfterCurrentGroup) {
5029     joinTask(markTask, gcstats::PhaseKind::SWEEP_MARK);
5030 
5031     // Abort collection of subsequent sweep groups.
5032     for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5033       MOZ_ASSERT(!zone->gcNextGraphComponent);
5034       zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
5035       zone->arenas.unmarkPreMarkedFreeCells();
5036       zone->arenas.mergeNewArenasInMarkPhase();
5037       zone->gcGrayRoots().Clear();
5038       zone->clearGCSliceThresholds();
5039     }
5040 
5041     for (SweepGroupCompartmentsIter comp(rt); !comp.done(); comp.next()) {
5042       ResetGrayList(comp);
5043     }
5044 
5045     abortSweepAfterCurrentGroup = false;
5046     currentSweepGroup = nullptr;
5047   }
5048 
5049   hasMarkedGrayRoots = false;
5050 }
5051 
5052 /*
5053  * Gray marking:
5054  *
5055  * At the end of collection, anything reachable from a gray root that has not
5056  * otherwise been marked black must be marked gray.
5057  *
5058  * This means that when marking things gray we must not allow marking to leave
5059  * the current compartment group, as that could result in things being marked
5060  * gray when they might subsequently be marked black.  To achieve this, when we
5061  * find a cross compartment pointer we don't mark the referent but add it to a
5062  * singly-linked list of incoming gray pointers that is stored with each
5063  * compartment.
5064  *
5065  * The list head is stored in Compartment::gcIncomingGrayPointers and contains
5066  * cross compartment wrapper objects. The next pointer is stored in the second
5067  * extra slot of the cross compartment wrapper.
5068  *
5069  * The list is created during gray marking when one of the
5070  * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
5071  * current compartment group.  This calls DelayCrossCompartmentGrayMarking to
5072  * push the referring object onto the list.
5073  *
5074  * The list is traversed and then unlinked in
5075  * GCRuntime::markIncomingCrossCompartmentPointers.
5076  */
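/*
 * A minimal sketch of the resulting list structure (wrapper names here are
 * hypothetical):
 *
 *   comp->gcIncomingGrayPointers -> wrapperA
 *   wrapperA[GrayLinkReservedSlot] -> wrapperB
 *   wrapperB[GrayLinkReservedSlot] -> null
 *
 * Every wrapper on the list lives in some other compartment but wraps an
 * object in |comp|; traversal follows the reserved slot until it reaches null,
 * and unlinking an entry simply resets that slot to undefined.
 */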
5077 
5078 static bool IsGrayListObject(JSObject* obj) {
5079   MOZ_ASSERT(obj);
5080   return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
5081 }
5082 
5083 /* static */
5084 unsigned ProxyObject::grayLinkReservedSlot(JSObject* obj) {
5085   MOZ_ASSERT(IsGrayListObject(obj));
5086   return CrossCompartmentWrapperObject::GrayLinkReservedSlot;
5087 }
5088 
5089 #ifdef DEBUG
5090 static void AssertNotOnGrayList(JSObject* obj) {
5091   MOZ_ASSERT_IF(
5092       IsGrayListObject(obj),
5093       GetProxyReservedSlot(obj, ProxyObject::grayLinkReservedSlot(obj))
5094           .isUndefined());
5095 }
5096 #endif
5097 
5098 static void AssertNoWrappersInGrayList(JSRuntime* rt) {
5099 #ifdef DEBUG
5100   for (CompartmentsIter c(rt); !c.done(); c.next()) {
5101     MOZ_ASSERT(!c->gcIncomingGrayPointers);
5102     for (Compartment::ObjectWrapperEnum e(c); !e.empty(); e.popFront()) {
5103       AssertNotOnGrayList(e.front().value().unbarrieredGet());
5104     }
5105   }
5106 #endif
5107 }
5108 
5109 static JSObject* CrossCompartmentPointerReferent(JSObject* obj) {
5110   MOZ_ASSERT(IsGrayListObject(obj));
5111   return &obj->as<ProxyObject>().private_().toObject();
5112 }
5113 
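// Return the next wrapper on the incoming gray pointer list after |prev|,
// optionally unlinking |prev| from the list as we go.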
5114 static JSObject* NextIncomingCrossCompartmentPointer(JSObject* prev,
5115                                                      bool unlink) {
5116   unsigned slot = ProxyObject::grayLinkReservedSlot(prev);
5117   JSObject* next = GetProxyReservedSlot(prev, slot).toObjectOrNull();
5118   MOZ_ASSERT_IF(next, IsGrayListObject(next));
5119 
5120   if (unlink) {
5121     SetProxyReservedSlot(prev, slot, UndefinedValue());
5122   }
5123 
5124   return next;
5125 }
5126 
5127 void js::gc::DelayCrossCompartmentGrayMarking(JSObject* src) {
5128   MOZ_ASSERT(IsGrayListObject(src));
5129   MOZ_ASSERT(src->isMarkedGray());
5130 
5131   AutoTouchingGrayThings tgt;
5132 
5133   /* Called from MarkCrossCompartmentXXX functions. */
5134   unsigned slot = ProxyObject::grayLinkReservedSlot(src);
5135   JSObject* dest = CrossCompartmentPointerReferent(src);
5136   Compartment* comp = dest->compartment();
5137 
5138   if (GetProxyReservedSlot(src, slot).isUndefined()) {
5139     SetProxyReservedSlot(src, slot,
5140                          ObjectOrNullValue(comp->gcIncomingGrayPointers));
5141     comp->gcIncomingGrayPointers = src;
5142   } else {
5143     MOZ_ASSERT(GetProxyReservedSlot(src, slot).isObjectOrNull());
5144   }
5145 
5146 #ifdef DEBUG
5147   /*
5148    * Assert that the object is in our list, also walking the list to check its
5149    * integrity.
5150    */
5151   JSObject* obj = comp->gcIncomingGrayPointers;
5152   bool found = false;
5153   while (obj) {
5154     if (obj == src) {
5155       found = true;
5156     }
5157     obj = NextIncomingCrossCompartmentPointer(obj, false);
5158   }
5159   MOZ_ASSERT(found);
5160 #endif
5161 }
5162 
5163 void GCRuntime::markIncomingCrossCompartmentPointers(MarkColor color) {
5164   gcstats::AutoPhase ap(stats(),
5165                         color == MarkColor::Black
5166                             ? gcstats::PhaseKind::SWEEP_MARK_INCOMING_BLACK
5167                             : gcstats::PhaseKind::SWEEP_MARK_INCOMING_GRAY);
5168 
5169   bool unlinkList = color == MarkColor::Gray;
5170 
5171   for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
5172     MOZ_ASSERT(c->zone()->isGCMarking());
5173     MOZ_ASSERT_IF(color == MarkColor::Gray,
5174                   c->zone()->isGCMarkingBlackAndGray());
5175     MOZ_ASSERT_IF(c->gcIncomingGrayPointers,
5176                   IsGrayListObject(c->gcIncomingGrayPointers));
5177 
5178     for (JSObject* src = c->gcIncomingGrayPointers; src;
5179          src = NextIncomingCrossCompartmentPointer(src, unlinkList)) {
5180       JSObject* dst = CrossCompartmentPointerReferent(src);
5181       MOZ_ASSERT(dst->compartment() == c);
5182 
5183       if (color == MarkColor::Gray) {
5184         if (src->asTenured().isMarkedGray()) {
5185           TraceManuallyBarrieredEdge(&marker, &dst,
5186                                      "cross-compartment gray pointer");
5187         }
5188       } else {
5189         if (src->asTenured().isMarkedBlack()) {
5190           TraceManuallyBarrieredEdge(&marker, &dst,
5191                                      "cross-compartment black pointer");
5192         }
5193       }
5194     }
5195 
5196     if (unlinkList) {
5197       c->gcIncomingGrayPointers = nullptr;
5198     }
5199   }
5200 }
5201 
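// Remove |wrapper| from its referent compartment's incoming gray pointer
// list. Returns false if it is not a gray list object or is not on the list.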
5202 static bool RemoveFromGrayList(JSObject* wrapper) {
5203   AutoTouchingGrayThings tgt;
5204 
5205   if (!IsGrayListObject(wrapper)) {
5206     return false;
5207   }
5208 
5209   unsigned slot = ProxyObject::grayLinkReservedSlot(wrapper);
5210   if (GetProxyReservedSlot(wrapper, slot).isUndefined()) {
5211     return false; /* Not on our list. */
5212   }
5213 
5214   JSObject* tail = GetProxyReservedSlot(wrapper, slot).toObjectOrNull();
5215   SetProxyReservedSlot(wrapper, slot, UndefinedValue());
5216 
5217   Compartment* comp = CrossCompartmentPointerReferent(wrapper)->compartment();
5218   JSObject* obj = comp->gcIncomingGrayPointers;
5219   if (obj == wrapper) {
5220     comp->gcIncomingGrayPointers = tail;
5221     return true;
5222   }
5223 
5224   while (obj) {
5225     unsigned slot = ProxyObject::grayLinkReservedSlot(obj);
5226     JSObject* next = GetProxyReservedSlot(obj, slot).toObjectOrNull();
5227     if (next == wrapper) {
5228       js::detail::SetProxyReservedSlotUnchecked(obj, slot,
5229                                                 ObjectOrNullValue(tail));
5230       return true;
5231     }
5232     obj = next;
5233   }
5234 
5235   MOZ_CRASH("object not found in gray link list");
5236 }
5237 
5238 static void ResetGrayList(Compartment* comp) {
5239   JSObject* src = comp->gcIncomingGrayPointers;
5240   while (src) {
5241     src = NextIncomingCrossCompartmentPointer(src, true);
5242   }
5243   comp->gcIncomingGrayPointers = nullptr;
5244 }
5245 
5246 #ifdef DEBUG
5247 static bool HasIncomingCrossCompartmentPointers(JSRuntime* rt) {
5248   for (SweepGroupCompartmentsIter c(rt); !c.done(); c.next()) {
5249     if (c->gcIncomingGrayPointers) {
5250       return true;
5251     }
5252   }
5253 
5254   return false;
5255 }
5256 #endif
5257 
5258 void js::NotifyGCNukeWrapper(JSObject* wrapper) {
5259   MOZ_ASSERT(IsCrossCompartmentWrapper(wrapper));
5260 
5261   /*
5262    * References to the target of the wrapper are being removed, so we no
5263    * longer have to remember to mark it.
5264    */
5265   RemoveFromGrayList(wrapper);
5266 
5267   /*
5268    * Clean up WeakRef maps which might include this wrapper.
5269    */
5270   JSObject* target = UncheckedUnwrapWithoutExpose(wrapper);
5271   if (target->is<WeakRefObject>()) {
5272     WeakRefObject* weakRef = &target->as<WeakRefObject>();
5273     GCRuntime* gc = &weakRef->runtimeFromMainThread()->gc;
5274     if (weakRef->target() && gc->unregisterWeakRefWrapper(wrapper)) {
5275       weakRef->setTarget(nullptr);
5276     }
5277   }
5278 
5279   /*
5280    * Clean up FinalizationRecord objects which might be the target of
5281    * this wrapper.
5282    */
5283   if (target->is<FinalizationRecordObject>()) {
5284     auto* record = &target->as<FinalizationRecordObject>();
5285     FinalizationRegistryObject::unregisterRecord(record);
5286   }
5287 }
5288 
5289 enum {
5290   JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
5291   JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
5292 };
5293 
5294 unsigned js::NotifyGCPreSwap(JSObject* a, JSObject* b) {
5295   /*
5296    * Two objects in the same compartment are about to have their contents
5297    * swapped.  If either of them is in our gray pointer list, we remove it
5298    * from the list, returning a bitset indicating what happened.
5299    */
5300   return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
5301          (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
5302 }
5303 
5304 void js::NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags) {
5305   /*
5306    * Two objects in the same compartment have had their contents swapped.  If
5307    * either of them were in our gray pointer list, we re-add them again.
5308    */
5309   if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED) {
5310     DelayCrossCompartmentGrayMarking(b);
5311   }
5312   if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED) {
5313     DelayCrossCompartmentGrayMarking(a);
5314   }
5315 }
5316 
5317 static inline void MaybeCheckWeakMapMarking(GCRuntime* gc) {
5318 #if defined(JS_GC_ZEAL) || defined(DEBUG)
5319 
5320   bool shouldCheck;
5321 #  if defined(DEBUG)
5322   shouldCheck = true;
5323 #  else
5324   shouldCheck = gc->hasZealMode(ZealMode::CheckWeakMapMarking);
5325 #  endif
5326 
5327   if (shouldCheck) {
5328     for (SweepGroupZonesIter zone(gc); !zone.done(); zone.next()) {
5329       MOZ_RELEASE_ASSERT(WeakMapBase::checkMarkingForZone(zone));
5330     }
5331   }
5332 
5333 #endif
5334 }
5335 
5336 IncrementalProgress GCRuntime::markGrayReferencesInCurrentGroup(
5337     JSFreeOp* fop, SliceBudget& budget) {
5338   MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
5339   MOZ_ASSERT(marker.isDrained());
5340 
5341   MOZ_ASSERT(marker.markColor() == MarkColor::Black);
5342 
5343   if (hasMarkedGrayRoots) {
5344     return Finished;
5345   }
5346 
5347   MOZ_ASSERT(cellsToAssertNotGray.ref().empty());
5348 
5349   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_MARK);
5350 
5351   // Mark any incoming gray pointers from previously swept compartments that
5352   // have subsequently been marked black. This can occur when gray cells
5353   // become black by the action of UnmarkGray.
5354   markIncomingCrossCompartmentPointers(MarkColor::Black);
5355   drainMarkStack();
5356 
5357   // Change state of current group to MarkBlackAndGray to restrict marking to
5358   // this group.  Note that there may be pointers to the atoms zone, and
5359   // these will be marked through, as they are not marked with
5360   // TraceCrossCompartmentEdge.
5361   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5362     zone->changeGCState(Zone::MarkBlackOnly, Zone::MarkBlackAndGray);
5363   }
5364 
5365   AutoSetMarkColor setColorGray(marker, MarkColor::Gray);
5366   marker.setMainStackColor(MarkColor::Gray);
5367 
5368   // Mark incoming gray pointers from previously swept compartments.
5369   markIncomingCrossCompartmentPointers(MarkColor::Gray);
5370 
5371   markGrayRoots<SweepGroupZonesIter>(gcstats::PhaseKind::SWEEP_MARK_GRAY);
5372 
5373   hasMarkedGrayRoots = true;
5374 
5375 #ifdef JS_GC_ZEAL
5376   if (shouldYieldForZeal(ZealMode::YieldWhileGrayMarking)) {
5377     return NotFinished;
5378   }
5379 #endif
5380 
5381   if (markUntilBudgetExhausted(budget) == NotFinished) {
5382     return NotFinished;
5383   }
5384   marker.setMainStackColor(MarkColor::Black);
5385   return Finished;
5386 }
5387 
5388 IncrementalProgress GCRuntime::endMarkingSweepGroup(JSFreeOp* fop,
5389                                                     SliceBudget& budget) {
5390   MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
5391   MOZ_ASSERT(marker.isDrained());
5392 
5393   MOZ_ASSERT(marker.markColor() == MarkColor::Black);
5394   MOZ_ASSERT(!HasIncomingCrossCompartmentPointers(rt));
5395 
5396   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_MARK);
5397 
5398   if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
5399     return NotFinished;
5400   }
5401 
5402   AutoSetMarkColor setColorGray(marker, MarkColor::Gray);
5403   marker.setMainStackColor(MarkColor::Gray);
5404 
5405   // Mark transitively inside the current compartment group.
5406   if (markWeakReferencesInCurrentGroup(budget) == NotFinished) {
5407     return NotFinished;
5408   }
5409 
5410   MOZ_ASSERT(marker.isDrained());
5411 
5412   // We must not yield after this point before we start sweeping the group.
5413   safeToYield = false;
5414 
5415   MaybeCheckWeakMapMarking(this);
5416 
5417   return Finished;
5418 }
5419 
5420 // Causes the given WeakCache to be swept when run.
5421 class ImmediateSweepWeakCacheTask : public GCParallelTask {
5422   Zone* zone;
5423   JS::detail::WeakCacheBase& cache;
5424 
5425   ImmediateSweepWeakCacheTask(const ImmediateSweepWeakCacheTask&) = delete;
5426 
5427  public:
5428   ImmediateSweepWeakCacheTask(GCRuntime* gc, Zone* zone,
5429                               JS::detail::WeakCacheBase& wc)
5430       : GCParallelTask(gc), zone(zone), cache(wc) {}
5431 
5432   ImmediateSweepWeakCacheTask(ImmediateSweepWeakCacheTask&& other)
5433       : GCParallelTask(std::move(other)),
5434         zone(other.zone),
5435         cache(other.cache) {}
5436 
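  // Helper-thread entry point: release the helper thread lock, mark this
  // thread as sweeping the zone, then sweep the weak cache.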
5437   void run(AutoLockHelperThreadState& lock) override {
5438     AutoUnlockHelperThreadState unlock(lock);
5439     AutoSetThreadIsSweeping threadIsSweeping(zone);
5440     cache.sweep(&gc->storeBuffer());
5441   }
5442 };
5443 
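// Rebuild the atom mark bitmaps for collected zones from the chunk mark bits,
// mark atoms still used by uncollected zones, and sweep the small
// atom-related tables non-incrementally.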
5444 void GCRuntime::updateAtomsBitmap() {
5445   DenseBitmap marked;
5446   if (atomMarking.computeBitmapFromChunkMarkBits(rt, marked)) {
5447     for (GCZonesIter zone(this); !zone.done(); zone.next()) {
5448       atomMarking.refineZoneBitmapForCollectedZone(zone, marked);
5449     }
5450   } else {
5451     // Ignore OOM in computeBitmapFromChunkMarkBits. The
5452     // refineZoneBitmapForCollectedZone call can only remove atoms from the
5453     // zone bitmap, so it is conservative to just not call it.
5454   }
5455 
5456   atomMarking.markAtomsUsedByUncollectedZones(rt);
5457 
5458   // For convenience sweep these tables non-incrementally as part of bitmap
5459   // sweeping; they are likely to be much smaller than the main atoms table.
5460   rt->symbolRegistry().sweep();
5461   SweepingTracer trc(rt);
5462   for (RealmsIter realm(this); !realm.done(); realm.next()) {
5463     realm->tracekWeakVarNames(&trc);
5464   }
5465 }
5466 
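// Sweep dead cross-compartment wrappers for each zone in the current sweep
// group.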
5467 void GCRuntime::sweepCCWrappers() {
5468   AutoSetThreadIsSweeping threadIsSweeping;  // This can touch all zones.
5469   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5470     zone->sweepAllCrossCompartmentWrappers();
5471   }
5472 }
5473 
5474 void GCRuntime::sweepMisc() {
5475   SweepingTracer trc(rt);
5476   for (SweepGroupRealmsIter r(this); !r.done(); r.next()) {
5477     AutoSetThreadIsSweeping threadIsSweeping(r->zone());
5478     r->traceWeakObjects(&trc);
5479     r->traceWeakTemplateObjects(&trc);
5480     r->traceWeakSavedStacks(&trc);
5481     r->traceWeakSelfHostingScriptSource(&trc);
5482     r->traceWeakObjectRealm(&trc);
5483     r->traceWeakRegExps(&trc);
5484   }
5485 }
5486 
5487 void GCRuntime::sweepCompressionTasks() {
5488   JSRuntime* runtime = rt;
5489 
5490   // Attach finished compression tasks.
5491   AutoLockHelperThreadState lock;
5492   AttachFinishedCompressions(runtime, lock);
5493   SweepPendingCompressions(lock);
5494 }
5495 
5496 void GCRuntime::sweepWeakMaps() {
5497   AutoSetThreadIsSweeping threadIsSweeping;  // This may touch any zone.
5498   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5499     /* No need to look up any more weakmap keys from this sweep group. */
5500     AutoEnterOOMUnsafeRegion oomUnsafe;
5501     if (!zone->gcEphemeronEdges().clear()) {
5502       oomUnsafe.crash("clearing weak keys in beginSweepingSweepGroup()");
5503     }
5504 
5505     // Lock the storebuffer since this may access it when rehashing or resizing
5506     // the tables.
5507     AutoLockStoreBuffer lock(&storeBuffer());
5508     zone->sweepWeakMaps();
5509   }
5510 }
5511 
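// Sweep the per-zone tables that associate cells with their unique IDs.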
5512 void GCRuntime::sweepUniqueIds() {
5513   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5514     AutoSetThreadIsSweeping threadIsSweeping(zone);
5515     zone->sweepUniqueIds();
5516   }
5517 }
5518 
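// Sweep each zone's WeakRef map so that entries for dead targets are removed.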
5519 void GCRuntime::sweepWeakRefs() {
5520   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5521     AutoSetThreadIsSweeping threadIsSweeping(zone);
5522     zone->weakRefMap().sweep(&storeBuffer());
5523   }
5524 }
5525 
5526 void GCRuntime::sweepFinalizationRegistriesOnMainThread() {
5527   // This calls back into the browser which expects to be called from the main
5528   // thread.
5529   gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
5530   gcstats::AutoPhase ap2(stats(),
5531                          gcstats::PhaseKind::SWEEP_FINALIZATION_REGISTRIES);
5532   AutoLockStoreBuffer lock(&storeBuffer());
5533   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5534     sweepFinalizationRegistries(zone);
5535   }
5536 }
5537 
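// Start a parallel task on a helper thread, falling back to running it
// synchronously on the main thread when extra threads are unavailable.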
5538 void GCRuntime::startTask(GCParallelTask& task, gcstats::PhaseKind phase,
5539                           AutoLockHelperThreadState& lock) {
5540   if (!CanUseExtraThreads()) {
5541     AutoUnlockHelperThreadState unlock(lock);
5542     task.runFromMainThread();
5543     stats().recordParallelPhase(phase, task.duration());
5544     return;
5545   }
5546 
5547   task.startWithLockHeld(lock);
5548 }
5549 
5550 void GCRuntime::joinTask(GCParallelTask& task, gcstats::PhaseKind phase,
5551                          AutoLockHelperThreadState& lock) {
5552   // This is similar to GCParallelTask::joinWithLockHeld but handles recording
5553   // execution and wait time.
5554 
5555   if (task.isIdle(lock)) {
5556     return;
5557   }
5558 
5559   if (task.isDispatched(lock)) {
5560     // If the task was dispatched but has not yet started then cancel the task
5561     // and run it from the main thread. This stops us from blocking here when
5562     // the helper threads are busy with other tasks.
5563     task.cancelDispatchedTask(lock);
5564     AutoUnlockHelperThreadState unlock(lock);
5565     task.runFromMainThread();
5566   } else {
5567     // Otherwise wait for the task to complete.
5568     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::JOIN_PARALLEL_TASKS);
5569     task.joinRunningOrFinishedTask(lock);
5570   }
5571 
5572   stats().recordParallelPhase(phase, task.duration());
5573 }
5574 
5575 void GCRuntime::joinTask(GCParallelTask& task, gcstats::PhaseKind phase) {
5576   AutoLockHelperThreadState lock;
5577   joinTask(task, phase, lock);
5578 }
5579 
5580 void GCRuntime::sweepDebuggerOnMainThread(JSFreeOp* fop) {
5581   AutoLockStoreBuffer lock(&storeBuffer());
5582 
5583   // Detach unreachable debuggers and global objects from each other.
5584   // This can modify weakmaps and so must happen before weakmap sweeping.
5585   DebugAPI::sweepAll(fop);
5586 
5587   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
5588 
5589   // Sweep debug environment information. This performs lookups in the Zone's
5590   // unique IDs table and so must not happen in parallel with sweeping that
5591   // table.
5592   {
5593     gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_MISC);
5594     for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
5595       r->sweepDebugEnvironments();
5596     }
5597   }
5598 }
5599 
5600 void GCRuntime::sweepJitDataOnMainThread(JSFreeOp* fop) {
5601   SweepingTracer trc(rt);
5602   {
5603     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);
5604 
5605     if (initialState != State::NotActive) {
5606       // Cancel any active or pending off thread compilations. We also did
5607       // this before marking (in DiscardJITCodeForGC) so this is a no-op
5608       // for non-incremental GCs.
5609       js::CancelOffThreadIonCompile(rt, JS::Zone::Sweep);
5610     }
5611 
5612     // Bug 1071218: the following method has not yet been refactored to
5613     // work on a single zone-group at once.
5614 
5615     // Sweep entries containing about-to-be-finalized JitCode and
5616     // update relocated TypeSet::Types inside the JitcodeGlobalTable.
5617     jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
5618   }
5619 
5620   if (initialState != State::NotActive) {
5621     gcstats::AutoPhase apdc(stats(), gcstats::PhaseKind::SWEEP_DISCARD_CODE);
5622     for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5623       zone->discardJitCode(fop);
5624     }
5625   }
5626 
5627   // JitZone/JitRealm must be swept *after* discarding JIT code, because
5628   // Zone::discardJitCode might access CacheIRStubInfos deleted here.
5629   {
5630     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);
5631 
5632     for (SweepGroupRealmsIter r(rt); !r.done(); r.next()) {
5633       r->traceWeakEdgesInJitRealm(&trc);
5634     }
5635 
5636     for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5637       if (jit::JitZone* jitZone = zone->jitZone()) {
5638         jitZone->traceWeak(&trc);
5639       }
5640     }
5641   }
5642 }
5643 
5644 using WeakCacheTaskVector =
5645     mozilla::Vector<ImmediateSweepWeakCacheTask, 0, SystemAllocPolicy>;
5646 
5647 // Call a functor for all weak caches that need to be swept in the current
5648 // sweep group.
5649 template <typename Functor>
5650 static inline bool IterateWeakCaches(JSRuntime* rt, Functor f) {
5651   for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
5652     for (JS::detail::WeakCacheBase* cache : zone->weakCaches()) {
5653       if (!f(cache, zone.get())) {
5654         return false;
5655       }
5656     }
5657   }
5658 
5659   for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
5660     if (!f(cache, nullptr)) {
5661       return false;
5662     }
5663   }
5664 
5665   return true;
5666 }
5667 
5668 static bool PrepareWeakCacheTasks(JSRuntime* rt,
5669                                   WeakCacheTaskVector* immediateTasks) {
5670   // Start incremental sweeping for caches that support it or add to a vector
5671   // of sweep tasks to run on a helper thread.
5672 
5673   MOZ_ASSERT(immediateTasks->empty());
5674 
5675   bool ok =
5676       IterateWeakCaches(rt, [&](JS::detail::WeakCacheBase* cache, Zone* zone) {
5677         if (!cache->needsSweep()) {
5678           return true;
5679         }
5680 
5681         // Caches that support incremental sweeping will be swept later.
5682         if (zone && cache->setNeedsIncrementalBarrier(true)) {
5683           return true;
5684         }
5685 
5686         return immediateTasks->emplaceBack(&rt->gc, zone, *cache);
5687       });
5688 
5689   if (!ok) {
5690     immediateTasks->clearAndFree();
5691   }
5692 
5693   return ok;
5694 }
5695 
5696 static void SweepAllWeakCachesOnMainThread(JSRuntime* rt) {
5697   // If we ran out of memory, do all the work on the main thread.
5698   gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::SWEEP_WEAK_CACHES);
5699   IterateWeakCaches(rt, [&](JS::detail::WeakCacheBase* cache, Zone* zone) {
5700     if (cache->needsIncrementalBarrier()) {
5701       cache->setNeedsIncrementalBarrier(false);
5702     }
5703     cache->sweep(&rt->gc.storeBuffer());
5704     return true;
5705   });
5706 }
5707 
5708 IncrementalProgress GCRuntime::beginSweepingSweepGroup(JSFreeOp* fop,
5709                                                        SliceBudget& budget) {
5710   /*
5711    * Begin sweeping the group of zones in currentSweepGroup, performing
5712    * actions that must be done before yielding to caller.
5713    */
5714 
5715   using namespace gcstats;
5716 
5717   AutoSCC scc(stats(), sweepGroupIndex);
5718 
5719   bool sweepingAtoms = false;
5720   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5721     /* Set the GC state to sweeping. */
5722     zone->changeGCState(Zone::MarkBlackAndGray, Zone::Sweep);
5723 
5724     /* Purge the ArenaLists before sweeping. */
5725     zone->arenas.checkSweepStateNotInUse();
5726     zone->arenas.unmarkPreMarkedFreeCells();
5727     zone->arenas.clearFreeLists();
5728 
5729     if (zone->isAtomsZone()) {
5730       sweepingAtoms = true;
5731     }
5732   }
5733 
5734 #ifdef JS_GC_ZEAL
5735   validateIncrementalMarking();
5736 #endif
5737 
5738 #ifdef DEBUG
5739   for (auto cell : cellsToAssertNotGray.ref()) {
5740     JS::AssertCellIsNotGray(cell);
5741   }
5742   cellsToAssertNotGray.ref().clearAndFree();
5743 #endif
5744 
5745   {
5746     AutoLockStoreBuffer lock(&storeBuffer());
5747 
5748     AutoPhase ap(stats(), PhaseKind::FINALIZE_START);
5749     callFinalizeCallbacks(fop, JSFINALIZE_GROUP_PREPARE);
5750     {
5751       AutoPhase ap2(stats(), PhaseKind::WEAK_ZONES_CALLBACK);
5752       callWeakPointerZonesCallbacks();
5753     }
5754     {
5755       AutoPhase ap2(stats(), PhaseKind::WEAK_COMPARTMENT_CALLBACK);
5756       for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5757         for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
5758           callWeakPointerCompartmentCallbacks(comp);
5759         }
5760       }
5761     }
5762     callFinalizeCallbacks(fop, JSFINALIZE_GROUP_START);
5763   }
5764 
5765   // Update the atom marking bitmaps. This marks atoms referenced by
5766   // uncollected zones so cannot be done in parallel with the other sweeping
5767   // work below.
5768   if (sweepingAtoms) {
5769     AutoPhase ap(stats(), PhaseKind::UPDATE_ATOMS_BITMAP);
5770     updateAtomsBitmap();
5771   }
5772 
5773   AutoSetThreadIsSweeping threadIsSweeping;
5774 
5775   sweepDebuggerOnMainThread(fop);
5776 
5777   {
5778     AutoLockHelperThreadState lock;
5779 
5780     AutoPhase ap(stats(), PhaseKind::SWEEP_COMPARTMENTS);
5781 
5782     AutoRunParallelTask sweepCCWrappers(this, &GCRuntime::sweepCCWrappers,
5783                                         PhaseKind::SWEEP_CC_WRAPPER, lock);
5784     AutoRunParallelTask sweepMisc(this, &GCRuntime::sweepMisc,
5785                                   PhaseKind::SWEEP_MISC, lock);
5786     AutoRunParallelTask sweepCompTasks(this, &GCRuntime::sweepCompressionTasks,
5787                                        PhaseKind::SWEEP_COMPRESSION, lock);
5788     AutoRunParallelTask sweepWeakMaps(this, &GCRuntime::sweepWeakMaps,
5789                                       PhaseKind::SWEEP_WEAKMAPS, lock);
5790     AutoRunParallelTask sweepUniqueIds(this, &GCRuntime::sweepUniqueIds,
5791                                        PhaseKind::SWEEP_UNIQUEIDS, lock);
5792     AutoRunParallelTask sweepWeakRefs(this, &GCRuntime::sweepWeakRefs,
5793                                       PhaseKind::SWEEP_WEAKREFS, lock);
5794 
5795     WeakCacheTaskVector sweepCacheTasks;
5796     bool canSweepWeakCachesOffThread =
5797         PrepareWeakCacheTasks(rt, &sweepCacheTasks);
5798     if (canSweepWeakCachesOffThread) {
5799       weakCachesToSweep.ref().emplace(currentSweepGroup);
5800       for (auto& task : sweepCacheTasks) {
5801         startTask(task, PhaseKind::SWEEP_WEAK_CACHES, lock);
5802       }
5803     }
5804 
5805     {
5806       AutoUnlockHelperThreadState unlock(lock);
5807       sweepJitDataOnMainThread(fop);
5808 
5809       if (!canSweepWeakCachesOffThread) {
5810         MOZ_ASSERT(sweepCacheTasks.empty());
5811         SweepAllWeakCachesOnMainThread(rt);
5812       }
5813     }
5814 
5815     for (auto& task : sweepCacheTasks) {
5816       joinTask(task, PhaseKind::SWEEP_WEAK_CACHES, lock);
5817     }
5818   }
5819 
5820   if (sweepingAtoms) {
5821     startSweepingAtomsTable();
5822   }
5823 
5824   // FinalizationRegistry sweeping touches weak maps and so must not run in
5825   // parallel with that. This triggers a read barrier and can add marking work
5826   // for zones that are still marking.
5827   sweepFinalizationRegistriesOnMainThread();
5828 
5829   // Queue all GC things in all zones for sweeping, either on the foreground
5830   // or on the background thread.
5831 
5832   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5833     zone->arenas.queueForForegroundSweep(fop, ForegroundObjectFinalizePhase);
5834     zone->arenas.queueForForegroundSweep(fop, ForegroundNonObjectFinalizePhase);
5835     for (const auto& phase : BackgroundFinalizePhases) {
5836       zone->arenas.queueForBackgroundSweep(fop, phase);
5837     }
5838 
5839     zone->arenas.queueForegroundThingsForSweep();
5840   }
5841 
5842   MOZ_ASSERT(!sweepZone);
5843 
5844   safeToYield = true;
5845   markOnBackgroundThreadDuringSweeping = CanUseExtraThreads();
5846 
5847   return Finished;
5848 }
5849 
5850 #ifdef JS_GC_ZEAL
5851 bool GCRuntime::shouldYieldForZeal(ZealMode mode) {
5852   bool yield = useZeal && hasZealMode(mode);
5853 
5854   // Only yield on the first sweep slice for this mode.
5855   bool firstSweepSlice = initialState != State::Sweep;
5856   if (mode == ZealMode::IncrementalMultipleSlices && !firstSweepSlice) {
5857     yield = false;
5858   }
5859 
5860   return yield;
5861 }
5862 #endif
5863 
5864 IncrementalProgress GCRuntime::endSweepingSweepGroup(JSFreeOp* fop,
5865                                                      SliceBudget& budget) {
5866   // This is to prevent a race between markTask checking the zone state and
5867   // us changing it below.
5868   if (joinBackgroundMarkTask() == NotFinished) {
5869     return NotFinished;
5870   }
5871 
5872   MOZ_ASSERT(marker.isDrained());
5873 
5874   // Disable background marking during sweeping until we start sweeping the next
5875   // zone group.
5876   markOnBackgroundThreadDuringSweeping = false;
5877 
5878   {
5879     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
5880     AutoLockStoreBuffer lock(&storeBuffer());
5881     JSFreeOp fop(rt);
5882     callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_END);
5883   }
5884 
5885   /* Free LIFO blocks on a background thread if possible. */
5886   startBackgroundFree();
5887 
5888   /* Update the GC state for zones we have swept. */
5889   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5890     if (jit::JitZone* jitZone = zone->jitZone()) {
5891       // Clear out any small pools that we're hanging on to.
5892       jitZone->execAlloc().purge();
5893     }
5894     AutoLockGC lock(this);
5895     zone->changeGCState(Zone::Sweep, Zone::Finished);
5896     zone->arenas.unmarkPreMarkedFreeCells();
5897     zone->arenas.checkNoArenasToUpdate();
5898     zone->pretenuring.clearCellCountsInNewlyCreatedArenas();
5899   }
5900 
5901   /*
5902    * Start background thread to sweep zones if required, sweeping the atoms
5903    * zone last if present.
5904    */
5905   bool sweepAtomsZone = false;
5906   ZoneList zones;
5907   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
5908     if (zone->isAtomsZone()) {
5909       sweepAtomsZone = true;
5910     } else {
5911       zones.append(zone);
5912     }
5913   }
5914   if (sweepAtomsZone) {
5915     zones.append(atomsZone);
5916   }
5917 
5918   queueZonesAndStartBackgroundSweep(zones);
5919 
5920   return Finished;
5921 }
5922 
5923 IncrementalProgress GCRuntime::markDuringSweeping(JSFreeOp* fop,
5924                                                   SliceBudget& budget) {
5925   MOZ_ASSERT(markTask.isIdle());
5926 
5927   if (marker.isDrained()) {
5928     return Finished;
5929   }
5930 
5931   if (markOnBackgroundThreadDuringSweeping) {
5932     AutoLockHelperThreadState lock;
5933     MOZ_ASSERT(markTask.isIdle(lock));
5934     markTask.setBudget(budget);
5935     markTask.startOrRunIfIdle(lock);
5936     return Finished;  // This means don't yield to the mutator here.
5937   }
5938 
5939   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_MARK);
5940   return markUntilBudgetExhausted(budget);
5941 }
5942 
5943 void GCRuntime::beginSweepPhase(JS::GCReason reason, AutoGCSession& session) {
5944   /*
5945    * Sweep phase.
5946    *
5947    * Finalize as we sweep, outside of lock but with RuntimeHeapIsBusy()
5948    * true so that any attempt to allocate a GC-thing from a finalizer will
5949    * fail, rather than nest badly and leave the unmarked newborn to be swept.
5950    */
5951 
5952   MOZ_ASSERT(!abortSweepAfterCurrentGroup);
5953   MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
5954 
5955 #ifdef DEBUG
5956   releaseHeldRelocatedArenas();
5957   verifyAllChunks();
5958 #endif
5959 
5960 #ifdef JS_GC_ZEAL
5961   computeNonIncrementalMarkingForValidation(session);
5962 #endif
5963 
5964   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
5965 
5966   hasMarkedGrayRoots = false;
5967 
5968   AssertNoWrappersInGrayList(rt);
5969   dropStringWrappers();
5970 
5971   groupZonesForSweeping(reason);
5972 
5973   sweepActions->assertFinished();
5974 }
5975 
5976 bool ArenaLists::foregroundFinalize(JSFreeOp* fop, AllocKind thingKind,
5977                                     SliceBudget& sliceBudget,
5978                                     SortedArenaList& sweepList) {
5979   checkNoArenasToUpdateForKind(thingKind);
5980 
5981   // Arenas are released for use for new allocations as soon as the finalizers
5982   // for that allocation kind have run. This means that a cell's finalizer can
5983   // safely use IsAboutToBeFinalized to check other cells of the same alloc
5984   // kind, but not of different alloc kinds: the other arena may have already
5985   // had new objects allocated in it, and since we allocate black,
5986   // IsAboutToBeFinalized will return false even though the referent we intended
5987   // to check is long gone.
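  // (Editorial illustration, not from the original sources: suppose a Foo
  // finalizer calls IsAboutToBeFinalized on a Bar it references. If Bar's
  // alloc kind was finalized earlier in this sweep and its arena has already
  // been reused, the queried address may now hold a freshly allocated, black
  // cell, so the query would answer "live" even though the original Bar is
  // gone. Queries about other Foos are safe because Foo arenas have not been
  // released for reuse yet.)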
5988   if (!FinalizeArenas(fop, &arenasToSweep(thingKind), sweepList, thingKind,
5989                       sliceBudget)) {
5990     // Copy the current contents of sweepList so that ArenaIter can find them.
5991     incrementalSweptArenaKind = thingKind;
5992     incrementalSweptArenas.ref().clear();
5993     incrementalSweptArenas = sweepList.toArenaList();
5994     return false;
5995   }
5996 
5997   // Clear the list of swept arenas now that these are moving back to the main
5998   // arena lists.
5999   incrementalSweptArenaKind = AllocKind::LIMIT;
6000   incrementalSweptArenas.ref().clear();
6001 
6002   sweepList.extractEmpty(&savedEmptyArenas.ref());
6003 
6004   ArenaList& al = arenaList(thingKind);
6005   ArenaList allocatedDuringSweep = std::move(al);
6006   al = sweepList.toArenaList();
6007   al.insertListWithCursorAtEnd(newArenasInMarkPhase(thingKind));
6008   al.insertListWithCursorAtEnd(allocatedDuringSweep);
6009 
6010   newArenasInMarkPhase(thingKind).clear();
6011 
6012   return true;
6013 }
6014 
6015 void js::gc::BackgroundMarkTask::run(AutoLockHelperThreadState& lock) {
6016   AutoUnlockHelperThreadState unlock(lock);
6017 
6018   // Time reporting is handled separately for parallel tasks.
6019   gc->sweepMarkResult =
6020       gc->markUntilBudgetExhausted(this->budget, GCMarker::DontReportMarkTime);
6021 }
6022 
6023 IncrementalProgress GCRuntime::joinBackgroundMarkTask() {
6024   AutoLockHelperThreadState lock;
6025   if (markTask.isIdle(lock)) {
6026     return Finished;
6027   }
6028 
6029   joinTask(markTask, gcstats::PhaseKind::SWEEP_MARK, lock);
6030 
6031   IncrementalProgress result = sweepMarkResult;
6032   sweepMarkResult = Finished;
6033   return result;
6034 }
6035 
6036 IncrementalProgress GCRuntime::markUntilBudgetExhausted(
6037     SliceBudget& sliceBudget, GCMarker::ShouldReportMarkTime reportTime) {
6038   // Run a marking slice and return whether the stack is now empty.
6039 
6040   AutoMajorGCProfilerEntry s(this);
6041 
6042 #ifdef DEBUG
6043   AutoSetThreadIsMarking threadIsMarking;
6044 #endif  // DEBUG
6045 
6046   if (marker.processMarkQueue() == GCMarker::QueueYielded) {
6047     return NotFinished;
6048   }
6049 
6050   return marker.markUntilBudgetExhausted(sliceBudget, reportTime) ? Finished
6051                                                                   : NotFinished;
6052 }
6053 
6054 void GCRuntime::drainMarkStack() {
6055   auto unlimited = SliceBudget::unlimited();
6056   MOZ_RELEASE_ASSERT(marker.markUntilBudgetExhausted(unlimited));
6057 }
6058 
6059 template <typename T>
6060 static void SweepThing(JSFreeOp* fop, T* thing) {
6061   if (!thing->isMarkedAny()) {
6062     thing->sweep(fop);
6063   }
6064 }
6065 
6066 template <typename T>
6067 static bool SweepArenaList(JSFreeOp* fop, Arena** arenasToSweep,
6068                            SliceBudget& sliceBudget) {
6069   while (Arena* arena = *arenasToSweep) {
6070     MOZ_ASSERT(arena->zone->isGCSweeping());
6071 
6072     for (ArenaCellIterUnderGC cell(arena); !cell.done(); cell.next()) {
6073       SweepThing(fop, cell.as<T>());
6074     }
6075 
6076     Arena* next = arena->next;
6077     MOZ_ASSERT_IF(next, next->zone == arena->zone);
6078     *arenasToSweep = next;
6079 
6080     AllocKind kind = MapTypeToFinalizeKind<T>::kind;
6081     sliceBudget.step(Arena::thingsPerArena(kind));
6082     if (sliceBudget.isOverBudget()) {
6083       return false;
6084     }
6085   }
6086 
6087   return true;
6088 }
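// Editorial sketch: SweepArenaList above follows the budget-driven pattern
// used for most incremental work in this file. Do a small unit of work,
// charge it to the slice budget with step(), and bail out with "not finished"
// as soon as isOverBudget() reports the budget is spent, so the caller can
// yield to the mutator and resume later. A self-contained toy version, with a
// plain counter standing in for SliceBudget (hypothetical names throughout):
//
//   #include <vector>
//
//   struct ToyBudget {
//     long remaining;                       // Work units left in this slice.
//     void step(long units) { remaining -= units; }
//     bool isOverBudget() const { return remaining <= 0; }
//   };
//
//   // Returns true when the queue is drained, false if the budget ran out.
//   static bool DrainQueue(std::vector<int>& queue, ToyBudget& budget) {
//     while (!queue.empty()) {
//       queue.pop_back();                   // One unit of work.
//       budget.step(1);
//       if (budget.isOverBudget()) {
//         return false;                     // Yield; resume on the next call.
//       }
//     }
//     return true;
//   }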
6089 
6090 void GCRuntime::startSweepingAtomsTable() {
6091   auto& maybeAtoms = maybeAtomsToSweep.ref();
6092   MOZ_ASSERT(maybeAtoms.isNothing());
6093 
6094   AtomsTable* atomsTable = rt->atomsForSweeping();
6095   if (!atomsTable) {
6096     return;
6097   }
6098 
6099   // Create secondary tables to hold new atoms added while we're sweeping the
6100   // main tables incrementally.
6101   if (!atomsTable->startIncrementalSweep()) {
6102     SweepingTracer trc(rt);
6103     atomsTable->traceWeak(&trc);
6104     return;
6105   }
6106 
6107   // Initialize remaining atoms to sweep.
6108   maybeAtoms.emplace(*atomsTable);
6109 }
6110 
6111 IncrementalProgress GCRuntime::sweepAtomsTable(JSFreeOp* fop,
6112                                                SliceBudget& budget) {
6113   if (!atomsZone->isGCSweeping()) {
6114     return Finished;
6115   }
6116 
6117   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_ATOMS_TABLE);
6118 
6119   auto& maybeAtoms = maybeAtomsToSweep.ref();
6120   if (!maybeAtoms) {
6121     return Finished;
6122   }
6123 
6124   if (!rt->atomsForSweeping()->sweepIncrementally(maybeAtoms.ref(), budget)) {
6125     return NotFinished;
6126   }
6127 
6128   maybeAtoms.reset();
6129 
6130   return Finished;
6131 }
6132 
6133 static size_t IncrementalSweepWeakCache(GCRuntime* gc,
6134                                         const WeakCacheToSweep& item) {
6135   AutoSetThreadIsSweeping threadIsSweeping(item.zone);
6136 
6137   JS::detail::WeakCacheBase* cache = item.cache;
6138   MOZ_ASSERT(cache->needsIncrementalBarrier());
6139   size_t steps = cache->sweep(&gc->storeBuffer());
6140   cache->setNeedsIncrementalBarrier(false);
6141 
6142   return steps;
6143 }
6144 
6145 WeakCacheSweepIterator::WeakCacheSweepIterator(JS::Zone* sweepGroup)
6146     : sweepZone(sweepGroup), sweepCache(sweepZone->weakCaches().getFirst()) {
6147   settle();
6148 }
6149 
6150 bool WeakCacheSweepIterator::done() const { return !sweepZone; }
6151 
6152 WeakCacheToSweep WeakCacheSweepIterator::get() const {
6153   MOZ_ASSERT(!done());
6154 
6155   return {sweepCache, sweepZone};
6156 }
6157 
6158 void WeakCacheSweepIterator::next() {
6159   MOZ_ASSERT(!done());
6160 
6161   sweepCache = sweepCache->getNext();
6162   settle();
6163 }
6164 
6165 void WeakCacheSweepIterator::settle() {
6166   while (sweepZone) {
6167     while (sweepCache && !sweepCache->needsIncrementalBarrier()) {
6168       sweepCache = sweepCache->getNext();
6169     }
6170 
6171     if (sweepCache) {
6172       break;
6173     }
6174 
6175     sweepZone = sweepZone->nextNodeInGroup();
6176     if (sweepZone) {
6177       sweepCache = sweepZone->weakCaches().getFirst();
6178     }
6179   }
6180 
6181   MOZ_ASSERT((!sweepZone && !sweepCache) ||
6182              (sweepCache && sweepCache->needsIncrementalBarrier()));
6183 }
6184 
6185 IncrementalProgress GCRuntime::sweepWeakCaches(JSFreeOp* fop,
6186                                                SliceBudget& budget) {
6187   if (weakCachesToSweep.ref().isNothing()) {
6188     return Finished;
6189   }
6190 
6191   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
6192 
6193   WeakCacheSweepIterator& work = weakCachesToSweep.ref().ref();
6194 
6195   AutoLockHelperThreadState lock;
6196 
6197   {
6198     AutoRunParallelWork runWork(this, IncrementalSweepWeakCache,
6199                                 gcstats::PhaseKind::SWEEP_WEAK_CACHES, work,
6200                                 budget, lock);
6201     AutoUnlockHelperThreadState unlock(lock);
6202   }
6203 
6204   if (work.done()) {
6205     weakCachesToSweep.ref().reset();
6206     return Finished;
6207   }
6208 
6209   return NotFinished;
6210 }
6211 
6212 IncrementalProgress GCRuntime::finalizeAllocKind(JSFreeOp* fop,
6213                                                  SliceBudget& budget) {
6214   MOZ_ASSERT(sweepZone->isGCSweeping());
6215 
6216   // Set the number of things per arena for this AllocKind.
6217   size_t thingsPerArena = Arena::thingsPerArena(sweepAllocKind);
6218   auto& sweepList = incrementalSweepList.ref();
6219   sweepList.setThingsPerArena(thingsPerArena);
6220 
6221   AutoSetThreadIsSweeping threadIsSweeping(sweepZone);
6222 
6223   if (!sweepZone->arenas.foregroundFinalize(fop, sweepAllocKind, budget,
6224                                             sweepList)) {
6225     return NotFinished;
6226   }
6227 
6228   // Reset the slots of the sweep list that we used.
6229   sweepList.reset(thingsPerArena);
6230 
6231   return Finished;
6232 }
6233 
6234 IncrementalProgress GCRuntime::sweepPropMapTree(JSFreeOp* fop,
6235                                                 SliceBudget& budget) {
6236   // Remove dead SharedPropMaps from the tree, but don't finalize them yet.
6237 
6238   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_PROP_MAP);
6239 
6240   ArenaLists& al = sweepZone->arenas;
6241 
6242   if (!SweepArenaList<CompactPropMap>(
6243           fop, &al.gcCompactPropMapArenasToUpdate.ref(), budget)) {
6244     return NotFinished;
6245   }
6246   if (!SweepArenaList<NormalPropMap>(
6247           fop, &al.gcNormalPropMapArenasToUpdate.ref(), budget)) {
6248     return NotFinished;
6249   }
6250 
6251   return Finished;
6252 }
6253 
6254 // An iterator over a standard container that has an STL-like begin()/end()
6255 // interface, exposing its elements through a done()/get()/next() style interface.
6256 template <typename Container>
6257 class ContainerIter {
6258   using Iter = decltype(std::declval<const Container>().begin());
6259   using Elem = decltype(*std::declval<Iter>());
6260 
6261   Iter iter;
6262   const Iter end;
6263 
6264  public:
6265   explicit ContainerIter(const Container& container)
6266       : iter(container.begin()), end(container.end()) {}
6267 
6268   bool done() const { return iter == end; }
6269 
6270   Elem get() const { return *iter; }
6271 
6272   void next() {
6273     MOZ_ASSERT(!done());
6274     ++iter;
6275   }
6276 };
6277 
6278 // IncrementalIter is a template class that makes a normal iterator into one
6279 // that can be used to perform incremental work by using external state that
6280 // persists between instantiations. The state is only initialised on the first
6281 // use and subsequent uses carry on from the previous state.
6282 template <typename Iter>
6283 struct IncrementalIter {
6284   using State = Maybe<Iter>;
6285   using Elem = decltype(std::declval<Iter>().get());
6286 
6287  private:
6288   State& maybeIter;
6289 
6290  public:
6291   template <typename... Args>
6292   explicit IncrementalIter(State& maybeIter, Args&&... args)
6293       : maybeIter(maybeIter) {
6294     if (maybeIter.isNothing()) {
6295       maybeIter.emplace(std::forward<Args>(args)...);
6296     }
6297   }
6298 
6299   ~IncrementalIter() {
6300     if (done()) {
6301       maybeIter.reset();
6302     }
6303   }
6304 
6305   bool done() const { return maybeIter.ref().done(); }
6306 
6307   Elem get() const { return maybeIter.ref().get(); }
6308 
6309   void next() { maybeIter.ref().next(); }
6310 };
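// Editorial sketch of how the two iterators above are meant to be combined
// (hypothetical names, not code from this file): the caller keeps a
// persistent IncrementalIter<...>::State, and each call re-wraps it so the
// loop resumes where the previous call stopped. The same, unmodified
// container must be passed on every call, and DoSomethingWith() stands in for
// whatever per-element work is required.
//
//   #include <vector>
//
//   using ToyIter = ContainerIter<std::vector<int>>;
//   static IncrementalIter<ToyIter>::State toyIterState;
//
//   // Visits at most maxSteps elements per call; returns true once every
//   // element has been visited. On false, toyIterState keeps the position
//   // and the next call retries the element it stopped on.
//   static bool VisitSomeElements(const std::vector<int>& elems, int maxSteps) {
//     int steps = 0;
//     for (IncrementalIter<ToyIter> iter(toyIterState, elems); !iter.done();
//          iter.next()) {
//       DoSomethingWith(iter.get());
//       if (++steps == maxSteps) {
//         return false;  // Out of budget; the state is retained.
//       }
//     }
//     return true;  // Finished; the destructor resets toyIterState.
//   }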
6311 
6312 // Iterate through the sweep groups created by
6313 // GCRuntime::groupZonesForSweeping().
6314 class js::gc::SweepGroupsIter {
6315   GCRuntime* gc;
6316 
6317  public:
6318   explicit SweepGroupsIter(JSRuntime* rt) : gc(&rt->gc) {
6319     MOZ_ASSERT(gc->currentSweepGroup);
6320   }
6321 
6322   bool done() const { return !gc->currentSweepGroup; }
6323 
6324   Zone* get() const { return gc->currentSweepGroup; }
6325 
6326   void next() {
6327     MOZ_ASSERT(!done());
6328     gc->getNextSweepGroup();
6329   }
6330 };
6331 
6332 namespace sweepaction {
6333 
6334 // Implementation of the SweepAction interface that calls a method on GCRuntime.
6335 class SweepActionCall final : public SweepAction {
6336   using Method = IncrementalProgress (GCRuntime::*)(JSFreeOp* fop,
6337                                                     SliceBudget& budget);
6338 
6339   Method method;
6340 
6341  public:
6342   explicit SweepActionCall(Method m) : method(m) {}
6343   IncrementalProgress run(Args& args) override {
6344     return (args.gc->*method)(args.fop, args.budget);
6345   }
6346   void assertFinished() const override {}
6347 };
6348 
6349 // Implementation of the SweepAction interface that yields in a specified zeal
6350 // mode.
6351 class SweepActionMaybeYield final : public SweepAction {
6352 #ifdef JS_GC_ZEAL
6353   ZealMode mode;
6354   bool isYielding;
6355 #endif
6356 
6357  public:
6358   explicit SweepActionMaybeYield(ZealMode mode)
6359 #ifdef JS_GC_ZEAL
6360       : mode(mode),
6361         isYielding(false)
6362 #endif
6363   {
6364   }
6365 
6366   IncrementalProgress run(Args& args) override {
6367 #ifdef JS_GC_ZEAL
6368     if (!isYielding && args.gc->shouldYieldForZeal(mode)) {
6369       isYielding = true;
6370       return NotFinished;
6371     }
6372 
6373     isYielding = false;
6374 #endif
6375     return Finished;
6376   }
6377 
6378   void assertFinished() const override { MOZ_ASSERT(!isYielding); }
6379 
6380   // These actions should be skipped if GC zeal is not configured.
6381 #ifndef JS_GC_ZEAL
6382   bool shouldSkip() override { return true; }
6383 #endif
6384 };
6385 
6386 // Implementation of the SweepAction interface that calls a list of actions in
6387 // sequence.
6388 class SweepActionSequence final : public SweepAction {
6389   using ActionVector = Vector<UniquePtr<SweepAction>, 0, SystemAllocPolicy>;
6390   using Iter = IncrementalIter<ContainerIter<ActionVector>>;
6391 
6392   ActionVector actions;
6393   typename Iter::State iterState;
6394 
6395  public:
6396   bool init(UniquePtr<SweepAction>* acts, size_t count) {
6397     for (size_t i = 0; i < count; i++) {
6398       auto& action = acts[i];
6399       if (!action) {
6400         return false;
6401       }
6402       if (action->shouldSkip()) {
6403         continue;
6404       }
6405       if (!actions.emplaceBack(std::move(action))) {
6406         return false;
6407       }
6408     }
6409     return true;
6410   }
6411 
6412   IncrementalProgress run(Args& args) override {
6413     for (Iter iter(iterState, actions); !iter.done(); iter.next()) {
6414       if (iter.get()->run(args) == NotFinished) {
6415         return NotFinished;
6416       }
6417     }
6418     return Finished;
6419   }
6420 
6421   void assertFinished() const override {
6422     MOZ_ASSERT(iterState.isNothing());
6423     for (const auto& action : actions) {
6424       action->assertFinished();
6425     }
6426   }
6427 };
6428 
6429 template <typename Iter, typename Init>
6430 class SweepActionForEach final : public SweepAction {
6431   using Elem = decltype(std::declval<Iter>().get());
6432   using IncrIter = IncrementalIter<Iter>;
6433 
6434   Init iterInit;
6435   Elem* elemOut;
6436   UniquePtr<SweepAction> action;
6437   typename IncrIter::State iterState;
6438 
6439  public:
6440   SweepActionForEach(const Init& init, Elem* maybeElemOut,
6441                      UniquePtr<SweepAction> action)
6442       : iterInit(init), elemOut(maybeElemOut), action(std::move(action)) {}
6443 
6444   IncrementalProgress run(Args& args) override {
6445     MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
6446     auto clearElem = mozilla::MakeScopeExit([&] { setElem(Elem()); });
6447     for (IncrIter iter(iterState, iterInit); !iter.done(); iter.next()) {
6448       setElem(iter.get());
6449       if (action->run(args) == NotFinished) {
6450         return NotFinished;
6451       }
6452     }
6453     return Finished;
6454   }
6455 
6456   void assertFinished() const override {
6457     MOZ_ASSERT(iterState.isNothing());
6458     MOZ_ASSERT_IF(elemOut, *elemOut == Elem());
6459     action->assertFinished();
6460   }
6461 
6462  private:
6463   void setElem(const Elem& value) {
6464     if (elemOut) {
6465       *elemOut = value;
6466     }
6467   }
6468 };
6469 
6470 static UniquePtr<SweepAction> Call(IncrementalProgress (GCRuntime::*method)(
6471     JSFreeOp* fop, SliceBudget& budget)) {
6472   return MakeUnique<SweepActionCall>(method);
6473 }
6474 
6475 static UniquePtr<SweepAction> MaybeYield(ZealMode zealMode) {
6476   return MakeUnique<SweepActionMaybeYield>(zealMode);
6477 }
6478 
6479 template <typename... Rest>
6480 static UniquePtr<SweepAction> Sequence(UniquePtr<SweepAction> first,
6481                                        Rest... rest) {
6482   UniquePtr<SweepAction> actions[] = {std::move(first), std::move(rest)...};
6483   auto seq = MakeUnique<SweepActionSequence>();
6484   if (!seq || !seq->init(actions, std::size(actions))) {
6485     return nullptr;
6486   }
6487 
6488   return UniquePtr<SweepAction>(std::move(seq));
6489 }
6490 
6491 static UniquePtr<SweepAction> RepeatForSweepGroup(
6492     JSRuntime* rt, UniquePtr<SweepAction> action) {
6493   if (!action) {
6494     return nullptr;
6495   }
6496 
6497   using Action = SweepActionForEach<SweepGroupsIter, JSRuntime*>;
6498   return js::MakeUnique<Action>(rt, nullptr, std::move(action));
6499 }
6500 
6501 static UniquePtr<SweepAction> ForEachZoneInSweepGroup(
6502     JSRuntime* rt, Zone** zoneOut, UniquePtr<SweepAction> action) {
6503   if (!action) {
6504     return nullptr;
6505   }
6506 
6507   using Action = SweepActionForEach<SweepGroupZonesIter, JSRuntime*>;
6508   return js::MakeUnique<Action>(rt, zoneOut, std::move(action));
6509 }
6510 
6511 static UniquePtr<SweepAction> ForEachAllocKind(AllocKinds kinds,
6512                                                AllocKind* kindOut,
6513                                                UniquePtr<SweepAction> action) {
6514   if (!action) {
6515     return nullptr;
6516   }
6517 
6518   using Action = SweepActionForEach<ContainerIter<AllocKinds>, AllocKinds>;
6519   return js::MakeUnique<Action>(kinds, kindOut, std::move(action));
6520 }
6521 
6522 }  // namespace sweepaction
6523 
6524 bool GCRuntime::initSweepActions() {
6525   using namespace sweepaction;
6526   using sweepaction::Call;
6527 
6528   sweepActions.ref() = RepeatForSweepGroup(
6529       rt,
6530       Sequence(
6531           Call(&GCRuntime::markGrayReferencesInCurrentGroup),
6532           Call(&GCRuntime::endMarkingSweepGroup),
6533           Call(&GCRuntime::beginSweepingSweepGroup),
6534           MaybeYield(ZealMode::IncrementalMultipleSlices),
6535           MaybeYield(ZealMode::YieldBeforeSweepingAtoms),
6536           Call(&GCRuntime::sweepAtomsTable),
6537           MaybeYield(ZealMode::YieldBeforeSweepingCaches),
6538           Call(&GCRuntime::sweepWeakCaches),
6539           ForEachZoneInSweepGroup(
6540               rt, &sweepZone.ref(),
6541               Sequence(MaybeYield(ZealMode::YieldBeforeSweepingObjects),
6542                        ForEachAllocKind(ForegroundObjectFinalizePhase.kinds,
6543                                         &sweepAllocKind.ref(),
6544                                         Call(&GCRuntime::finalizeAllocKind)),
6545                        MaybeYield(ZealMode::YieldBeforeSweepingNonObjects),
6546                        ForEachAllocKind(ForegroundNonObjectFinalizePhase.kinds,
6547                                         &sweepAllocKind.ref(),
6548                                         Call(&GCRuntime::finalizeAllocKind)),
6549                        MaybeYield(ZealMode::YieldBeforeSweepingPropMapTrees),
6550                        Call(&GCRuntime::sweepPropMapTree))),
6551           Call(&GCRuntime::endSweepingSweepGroup)));
6552 
6553   return sweepActions != nullptr;
6554 }
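// Editorial paraphrase: the action tree built above corresponds roughly to the
// following nested loops, with a potential yield point at every MaybeYield and
// wherever an action returns NotFinished:
//
//   for each sweep group:
//     mark gray references in the group; end marking; begin sweeping the group
//     sweep the atoms table
//     sweep weak caches
//     for each zone in the sweep group:
//       for each foreground object alloc kind:     finalize arenas
//       for each foreground non-object alloc kind: finalize arenas
//       sweep the property map tree
//     end sweeping the group
//
// Because each loop's position is held in persistent state (via
// IncrementalIter), a slice can stop at any of these points and a later slice
// resumes from the same place.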
6555 
6556 IncrementalProgress GCRuntime::performSweepActions(SliceBudget& budget) {
6557   AutoMajorGCProfilerEntry s(this);
6558   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
6559   JSFreeOp fop(rt);
6560 
6561   // Don't trigger pre-barriers when finalizing.
6562   AutoDisableBarriers disableBarriers(this);
6563 
6564   // Drain the mark stack, possibly in a parallel task if we're in a part of
6565   // sweeping that allows it.
6566   //
6567   // In the first sweep slice we must not yield to the mutator until we've
6568   // started sweeping a sweep group, but in that case the stack must already be
6569   // empty.
6570 
6571   MOZ_ASSERT(initialState <= State::Sweep);
6572   MOZ_ASSERT_IF(initialState != State::Sweep, marker.isDrained());
6573   if (initialState == State::Sweep &&
6574       markDuringSweeping(&fop, budget) == NotFinished) {
6575     return NotFinished;
6576   }
6577 
6578   // Then continue running sweep actions.
6579 
6580   SweepAction::Args args{this, &fop, budget};
6581   IncrementalProgress sweepProgress = sweepActions->run(args);
6582   IncrementalProgress markProgress = joinBackgroundMarkTask();
6583 
6584   if (sweepProgress == Finished && markProgress == Finished) {
6585     return Finished;
6586   }
6587 
6588   MOZ_ASSERT(isIncremental);
6589   return NotFinished;
6590 }
6591 
6592 bool GCRuntime::allCCVisibleZonesWereCollected() {
6593   // Calculate whether the gray marking state is now valid.
6594   //
6595   // The gray bits change from invalid to valid if we finished a full GC from
6596   // the point of view of the cycle collector. We ignore the following:
6597   //
6598   //  - Helper thread zones, as these are not reachable from the main heap.
6599   //  - The atoms zone, since strings and symbols are never marked gray.
6600   //  - Empty zones.
6601   //
6602   // These exceptions ensure that when the CC requests a full GC the gray mark
6603   // state ends up valid even if we don't collect all of the zones.
6604 
6605   for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
6606     if (!zone->isCollecting() && !zone->usedByHelperThread() &&
6607         !zone->arenas.arenaListsAreEmpty()) {
6608       return false;
6609     }
6610   }
6611 
6612   return true;
6613 }
6614 
6615 void GCRuntime::endSweepPhase(bool destroyingRuntime) {
6616   MOZ_ASSERT(!markOnBackgroundThreadDuringSweeping);
6617 
6618   sweepActions->assertFinished();
6619 
6620   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
6621   JSFreeOp fop(rt);
6622 
6623   MOZ_ASSERT_IF(destroyingRuntime, !sweepOnBackgroundThread);
6624 
6625   {
6626     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DESTROY);
6627 
6628     /*
6629      * Sweep script filenames after sweeping functions in the generic loop
6630      * above. In this way when a scripted function's finalizer destroys the
6631      * script and calls rt->destroyScriptHook, the hook can still access the
6632      * script's filename. See bug 323267.
6633      */
6634     SweepScriptData(rt);
6635   }
6636 
6637   {
6638     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
6639     AutoLockStoreBuffer lock(&storeBuffer());
6640     callFinalizeCallbacks(&fop, JSFINALIZE_COLLECTION_END);
6641 
6642     if (allCCVisibleZonesWereCollected()) {
6643       grayBitsValid = true;
6644     }
6645   }
6646 
6647 #ifdef JS_GC_ZEAL
6648   finishMarkingValidation();
6649 #endif
6650 
6651 #ifdef DEBUG
6652   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
6653     for (auto i : AllAllocKinds()) {
6654       MOZ_ASSERT_IF(!IsBackgroundFinalized(i) || !sweepOnBackgroundThread,
6655                     !zone->arenas.arenasToSweep(i));
6656     }
6657   }
6658 #endif
6659 
6660   AssertNoWrappersInGrayList(rt);
6661 }
6662 
6663 void GCRuntime::beginCompactPhase() {
6664   MOZ_ASSERT(!isBackgroundSweeping());
6665   assertBackgroundSweepingFinished();
6666 
6667   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
6668 
6669   MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
6670   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
6671     if (canRelocateZone(zone)) {
6672       zonesToMaybeCompact.ref().append(zone);
6673     }
6674   }
6675 
6676   startedCompacting = true;
6677   zonesCompacted = 0;
6678 
6679 #ifdef DEBUG
6680   AutoLockGC lock(this);
6681   MOZ_ASSERT(!relocatedArenasToRelease);
6682 #endif
6683 }
6684 
6685 IncrementalProgress GCRuntime::compactPhase(JS::GCReason reason,
6686                                             SliceBudget& sliceBudget,
6687                                             AutoGCSession& session) {
6688   assertBackgroundSweepingFinished();
6689   MOZ_ASSERT(startedCompacting);
6690 
6691   AutoMajorGCProfilerEntry s(this);
6692   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
6693 
6694   // TODO: JSScripts can move. If the sampler interrupts the GC in the
6695   // middle of relocating an arena, invalid JSScript pointers may be
6696   // accessed. Suppress all sampling until a finer-grained solution can be
6697   // found. See bug 1295775.
6698   AutoSuppressProfilerSampling suppressSampling(rt->mainContextFromOwnThread());
6699 
6700   ZoneList relocatedZones;
6701   Arena* relocatedArenas = nullptr;
6702   while (!zonesToMaybeCompact.ref().isEmpty()) {
6703     Zone* zone = zonesToMaybeCompact.ref().front();
6704     zonesToMaybeCompact.ref().removeFront();
6705 
6706     MOZ_ASSERT(nursery().isEmpty());
6707     zone->changeGCState(Zone::Finished, Zone::Compact);
6708 
6709     if (relocateArenas(zone, reason, relocatedArenas, sliceBudget)) {
6710       updateZonePointersToRelocatedCells(zone);
6711       relocatedZones.append(zone);
6712       zonesCompacted++;
6713     } else {
6714       zone->changeGCState(Zone::Compact, Zone::Finished);
6715     }
6716 
6717     if (sliceBudget.isOverBudget()) {
6718       break;
6719     }
6720   }
6721 
6722   if (!relocatedZones.isEmpty()) {
6723     updateRuntimePointersToRelocatedCells(session);
6724 
6725     do {
6726       Zone* zone = relocatedZones.front();
6727       relocatedZones.removeFront();
6728       zone->changeGCState(Zone::Compact, Zone::Finished);
6729     } while (!relocatedZones.isEmpty());
6730   }
6731 
6732   clearRelocatedArenas(relocatedArenas, reason);
6733 
6734 #ifdef DEBUG
6735   protectOrReleaseRelocatedArenas(relocatedArenas, reason);
6736 #else
6737   releaseRelocatedArenas(relocatedArenas);
6738 #endif
6739 
6740   // Clear caches that can contain cell pointers.
6741   rt->caches().purgeForCompaction();
6742 
6743 #ifdef DEBUG
6744   checkHashTablesAfterMovingGC();
6745 #endif
6746 
6747   return zonesToMaybeCompact.ref().isEmpty() ? Finished : NotFinished;
6748 }
6749 
6750 void GCRuntime::endCompactPhase() { startedCompacting = false; }
6751 
6752 void GCRuntime::finishCollection() {
6753   assertBackgroundSweepingFinished();
6754 
6755   MOZ_ASSERT(marker.isDrained());
6756   marker.stop();
6757 
6758   clearBufferedGrayRoots();
6759 
6760   maybeStopPretenuring();
6761 
6762   {
6763     AutoLockGC lock(this);
6764     updateGCThresholdsAfterCollection(lock);
6765   }
6766 
6767   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
6768     zone->changeGCState(Zone::Finished, Zone::NoGC);
6769     zone->notifyObservingDebuggers();
6770   }
6771 
6772 #ifdef JS_GC_ZEAL
6773   clearSelectedForMarking();
6774 #endif
6775 
6776   auto currentTime = ReallyNow();
6777   schedulingState.updateHighFrequencyMode(lastGCEndTime_, currentTime,
6778                                           tunables);
6779   lastGCEndTime_ = currentTime;
6780 
6781   checkGCStateNotInUse();
6782 }
6783 
6784 void GCRuntime::checkGCStateNotInUse() {
6785 #ifdef DEBUG
6786   MOZ_ASSERT(!marker.isActive());
6787 
6788   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
6789     if (zone->wasCollected()) {
6790       zone->arenas.checkGCStateNotInUse();
6791     }
6792     MOZ_ASSERT(!zone->wasGCStarted());
6793     MOZ_ASSERT(!zone->needsIncrementalBarrier());
6794     MOZ_ASSERT(!zone->isOnList());
6795   }
6796 
6797   MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
6798   MOZ_ASSERT(cellsToAssertNotGray.ref().empty());
6799 
6800   AutoLockHelperThreadState lock;
6801   MOZ_ASSERT(!requestSliceAfterBackgroundTask);
6802 #endif
6803 }
6804 
6805 void GCRuntime::maybeStopPretenuring() {
6806   nursery().maybeStopPretenuring(this);
6807 
6808   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
6809     if (zone->allocNurseryStrings) {
6810       continue;
6811     }
6812 
6813     // Count the number of strings before the major GC.
6814     size_t numStrings = zone->markedStrings + zone->finalizedStrings;
6815     double rate = double(zone->finalizedStrings) / double(numStrings);
6816     if (rate > tunables.stopPretenureStringThreshold()) {
6817       CancelOffThreadIonCompile(zone);
6818       bool preserving = zone->isPreservingCode();
6819       zone->setPreservingCode(false);
6820       zone->discardJitCode(rt->defaultFreeOp());
6821       zone->setPreservingCode(preserving);
6822       for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
6823         if (jit::JitRealm* jitRealm = r->jitRealm()) {
6824           jitRealm->discardStubs();
6825           jitRealm->setStringsCanBeInNursery(true);
6826         }
6827       }
6828 
6829       zone->markedStrings = 0;
6830       zone->finalizedStrings = 0;
6831       zone->allocNurseryStrings = true;
6832     }
6833   }
6834 }
6835 
6836 void GCRuntime::updateGCThresholdsAfterCollection(const AutoLockGC& lock) {
6837   for (GCZonesIter zone(this); !zone.done(); zone.next()) {
6838     zone->clearGCSliceThresholds();
6839     zone->updateGCStartThresholds(*this, gcOptions, lock);
6840   }
6841 }
6842 
6843 void GCRuntime::updateAllGCStartThresholds(const AutoLockGC& lock) {
6844   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
6845     zone->updateGCStartThresholds(*this, JS::GCOptions::Normal, lock);
6846   }
6847 }
6848 
6849 static const char* GCHeapStateToLabel(JS::HeapState heapState) {
6850   switch (heapState) {
6851     case JS::HeapState::MinorCollecting:
6852       return "js::Nursery::collect";
6853     case JS::HeapState::MajorCollecting:
6854       return "js::GCRuntime::collect";
6855     default:
6856       MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
6857   }
6858   MOZ_ASSERT_UNREACHABLE("Should have exhausted every JS::HeapState variant!");
6859   return nullptr;
6860 }
6861 
6862 static JS::ProfilingCategoryPair GCHeapStateToProfilingCategory(
6863     JS::HeapState heapState) {
6864   return heapState == JS::HeapState::MinorCollecting
6865              ? JS::ProfilingCategoryPair::GCCC_MinorGC
6866              : JS::ProfilingCategoryPair::GCCC_MajorGC;
6867 }
6868 
6869 /* Start a new heap session. */
6870 AutoHeapSession::AutoHeapSession(GCRuntime* gc, JS::HeapState heapState)
6871     : gc(gc), prevState(gc->heapState_) {
6872   MOZ_ASSERT(CurrentThreadCanAccessRuntime(gc->rt));
6873   MOZ_ASSERT(prevState == JS::HeapState::Idle ||
6874              (prevState == JS::HeapState::MajorCollecting &&
6875               heapState == JS::HeapState::MinorCollecting));
6876   MOZ_ASSERT(heapState != JS::HeapState::Idle);
6877 
6878   gc->heapState_ = heapState;
6879 
6880   if (heapState == JS::HeapState::MinorCollecting ||
6881       heapState == JS::HeapState::MajorCollecting) {
6882     profilingStackFrame.emplace(gc->rt->mainContextFromOwnThread(),
6883                                 GCHeapStateToLabel(heapState),
6884                                 GCHeapStateToProfilingCategory(heapState));
6885   }
6886 }
6887 
6888 AutoHeapSession::~AutoHeapSession() {
6889   MOZ_ASSERT(JS::RuntimeHeapIsBusy());
6890   gc->heapState_ = prevState;
6891 }
6892 
6893 static const char* MajorGCStateToLabel(State state) {
6894   switch (state) {
6895     case State::Mark:
6896       return "js::GCRuntime::markUntilBudgetExhausted";
6897     case State::Sweep:
6898       return "js::GCRuntime::performSweepActions";
6899     case State::Compact:
6900       return "js::GCRuntime::compactPhase";
6901     default:
6902       MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
6903   }
6904 
6905   MOZ_ASSERT_UNREACHABLE("Should have exhausted every State variant!");
6906   return nullptr;
6907 }
6908 
6909 static JS::ProfilingCategoryPair MajorGCStateToProfilingCategory(State state) {
6910   switch (state) {
6911     case State::Mark:
6912       return JS::ProfilingCategoryPair::GCCC_MajorGC_Mark;
6913     case State::Sweep:
6914       return JS::ProfilingCategoryPair::GCCC_MajorGC_Sweep;
6915     case State::Compact:
6916       return JS::ProfilingCategoryPair::GCCC_MajorGC_Compact;
6917     default:
6918       MOZ_CRASH("Unexpected heap state when pushing GC profiling stack frame");
6919   }
6920 }
6921 
6922 AutoMajorGCProfilerEntry::AutoMajorGCProfilerEntry(GCRuntime* gc)
6923     : AutoGeckoProfilerEntry(gc->rt->mainContextFromAnyThread(),
6924                              MajorGCStateToLabel(gc->state()),
6925                              MajorGCStateToProfilingCategory(gc->state())) {
6926   MOZ_ASSERT(gc->heapState() == JS::HeapState::MajorCollecting);
6927 }
6928 
6929 JS_PUBLIC_API JS::HeapState JS::RuntimeHeapState() {
6930   return TlsContext.get()->runtime()->gc.heapState();
6931 }
6932 
6933 GCRuntime::IncrementalResult GCRuntime::resetIncrementalGC(
6934     GCAbortReason reason) {
6935   // Drop as much work as possible from an ongoing incremental GC so
6936   // we can start a new GC after it has finished.
6937   if (incrementalState == State::NotActive) {
6938     return IncrementalResult::Ok;
6939   }
6940 
6941   AutoGCSession session(this, JS::HeapState::MajorCollecting);
6942 
6943   switch (incrementalState) {
6944     case State::NotActive:
6945     case State::MarkRoots:
6946     case State::Finish:
6947       MOZ_CRASH("Unexpected GC state in resetIncrementalGC");
6948       break;
6949 
6950     case State::Prepare:
6951       unmarkTask.cancelAndWait();
6952       setParallelUnmarkEnabled(false);
6953 
6954       for (GCZonesIter zone(this); !zone.done(); zone.next()) {
6955         zone->changeGCState(Zone::Prepare, Zone::NoGC);
6956         zone->clearGCSliceThresholds();
6957       }
6958 
6959       incrementalState = State::NotActive;
6960       checkGCStateNotInUse();
6961       break;
6962 
6963     case State::Mark: {
6964       // Cancel any ongoing marking.
6965       marker.reset();
6966       clearBufferedGrayRoots();
6967 
6968       for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
6969         ResetGrayList(c);
6970       }
6971 
6972       for (GCZonesIter zone(this); !zone.done(); zone.next()) {
6973         zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
6974         zone->clearGCSliceThresholds();
6975         zone->arenas.unmarkPreMarkedFreeCells();
6976         zone->arenas.mergeNewArenasInMarkPhase();
6977       }
6978 
6979       {
6980         AutoLockHelperThreadState lock;
6981         lifoBlocksToFree.ref().freeAll();
6982       }
6983 
6984       lastMarkSlice = false;
6985       incrementalState = State::Finish;
6986 
6987       MOZ_ASSERT(!marker.shouldCheckCompartments());
6988 
6989       break;
6990     }
6991 
6992     case State::Sweep: {
6993       // Finish sweeping the current sweep group, then abort.
6994       for (CompartmentsIter c(rt); !c.done(); c.next()) {
6995         c->gcState.scheduledForDestruction = false;
6996       }
6997 
6998       abortSweepAfterCurrentGroup = true;
6999       isCompacting = false;
7000 
7001       break;
7002     }
7003 
7004     case State::Finalize: {
7005       isCompacting = false;
7006       break;
7007     }
7008 
7009     case State::Compact: {
7010       // Skip any remaining zones that would have been compacted.
7011       MOZ_ASSERT(isCompacting);
7012       startedCompacting = true;
7013       zonesToMaybeCompact.ref().clear();
7014       break;
7015     }
7016 
7017     case State::Decommit: {
7018       break;
7019     }
7020   }
7021 
7022   stats().reset(reason);
7023 
7024   return IncrementalResult::ResetIncremental;
7025 }
7026 
7027 AutoDisableBarriers::AutoDisableBarriers(GCRuntime* gc) : gc(gc) {
7028   /*
7029    * Clear needsIncrementalBarrier early so we don't do any write barriers
7030    * during sweeping.
7031    */
7032   for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
7033     if (zone->isGCMarking()) {
7034       MOZ_ASSERT(zone->needsIncrementalBarrier());
7035       zone->setNeedsIncrementalBarrier(false);
7036     }
7037     MOZ_ASSERT(!zone->needsIncrementalBarrier());
7038   }
7039 }
7040 
7041 AutoDisableBarriers::~AutoDisableBarriers() {
7042   for (GCZonesIter zone(gc); !zone.done(); zone.next()) {
7043     MOZ_ASSERT(!zone->needsIncrementalBarrier());
7044     if (zone->isGCMarking()) {
7045       zone->setNeedsIncrementalBarrier(true);
7046     }
7047   }
7048 }
7049 
7050 static bool ShouldCleanUpEverything(JS::GCReason reason,
7051                                     JS::GCOptions options) {
7052   // During shutdown, we must clean everything up, for the sake of leak
7053   // detection. When a runtime has no contexts, or we're doing a GC before a
7054   // shutdown CC, those are strong indications that we're shutting down.
7055   return IsShutdownReason(reason) || options == JS::GCOptions::Shrink;
7056 }
7057 
7058 static bool ShouldSweepOnBackgroundThread(JS::GCReason reason) {
7059   return reason != JS::GCReason::DESTROY_RUNTIME && CanUseExtraThreads();
7060 }
7061 
7062 static bool NeedToCollectNursery(GCRuntime* gc) {
7063   return !gc->nursery().isEmpty() || !gc->storeBuffer().isEmpty();
7064 }
7065 
7066 #ifdef DEBUG
7067 static const char* DescribeBudget(const SliceBudget& budget) {
7068   MOZ_ASSERT(TlsContext.get()->isMainThreadContext());
7069   constexpr size_t length = 32;
7070   static char buffer[length];
7071   budget.describe(buffer, length);
7072   return buffer;
7073 }
7074 #endif
7075 
7076 void GCRuntime::incrementalSlice(SliceBudget& budget,
7077                                  const MaybeGCOptions& options,
7078                                  JS::GCReason reason) {
7079   MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
7080 
7081   AutoSetThreadIsPerformingGC performingGC;
7082 
7083   AutoGCSession session(this, JS::HeapState::MajorCollecting);
7084 
7085   // We don't allow off-thread parsing to start while we're doing an
7086   // incremental GC of the atoms zone.
7087   if (rt->activeGCInAtomsZone()) {
7088     session.maybeCheckAtomsAccess.emplace(rt);
7089   }
7090 
7091   bool destroyingRuntime = (reason == JS::GCReason::DESTROY_RUNTIME);
7092 
7093   initialState = incrementalState;
7094   isIncremental = !budget.isUnlimited();
7095 
7096 #ifdef JS_GC_ZEAL
7097   // Do the incremental collection type specified by zeal mode if the collection
7098   // was triggered by runDebugGC() and incremental GC has not been cancelled by
7099   // resetIncrementalGC().
7100   useZeal = isIncremental && reason == JS::GCReason::DEBUG_GC;
7101 #endif
7102 
7103 #ifdef DEBUG
7104   stats().log("Incremental: %d, lastMarkSlice: %d, useZeal: %d, budget: %s",
7105               bool(isIncremental), bool(lastMarkSlice), bool(useZeal),
7106               DescribeBudget(budget));
7107 #endif
7108 
7109   if (useZeal && hasIncrementalTwoSliceZealMode()) {
7110     // Yielding between slices occurs at predetermined points in these modes; the
7111     // budget is not used. |isIncremental| is still true.
7112     stats().log("Using unlimited budget for two-slice zeal mode");
7113     budget = SliceBudget::unlimited();
7114   }
7115 
7116   switch (incrementalState) {
7117     case State::NotActive:
7118       MOZ_ASSERT(marker.isDrained());
7119       gcOptions = options.valueOr(JS::GCOptions::Normal);
7120       initialReason = reason;
7121       cleanUpEverything = ShouldCleanUpEverything(reason, gcOptions);
7122       sweepOnBackgroundThread = ShouldSweepOnBackgroundThread(reason);
7123       isCompacting = shouldCompact();
7124       MOZ_ASSERT(!lastMarkSlice);
7125       rootsRemoved = false;
7126       lastGCStartTime_ = ReallyNow();
7127 
7128 #ifdef DEBUG
7129       for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
7130         zone->gcSweepGroupIndex = 0;
7131       }
7132 #endif
7133 
7134       incrementalState = State::Prepare;
7135       if (!beginPreparePhase(reason, session)) {
7136         incrementalState = State::NotActive;
7137         break;
7138       }
7139 
7140       if (useZeal && hasZealMode(ZealMode::YieldBeforeRootMarking)) {
7141         break;
7142       }
7143 
7144       [[fallthrough]];
7145 
7146     case State::Prepare:
7147       if (waitForBackgroundTask(unmarkTask, budget,
7148                                 DontTriggerSliceWhenFinished) == NotFinished) {
7149         break;
7150       }
7151 
7152       incrementalState = State::MarkRoots;
7153       [[fallthrough]];
7154 
7155     case State::MarkRoots:
7156       if (NeedToCollectNursery(this)) {
7157         collectNurseryFromMajorGC(options, reason);
7158       }
7159 
7160       endPreparePhase(reason);
7161 
7162       beginMarkPhase(session);
7163 
7164       // If we needed delayed marking for gray roots, then collect until done.
7165       if (isIncremental && !hasValidGrayRootsBuffer()) {
7166         budget = SliceBudget::unlimited();
7167         isIncremental = false;
7168         stats().nonincremental(GCAbortReason::GrayRootBufferingFailed);
7169       }
7170 
7171       incrementalState = State::Mark;
7172 
7173       if (useZeal && hasZealMode(ZealMode::YieldBeforeMarking) &&
7174           isIncremental) {
7175         break;
7176       }
7177 
7178       [[fallthrough]];
7179 
7180     case State::Mark:
7181       if (mightSweepInThisSlice(budget.isUnlimited())) {
7182         // Trace wrapper rooters before marking if we might start sweeping in
7183         // this slice.
7184         rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);
7185       }
7186 
7187       {
7188         gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
7189         if (markUntilBudgetExhausted(budget) == NotFinished) {
7190           break;
7191         }
7192       }
7193 
7194       MOZ_ASSERT(marker.isDrained());
7195 
7196       /*
7197        * There are a number of reasons why we break out of collection here,
7198        * either to end the slice or to run a new iteration of the loop in
7199        * GCRuntime::collect()
7200        */
7201 
7202       /*
7203        * In incremental GCs where we have already performed more than one
7204        * slice we yield after marking with the aim of starting the sweep in
7205        * the next slice, since the first slice of sweeping can be expensive.
7206        *
7207        * This is modified by the various zeal modes.  We don't yield in
7208        * YieldBeforeMarking mode and we always yield in YieldBeforeSweeping
7209        * mode.
7210        *
7211        * We will need to mark anything new on the stack when we resume, so
7212        * we stay in Mark state.
7213        */
7214       if (isIncremental && !lastMarkSlice) {
7215         if ((initialState == State::Mark &&
7216              !(useZeal && hasZealMode(ZealMode::YieldBeforeMarking))) ||
7217             (useZeal && hasZealMode(ZealMode::YieldBeforeSweeping))) {
7218           lastMarkSlice = true;
7219           stats().log("Yielding before starting sweeping");
7220           break;
7221         }
7222       }
7223 
7224       incrementalState = State::Sweep;
7225       lastMarkSlice = false;
7226 
7227       beginSweepPhase(reason, session);
7228 
7229       [[fallthrough]];
7230 
7231     case State::Sweep:
7232       if (storeBuffer().mayHavePointersToDeadCells()) {
7233         collectNurseryFromMajorGC(options, reason);
7234       }
7235 
7236       if (initialState == State::Sweep) {
7237         rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);
7238       }
7239 
7240       if (performSweepActions(budget) == NotFinished) {
7241         break;
7242       }
7243 
7244       endSweepPhase(destroyingRuntime);
7245 
7246       incrementalState = State::Finalize;
7247 
7248       [[fallthrough]];
7249 
7250     case State::Finalize:
7251       if (waitForBackgroundTask(sweepTask, budget, TriggerSliceWhenFinished) ==
7252           NotFinished) {
7253         break;
7254       }
7255 
7256       assertBackgroundSweepingFinished();
7257 
7258       {
7259         // Sweep the zones list now that background finalization is finished to
7260         // remove and free dead zones, compartments and realms.
7261         gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP);
7262         gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::DESTROY);
7263         JSFreeOp fop(rt);
7264         sweepZones(&fop, destroyingRuntime);
7265       }
7266 
7267       MOZ_ASSERT(!startedCompacting);
7268       incrementalState = State::Compact;
7269 
7270       // Always yield before compacting since it is not incremental.
7271       if (isCompacting && !budget.isUnlimited()) {
7272         break;
7273       }
7274 
7275       [[fallthrough]];
7276 
7277     case State::Compact:
7278       if (isCompacting) {
7279         if (NeedToCollectNursery(this)) {
7280           collectNurseryFromMajorGC(options, reason);
7281         }
7282 
7283         storeBuffer().checkEmpty();
7284         if (!startedCompacting) {
7285           beginCompactPhase();
7286         }
7287 
7288         if (compactPhase(reason, budget, session) == NotFinished) {
7289           break;
7290         }
7291 
7292         endCompactPhase();
7293       }
7294 
7295       startDecommit();
7296       incrementalState = State::Decommit;
7297 
7298       [[fallthrough]];
7299 
7300     case State::Decommit:
7301       if (waitForBackgroundTask(decommitTask, budget,
7302                                 TriggerSliceWhenFinished) == NotFinished) {
7303         break;
7304       }
7305 
7306       incrementalState = State::Finish;
7307 
7308       [[fallthrough]];
7309 
7310     case State::Finish:
7311       finishCollection();
7312       incrementalState = State::NotActive;
7313       break;
7314   }
7315 
7316   MOZ_ASSERT(safeToYield);
7317   MOZ_ASSERT(marker.markColor() == MarkColor::Black);
7318 }
7319 
7320 void GCRuntime::collectNurseryFromMajorGC(const MaybeGCOptions& options,
7321                                           JS::GCReason reason) {
7322   collectNursery(options.valueOr(JS::GCOptions::Normal), reason,
7323                  gcstats::PhaseKind::EVICT_NURSERY_FOR_MAJOR_GC);
7324 }
7325 
7326 bool GCRuntime::hasForegroundWork() const {
7327   switch (incrementalState) {
7328     case State::NotActive:
7329       // Incremental GC is not running and no work is pending.
7330       return false;
7331     case State::Prepare:
7332       // We yield in the Prepare state after starting unmarking.
7333       return !unmarkTask.wasStarted();
7334     case State::Finalize:
7335       // We yield in the Finalize state to wait for background sweeping.
7336       return !isBackgroundSweeping();
7337     case State::Decommit:
7338       // We yield in the Decommit state to wait for background decommit.
7339       return !decommitTask.wasStarted();
7340     default:
7341       // In all other states there is still work to do.
7342       return true;
7343   }
7344 }
7345 
7346 IncrementalProgress GCRuntime::waitForBackgroundTask(
7347     GCParallelTask& task, const SliceBudget& budget,
7348     ShouldTriggerSliceWhenFinished triggerSlice) {
7349   // In incremental collections, yield if the task has not finished and request
7350   // a slice to notify us when this happens.
7351   if (!budget.isUnlimited()) {
7352     AutoLockHelperThreadState lock;
7353     if (task.wasStarted(lock)) {
7354       if (triggerSlice) {
7355         requestSliceAfterBackgroundTask = true;
7356       }
7357       return NotFinished;
7358     }
7359   }
7360 
7361   // Otherwise in non-incremental collections, wait here.
7362   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
7363   task.join();
7364   if (triggerSlice) {
7365     cancelRequestedGCAfterBackgroundTask();
7366   }
7367 
7368   return Finished;
7369 }
7370 
7371 GCAbortReason gc::IsIncrementalGCUnsafe(JSRuntime* rt) {
7372   MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
7373 
7374   if (!rt->gc.isIncrementalGCAllowed()) {
7375     return GCAbortReason::IncrementalDisabled;
7376   }
7377 
7378   return GCAbortReason::None;
7379 }
7380 
7381 inline void GCRuntime::checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
7382                                             const char* trigger) {
7383 #ifdef DEBUG
7384   if (zone->isGCScheduled()) {
7385     return;
7386   }
7387 
7388   fprintf(stderr,
7389           "checkZoneIsScheduled: Zone %p not scheduled as expected in %s GC "
7390           "for %s trigger\n",
7391           zone, JS::ExplainGCReason(reason), trigger);
7392   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
7393     fprintf(stderr, "  Zone %p:%s%s\n", zone.get(),
7394             zone->isAtomsZone() ? " atoms" : "",
7395             zone->isGCScheduled() ? " scheduled" : "");
7396   }
7397   fflush(stderr);
7398   MOZ_CRASH("Zone not scheduled");
7399 #endif
7400 }
7401 
7402 GCRuntime::IncrementalResult GCRuntime::budgetIncrementalGC(
7403     bool nonincrementalByAPI, JS::GCReason reason, SliceBudget& budget) {
7404   if (nonincrementalByAPI) {
7405     stats().nonincremental(GCAbortReason::NonIncrementalRequested);
7406     budget = SliceBudget::unlimited();
7407 
7408     // Reset any in-progress incremental GC if this was triggered via the
7409     // API. This isn't required for correctness, but sometimes during tests
7410     // the caller expects this GC to collect certain objects, and we need
7411     // to make sure to collect everything possible.
7412     if (reason != JS::GCReason::ALLOC_TRIGGER) {
7413       return resetIncrementalGC(GCAbortReason::NonIncrementalRequested);
7414     }
7415 
7416     return IncrementalResult::Ok;
7417   }
7418 
7419   if (reason == JS::GCReason::ABORT_GC) {
7420     budget = SliceBudget::unlimited();
7421     stats().nonincremental(GCAbortReason::AbortRequested);
7422     return resetIncrementalGC(GCAbortReason::AbortRequested);
7423   }
7424 
7425   if (!budget.isUnlimited()) {
7426     GCAbortReason unsafeReason = IsIncrementalGCUnsafe(rt);
7427     if (unsafeReason == GCAbortReason::None) {
7428       if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
7429         unsafeReason = GCAbortReason::CompartmentRevived;
7430       } else if (!incrementalGCEnabled) {
7431         unsafeReason = GCAbortReason::ModeChange;
7432       }
7433     }
7434 
7435     if (unsafeReason != GCAbortReason::None) {
7436       budget = SliceBudget::unlimited();
7437       stats().nonincremental(unsafeReason);
7438       return resetIncrementalGC(unsafeReason);
7439     }
7440   }
7441 
7442   GCAbortReason resetReason = GCAbortReason::None;
7443   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
7444     if (!zone->canCollect()) {
7445       continue;
7446     }
7447 
7448     if (zone->gcHeapSize.bytes() >=
7449         zone->gcHeapThreshold.incrementalLimitBytes()) {
7450       checkZoneIsScheduled(zone, reason, "GC bytes");
7451       budget = SliceBudget::unlimited();
7452       stats().nonincremental(GCAbortReason::GCBytesTrigger);
7453       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
7454         resetReason = GCAbortReason::GCBytesTrigger;
7455       }
7456     }
7457 
7458     if (zone->mallocHeapSize.bytes() >=
7459         zone->mallocHeapThreshold.incrementalLimitBytes()) {
7460       checkZoneIsScheduled(zone, reason, "malloc bytes");
7461       budget = SliceBudget::unlimited();
7462       stats().nonincremental(GCAbortReason::MallocBytesTrigger);
7463       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
7464         resetReason = GCAbortReason::MallocBytesTrigger;
7465       }
7466     }
7467 
7468     if (zone->jitHeapSize.bytes() >=
7469         zone->jitHeapThreshold.incrementalLimitBytes()) {
7470       checkZoneIsScheduled(zone, reason, "JIT code bytes");
7471       budget = SliceBudget::unlimited();
7472       stats().nonincremental(GCAbortReason::JitCodeBytesTrigger);
7473       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
7474         resetReason = GCAbortReason::JitCodeBytesTrigger;
7475       }
7476     }
7477 
7478     if (isIncrementalGCInProgress() &&
7479         zone->isGCScheduled() != zone->wasGCStarted()) {
7480       budget = SliceBudget::unlimited();
7481       resetReason = GCAbortReason::ZoneChange;
7482     }
7483   }
7484 
7485   if (resetReason != GCAbortReason::None) {
7486     return resetIncrementalGC(resetReason);
7487   }
7488 
7489   return IncrementalResult::Ok;
7490 }
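
/*
 * Worked example of the incremental-limit check above (hypothetical numbers):
 * if a zone's GC heap threshold starts a collection at 30 MB and its
 * incrementalLimitBytes() works out to 40 MB, then finding that zone at 42 MB
 * here switches the budget to unlimited so the collection finishes
 * non-incrementally; and if that zone has already progressed past sweeping
 * (gcState() > Zone::Sweep), the in-progress incremental GC is also reset.
 */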
7491 
7492 void GCRuntime::maybeIncreaseSliceBudget(SliceBudget& budget) {
7493   if (js::SupportDifferentialTesting()) {
7494     return;
7495   }
7496 
7497   // Increase time budget for long-running incremental collections. Enforce a
7498   // minimum time budget that increases linearly with time/slice count up to a
7499   // maximum.
7500 
7501   if (budget.isTimeBudget() && isIncrementalGCInProgress()) {
7502     // All times are in milliseconds.
7503     struct BudgetAtTime {
7504       double time;
7505       double budget;
7506     };
7507     const BudgetAtTime MinBudgetStart{1500, 0.0};
7508     const BudgetAtTime MinBudgetEnd{2500, 100.0};
7509 
7510     double totalTime = (ReallyNow() - lastGCStartTime()).ToMilliseconds();
7511 
7512     double minBudget =
7513         LinearInterpolate(totalTime, MinBudgetStart.time, MinBudgetStart.budget,
7514                           MinBudgetEnd.time, MinBudgetEnd.budget);
7515 
7516     if (budget.timeBudget() < minBudget) {
7517       budget = SliceBudget(TimeBudget(minBudget));
7518     }
7519   }
7520 }
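
/*
 * Worked example, assuming LinearInterpolate clamps to its endpoints: with
 * MinBudgetStart = {1500 ms, 0 ms} and MinBudgetEnd = {2500 ms, 100 ms}, a
 * collection that has been running for 2000 ms has
 *
 *   minBudget = 0 + ((2000 - 1500) / (2500 - 1500)) * (100 - 0) = 50 ms,
 *
 * so a requested 10 ms slice is raised to 50 ms while a 75 ms request is left
 * unchanged. Before 1500 ms of total GC time the minimum is 0 and budgets are
 * never increased; after 2500 ms every slice gets at least 100 ms.
 */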
7521 
7522 static void ScheduleZones(GCRuntime* gc) {
7523   for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
7524     if (!zone->canCollect()) {
7525       continue;
7526     }
7527 
7528     if (!gc->isPerZoneGCEnabled()) {
7529       zone->scheduleGC();
7530     }
7531 
7532     // To avoid resets, continue to collect any zones that were being
7533     // collected in a previous slice.
7534     if (gc->isIncrementalGCInProgress() && zone->wasGCStarted()) {
7535       zone->scheduleGC();
7536     }
7537 
7538     // This is a heuristic to reduce the total number of collections.
7539     bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
7540     if (zone->gcHeapSize.bytes() >=
7541             zone->gcHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
7542         zone->mallocHeapSize.bytes() >=
7543             zone->mallocHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
7544         zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.startBytes()) {
7545       zone->scheduleGC();
7546     }
7547   }
7548 }
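
/*
 * Example (hypothetical numbers): the eager allocation trigger is a fraction
 * of a zone's start threshold, so if a zone would normally trigger its own GC
 * at 30 MB and the eager trigger works out to 27 MB, a zone already at 28 MB
 * is pulled into this collection rather than being left to start another GC
 * shortly after this one finishes.
 */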
7549 
7550 static void UnscheduleZones(GCRuntime* gc) {
7551   for (ZonesIter zone(gc->rt, WithAtoms); !zone.done(); zone.next()) {
7552     zone->unscheduleGC();
7553   }
7554 }
7555 
7556 class js::gc::AutoCallGCCallbacks {
7557   GCRuntime& gc_;
7558   JS::GCReason reason_;
7559 
7560  public:
7561   explicit AutoCallGCCallbacks(GCRuntime& gc, JS::GCReason reason)
7562       : gc_(gc), reason_(reason) {
7563     gc_.maybeCallGCCallback(JSGC_BEGIN, reason);
7564   }
7565   ~AutoCallGCCallbacks() { gc_.maybeCallGCCallback(JSGC_END, reason_); }
7566 };
7567 
7568 void GCRuntime::maybeCallGCCallback(JSGCStatus status, JS::GCReason reason) {
7569   if (!gcCallback.ref().op) {
7570     return;
7571   }
7572 
7573   if (isIncrementalGCInProgress()) {
7574     return;
7575   }
7576 
7577   if (gcCallbackDepth == 0) {
7578     // Save scheduled zone information in case the callback clears it.
7579     for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
7580       zone->gcScheduledSaved_ = zone->gcScheduled_;
7581     }
7582   }
7583 
7584   gcCallbackDepth++;
7585 
7586   callGCCallback(status, reason);
7587 
7588   MOZ_ASSERT(gcCallbackDepth != 0);
7589   gcCallbackDepth--;
7590 
7591   if (gcCallbackDepth == 0) {
7592     // Ensure any zone that was originally scheduled stays scheduled.
7593     for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
7594       zone->gcScheduled_ = zone->gcScheduled_ || zone->gcScheduledSaved_;
7595     }
7596   }
7597 }
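
/*
 * Example of why the save/restore above matters: a JSGC_BEGIN callback may
 * itself trigger a nested non-incremental GC, and collect() unschedules every
 * zone when it finishes (see UnscheduleZones). Without restoring gcScheduled_
 * here, the outer collection would then run with an empty schedule.
 */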
7598 
7599 /*
7600  * We disable inlining to ensure that the bottom of the stack with possible GC
7601  * roots recorded in MarkRuntime excludes any pointers we use during the marking
7602  * implementation.
7603  */
7604 MOZ_NEVER_INLINE GCRuntime::IncrementalResult GCRuntime::gcCycle(
7605     bool nonincrementalByAPI, const SliceBudget& budgetArg,
7606     const MaybeGCOptions& options, JS::GCReason reason) {
7607   // Assert if this is a GC unsafe region.
7608   rt->mainContextFromOwnThread()->verifyIsSafeToGC();
7609 
7610   // It's ok if threads other than the main thread have suppressGC set, as
7611   // they are operating on zones which will not be collected from here.
7612   MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
7613 
7614   // This reason is used internally. See below.
7615   MOZ_ASSERT(reason != JS::GCReason::RESET);
7616 
7617   // Background finalization and decommit are finished by definition before we
7618   // can start a new major GC.  Background allocation may still be running, but
7619   // that's OK because chunk pools are protected by the GC lock.
7620   if (!isIncrementalGCInProgress()) {
7621     assertBackgroundSweepingFinished();
7622     MOZ_ASSERT(decommitTask.isIdle());
7623   }
7624 
7625   // Note that GC callbacks are allowed to re-enter GC.
7626   AutoCallGCCallbacks callCallbacks(*this, reason);
7627 
7628   // Increase the slice budget for long-running collections before it is
7629   // recorded by AutoGCSlice.
7630   SliceBudget budget(budgetArg);
7631   maybeIncreaseSliceBudget(budget);
7632 
7633   ScheduleZones(this);
7634   gcstats::AutoGCSlice agc(stats(), scanZonesBeforeGC(),
7635                            options.valueOr(gcOptions), budget, reason);
7636 
7637   IncrementalResult result =
7638       budgetIncrementalGC(nonincrementalByAPI, reason, budget);
7639   if (result == IncrementalResult::ResetIncremental) {
7640     if (incrementalState == State::NotActive) {
7641       // The collection was reset and has finished.
7642       return result;
7643     }
7644 
7645     // The collection was reset but we must finish up some remaining work.
7646     reason = JS::GCReason::RESET;
7647   }
7648 
7649   majorGCTriggerReason = JS::GCReason::NO_REASON;
7650   MOZ_ASSERT(!stats().hasTrigger());
7651 
7652   incGcNumber();
7653   incGcSliceNumber();
7654 
7655   gcprobes::MajorGCStart();
7656   incrementalSlice(budget, options, reason);
7657   gcprobes::MajorGCEnd();
7658 
7659   MOZ_ASSERT_IF(result == IncrementalResult::ResetIncremental,
7660                 !isIncrementalGCInProgress());
7661   return result;
7662 }
7663 
7664 void GCRuntime::waitForBackgroundTasksBeforeSlice() {
7665   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
7666 
7667   // Background finalization and decommit are finished by definition before we
7668   // can start a new major GC.
7669   if (!isIncrementalGCInProgress()) {
7670     assertBackgroundSweepingFinished();
7671     MOZ_ASSERT(decommitTask.isIdle());
7672   }
7673 
7674   // We must also wait for background allocation to finish so we can avoid
7675   // taking the GC lock when manipulating the chunks during the GC.  The
7676   // background alloc task can run between slices, so we must wait for it at the
7677   // start of every slice.
7678   //
7679   // TODO: Is this still necessary?
7680   allocTask.cancelAndWait();
7681 }
7682 
7683 inline bool GCRuntime::mightSweepInThisSlice(bool nonIncremental) {
7684   MOZ_ASSERT(incrementalState < State::Sweep);
7685   return nonIncremental || lastMarkSlice || hasIncrementalTwoSliceZealMode();
7686 }
7687 
7688 #ifdef JS_GC_ZEAL
7689 static bool IsDeterministicGCReason(JS::GCReason reason) {
7690   switch (reason) {
7691     case JS::GCReason::API:
7692     case JS::GCReason::DESTROY_RUNTIME:
7693     case JS::GCReason::LAST_DITCH:
7694     case JS::GCReason::TOO_MUCH_MALLOC:
7695     case JS::GCReason::TOO_MUCH_WASM_MEMORY:
7696     case JS::GCReason::TOO_MUCH_JIT_CODE:
7697     case JS::GCReason::ALLOC_TRIGGER:
7698     case JS::GCReason::DEBUG_GC:
7699     case JS::GCReason::CC_FORCED:
7700     case JS::GCReason::SHUTDOWN_CC:
7701     case JS::GCReason::ABORT_GC:
7702     case JS::GCReason::DISABLE_GENERATIONAL_GC:
7703     case JS::GCReason::FINISH_GC:
7704     case JS::GCReason::PREPARE_FOR_TRACING:
7705       return true;
7706 
7707     default:
7708       return false;
7709   }
7710 }
7711 #endif
7712 
7713 gcstats::ZoneGCStats GCRuntime::scanZonesBeforeGC() {
7714   gcstats::ZoneGCStats zoneStats;
7715   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
7716     zoneStats.zoneCount++;
7717     zoneStats.compartmentCount += zone->compartments().length();
7718     if (zone->canCollect()) {
7719       zoneStats.collectableZoneCount++;
7720       if (zone->isGCScheduled()) {
7721         zoneStats.collectedZoneCount++;
7722         zoneStats.collectedCompartmentCount += zone->compartments().length();
7723       }
7724     }
7725   }
7726 
7727   return zoneStats;
7728 }
7729 
7730 // The GC can only clean up scheduledForDestruction realms that were marked live
7731 // by a barrier (e.g. by RemapWrappers from a navigation event). It is also
7732 // common to have realms held live because they are part of a cycle in gecko,
7733 // e.g. involving the HTMLDocument wrapper. In this case, we need to run the
7734 // CycleCollector in order to remove these edges before the realm can be freed.
7735 void GCRuntime::maybeDoCycleCollection() {
7736   const static float ExcessiveGrayRealms = 0.8f;
7737   const static size_t LimitGrayRealms = 200;
7738 
7739   size_t realmsTotal = 0;
7740   size_t realmsGray = 0;
7741   for (RealmsIter realm(rt); !realm.done(); realm.next()) {
7742     ++realmsTotal;
7743     GlobalObject* global = realm->unsafeUnbarrieredMaybeGlobal();
7744     if (global && global->isMarkedGray()) {
7745       ++realmsGray;
7746     }
7747   }
7748   float grayFraction = float(realmsGray) / float(realmsTotal);
7749   if (grayFraction > ExcessiveGrayRealms || realmsGray > LimitGrayRealms) {
7750     callDoCycleCollectionCallback(rt->mainContextFromOwnThread());
7751   }
7752 }
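
/*
 * Worked example: with ExcessiveGrayRealms = 0.8 and LimitGrayRealms = 200, a
 * runtime with 10 realms of which 9 have gray globals requests a cycle
 * collection (fraction 0.9 > 0.8), as does a runtime with 1000 realms of
 * which 250 are gray (fraction only 0.25, but 250 > 200).
 */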
7753 
7754 void GCRuntime::checkCanCallAPI() {
7755   MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
7756 
7757   /* If we attempt to invoke the GC while we are running in the GC, assert. */
7758   MOZ_RELEASE_ASSERT(!JS::RuntimeHeapIsBusy());
7759 }
7760 
7761 bool GCRuntime::checkIfGCAllowedInCurrentState(JS::GCReason reason) {
7762   if (rt->mainContextFromOwnThread()->suppressGC) {
7763     return false;
7764   }
7765 
7766   // Once the runtime is being destroyed, only allow shutdown-reason GCs. This
7767   // keeps the GC callback from triggering a nested GC and resetting global state.
7768   if (rt->isBeingDestroyed() && !IsShutdownReason(reason)) {
7769     return false;
7770   }
7771 
7772 #ifdef JS_GC_ZEAL
7773   if (deterministicOnly && !IsDeterministicGCReason(reason)) {
7774     return false;
7775   }
7776 #endif
7777 
7778   return true;
7779 }
7780 
7781 bool GCRuntime::shouldRepeatForDeadZone(JS::GCReason reason) {
7782   MOZ_ASSERT_IF(reason == JS::GCReason::COMPARTMENT_REVIVED, !isIncremental);
7783   MOZ_ASSERT(!isIncrementalGCInProgress());
7784 
7785   if (!isIncremental) {
7786     return false;
7787   }
7788 
7789   for (CompartmentsIter c(rt); !c.done(); c.next()) {
7790     if (c->gcState.scheduledForDestruction) {
7791       return true;
7792     }
7793   }
7794 
7795   return false;
7796 }
7797 
7798 struct MOZ_RAII AutoSetZoneSliceThresholds {
7799   explicit AutoSetZoneSliceThresholds(GCRuntime* gc) : gc(gc) {
7800     // On entry, zones that are already collecting should have a slice threshold
7801     // set.
7802     for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
7803       MOZ_ASSERT(zone->wasGCStarted() ==
7804                  zone->gcHeapThreshold.hasSliceThreshold());
7805       MOZ_ASSERT(zone->wasGCStarted() ==
7806                  zone->mallocHeapThreshold.hasSliceThreshold());
7807     }
7808   }
7809 
7810   ~AutoSetZoneSliceThresholds() {
7811     // On exit, update the thresholds for all collecting zones.
7812     for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
7813       if (zone->wasGCStarted()) {
7814         zone->setGCSliceThresholds(*gc);
7815       } else {
7816         MOZ_ASSERT(!zone->gcHeapThreshold.hasSliceThreshold());
7817         MOZ_ASSERT(!zone->mallocHeapThreshold.hasSliceThreshold());
7818       }
7819     }
7820   }
7821 
7822   GCRuntime* gc;
7823 };
7824 
7825 void GCRuntime::collect(bool nonincrementalByAPI, const SliceBudget& budget,
7826                         const MaybeGCOptions& optionsArg, JS::GCReason reason) {
7827   mozilla::TimeStamp startTime = TimeStamp::Now();
7828   auto timer = mozilla::MakeScopeExit([&] {
7829     if (Realm* realm = rt->mainContextFromOwnThread()->realm()) {
7830       realm->timers.gcTime += TimeStamp::Now() - startTime;
7831     }
7832   });
7833 
7834   MOZ_ASSERT(reason != JS::GCReason::NO_REASON);
7835 
7836   MaybeGCOptions options = optionsArg;
7837   MOZ_ASSERT_IF(!isIncrementalGCInProgress(), options.isSome());
7838 
7839   // Checks run for each request, even if we do not actually GC.
7840   checkCanCallAPI();
7841 
7842   // Check if we are allowed to GC at this time before proceeding.
7843   if (!checkIfGCAllowedInCurrentState(reason)) {
7844     return;
7845   }
7846 
7847   stats().log("GC starting in state %s", StateName(incrementalState));
7848 
7849   AutoTraceLog logGC(TraceLoggerForCurrentThread(), TraceLogger_GC);
7850   AutoStopVerifyingBarriers av(rt, IsShutdownReason(reason));
7851   AutoEnqueuePendingParseTasksAfterGC aept(*this);
7852   AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
7853   AutoSetZoneSliceThresholds sliceThresholds(this);
7854 
7855 #ifdef DEBUG
7856   if (IsShutdownReason(reason)) {
7857     marker.markQueue.clear();
7858     marker.queuePos = 0;
7859   }
7860 #endif
7861 
7862   bool repeat;
7863   do {
7864     IncrementalResult cycleResult =
7865         gcCycle(nonincrementalByAPI, budget, options, reason);
7866 
7867     if (reason == JS::GCReason::ABORT_GC) {
7868       MOZ_ASSERT(!isIncrementalGCInProgress());
7869       stats().log("GC aborted by request");
7870       break;
7871     }
7872 
7873     /*
7874      * Sometimes when we finish a GC we need to immediately start a new one.
7875      * This happens in the following cases:
7876      *  - when we reset the current GC
7877      *  - when finalizers drop roots during shutdown
7878      *  - when zones that we thought were dead at the start of GC are
7879      *    not collected (see the large comment in beginMarkPhase)
7880      */
7881     repeat = false;
7882     if (!isIncrementalGCInProgress()) {
7883       if (cycleResult == ResetIncremental) {
7884         repeat = true;
7885       } else if (rootsRemoved && IsShutdownReason(reason)) {
7886         /* Need to re-schedule all zones for GC. */
7887         JS::PrepareForFullGC(rt->mainContextFromOwnThread());
7888         repeat = true;
7889         reason = JS::GCReason::ROOTS_REMOVED;
7890       } else if (shouldRepeatForDeadZone(reason)) {
7891         repeat = true;
7892         reason = JS::GCReason::COMPARTMENT_REVIVED;
7893       }
7894     }
7895 
7896     if (repeat) {
7897       options = Some(gcOptions);
7898     }
7899   } while (repeat);
7900 
7901   if (reason == JS::GCReason::COMPARTMENT_REVIVED) {
7902     maybeDoCycleCollection();
7903   }
7904 
7905 #ifdef JS_GC_ZEAL
7906   if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
7907     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::TRACE_HEAP);
7908     CheckHeapAfterGC(rt);
7909   }
7910   if (hasZealMode(ZealMode::CheckGrayMarking) && !isIncrementalGCInProgress()) {
7911     MOZ_RELEASE_ASSERT(CheckGrayMarkingState(rt));
7912   }
7913 #endif
7914   stats().log("GC ending in state %s", StateName(incrementalState));
7915 
7916   UnscheduleZones(this);
7917 }
7918 
7919 js::AutoEnqueuePendingParseTasksAfterGC::
7920     ~AutoEnqueuePendingParseTasksAfterGC() {
7921   if (!OffThreadParsingMustWaitForGC(gc_.rt)) {
7922     EnqueuePendingParseTasksAfterGC(gc_.rt);
7923   }
7924 }
7925 
7926 SliceBudget GCRuntime::defaultBudget(JS::GCReason reason, int64_t millis) {
7927   if (millis == 0) {
7928     if (reason == JS::GCReason::ALLOC_TRIGGER) {
7929       millis = defaultSliceBudgetMS();
7930     } else if (schedulingState.inHighFrequencyGCMode()) {
7931       millis = defaultSliceBudgetMS() * IGC_MARK_SLICE_MULTIPLIER;
7932     } else {
7933       millis = defaultSliceBudgetMS();
7934     }
7935   }
7936 
7937   if (millis == 0) {
7938     return SliceBudget::unlimited();
7939   }
7940 
7941   return SliceBudget(TimeBudget(millis));
7942 }
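
/*
 * Example (hypothetical numbers): with a default slice budget of 5 ms and an
 * IGC_MARK_SLICE_MULTIPLIER of 2, calling this with millis == 0 yields a 5 ms
 * budget for allocation-triggered slices, a 10 ms budget in high-frequency GC
 * mode, and 5 ms otherwise; a non-zero millis is used as given, and a
 * configured default of 0 means an unlimited budget.
 */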
7943 
7944 void GCRuntime::gc(JS::GCOptions options, JS::GCReason reason) {
7945   collect(true, SliceBudget::unlimited(), mozilla::Some(options), reason);
7946 }
7947 
7948 void GCRuntime::startGC(JS::GCOptions options, JS::GCReason reason,
7949                         int64_t millis) {
7950   MOZ_ASSERT(!isIncrementalGCInProgress());
7951   if (!JS::IsIncrementalGCEnabled(rt->mainContextFromOwnThread())) {
7952     gc(options, reason);
7953     return;
7954   }
7955   collect(false, defaultBudget(reason, millis), Some(options), reason);
7956 }
7957 
7958 void GCRuntime::gcSlice(JS::GCReason reason, int64_t millis) {
7959   MOZ_ASSERT(isIncrementalGCInProgress());
7960   collect(false, defaultBudget(reason, millis), Nothing(), reason);
7961 }
7962 
7963 void GCRuntime::finishGC(JS::GCReason reason) {
7964   MOZ_ASSERT(isIncrementalGCInProgress());
7965 
7966   // Unless this collection was triggered because we're out of memory, skip
7967   // the compacting phase when finishing an ongoing incremental GC
7968   // non-incrementally, to avoid janking the browser.
7969   if (!IsOOMReason(initialReason)) {
7970     if (incrementalState == State::Compact) {
7971       abortGC();
7972       return;
7973     }
7974 
7975     isCompacting = false;
7976   }
7977 
7978   collect(false, SliceBudget::unlimited(), Nothing(), reason);
7979 }
7980 
7981 void GCRuntime::abortGC() {
7982   MOZ_ASSERT(isIncrementalGCInProgress());
7983   checkCanCallAPI();
7984   MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
7985 
7986   collect(false, SliceBudget::unlimited(), Nothing(), JS::GCReason::ABORT_GC);
7987 }
7988 
7989 static bool ZonesSelected(GCRuntime* gc) {
7990   for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
7991     if (zone->isGCScheduled()) {
7992       return true;
7993     }
7994   }
7995   return false;
7996 }
7997 
7998 void GCRuntime::startDebugGC(JS::GCOptions options, SliceBudget& budget) {
7999   MOZ_ASSERT(!isIncrementalGCInProgress());
8000   if (!ZonesSelected(this)) {
8001     JS::PrepareForFullGC(rt->mainContextFromOwnThread());
8002   }
8003   collect(false, budget, Some(options), JS::GCReason::DEBUG_GC);
8004 }
8005 
8006 void GCRuntime::debugGCSlice(SliceBudget& budget) {
8007   MOZ_ASSERT(isIncrementalGCInProgress());
8008   if (!ZonesSelected(this)) {
8009     JS::PrepareForIncrementalGC(rt->mainContextFromOwnThread());
8010   }
8011   collect(false, budget, Nothing(), JS::GCReason::DEBUG_GC);
8012 }
8013 
8014 /* Schedule a full GC unless a zone will already be collected. */
8015 void js::PrepareForDebugGC(JSRuntime* rt) {
8016   if (!ZonesSelected(&rt->gc)) {
8017     JS::PrepareForFullGC(rt->mainContextFromOwnThread());
8018   }
8019 }
8020 
8021 void GCRuntime::onOutOfMallocMemory() {
8022   // Stop allocating new chunks.
8023   allocTask.cancelAndWait();
8024 
8025   // Make sure we release anything queued for release.
8026   decommitTask.join();
8027   nursery().joinDecommitTask();
8028 
8029   // Wait for background free of nursery huge slots to finish.
8030   sweepTask.join();
8031 
8032   AutoLockGC lock(this);
8033   onOutOfMallocMemory(lock);
8034 }
8035 
8036 void GCRuntime::onOutOfMallocMemory(const AutoLockGC& lock) {
8037 #ifdef DEBUG
8038   // Release any relocated arenas we may be holding on to, without releasing
8039   // the GC lock.
8040   releaseHeldRelocatedArenasWithoutUnlocking(lock);
8041 #endif
8042 
8043   // Throw away any excess chunks we have lying around.
8044   freeEmptyChunks(lock);
8045 
8046   // Immediately decommit as many arenas as possible in the hopes that this
8047   // might let the OS scrape together enough pages to satisfy the failing
8048   // malloc request.
8049   if (DecommitEnabled()) {
8050     decommitFreeArenasWithoutUnlocking(lock);
8051   }
8052 }
8053 
8054 void GCRuntime::minorGC(JS::GCReason reason, gcstats::PhaseKind phase) {
8055   MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
8056 
8057   MOZ_ASSERT_IF(reason == JS::GCReason::EVICT_NURSERY,
8058                 !rt->mainContextFromOwnThread()->suppressGC);
8059   if (rt->mainContextFromOwnThread()->suppressGC) {
8060     return;
8061   }
8062 
8063   incGcNumber();
8064 
8065   collectNursery(JS::GCOptions::Normal, reason, phase);
8066 
8067 #ifdef JS_GC_ZEAL
8068   if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
8069     gcstats::AutoPhase ap(stats(), phase);
8070     CheckHeapAfterGC(rt);
8071   }
8072 #endif
8073 
8074   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
8075     maybeTriggerGCAfterAlloc(zone);
8076     maybeTriggerGCAfterMalloc(zone);
8077   }
8078 }
8079 
8080 void GCRuntime::collectNursery(JS::GCOptions options, JS::GCReason reason,
8081                                gcstats::PhaseKind phase) {
8082   AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
8083 
8084   // Note that we aren't collecting the updated alloc counts from any helper
8085   // threads.  We should be but I'm not sure where to add that
8086   // synchronisation.
8087   uint32_t numAllocs =
8088       rt->mainContextFromOwnThread()->getAndResetAllocsThisZoneSinceMinorGC();
8089   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
8090     numAllocs += zone->getAndResetTenuredAllocsSinceMinorGC();
8091   }
8092   stats().setAllocsSinceMinorGCTenured(numAllocs);
8093 
8094   gcstats::AutoPhase ap(stats(), phase);
8095 
8096   nursery().clearMinorGCRequest();
8097   TraceLoggerThread* logger = TraceLoggerForCurrentThread();
8098   AutoTraceLog logMinorGC(logger, TraceLogger_MinorGC);
8099   nursery().collect(options, reason);
8100   MOZ_ASSERT(nursery().isEmpty());
8101 
8102   startBackgroundFreeAfterMinorGC();
8103 }
8104 
8105 void GCRuntime::startBackgroundFreeAfterMinorGC() {
8106   MOZ_ASSERT(nursery().isEmpty());
8107 
8108   {
8109     AutoLockHelperThreadState lock;
8110 
8111     lifoBlocksToFree.ref().transferFrom(&lifoBlocksToFreeAfterMinorGC.ref());
8112 
8113     if (lifoBlocksToFree.ref().isEmpty() &&
8114         buffersToFreeAfterMinorGC.ref().empty()) {
8115       return;
8116     }
8117   }
8118 
8119   startBackgroundFree();
8120 }
8121 
8122 JS::AutoDisableGenerationalGC::AutoDisableGenerationalGC(JSContext* cx)
8123     : cx(cx) {
8124   if (!cx->generationalDisabled) {
8125     cx->runtime()->gc.evictNursery(JS::GCReason::DISABLE_GENERATIONAL_GC);
8126     cx->nursery().disable();
8127   }
8128   ++cx->generationalDisabled;
8129 }
8130 
8131 JS::AutoDisableGenerationalGC::~AutoDisableGenerationalGC() {
8132   if (--cx->generationalDisabled == 0 &&
8133       cx->runtime()->gc.tunables.gcMaxNurseryBytes() > 0) {
8134     cx->nursery().enable();
8135   }
8136 }
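
/*
 * Usage sketch: AutoDisableGenerationalGC is an RAII guard and nests, so the
 * nursery is only re-enabled when the outermost guard is destroyed (and only
 * if a nursery is configured at all):
 *
 *   {
 *     JS::AutoDisableGenerationalGC noNursery(cx);
 *     // Allocations here are tenured; a nested guard is harmless.
 *   }  // nursery re-enabled here if this was the outermost guard
 */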
8137 
8138 JS_PUBLIC_API bool JS::IsGenerationalGCEnabled(JSRuntime* rt) {
8139   return !rt->mainContextFromOwnThread()->generationalDisabled;
8140 }
8141 
8142 bool GCRuntime::gcIfRequested() {
8143   // This method returns whether a major GC was performed.
8144 
8145   if (nursery().minorGCRequested()) {
8146     minorGC(nursery().minorGCTriggerReason());
8147   }
8148 
8149   if (majorGCRequested()) {
8150     if (majorGCTriggerReason == JS::GCReason::DELAYED_ATOMS_GC &&
8151         !rt->mainContextFromOwnThread()->canCollectAtoms()) {
8152       // A GC was requested to collect the atoms zone, but it's no longer
8153       // possible. Skip this collection.
8154       majorGCTriggerReason = JS::GCReason::NO_REASON;
8155       return false;
8156     }
8157 
8158     if (!isIncrementalGCInProgress()) {
8159       startGC(JS::GCOptions::Normal, majorGCTriggerReason);
8160     } else {
8161       gcSlice(majorGCTriggerReason);
8162     }
8163     return true;
8164   }
8165 
8166   return false;
8167 }
8168 
8169 void js::gc::FinishGC(JSContext* cx, JS::GCReason reason) {
8170   // Calling this when GC is suppressed won't have any effect.
8171   MOZ_ASSERT(!cx->suppressGC);
8172 
8173   // GC callbacks may run arbitrary code, including JS. Check this regardless of
8174   // whether we GC for this invocation.
8175   MOZ_ASSERT(cx->isNurseryAllocAllowed());
8176 
8177   if (JS::IsIncrementalGCInProgress(cx)) {
8178     JS::PrepareForIncrementalGC(cx);
8179     JS::FinishIncrementalGC(cx, reason);
8180   }
8181 }
8182 
8183 void js::gc::WaitForBackgroundTasks(JSContext* cx) {
8184   cx->runtime()->gc.waitForBackgroundTasks();
8185 }
8186 
8187 void GCRuntime::waitForBackgroundTasks() {
8188   MOZ_ASSERT(!isIncrementalGCInProgress());
8189   MOZ_ASSERT(sweepTask.isIdle());
8190   MOZ_ASSERT(decommitTask.isIdle());
8191   MOZ_ASSERT(markTask.isIdle());
8192 
8193   allocTask.join();
8194   freeTask.join();
8195   nursery().joinDecommitTask();
8196 }
8197 
8198 Realm* js::NewRealm(JSContext* cx, JSPrincipals* principals,
8199                     const JS::RealmOptions& options) {
8200   JSRuntime* rt = cx->runtime();
8201   JS_AbortIfWrongThread(cx);
8202 
8203   UniquePtr<Zone> zoneHolder;
8204   UniquePtr<Compartment> compHolder;
8205 
8206   Compartment* comp = nullptr;
8207   Zone* zone = nullptr;
8208   JS::CompartmentSpecifier compSpec =
8209       options.creationOptions().compartmentSpecifier();
8210   switch (compSpec) {
8211     case JS::CompartmentSpecifier::NewCompartmentInSystemZone:
8212       // systemZone might be null here, in which case we'll make a zone and
8213       // set this field below.
8214       zone = rt->gc.systemZone;
8215       break;
8216     case JS::CompartmentSpecifier::NewCompartmentInExistingZone:
8217       zone = options.creationOptions().zone();
8218       MOZ_ASSERT(zone);
8219       break;
8220     case JS::CompartmentSpecifier::ExistingCompartment:
8221       comp = options.creationOptions().compartment();
8222       zone = comp->zone();
8223       break;
8224     case JS::CompartmentSpecifier::NewCompartmentAndZone:
8225     case JS::CompartmentSpecifier::NewCompartmentInSelfHostingZone:
8226       break;
8227   }
8228 
8229   if (!zone) {
8230     Zone::Kind kind = Zone::NormalZone;
8231     const JSPrincipals* trusted = rt->trustedPrincipals();
8232     if (compSpec == JS::CompartmentSpecifier::NewCompartmentInSelfHostingZone) {
8233       MOZ_ASSERT(!rt->hasInitializedSelfHosting());
8234       kind = Zone::SelfHostingZone;
8235     } else if (compSpec ==
8236                    JS::CompartmentSpecifier::NewCompartmentInSystemZone ||
8237                (principals && principals == trusted)) {
8238       kind = Zone::SystemZone;
8239     }
8240 
8241     zoneHolder = MakeUnique<Zone>(cx->runtime(), kind);
8242     if (!zoneHolder || !zoneHolder->init()) {
8243       ReportOutOfMemory(cx);
8244       return nullptr;
8245     }
8246 
8247     zone = zoneHolder.get();
8248   }
8249 
8250   bool invisibleToDebugger = options.creationOptions().invisibleToDebugger();
8251   if (comp) {
8252     // Debugger visibility is per-compartment, not per-realm, so make sure the
8253     // new realm's visibility matches its compartment's.
8254     MOZ_ASSERT(comp->invisibleToDebugger() == invisibleToDebugger);
8255   } else {
8256     compHolder = cx->make_unique<JS::Compartment>(zone, invisibleToDebugger);
8257     if (!compHolder) {
8258       return nullptr;
8259     }
8260 
8261     comp = compHolder.get();
8262   }
8263 
8264   UniquePtr<Realm> realm(cx->new_<Realm>(comp, options));
8265   if (!realm || !realm->init(cx, principals)) {
8266     return nullptr;
8267   }
8268 
8269   // Make sure we don't put system and non-system realms in the same
8270   // compartment.
8271   if (!compHolder) {
8272     MOZ_RELEASE_ASSERT(realm->isSystem() == IsSystemCompartment(comp));
8273   }
8274 
8275   AutoLockGC lock(rt);
8276 
8277   // Reserve space in the Vectors before we start mutating them.
8278   if (!comp->realms().reserve(comp->realms().length() + 1) ||
8279       (compHolder &&
8280        !zone->compartments().reserve(zone->compartments().length() + 1)) ||
8281       (zoneHolder && !rt->gc.zones().reserve(rt->gc.zones().length() + 1))) {
8282     ReportOutOfMemory(cx);
8283     return nullptr;
8284   }
8285 
8286   // After this everything must be infallible.
8287 
8288   comp->realms().infallibleAppend(realm.get());
8289 
8290   if (compHolder) {
8291     zone->compartments().infallibleAppend(compHolder.release());
8292   }
8293 
8294   if (zoneHolder) {
8295     rt->gc.zones().infallibleAppend(zoneHolder.release());
8296 
8297     // Lazily set the runtime's system zone.
8298     if (compSpec == JS::CompartmentSpecifier::NewCompartmentInSystemZone) {
8299       MOZ_RELEASE_ASSERT(!rt->gc.systemZone);
8300       MOZ_ASSERT(zone->isSystemZone());
8301       rt->gc.systemZone = zone;
8302     }
8303   }
8304 
8305   return realm.release();
8306 }
8307 
8308 void gc::MergeRealms(Realm* source, Realm* target) {
8309   JSRuntime* rt = source->runtimeFromMainThread();
8310   rt->gc.mergeRealms(source, target);
8311   rt->gc.maybeTriggerGCAfterAlloc(target->zone());
8312   rt->gc.maybeTriggerGCAfterMalloc(target->zone());
8313 }
8314 
8315 void GCRuntime::mergeRealms(Realm* source, Realm* target) {
8316   // The source realm must be specifically flagged as mergeable.  This
8317   // also implies that the realm is not visible to the debugger.
8318   MOZ_ASSERT(source->creationOptions().mergeable());
8319   MOZ_ASSERT(source->creationOptions().invisibleToDebugger());
8320 
8321   MOZ_ASSERT(!source->hasBeenEnteredIgnoringJit());
8322   MOZ_ASSERT(source->zone()->compartments().length() == 1);
8323 
8324   JSContext* cx = rt->mainContextFromOwnThread();
8325 
8326   MOZ_ASSERT(!source->zone()->wasGCStarted());
8327   JS::AutoAssertNoGC nogc(cx);
8328 
8329   AutoTraceSession session(rt);
8330 
8331   // Cleanup tables and other state in the source realm/zone that will be
8332   // meaningless after merging into the target realm/zone.
8333 
8334   source->clearTables();
8335   source->zone()->clearTables();
8336   source->unsetIsDebuggee();
8337 
8338 #ifdef DEBUG
8339   // Release any relocated arenas which we may be holding on to as they might
8340   // be in the source zone.
8341   releaseHeldRelocatedArenas();
8342 #endif
8343 
8344   // Fixup realm pointers in source to refer to target, and make sure
8345   // type information generations are in sync.
8346 
8347   GlobalObject* global = target->maybeGlobal();
8348   MOZ_ASSERT(global);
8349   AssertTargetIsNotGray(global);
8350 
8351   for (auto baseShape = source->zone()->cellIterUnsafe<BaseShape>();
8352        !baseShape.done(); baseShape.next()) {
8353     baseShape->setRealmForMergeRealms(target);
8354 
8355     // Replace placeholder object prototypes with the correct prototype in
8356     // the target realm.
8357     TaggedProto proto = baseShape->proto();
8358     if (proto.isObject()) {
8359       JSObject* obj = proto.toObject();
8360       if (GlobalObject::isOffThreadPrototypePlaceholder(obj)) {
8361         JSObject* targetProto =
8362             global->getPrototypeForOffThreadPlaceholder(obj);
8363         MOZ_ASSERT(targetProto->isUsedAsPrototype());
8364         baseShape->setProtoForMergeRealms(TaggedProto(targetProto));
8365       }
8366     }
8367   }
8368 
8369   // Fixup zone pointers in source's zone to refer to target's zone.
8370 
8371   bool targetZoneIsCollecting = target->zone()->gcState() > Zone::Prepare;
8372   for (auto thingKind : AllAllocKinds()) {
8373     for (ArenaIter aiter(source->zone(), thingKind); !aiter.done();
8374          aiter.next()) {
8375       Arena* arena = aiter.get();
8376       arena->zone = target->zone();
8377       if (MOZ_UNLIKELY(targetZoneIsCollecting)) {
8378         // If we are currently collecting the target zone then we must
8379         // treat all merged things as if they were allocated during the
8380         // collection.
8381         for (ArenaCellIter cell(arena); !cell.done(); cell.next()) {
8382           MOZ_ASSERT(!cell->isMarkedAny());
8383           cell->markBlack();
8384         }
8385       }
8386     }
8387   }
8388 
8389   // The source should be the only realm in its zone.
8390   for (RealmsInZoneIter r(source->zone()); !r.done(); r.next()) {
8391     MOZ_ASSERT(r.get() == source);
8392   }
8393 
8394   // Merge the allocator, stats and UIDs in source's zone into target's zone.
8395   target->zone()->arenas.adoptArenas(&source->zone()->arenas,
8396                                      targetZoneIsCollecting);
8397   target->zone()->addTenuredAllocsSinceMinorGC(
8398       source->zone()->getAndResetTenuredAllocsSinceMinorGC());
8399   target->zone()->gcHeapSize.adopt(source->zone()->gcHeapSize);
8400   target->zone()->adoptUniqueIds(source->zone());
8401   target->zone()->adoptMallocBytes(source->zone());
8402 
8403   // Atoms which are marked in source's zone are now marked in target's zone.
8404   atomMarking.adoptMarkedAtoms(target->zone(), source->zone());
8405 
8406   // The source Realm is a parse-only realm and should not have collected any
8407   // zone-tracked metadata.
8408   Zone* sourceZone = source->zone();
8409   MOZ_ASSERT(!sourceZone->scriptLCovMap);
8410   MOZ_ASSERT(!sourceZone->scriptCountsMap);
8411   MOZ_ASSERT(!sourceZone->debugScriptMap);
8412 #ifdef MOZ_VTUNE
8413   MOZ_ASSERT(!sourceZone->scriptVTuneIdMap);
8414 #endif
8415 #ifdef JS_CACHEIR_SPEW
8416   MOZ_ASSERT(!sourceZone->scriptFinalWarmUpCountMap);
8417 #endif
8418 
8419   // The source realm is now completely empty, and is the only realm in its
8420   // compartment, which is the only compartment in its zone. Delete realm,
8421   // compartment and zone without waiting for this to be cleaned up by a full
8422   // GC.
8423 
8424   sourceZone->deleteEmptyCompartment(source->compartment());
8425   deleteEmptyZone(sourceZone);
8426 }
8427 
8428 void GCRuntime::runDebugGC() {
8429 #ifdef JS_GC_ZEAL
8430   if (rt->mainContextFromOwnThread()->suppressGC) {
8431     return;
8432   }
8433 
8434   if (hasZealMode(ZealMode::GenerationalGC)) {
8435     return minorGC(JS::GCReason::DEBUG_GC);
8436   }
8437 
8438   PrepareForDebugGC(rt);
8439 
8440   auto budget = SliceBudget::unlimited();
8441   if (hasZealMode(ZealMode::IncrementalMultipleSlices)) {
8442     /*
8443      * Start with a small slice limit and double it every slice. This
8444      * ensures that we get multiple slices, and that the collection runs to
8445      * completion.
8446      */
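    /*
     * For example, with zealFrequency = 100 the work budgets of successive
     * slices are 50, 100, 200, 400, ... units, dropping back to 50 once the
     * collection reaches the sweep or compact phase (see the reset below).
     */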
8447     if (!isIncrementalGCInProgress()) {
8448       zealSliceBudget = zealFrequency / 2;
8449     } else {
8450       zealSliceBudget *= 2;
8451     }
8452     budget = SliceBudget(WorkBudget(zealSliceBudget));
8453 
8454     js::gc::State initialState = incrementalState;
8455     MaybeGCOptions options =
8456         isIncrementalGCInProgress() ? Nothing() : Some(JS::GCOptions::Shrink);
8457     collect(false, budget, options, JS::GCReason::DEBUG_GC);
8458 
8459     /* Reset the slice size when we get to the sweep or compact phases. */
8460     if ((initialState == State::Mark && incrementalState == State::Sweep) ||
8461         (initialState == State::Sweep && incrementalState == State::Compact)) {
8462       zealSliceBudget = zealFrequency / 2;
8463     }
8464   } else if (hasIncrementalTwoSliceZealMode()) {
8465     // These modes trigger incremental GC that happens in two slices and the
8466     // supplied budget is ignored by incrementalSlice.
8467     budget = SliceBudget(WorkBudget(1));
8468 
8469     MaybeGCOptions options =
8470         isIncrementalGCInProgress() ? Nothing() : Some(JS::GCOptions::Normal);
8471     collect(false, budget, options, JS::GCReason::DEBUG_GC);
8472   } else if (hasZealMode(ZealMode::Compact)) {
8473     gc(JS::GCOptions::Shrink, JS::GCReason::DEBUG_GC);
8474   } else {
8475     gc(JS::GCOptions::Normal, JS::GCReason::DEBUG_GC);
8476   }
8477 
8478 #endif
8479 }
8480 
8481 void GCRuntime::setFullCompartmentChecks(bool enabled) {
8482   MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
8483   fullCompartmentChecks = enabled;
8484 }
8485 
8486 void GCRuntime::notifyRootsRemoved() {
8487   rootsRemoved = true;
8488 
8489 #ifdef JS_GC_ZEAL
8490   /* Schedule a GC to happen "soon". */
8491   if (hasZealMode(ZealMode::RootsChange)) {
8492     nextScheduled = 1;
8493   }
8494 #endif
8495 }
8496 
8497 #ifdef JS_GC_ZEAL
8498 bool GCRuntime::selectForMarking(JSObject* object) {
8499   MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
8500   return selectedForMarking.ref().get().append(object);
8501 }
8502 
8503 void GCRuntime::clearSelectedForMarking() {
8504   selectedForMarking.ref().get().clearAndFree();
8505 }
8506 
8507 void GCRuntime::setDeterministic(bool enabled) {
8508   MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
8509   deterministicOnly = enabled;
8510 }
8511 #endif
8512 
8513 #ifdef DEBUG
8514 
8515 /* Should only be called manually under gdb */
8516 void PreventGCDuringInteractiveDebug() { TlsContext.get()->suppressGC++; }
8517 
8518 #endif
8519 
8520 void js::ReleaseAllJITCode(JSFreeOp* fop) {
8521   js::CancelOffThreadIonCompile(fop->runtime());
8522 
8523   for (ZonesIter zone(fop->runtime(), SkipAtoms); !zone.done(); zone.next()) {
8524     zone->setPreservingCode(false);
8525     zone->discardJitCode(fop);
8526   }
8527 
8528   for (RealmsIter realm(fop->runtime()); !realm.done(); realm.next()) {
8529     if (jit::JitRealm* jitRealm = realm->jitRealm()) {
8530       jitRealm->discardStubs();
8531     }
8532   }
8533 }
8534 
8535 void ArenaLists::adoptArenas(ArenaLists* fromArenaLists,
8536                              bool targetZoneIsCollecting) {
8537   // GC may be active so take the lock here so we can mutate the arena lists.
8538   AutoLockGC lock(runtime());
8539 
8540   fromArenaLists->clearFreeLists();
8541 
8542   for (auto thingKind : AllAllocKinds()) {
8543     MOZ_ASSERT(fromArenaLists->concurrentUse(thingKind) == ConcurrentUse::None);
8544     ArenaList* fromList = &fromArenaLists->arenaList(thingKind);
8545     ArenaList* toList = &arenaList(thingKind);
8546     fromList->check();
8547     toList->check();
8548     Arena* next;
8549     for (Arena* fromArena = fromList->head(); fromArena; fromArena = next) {
8550       // Copy fromArena->next before releasing/reinserting.
8551       next = fromArena->next;
8552 
8553 #ifdef DEBUG
8554       MOZ_ASSERT(!fromArena->isEmpty());
8555       if (targetZoneIsCollecting) {
8556         fromArena->checkAllCellsMarkedBlack();
8557       } else {
8558         fromArena->checkNoMarkedCells();
8559       }
8560 #endif
8561 
8562       // If the target zone is being collected then we need to add the
8563       // arenas before the cursor because the collector assumes that the
8564       // cursor is always at the end of the list. This has the side-effect
8565       // of preventing allocation into any non-full arenas until the end
8566       // of the next GC.
8567       if (targetZoneIsCollecting) {
8568         toList->insertBeforeCursor(fromArena);
8569       } else {
8570         toList->insertAtCursor(fromArena);
8571       }
8572     }
8573     fromList->clear();
8574     toList->check();
8575   }
8576 }
8577 
8578 AutoSuppressGC::AutoSuppressGC(JSContext* cx)
8579     : suppressGC_(cx->suppressGC.ref()) {
8580   suppressGC_++;
8581 }
8582 
8583 #ifdef DEBUG
8584 AutoDisableProxyCheck::AutoDisableProxyCheck() {
8585   TlsContext.get()->disableStrictProxyChecking();
8586 }
8587 
8588 AutoDisableProxyCheck::~AutoDisableProxyCheck() {
8589   TlsContext.get()->enableStrictProxyChecking();
8590 }
8591 
8592 JS_PUBLIC_API void JS::AssertGCThingMustBeTenured(JSObject* obj) {
8593   MOZ_ASSERT(obj->isTenured() &&
8594              (!IsNurseryAllocable(obj->asTenured().getAllocKind()) ||
8595               obj->getClass()->hasFinalize()));
8596 }
8597 
8598 JS_PUBLIC_API void JS::AssertGCThingIsNotNurseryAllocable(Cell* cell) {
8599   MOZ_ASSERT(cell);
8600   MOZ_ASSERT(!cell->is<JSObject>() && !cell->is<JSString>() &&
8601              !cell->is<JS::BigInt>());
8602 }
8603 
8604 JS_PUBLIC_API void js::gc::AssertGCThingHasType(js::gc::Cell* cell,
8605                                                 JS::TraceKind kind) {
8606   if (!cell) {
8607     MOZ_ASSERT(kind == JS::TraceKind::Null);
8608     return;
8609   }
8610 
8611   MOZ_ASSERT(IsCellPointerValid(cell));
8612   MOZ_ASSERT(cell->getTraceKind() == kind);
8613 }
8614 #endif
8615 
8616 #ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
8617 
8618 JS::AutoAssertNoGC::AutoAssertNoGC(JSContext* maybecx)
8619     : cx_(maybecx ? maybecx : TlsContext.get()) {
8620   if (cx_) {
8621     cx_->inUnsafeRegion++;
8622   }
8623 }
8624 
8625 JS::AutoAssertNoGC::~AutoAssertNoGC() {
8626   if (cx_) {
8627     MOZ_ASSERT(cx_->inUnsafeRegion > 0);
8628     cx_->inUnsafeRegion--;
8629   }
8630 }
8631 
8632 #endif  // MOZ_DIAGNOSTIC_ASSERT_ENABLED
8633 
8634 #ifdef DEBUG
8635 
8636 AutoAssertNoNurseryAlloc::AutoAssertNoNurseryAlloc() {
8637   TlsContext.get()->disallowNurseryAlloc();
8638 }
8639 
8640 AutoAssertNoNurseryAlloc::~AutoAssertNoNurseryAlloc() {
8641   TlsContext.get()->allowNurseryAlloc();
8642 }
8643 
8644 JS::AutoEnterCycleCollection::AutoEnterCycleCollection(JSRuntime* rt)
8645     : runtime_(rt) {
8646   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
8647   MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
8648   runtime_->gc.heapState_ = HeapState::CycleCollecting;
8649 }
8650 
8651 JS::AutoEnterCycleCollection::~AutoEnterCycleCollection() {
8652   MOZ_ASSERT(JS::RuntimeHeapIsCycleCollecting());
8653   runtime_->gc.heapState_ = HeapState::Idle;
8654 }
8655 
8656 JS::AutoAssertGCCallback::AutoAssertGCCallback() : AutoSuppressGCAnalysis() {
8657   MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
8658 }
8659 
8660 #endif  // DEBUG
8661 
8662 JS_PUBLIC_API const char* JS::GCTraceKindToAscii(JS::TraceKind kind) {
8663   switch (kind) {
8664 #define MAP_NAME(name, _0, _1, _2) \
8665   case JS::TraceKind::name:        \
8666     return "JS " #name;
8667     JS_FOR_EACH_TRACEKIND(MAP_NAME);
8668 #undef MAP_NAME
8669     default:
8670       return "Invalid";
8671   }
8672 }
8673 
8674 JS_PUBLIC_API size_t JS::GCTraceKindSize(JS::TraceKind kind) {
8675   switch (kind) {
8676 #define MAP_SIZE(name, type, _0, _1) \
8677   case JS::TraceKind::name:          \
8678     return sizeof(type);
8679     JS_FOR_EACH_TRACEKIND(MAP_SIZE);
8680 #undef MAP_SIZE
8681     default:
8682       return 0;
8683   }
8684 }
8685 
8686 JS::GCCellPtr::GCCellPtr(const Value& v)
8687     : GCCellPtr(v.toGCThing(), v.traceKind()) {}
8688 
8689 JS::TraceKind JS::GCCellPtr::outOfLineKind() const {
8690   MOZ_ASSERT((ptr & OutOfLineTraceKindMask) == OutOfLineTraceKindMask);
8691   MOZ_ASSERT(asCell()->isTenured());
8692   return MapAllocToTraceKind(asCell()->asTenured().getAllocKind());
8693 }
8694 
8695 #ifdef JSGC_HASH_TABLE_CHECKS
8696 void GCRuntime::checkHashTablesAfterMovingGC() {
8697   /*
8698    * Check that internal hash tables no longer have any pointers to things
8699    * that have been moved.
8700    */
8701   rt->geckoProfiler().checkStringsMapAfterMovingGC();
8702   for (ZonesIter zone(this, SkipAtoms); !zone.done(); zone.next()) {
8703     zone->checkUniqueIdTableAfterMovingGC();
8704     zone->shapeZone().checkTablesAfterMovingGC();
8705     zone->checkAllCrossCompartmentWrappersAfterMovingGC();
8706     zone->checkScriptMapsAfterMovingGC();
8707 
8708     // Note: CompactPropMaps never have a table.
8709     JS::AutoCheckCannotGC nogc;
8710     for (auto map = zone->cellIterUnsafe<NormalPropMap>(); !map.done();
8711          map.next()) {
8712       if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
8713         table->checkAfterMovingGC();
8714       }
8715     }
8716     for (auto map = zone->cellIterUnsafe<DictionaryPropMap>(); !map.done();
8717          map.next()) {
8718       if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
8719         table->checkAfterMovingGC();
8720       }
8721     }
8722   }
8723 
8724   for (CompartmentsIter c(this); !c.done(); c.next()) {
8725     for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
8726       r->dtoaCache.checkCacheAfterMovingGC();
8727       if (r->debugEnvs()) {
8728         r->debugEnvs()->checkHashTablesAfterMovingGC();
8729       }
8730     }
8731   }
8732 }
8733 #endif
8734 
8735 #ifdef DEBUG
8736 bool GCRuntime::hasZone(Zone* target) {
8737   for (AllZonesIter zone(this); !zone.done(); zone.next()) {
8738     if (zone == target) {
8739       return true;
8740     }
8741   }
8742   return false;
8743 }
8744 #endif
8745 
8746 JS_PUBLIC_API void JS::PrepareZoneForGC(JSContext* cx, Zone* zone) {
8747   AssertHeapIsIdle();
8748   CHECK_THREAD(cx);
8749   MOZ_ASSERT(cx->runtime()->gc.hasZone(zone));
8750 
8751   zone->scheduleGC();
8752 }
8753 
8754 JS_PUBLIC_API void JS::PrepareForFullGC(JSContext* cx) {
8755   AssertHeapIsIdle();
8756   CHECK_THREAD(cx);
8757 
8758   for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
8759     zone->scheduleGC();
8760   }
8761 }
8762 
8763 JS_PUBLIC_API void JS::PrepareForIncrementalGC(JSContext* cx) {
8764   AssertHeapIsIdle();
8765   CHECK_THREAD(cx);
8766 
8767   if (!JS::IsIncrementalGCInProgress(cx)) {
8768     return;
8769   }
8770 
8771   for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
8772     if (zone->wasGCStarted()) {
8773       zone->scheduleGC();
8774     }
8775   }
8776 }
8777 
8778 JS_PUBLIC_API bool JS::IsGCScheduled(JSContext* cx) {
8779   AssertHeapIsIdle();
8780   CHECK_THREAD(cx);
8781 
8782   for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
8783     if (zone->isGCScheduled()) {
8784       return true;
8785     }
8786   }
8787 
8788   return false;
8789 }
8790 
8791 JS_PUBLIC_API void JS::SkipZoneForGC(JSContext* cx, Zone* zone) {
8792   AssertHeapIsIdle();
8793   CHECK_THREAD(cx);
8794   MOZ_ASSERT(cx->runtime()->gc.hasZone(zone));
8795 
8796   zone->unscheduleGC();
8797 }
8798 
8799 JS_PUBLIC_API void JS::NonIncrementalGC(JSContext* cx, JS::GCOptions options,
8800                                         GCReason reason) {
8801   AssertHeapIsIdle();
8802   CHECK_THREAD(cx);
8803   MOZ_ASSERT(options == JS::GCOptions::Normal ||
8804              options == JS::GCOptions::Shrink);
8805 
8806   cx->runtime()->gc.gc(options, reason);
8807 
8808   MOZ_ASSERT(!IsIncrementalGCInProgress(cx));
8809 }
8810 
8811 JS_PUBLIC_API void JS::StartIncrementalGC(JSContext* cx, JS::GCOptions options,
8812                                           GCReason reason, int64_t millis) {
8813   AssertHeapIsIdle();
8814   CHECK_THREAD(cx);
8815   MOZ_ASSERT(options == JS::GCOptions::Normal ||
8816              options == JS::GCOptions::Shrink);
8817 
8818   cx->runtime()->gc.startGC(options, reason, millis);
8819 }
8820 
8821 JS_PUBLIC_API void JS::IncrementalGCSlice(JSContext* cx, GCReason reason,
8822                                           int64_t millis) {
8823   AssertHeapIsIdle();
8824   CHECK_THREAD(cx);
8825 
8826   cx->runtime()->gc.gcSlice(reason, millis);
8827 }
8828 
8829 JS_PUBLIC_API bool JS::IncrementalGCHasForegroundWork(JSContext* cx) {
8830   AssertHeapIsIdle();
8831   CHECK_THREAD(cx);
8832 
8833   return cx->runtime()->gc.hasForegroundWork();
8834 }
8835 
8836 JS_PUBLIC_API void JS::FinishIncrementalGC(JSContext* cx, GCReason reason) {
8837   AssertHeapIsIdle();
8838   CHECK_THREAD(cx);
8839 
8840   cx->runtime()->gc.finishGC(reason);
8841 }
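
/*
 * Putting the public slice API together (illustrative sketch of an embedder
 * driver; a real embedder schedules slices from its event loop rather than
 * spinning in a loop like this):
 *
 *   void RunFullIncrementalGC(JSContext* cx) {
 *     JS::PrepareForFullGC(cx);
 *     JS::StartIncrementalGC(cx, JS::GCOptions::Normal, JS::GCReason::API,
 *                            10);  // 10 ms slice budget
 *     while (JS::IsIncrementalGCInProgress(cx)) {
 *       // ... run JS, wait for idle time ...
 *       JS::PrepareForIncrementalGC(cx);
 *       JS::IncrementalGCSlice(cx, JS::GCReason::API, 10);
 *     }
 *   }
 */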
8842 
8843 JS_PUBLIC_API void JS::AbortIncrementalGC(JSContext* cx) {
8844   AssertHeapIsIdle();
8845   CHECK_THREAD(cx);
8846 
8847   if (IsIncrementalGCInProgress(cx)) {
8848     cx->runtime()->gc.abortGC();
8849   }
8850 }
8851 
8852 char16_t* JS::GCDescription::formatSliceMessage(JSContext* cx) const {
8853   UniqueChars cstr = cx->runtime()->gc.stats().formatCompactSliceMessage();
8854 
8855   size_t nchars = strlen(cstr.get());
8856   UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
8857   if (!out) {
8858     return nullptr;
8859   }
8860   out.get()[nchars] = 0;
8861 
8862   CopyAndInflateChars(out.get(), cstr.get(), nchars);
8863   return out.release();
8864 }
8865 
8866 char16_t* JS::GCDescription::formatSummaryMessage(JSContext* cx) const {
8867   UniqueChars cstr = cx->runtime()->gc.stats().formatCompactSummaryMessage();
8868 
8869   size_t nchars = strlen(cstr.get());
8870   UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
8871   if (!out) {
8872     return nullptr;
8873   }
8874   out.get()[nchars] = 0;
8875 
8876   CopyAndInflateChars(out.get(), cstr.get(), nchars);
8877   return out.release();
8878 }
8879 
toGCEvent(JSContext * cx) const8880 JS::dbg::GarbageCollectionEvent::Ptr JS::GCDescription::toGCEvent(
8881     JSContext* cx) const {
8882   return JS::dbg::GarbageCollectionEvent::Create(
8883       cx->runtime(), cx->runtime()->gc.stats(),
8884       cx->runtime()->gc.majorGCCount());
8885 }
8886 
startTime(JSContext * cx) const8887 TimeStamp JS::GCDescription::startTime(JSContext* cx) const {
8888   return cx->runtime()->gc.stats().start();
8889 }
8890 
endTime(JSContext * cx) const8891 TimeStamp JS::GCDescription::endTime(JSContext* cx) const {
8892   return cx->runtime()->gc.stats().end();
8893 }
8894 
lastSliceStart(JSContext * cx) const8895 TimeStamp JS::GCDescription::lastSliceStart(JSContext* cx) const {
8896   return cx->runtime()->gc.stats().slices().back().start;
8897 }
8898 
lastSliceEnd(JSContext * cx) const8899 TimeStamp JS::GCDescription::lastSliceEnd(JSContext* cx) const {
8900   return cx->runtime()->gc.stats().slices().back().end;
8901 }
8902 
sliceToJSONProfiler(JSContext * cx) const8903 JS::UniqueChars JS::GCDescription::sliceToJSONProfiler(JSContext* cx) const {
8904   size_t slices = cx->runtime()->gc.stats().slices().length();
8905   MOZ_ASSERT(slices > 0);
8906   return cx->runtime()->gc.stats().renderJsonSlice(slices - 1);
8907 }
8908 
formatJSONProfiler(JSContext * cx) const8909 JS::UniqueChars JS::GCDescription::formatJSONProfiler(JSContext* cx) const {
8910   return cx->runtime()->gc.stats().renderJsonMessage();
8911 }
8912 
MinorGcToJSON(JSContext * cx)8913 JS_PUBLIC_API JS::UniqueChars JS::MinorGcToJSON(JSContext* cx) {
8914   JSRuntime* rt = cx->runtime();
8915   return rt->gc.stats().renderNurseryJson();
8916 }
8917 
SetGCSliceCallback(JSContext * cx,GCSliceCallback callback)8918 JS_PUBLIC_API JS::GCSliceCallback JS::SetGCSliceCallback(
8919     JSContext* cx, GCSliceCallback callback) {
8920   return cx->runtime()->gc.setSliceCallback(callback);
8921 }
8922 
SetDoCycleCollectionCallback(JSContext * cx,JS::DoCycleCollectionCallback callback)8923 JS_PUBLIC_API JS::DoCycleCollectionCallback JS::SetDoCycleCollectionCallback(
8924     JSContext* cx, JS::DoCycleCollectionCallback callback) {
8925   return cx->runtime()->gc.setDoCycleCollectionCallback(callback);
8926 }
8927 
8928 JS_PUBLIC_API JS::GCNurseryCollectionCallback
SetGCNurseryCollectionCallback(JSContext * cx,GCNurseryCollectionCallback callback)8929 JS::SetGCNurseryCollectionCallback(JSContext* cx,
8930                                    GCNurseryCollectionCallback callback) {
8931   return cx->runtime()->gc.setNurseryCollectionCallback(callback);
8932 }
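// Illustrative sketch: a slice callback registered via SetGCSliceCallback()
// receives a JS::GCDescription and can use the formatting helpers above to
// report GC activity. The callback signature shown here follows the
// declaration in the public GC API headers; EmbedderLog() is a placeholder
// for whatever logging facility the embedder has.
//
//   static void MyGCSliceCallback(JSContext* cx, JS::GCProgress progress,
//                                 const JS::GCDescription& desc) {
//     if (JS::UniqueChars json = desc.formatJSONProfiler(cx)) {
//       EmbedderLog("GC: %s", json.get());
//     }
//   }
//
//   JS::GCSliceCallback prev = JS::SetGCSliceCallback(cx, MyGCSliceCallback);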

JS_PUBLIC_API void JS::SetLowMemoryState(JSContext* cx, bool newState) {
  return cx->runtime()->gc.setLowMemoryState(newState);
}

JS_PUBLIC_API void JS::DisableIncrementalGC(JSContext* cx) {
  cx->runtime()->gc.disallowIncrementalGC();
}

JS_PUBLIC_API bool JS::IsIncrementalGCEnabled(JSContext* cx) {
  GCRuntime& gc = cx->runtime()->gc;
  return gc.isIncrementalGCEnabled() && gc.isIncrementalGCAllowed();
}

JS_PUBLIC_API bool JS::IsIncrementalGCInProgress(JSContext* cx) {
  return cx->runtime()->gc.isIncrementalGCInProgress();
}

JS_PUBLIC_API bool JS::IsIncrementalGCInProgress(JSRuntime* rt) {
  return rt->gc.isIncrementalGCInProgress() &&
         !rt->gc.isVerifyPreBarriersEnabled();
}

JS_PUBLIC_API bool JS::IsIncrementalBarrierNeeded(JSContext* cx) {
  if (JS::RuntimeHeapIsBusy()) {
    return false;
  }

  auto state = cx->runtime()->gc.state();
  return state != gc::State::NotActive && state <= gc::State::Sweep;
}

JS_PUBLIC_API void JS::IncrementalPreWriteBarrier(JSObject* obj) {
  if (!obj) {
    return;
  }

  AutoGeckoProfilerEntry profilingStackFrame(
      TlsContext.get(), "IncrementalPreWriteBarrier(JSObject*)",
      JS::ProfilingCategoryPair::GCCC_Barrier);
  PreWriteBarrier(obj);
}

JS_PUBLIC_API void JS::IncrementalPreWriteBarrier(GCCellPtr thing) {
  if (!thing) {
    return;
  }

  AutoGeckoProfilerEntry profilingStackFrame(
      TlsContext.get(), "IncrementalPreWriteBarrier(GCCellPtr)",
      JS::ProfilingCategoryPair::GCCC_Barrier);
  CellPtrPreWriteBarrier(thing);
}
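// Illustrative sketch: an embedder that keeps a GC pointer in its own data
// structure (outside the JS heap) would fire the pre-write barrier on the old
// value before overwriting or clearing that edge, so incremental marking does
// not lose it. The wrapper and field names below are placeholders.
//
//   if (JS::IsIncrementalBarrierNeeded(cx)) {
//     JS::IncrementalPreWriteBarrier(myWrapper->cachedObject);
//   }
//   myWrapper->cachedObject = newObject;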

JS_PUBLIC_API bool JS::WasIncrementalGC(JSRuntime* rt) {
  return rt->gc.isIncrementalGc();
}

uint64_t js::gc::NextCellUniqueId(JSRuntime* rt) {
  return rt->gc.nextCellUniqueId();
}

namespace js {
namespace gc {
namespace MemInfo {
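// The natives below back the properties of the memory info object created by
// NewMemoryInfoObject() further down. Each getter reports a single GC counter
// or threshold as a JS number (GCHighFreqGetter reports a boolean).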

static bool GCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->runtime()->gc.heapSize.bytes()));
  return true;
}

static bool MallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  double bytes = 0;
  for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
    bytes += zone->mallocHeapSize.bytes();
  }
  args.rval().setNumber(bytes);
  return true;
}

static bool GCMaxBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->runtime()->gc.tunables.gcMaxBytes()));
  return true;
}

static bool GCHighFreqGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setBoolean(
      cx->runtime()->gc.schedulingState.inHighFrequencyGCMode());
  return true;
}

static bool GCNumberGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->runtime()->gc.gcNumber()));
  return true;
}

static bool MajorGCCountGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->runtime()->gc.majorGCCount()));
  return true;
}

static bool MinorGCCountGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->runtime()->gc.minorGCCount()));
  return true;
}

static bool GCSliceCountGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->runtime()->gc.gcSliceCount()));
  return true;
}

static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->zone()->gcHeapSize.bytes()));
  return true;
}

static bool ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->zone()->gcHeapThreshold.startBytes()));
  return true;
}

static bool ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  bool highFrequency =
      cx->runtime()->gc.schedulingState.inHighFrequencyGCMode();
  args.rval().setNumber(
      double(cx->zone()->gcHeapThreshold.eagerAllocTrigger(highFrequency)));
  return true;
}

static bool ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->zone()->mallocHeapSize.bytes()));
  return true;
}

static bool ZoneMallocTriggerBytesGetter(JSContext* cx, unsigned argc,
                                         Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->zone()->mallocHeapThreshold.startBytes()));
  return true;
}

static bool ZoneGCNumberGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setNumber(double(cx->zone()->gcNumber()));
  return true;
}

#ifdef DEBUG
static bool DummyGetter(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  args.rval().setUndefined();
  return true;
}
#endif

} /* namespace MemInfo */

JSObject* NewMemoryInfoObject(JSContext* cx) {
  RootedObject obj(cx, JS_NewObject(cx, nullptr));
  if (!obj) {
    return nullptr;
  }

  using namespace MemInfo;
  struct NamedGetter {
    const char* name;
    JSNative getter;
  } getters[] = {{"gcBytes", GCBytesGetter},
                 {"gcMaxBytes", GCMaxBytesGetter},
                 {"mallocBytes", MallocBytesGetter},
                 {"gcIsHighFrequencyMode", GCHighFreqGetter},
                 {"gcNumber", GCNumberGetter},
                 {"majorGCCount", MajorGCCountGetter},
                 {"minorGCCount", MinorGCCountGetter},
                 {"sliceCount", GCSliceCountGetter}};

  for (auto pair : getters) {
    JSNative getter = pair.getter;

#ifdef DEBUG
    if (js::SupportDifferentialTesting()) {
      getter = DummyGetter;
    }
#endif

    if (!JS_DefineProperty(cx, obj, pair.name, getter, nullptr,
                           JSPROP_ENUMERATE)) {
      return nullptr;
    }
  }

  RootedObject zoneObj(cx, JS_NewObject(cx, nullptr));
  if (!zoneObj) {
    return nullptr;
  }

  if (!JS_DefineProperty(cx, obj, "zone", zoneObj, JSPROP_ENUMERATE)) {
    return nullptr;
  }

  struct NamedZoneGetter {
    const char* name;
    JSNative getter;
  } zoneGetters[] = {{"gcBytes", ZoneGCBytesGetter},
                     {"gcTriggerBytes", ZoneGCTriggerBytesGetter},
                     {"gcAllocTrigger", ZoneGCAllocTriggerGetter},
                     {"mallocBytes", ZoneMallocBytesGetter},
                     {"mallocTriggerBytes", ZoneMallocTriggerBytesGetter},
                     {"gcNumber", ZoneGCNumberGetter}};

  for (auto pair : zoneGetters) {
    JSNative getter = pair.getter;

#ifdef DEBUG
    if (js::SupportDifferentialTesting()) {
      getter = DummyGetter;
    }
#endif

    if (!JS_DefineProperty(cx, zoneObj, pair.name, getter, nullptr,
                           JSPROP_ENUMERATE)) {
      return nullptr;
    }
  }

  return obj;
}
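// Illustrative sketch: if an embedder exposes the object returned by
// NewMemoryInfoObject() to script (where, and under what name, is up to the
// embedder), the getters defined above read as plain properties:
//
//   // JS (mem is wherever the embedder installed the object)
//   print(mem.gcBytes, mem.majorGCCount, mem.zone.gcTriggerBytes);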

const char* StateName(State state) {
  switch (state) {
#define MAKE_CASE(name) \
  case State::name:     \
    return #name;
    GCSTATES(MAKE_CASE)
#undef MAKE_CASE
  }
  MOZ_CRASH("Invalid gc::State enum value");
}

const char* StateName(JS::Zone::GCState state) {
  switch (state) {
    case JS::Zone::NoGC:
      return "NoGC";
    case JS::Zone::Prepare:
      return "Prepare";
    case JS::Zone::MarkBlackOnly:
      return "MarkBlackOnly";
    case JS::Zone::MarkBlackAndGray:
      return "MarkBlackAndGray";
    case JS::Zone::Sweep:
      return "Sweep";
    case JS::Zone::Finished:
      return "Finished";
    case JS::Zone::Compact:
      return "Compact";
  }
  MOZ_CRASH("Invalid Zone::GCState enum value");
}

void AutoAssertEmptyNursery::checkCondition(JSContext* cx) {
  if (!noAlloc) {
    noAlloc.emplace();
  }
  this->cx = cx;
  MOZ_ASSERT(cx->nursery().isEmpty());
}

AutoEmptyNursery::AutoEmptyNursery(JSContext* cx) : AutoAssertEmptyNursery() {
  MOZ_ASSERT(!cx->suppressGC);
  cx->runtime()->gc.stats().suspendPhases();
  cx->runtime()->gc.evictNursery(JS::GCReason::EVICT_NURSERY);
  cx->runtime()->gc.stats().resumePhases();
  checkCondition(cx);
}
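// Usage note (illustrative): code that must only ever see tenured cells can
// construct an AutoEmptyNursery on the stack. The constructor evicts the
// nursery, and the inherited AutoAssertEmptyNursery machinery then checks
// that it stays empty for the lifetime of the scope:
//
//   {
//     AutoEmptyNursery emptyNursery(cx);
//     // ... work that must not encounter nursery-allocated cells ...
//   }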

} /* namespace gc */
} /* namespace js */

#ifdef DEBUG

namespace js {

// We don't want jsfriendapi.h to depend on GenericPrinter,
// so these functions are declared directly in the cpp.

extern JS_PUBLIC_API void DumpString(JSString* str, js::GenericPrinter& out);

}  // namespace js

void js::gc::Cell::dump(js::GenericPrinter& out) const {
  switch (getTraceKind()) {
    case JS::TraceKind::Object:
      reinterpret_cast<const JSObject*>(this)->dump(out);
      break;

    case JS::TraceKind::String:
      js::DumpString(reinterpret_cast<JSString*>(const_cast<Cell*>(this)), out);
      break;

    case JS::TraceKind::Shape:
      reinterpret_cast<const Shape*>(this)->dump(out);
      break;

    default:
      out.printf("%s(%p)\n", JS::GCTraceKindToAscii(getTraceKind()),
                 (void*)this);
  }
}

// For use in a debugger.
void js::gc::Cell::dump() const {
  js::Fprinter out(stderr);
  dump(out);
}
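// Illustrative use from a debugger session (the address is whatever cell is
// being inspected):
//
//   (gdb) call ((js::gc::Cell*)0xdeadbeef)->dump()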
#endif

static inline bool CanCheckGrayBits(const Cell* cell) {
  MOZ_ASSERT(cell);
  if (!cell->isTenured()) {
    return false;
  }

  auto tc = &cell->asTenured();
  auto rt = tc->runtimeFromAnyThread();
  if (!CurrentThreadCanAccessRuntime(rt) || !rt->gc.areGrayBitsValid()) {
    return false;
  }

  // If the zone's mark bits are being cleared concurrently we can't depend on
  // the contents.
  return !tc->zone()->isGCPreparing();
}

JS_PUBLIC_API bool js::gc::detail::CellIsMarkedGrayIfKnown(const Cell* cell) {
  // We ignore the gray marking state of cells and return false in the
  // following cases:
  //
  // 1) When OOM has caused us to clear the gcGrayBitsValid_ flag.
  //
  // 2) When we are in an incremental GC and examine a cell that is in a zone
  // that is not being collected. Gray targets of CCWs that are marked black
  // by a barrier will eventually be marked black in the next GC slice.
  //
  // 3) When we are not on the runtime's main thread. Helper threads might
  // call this while parsing, and they are not allowed to inspect the
  // runtime's incremental state. The objects being operated on cannot be
  // collected and will not be marked any color.

  if (!CanCheckGrayBits(cell)) {
    return false;
  }

  auto tc = &cell->asTenured();
  MOZ_ASSERT(!tc->zoneFromAnyThread()->usedByHelperThread());

  auto rt = tc->runtimeFromMainThread();
  if (rt->gc.isIncrementalGCInProgress() && !tc->zone()->wasGCStarted()) {
    return false;
  }

  return detail::CellIsMarkedGray(tc);
}

#ifdef DEBUG

JS_PUBLIC_API void js::gc::detail::AssertCellIsNotGray(const Cell* cell) {
  // Check that a cell is not marked gray.
  //
  // Since this is a debug-only check, take account of the eventual mark state
  // of cells that will be marked black by the next GC slice in an incremental
  // GC. For performance reasons we don't do this in CellIsMarkedGrayIfKnown.

  if (!CanCheckGrayBits(cell)) {
    return;
  }

  // TODO: I'd like to AssertHeapIsIdle() here, but this ends up getting
  // called during GC and while iterating the heap for memory reporting.
  MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());

  auto tc = &cell->asTenured();
  if (tc->zone()->isGCMarkingBlackAndGray()) {
    // We are doing gray marking in the cell's zone. Even if the cell is
    // currently marked gray it may eventually be marked black. Delay checking
    // non-black cells until we finish gray marking.

    if (!tc->isMarkedBlack()) {
      JSRuntime* rt = tc->zone()->runtimeFromMainThread();
      AutoEnterOOMUnsafeRegion oomUnsafe;
      if (!rt->gc.cellsToAssertNotGray.ref().append(cell)) {
        oomUnsafe.crash("Can't append to delayed gray checks list");
      }
    }
    return;
  }

  MOZ_ASSERT(!tc->isMarkedGray());
}

extern JS_PUBLIC_API bool js::gc::detail::ObjectIsMarkedBlack(
    const JSObject* obj) {
  return obj->isMarkedBlack();
}

#endif

js::gc::ClearEdgesTracer::ClearEdgesTracer(JSRuntime* rt)
    : GenericTracer(rt, JS::TracerKind::ClearEdges,
                    JS::WeakMapTraceAction::TraceKeysAndValues) {}

js::gc::ClearEdgesTracer::ClearEdgesTracer()
    : ClearEdgesTracer(TlsContext.get()->runtime()) {}

template <typename S>
inline S* js::gc::ClearEdgesTracer::onEdge(S* thing) {
  // We don't handle removing pointers to nursery edges from the store buffer
  // with this tracer. Check that this doesn't happen.
  MOZ_ASSERT(!IsInsideNursery(thing));

  // Fire the pre-barrier since we're removing an edge from the graph.
  InternalBarrierMethods<S*>::preBarrier(thing);

  // Return nullptr to clear the edge.
  return nullptr;
}
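// Each per-kind hook below simply forwards to the onEdge() template above, so
// every traced edge is pre-barriered and then cleared regardless of the kind
// of cell it points to.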

JSObject* js::gc::ClearEdgesTracer::onObjectEdge(JSObject* obj) {
  return onEdge(obj);
}
JSString* js::gc::ClearEdgesTracer::onStringEdge(JSString* str) {
  return onEdge(str);
}
JS::Symbol* js::gc::ClearEdgesTracer::onSymbolEdge(JS::Symbol* sym) {
  return onEdge(sym);
}
JS::BigInt* js::gc::ClearEdgesTracer::onBigIntEdge(JS::BigInt* bi) {
  return onEdge(bi);
}
js::BaseScript* js::gc::ClearEdgesTracer::onScriptEdge(js::BaseScript* script) {
  return onEdge(script);
}
js::Shape* js::gc::ClearEdgesTracer::onShapeEdge(js::Shape* shape) {
  return onEdge(shape);
}
js::BaseShape* js::gc::ClearEdgesTracer::onBaseShapeEdge(js::BaseShape* base) {
  return onEdge(base);
}
js::GetterSetter* js::gc::ClearEdgesTracer::onGetterSetterEdge(
    js::GetterSetter* gs) {
  return onEdge(gs);
}
js::PropMap* js::gc::ClearEdgesTracer::onPropMapEdge(js::PropMap* map) {
  return onEdge(map);
}
js::jit::JitCode* js::gc::ClearEdgesTracer::onJitCodeEdge(
    js::jit::JitCode* code) {
  return onEdge(code);
}
js::Scope* js::gc::ClearEdgesTracer::onScopeEdge(js::Scope* scope) {
  return onEdge(scope);
}
js::RegExpShared* js::gc::ClearEdgesTracer::onRegExpSharedEdge(
    js::RegExpShared* shared) {
  return onEdge(shared);
}

JS_PUBLIC_API void js::gc::FinalizeDeadNurseryObject(JSContext* cx,
                                                     JSObject* obj) {
  CHECK_THREAD(cx);
  MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());

  MOZ_ASSERT(obj);
  MOZ_ASSERT(IsInsideNursery(obj));
  mozilla::DebugOnly<JSObject*> prior(obj);
  MOZ_ASSERT(IsAboutToBeFinalizedUnbarriered(&prior));
  MOZ_ASSERT(obj == prior);

  const JSClass* jsClass = JS::GetClass(obj);
  jsClass->doFinalize(cx->defaultFreeOp(), obj);
}

JS_PUBLIC_API void js::gc::SetPerformanceHint(JSContext* cx,
                                              PerformanceHint hint) {
  CHECK_THREAD(cx);
  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

  cx->runtime()->gc.setPerformanceHint(hint);
}
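// Note on usage: setPerformanceHint() below counts InPageLoad hints, so every
// hint that enters page-load mode must eventually be balanced by one that
// leaves it (the non-InPageLoad value of PerformanceHint); otherwise
// schedulingState.inPageLoad stays set and the adjusted GC start thresholds
// remain in effect.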

void GCRuntime::setPerformanceHint(PerformanceHint hint) {
  bool wasInPageLoad = inPageLoadCount != 0;

  if (hint == PerformanceHint::InPageLoad) {
    inPageLoadCount++;
  } else {
    MOZ_ASSERT(inPageLoadCount);
    inPageLoadCount--;
  }

  bool inPageLoad = inPageLoadCount != 0;
  if (inPageLoad == wasInPageLoad) {
    return;
  }

  AutoLockGC lock(this);
  schedulingState.inPageLoad = inPageLoad;
  atomsZone->updateGCStartThresholds(*this, gcOptions, lock);
  maybeTriggerGCAfterAlloc(atomsZone);
}