1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2  * vim: set ts=8 sts=4 et sw=4 tw=99:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 /*
8  * This code implements an incremental mark-and-sweep garbage collector, with
9  * most sweeping carried out in the background on a parallel thread.
10  *
11  * Full vs. zone GC
12  * ----------------
13  *
14  * The collector can collect all zones at once, or a subset. These types of
15  * collection are referred to as a full GC and a zone GC respectively.
16  *
17  * The atoms zone is only collected in a full GC since objects in any zone may
18  * have pointers to atoms, and these are not recorded in the cross compartment
19  * pointer map. Also, the atoms zone is not collected if any thread has an
20  * AutoKeepAtoms instance on the stack, or there are any exclusive threads using
21  * the runtime.
22  *
23  * It is possible for an incremental collection that started out as a full GC to
24  * become a zone GC if new zones are created during the course of the
25  * collection.
26  *
27  * Incremental collection
28  * ----------------------
29  *
30  * For a collection to be carried out incrementally the following conditions
31  * must be met:
32  *  - the collection must be run by calling js::GCSlice() rather than js::GC()
33  *  - the GC mode must have been set to JSGC_MODE_INCREMENTAL with
34  *    JS_SetGCParameter()
35  *  - no thread may have an AutoKeepAtoms instance on the stack
36  *
37  * The last condition is an engine-internal mechanism to ensure that incremental
38  * collection is not carried out without the correct barriers being implemented.
39  * For more information see 'Incremental marking' below.
40  *
41  * If the collection is not incremental, all foreground activity happens inside
42  * a single call to GC() or GCSlice(). However, the collection is not complete
43  * until the background sweeping activity has finished.
44  *
45  * An incremental collection proceeds as a series of slices, interleaved with
46  * mutator activity, i.e. running JavaScript code. Slices are limited by a time
47  * budget. The slice finishes as soon as possible after the requested time has
48  * passed.
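 *
 * As an illustrative sketch only (not the actual driver code; |markStack| and
 * |markAndPushChildren| are made-up names), slice-limited work driven by a
 * SliceBudget |budget| looks conceptually like this:
 *
 *   while (!markStack.empty()) {
 *       markAndPushChildren(markStack.pop());
 *       budget.step(1);
 *       if (budget.isOverBudget())
 *           break;   // yield to the mutator; work resumes in the next slice
 *   }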
49  *
50  * Collector states
51  * ----------------
52  *
53  * The collector proceeds through the following states, the current state being
54  * held in JSRuntime::gcIncrementalState:
55  *
56  *  - MARK_ROOTS - marks the stack and other roots
57  *  - MARK       - incrementally marks reachable things
58  *  - SWEEP      - sweeps zones in groups and continues marking unswept zones
59  *
60  * The MARK_ROOTS activity always takes place in the first slice. The next two
61  * states can take place over one or more slices.
62  *
63  * In other words an incremental collection proceeds like this:
64  *
65  * Slice 1:   MARK_ROOTS: Roots pushed onto the mark stack.
66  *            MARK:       The mark stack is processed by popping an element,
67  *                        marking it, and pushing its children.
68  *
69  *          ... JS code runs ...
70  *
71  * Slice 2:   MARK:       More mark stack processing.
72  *
73  *          ... JS code runs ...
74  *
75  * Slice n-1: MARK:       More mark stack processing.
76  *
77  *          ... JS code runs ...
78  *
79  * Slice n:   MARK:       Mark stack is completely drained.
80  *            SWEEP:      Select first group of zones to sweep and sweep them.
81  *
82  *          ... JS code runs ...
83  *
84  * Slice n+1: SWEEP:      Mark objects in unswept zones that were newly
85  *                        identified as alive (see below). Then sweep more zone
86  *                        groups.
87  *
88  *          ... JS code runs ...
89  *
90  * Slice n+2: SWEEP:      Mark objects in unswept zones that were newly
91  *                        identified as alive. Then sweep more zone groups.
92  *
93  *          ... JS code runs ...
94  *
95  * Slice m:   SWEEP:      Sweeping is finished, and background sweeping
96  *                        started on the helper thread.
97  *
98  *          ... JS code runs, remaining sweeping done on background thread ...
99  *
100  * When background sweeping finishes the GC is complete.
101  *
102  * Incremental marking
103  * -------------------
104  *
105  * Incremental collection requires close collaboration with the mutator (i.e.,
106  * JS code) to guarantee correctness.
107  *
108  *  - During an incremental GC, if a memory location (except a root) is written
109  *    to, then the value it previously held must be marked. Write barriers
110  *    ensure this.
111  *
112  *  - Any object that is allocated during incremental GC must start out marked.
113  *
114  *  - Roots are marked in the first slice and hence don't need write barriers.
115  *    Roots are things like the C stack and the VM stack.
116  *
117  * The problem that write barriers solve is that between slices the mutator can
118  * change the object graph. We must ensure that it cannot do this in such a way
119  * that makes us fail to mark a reachable object (marking an unreachable object
120  * is tolerable).
121  *
122  * We use a snapshot-at-the-beginning algorithm to do this. This means that we
123  * promise to mark at least everything that is reachable at the beginning of
124  * collection. To implement it we mark the old contents of every non-root memory
125  * location written to by the mutator while the collection is in progress, using
126  * write barriers. This is described in gc/Barrier.h.
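 *
 * As a conceptual sketch only (the real barriers are the templated wrapper
 * classes in gc/Barrier.h; |preWriteBarrier|, |isIncrementalGCInProgress| and
 * |markAndPush| are illustrative names), a pre-write barrier amounts to:
 *
 *   void preWriteBarrier(Cell* prev) {
 *       if (isIncrementalGCInProgress() && prev && !prev->isMarked())
 *           markAndPush(prev);   // keep the snapshot-time value alive
 *   }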
127  *
128  * Incremental sweeping
129  * --------------------
130  *
131  * Sweeping is difficult to do incrementally because object finalizers must be
132  * run at the start of sweeping, before any mutator code runs. The reason is
133  * that some objects use their finalizers to remove themselves from caches. If
134  * mutator code was allowed to run after the start of sweeping, it could observe
135  * the state of the cache and create a new reference to an object that was just
136  * about to be destroyed.
137  *
138  * Sweeping all finalizable objects in one go would introduce long pauses, so
139  * instead sweeping is broken up into groups of zones. Zones which are not yet
140  * being swept are still marked, so the issue above does not apply.
141  *
142  * The order of sweeping is restricted by cross compartment pointers - for
143  * example say that object |a| from zone A points to object |b| in zone B and
144  * neither object was marked when we transitioned to the SWEEP phase. Imagine we
145  * sweep B first and then return to the mutator. It's possible that the mutator
146  * could cause |a| to become alive through a read barrier (perhaps it was a
147  * shape that was accessed via a shape table). Then we would need to mark |b|,
148  * which |a| points to, but |b| has already been swept.
149  *
150  * So if there is such a pointer then marking of zone B must not finish before
151  * marking of zone A.  Pointers which form a cycle between zones therefore
152  * restrict those zones to being swept at the same time, and these are found
153  * using Tarjan's algorithm for finding the strongly connected components of a
154  * graph.
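 *
 * For example, if zone A has a pointer into zone B and B has a pointer back
 * into A, the two zones form a strongly connected component and end up in the
 * same group, so they finish marking and are swept together. If A points into
 * B but not vice versa, A's group is swept no later than B's.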
155  *
156  * GC things without finalizers, and things with finalizers that are able to run
157  * in the background, are swept on the background thread. This accounts for most
158  * of the sweeping work.
159  *
160  * Reset
161  * -----
162  *
163  * During incremental collection it is possible, although unlikely, for
164  * conditions to change such that incremental collection is no longer safe. In
165  * this case, the collection is 'reset' by ResetIncrementalGC(). If we are in
166  * the mark state, this just stops marking, but if we have started sweeping
167  * already, we continue until we have swept the current zone group. Following a
168  * reset, a new non-incremental collection is started.
169  *
170  * Compacting GC
171  * -------------
172  *
173  * Compacting GC happens at the end of a major GC as part of the last slice.
174  * There are three parts:
175  *
176  *  - Arenas are selected for compaction.
177  *  - The contents of those arenas are moved to new arenas.
178  *  - All references to moved things are updated.
179  */
180 
181 #include "jsgcinlines.h"
182 
183 #include "mozilla/ArrayUtils.h"
184 #include "mozilla/DebugOnly.h"
185 #include "mozilla/MacroForEach.h"
186 #include "mozilla/MemoryReporting.h"
187 #include "mozilla/Move.h"
188 
189 #include <ctype.h>
190 #include <string.h>
191 #ifndef XP_WIN
192 # include <sys/mman.h>
193 # include <unistd.h>
194 #endif
195 
196 #include "jsapi.h"
197 #include "jsatom.h"
198 #include "jscntxt.h"
199 #include "jscompartment.h"
200 #include "jsfriendapi.h"
201 #include "jsobj.h"
202 #include "jsprf.h"
203 #include "jsscript.h"
204 #include "jstypes.h"
205 #include "jsutil.h"
206 #include "jswatchpoint.h"
207 #include "jsweakmap.h"
208 #ifdef XP_WIN
209 # include "jswin.h"
210 #endif
211 
212 #include "gc/FindSCCs.h"
213 #include "gc/GCInternals.h"
214 #include "gc/GCTrace.h"
215 #include "gc/Marking.h"
216 #include "gc/Memory.h"
217 #include "jit/BaselineJIT.h"
218 #include "jit/IonCode.h"
219 #include "jit/JitcodeMap.h"
220 #include "js/SliceBudget.h"
221 #include "proxy/DeadObjectProxy.h"
222 #include "vm/Debugger.h"
223 #include "vm/ProxyObject.h"
224 #include "vm/Shape.h"
225 #include "vm/String.h"
226 #include "vm/Symbol.h"
227 #include "vm/Time.h"
228 #include "vm/TraceLogging.h"
229 #include "vm/WrapperObject.h"
230 
231 #include "jsobjinlines.h"
232 #include "jsscriptinlines.h"
233 
234 #include "vm/Stack-inl.h"
235 #include "vm/String-inl.h"
236 
237 using namespace js;
238 using namespace js::gc;
239 
240 using mozilla::ArrayLength;
241 using mozilla::HashCodeScrambler;
242 using mozilla::Maybe;
243 using mozilla::Swap;
244 
245 using JS::AutoGCRooter;
246 
247 /* Perform a Full GC every 20 seconds if MaybeGC is called */
248 static const uint64_t GC_IDLE_FULL_SPAN = 20 * 1000 * 1000;
249 
250 /* Increase the IGC marking slice time if we are in highFrequencyGC mode. */
251 static const int IGC_MARK_SLICE_MULTIPLIER = 2;
252 
253 const AllocKind gc::slotsToThingKind[] = {
254     /*  0 */ AllocKind::OBJECT0,  AllocKind::OBJECT2,  AllocKind::OBJECT2,  AllocKind::OBJECT4,
255     /*  4 */ AllocKind::OBJECT4,  AllocKind::OBJECT8,  AllocKind::OBJECT8,  AllocKind::OBJECT8,
256     /*  8 */ AllocKind::OBJECT8,  AllocKind::OBJECT12, AllocKind::OBJECT12, AllocKind::OBJECT12,
257     /* 12 */ AllocKind::OBJECT12, AllocKind::OBJECT16, AllocKind::OBJECT16, AllocKind::OBJECT16,
258     /* 16 */ AllocKind::OBJECT16
259 };
260 
261 static_assert(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
262               "We have defined a slot count for each kind.");
263 
264 // Assert that SortedArenaList::MinThingSize is <= the real minimum thing size.
265 #define CHECK_MIN_THING_SIZE_INNER(x_)                                         \
266     static_assert(x_ >= SortedArenaList::MinThingSize,                         \
267     #x_ " is less than SortedArenaList::MinThingSize!");
268 #define CHECK_MIN_THING_SIZE(...) { __VA_ARGS__ }; /* Define the array. */     \
269     MOZ_FOR_EACH(CHECK_MIN_THING_SIZE_INNER, (), (__VA_ARGS__ UINT32_MAX))
270 
271 const uint32_t Arena::ThingSizes[] = CHECK_MIN_THING_SIZE(
272     sizeof(JSFunction),         /* AllocKind::FUNCTION            */
273     sizeof(FunctionExtended),   /* AllocKind::FUNCTION_EXTENDED   */
274     sizeof(JSObject_Slots0),    /* AllocKind::OBJECT0             */
275     sizeof(JSObject_Slots0),    /* AllocKind::OBJECT0_BACKGROUND  */
276     sizeof(JSObject_Slots2),    /* AllocKind::OBJECT2             */
277     sizeof(JSObject_Slots2),    /* AllocKind::OBJECT2_BACKGROUND  */
278     sizeof(JSObject_Slots4),    /* AllocKind::OBJECT4             */
279     sizeof(JSObject_Slots4),    /* AllocKind::OBJECT4_BACKGROUND  */
280     sizeof(JSObject_Slots8),    /* AllocKind::OBJECT8             */
281     sizeof(JSObject_Slots8),    /* AllocKind::OBJECT8_BACKGROUND  */
282     sizeof(JSObject_Slots12),   /* AllocKind::OBJECT12            */
283     sizeof(JSObject_Slots12),   /* AllocKind::OBJECT12_BACKGROUND */
284     sizeof(JSObject_Slots16),   /* AllocKind::OBJECT16            */
285     sizeof(JSObject_Slots16),   /* AllocKind::OBJECT16_BACKGROUND */
286     sizeof(JSScript),           /* AllocKind::SCRIPT              */
287     sizeof(LazyScript),         /* AllocKind::LAZY_SCRIPT         */
288     sizeof(Shape),              /* AllocKind::SHAPE               */
289     sizeof(AccessorShape),      /* AllocKind::ACCESSOR_SHAPE      */
290     sizeof(BaseShape),          /* AllocKind::BASE_SHAPE          */
291     sizeof(ObjectGroup),        /* AllocKind::OBJECT_GROUP        */
292     sizeof(JSFatInlineString),  /* AllocKind::FAT_INLINE_STRING   */
293     sizeof(JSString),           /* AllocKind::STRING              */
294     sizeof(JSExternalString),   /* AllocKind::EXTERNAL_STRING     */
295     sizeof(js::FatInlineAtom),  /* AllocKind::FAT_INLINE_ATOM     */
296     sizeof(js::NormalAtom),     /* AllocKind::ATOM                */
297     sizeof(JS::Symbol),         /* AllocKind::SYMBOL              */
298     sizeof(jit::JitCode),       /* AllocKind::JITCODE             */
299 );
300 
301 #undef CHECK_MIN_THING_SIZE_INNER
302 #undef CHECK_MIN_THING_SIZE
303 
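// Offset of the first thing in an arena holding things of the given type: the
// header, plus whatever space is left over after packing as many things as
// possible into the arena, so that the last thing ends exactly at ArenaSize.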
304 #define OFFSET(type) uint32_t(sizeof(ArenaHeader) + (ArenaSize - sizeof(ArenaHeader)) % sizeof(type))
305 
306 const uint32_t Arena::FirstThingOffsets[] = {
307     OFFSET(JSFunction),         /* AllocKind::FUNCTION            */
308     OFFSET(FunctionExtended),   /* AllocKind::FUNCTION_EXTENDED   */
309     OFFSET(JSObject_Slots0),    /* AllocKind::OBJECT0             */
310     OFFSET(JSObject_Slots0),    /* AllocKind::OBJECT0_BACKGROUND  */
311     OFFSET(JSObject_Slots2),    /* AllocKind::OBJECT2             */
312     OFFSET(JSObject_Slots2),    /* AllocKind::OBJECT2_BACKGROUND  */
313     OFFSET(JSObject_Slots4),    /* AllocKind::OBJECT4             */
314     OFFSET(JSObject_Slots4),    /* AllocKind::OBJECT4_BACKGROUND  */
315     OFFSET(JSObject_Slots8),    /* AllocKind::OBJECT8             */
316     OFFSET(JSObject_Slots8),    /* AllocKind::OBJECT8_BACKGROUND  */
317     OFFSET(JSObject_Slots12),   /* AllocKind::OBJECT12            */
318     OFFSET(JSObject_Slots12),   /* AllocKind::OBJECT12_BACKGROUND */
319     OFFSET(JSObject_Slots16),   /* AllocKind::OBJECT16            */
320     OFFSET(JSObject_Slots16),   /* AllocKind::OBJECT16_BACKGROUND */
321     OFFSET(JSScript),           /* AllocKind::SCRIPT              */
322     OFFSET(LazyScript),         /* AllocKind::LAZY_SCRIPT         */
323     OFFSET(Shape),              /* AllocKind::SHAPE               */
324     OFFSET(AccessorShape),      /* AllocKind::ACCESSOR_SHAPE      */
325     OFFSET(BaseShape),          /* AllocKind::BASE_SHAPE          */
326     OFFSET(ObjectGroup),        /* AllocKind::OBJECT_GROUP        */
327     OFFSET(JSFatInlineString),  /* AllocKind::FAT_INLINE_STRING   */
328     OFFSET(JSString),           /* AllocKind::STRING              */
329     OFFSET(JSExternalString),   /* AllocKind::EXTERNAL_STRING     */
330     OFFSET(js::FatInlineAtom),  /* AllocKind::FAT_INLINE_ATOM     */
331     OFFSET(js::NormalAtom),     /* AllocKind::ATOM                */
332     OFFSET(JS::Symbol),         /* AllocKind::SYMBOL              */
333     OFFSET(jit::JitCode),       /* AllocKind::JITCODE             */
334 };
335 
336 #undef OFFSET
337 
338 struct js::gc::FinalizePhase
339 {
340     size_t length;
341     const AllocKind* kinds;
342     gcstats::Phase statsPhase;
343 };
344 
345 #define PHASE(x, p) { ArrayLength(x), x, p }
346 
347 /*
348  * Finalization order for incrementally swept things.
349  */
350 
351 static const AllocKind IncrementalPhaseStrings[] = {
352     AllocKind::EXTERNAL_STRING
353 };
354 
355 static const AllocKind IncrementalPhaseScripts[] = {
356     AllocKind::SCRIPT,
357     AllocKind::LAZY_SCRIPT
358 };
359 
360 static const AllocKind IncrementalPhaseJitCode[] = {
361     AllocKind::JITCODE
362 };
363 
364 static const FinalizePhase IncrementalFinalizePhases[] = {
365     PHASE(IncrementalPhaseStrings, gcstats::PHASE_SWEEP_STRING),
366     PHASE(IncrementalPhaseScripts, gcstats::PHASE_SWEEP_SCRIPT),
367     PHASE(IncrementalPhaseJitCode, gcstats::PHASE_SWEEP_JITCODE)
368 };
369 
370 /*
371  * Finalization order for things swept in the background.
372  */
373 
374 static const AllocKind BackgroundPhaseObjects[] = {
375     AllocKind::FUNCTION,
376     AllocKind::FUNCTION_EXTENDED,
377     AllocKind::OBJECT0_BACKGROUND,
378     AllocKind::OBJECT2_BACKGROUND,
379     AllocKind::OBJECT4_BACKGROUND,
380     AllocKind::OBJECT8_BACKGROUND,
381     AllocKind::OBJECT12_BACKGROUND,
382     AllocKind::OBJECT16_BACKGROUND
383 };
384 
385 static const AllocKind BackgroundPhaseStringsAndSymbols[] = {
386     AllocKind::FAT_INLINE_STRING,
387     AllocKind::STRING,
388     AllocKind::FAT_INLINE_ATOM,
389     AllocKind::ATOM,
390     AllocKind::SYMBOL
391 };
392 
393 static const AllocKind BackgroundPhaseShapes[] = {
394     AllocKind::SHAPE,
395     AllocKind::ACCESSOR_SHAPE,
396     AllocKind::BASE_SHAPE,
397     AllocKind::OBJECT_GROUP
398 };
399 
400 static const FinalizePhase BackgroundFinalizePhases[] = {
401     PHASE(BackgroundPhaseObjects, gcstats::PHASE_SWEEP_OBJECT),
402     PHASE(BackgroundPhaseStringsAndSymbols, gcstats::PHASE_SWEEP_STRING),
403     PHASE(BackgroundPhaseShapes, gcstats::PHASE_SWEEP_SHAPE)
404 };
405 
406 #undef PHASE
407 
408 template<>
409 JSObject*
410 ArenaCellIterImpl::get<JSObject>() const
411 {
412     MOZ_ASSERT(!done());
413     return reinterpret_cast<JSObject*>(getCell());
414 }
415 
416 #ifdef DEBUG
417 void
418 ArenaHeader::checkSynchronizedWithFreeList() const
419 {
420     /*
421      * Do not allow access to the free list when its real head is still stored
422      * in FreeLists and is not synchronized with this one.
423      */
424     MOZ_ASSERT(allocated());
425 
426     /*
427      * We can be called from the background finalization thread when the free
428      * list in the zone can mutate at any moment. We cannot do any
429      * checks in this case.
430      */
431     if (IsBackgroundFinalized(getAllocKind()) && zone->runtimeFromAnyThread()->gc.onBackgroundThread())
432         return;
433 
434     FreeSpan firstSpan = firstFreeSpan.decompact(arenaAddress());
435     if (firstSpan.isEmpty())
436         return;
437     const FreeList* freeList = zone->arenas.getFreeList(getAllocKind());
438     if (freeList->isEmpty() || firstSpan.arenaAddress() != freeList->arenaAddress())
439         return;
440 
441     /*
442      * Here this arena has free things, FreeList::lists[thingKind] is not
443      * empty and also points to this arena. Thus they must be the same.
444      */
445     MOZ_ASSERT(freeList->isSameNonEmptySpan(firstSpan));
446 }
447 #endif
448 
449 void
450 ArenaHeader::unmarkAll()
451 {
452     uintptr_t* word = chunk()->bitmap.arenaBits(this);
453     memset(word, 0, ArenaBitmapWords * sizeof(uintptr_t));
454 }
455 
456 /* static */ void
457 Arena::staticAsserts()
458 {
459     static_assert(JS_ARRAY_LENGTH(ThingSizes) == size_t(AllocKind::LIMIT),
460         "We haven't defined all thing sizes.");
461     static_assert(JS_ARRAY_LENGTH(FirstThingOffsets) == size_t(AllocKind::LIMIT),
462         "We haven't defined all offsets.");
463 }
464 
465 void
466 Arena::setAsFullyUnused(AllocKind thingKind)
467 {
468     FreeSpan fullSpan;
469     size_t thingSize = Arena::thingSize(thingKind);
470     fullSpan.initFinal(thingsStart(thingKind), thingsEnd() - thingSize, thingSize);
471     aheader.setFirstFreeSpan(&fullSpan);
472 }
473 
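// Finalize (sweep) the things in this arena: run finalizers on unmarked
// things, poison their memory, and rebuild the arena's free list from the
// gaps between marked things. Returns the number of things still marked.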
474 template<typename T>
475 inline size_t
476 Arena::finalize(FreeOp* fop, AllocKind thingKind, size_t thingSize)
477 {
478     /* Enforce requirements on size of T. */
479     MOZ_ASSERT(thingSize % CellSize == 0);
480     MOZ_ASSERT(thingSize <= 255);
481 
482     MOZ_ASSERT(aheader.allocated());
483     MOZ_ASSERT(thingKind == aheader.getAllocKind());
484     MOZ_ASSERT(thingSize == aheader.getThingSize());
485     MOZ_ASSERT(!aheader.hasDelayedMarking);
486     MOZ_ASSERT(!aheader.markOverflow);
487     MOZ_ASSERT(!aheader.allocatedDuringIncremental);
488 
489     uintptr_t firstThing = thingsStart(thingKind);
490     uintptr_t firstThingOrSuccessorOfLastMarkedThing = firstThing;
491     uintptr_t lastThing = thingsEnd() - thingSize;
492 
493     FreeSpan newListHead;
494     FreeSpan* newListTail = &newListHead;
495     size_t nmarked = 0;
496 
497     if (MOZ_UNLIKELY(MemProfiler::enabled())) {
498         for (ArenaCellIterUnderFinalize i(&aheader); !i.done(); i.next()) {
499             T* t = i.get<T>();
500             if (t->asTenured().isMarked())
501                 MemProfiler::MarkTenured(reinterpret_cast<void*>(t));
502         }
503     }
504 
505     for (ArenaCellIterUnderFinalize i(&aheader); !i.done(); i.next()) {
506         T* t = i.get<T>();
507         if (t->asTenured().isMarked()) {
508             uintptr_t thing = reinterpret_cast<uintptr_t>(t);
509             if (thing != firstThingOrSuccessorOfLastMarkedThing) {
510                 // We just finished passing over one or more free things,
511                 // so record a new FreeSpan.
512                 newListTail->initBoundsUnchecked(firstThingOrSuccessorOfLastMarkedThing,
513                                                  thing - thingSize);
514                 newListTail = newListTail->nextSpanUnchecked();
515             }
516             firstThingOrSuccessorOfLastMarkedThing = thing + thingSize;
517             nmarked++;
518         } else {
519             t->finalize(fop);
520             JS_POISON(t, JS_SWEPT_TENURED_PATTERN, thingSize);
521             TraceTenuredFinalize(t);
522         }
523     }
524 
525     if (nmarked == 0) {
526         // Do nothing. The caller will update the arena header appropriately.
527         MOZ_ASSERT(newListTail == &newListHead);
528         JS_EXTRA_POISON(data, JS_SWEPT_TENURED_PATTERN, sizeof(data));
529         return nmarked;
530     }
531 
532     MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing != firstThing);
533     uintptr_t lastMarkedThing = firstThingOrSuccessorOfLastMarkedThing - thingSize;
534     if (lastThing == lastMarkedThing) {
535         // If the last thing was marked, we will have already set the bounds of
536         // the final span, and we just need to terminate the list.
537         newListTail->initAsEmpty();
538     } else {
539         // Otherwise, end the list with a span that covers the final stretch of free things.
540         newListTail->initFinal(firstThingOrSuccessorOfLastMarkedThing, lastThing, thingSize);
541     }
542 
543 #ifdef DEBUG
544     size_t nfree = 0;
545     for (const FreeSpan* span = &newListHead; !span->isEmpty(); span = span->nextSpan())
546         nfree += span->length(thingSize);
547     MOZ_ASSERT(nfree + nmarked == thingsPerArena(thingSize));
548 #endif
549     aheader.setFirstFreeSpan(&newListHead);
550     return nmarked;
551 }
552 
553 // Finalize arenas from src list, releasing empty arenas if keepArenas wasn't
554 // specified and inserting the others into the appropriate destination size
555 // bins.
556 template<typename T>
557 static inline bool
558 FinalizeTypedArenas(FreeOp* fop,
559                     ArenaHeader** src,
560                     SortedArenaList& dest,
561                     AllocKind thingKind,
562                     SliceBudget& budget,
563                     ArenaLists::KeepArenasEnum keepArenas)
564 {
565     // When operating in the foreground, take the lock at the top.
566     Maybe<AutoLockGC> maybeLock;
567     if (!fop->onBackgroundThread())
568         maybeLock.emplace(fop->runtime());
569 
570     // During background sweeping free arenas are released later on in
571     // sweepBackgroundThings().
572     MOZ_ASSERT_IF(fop->onBackgroundThread(), keepArenas == ArenaLists::KEEP_ARENAS);
573 
574     size_t thingSize = Arena::thingSize(thingKind);
575     size_t thingsPerArena = Arena::thingsPerArena(thingSize);
576 
577     while (ArenaHeader* aheader = *src) {
578         *src = aheader->next;
579         size_t nmarked = aheader->getArena()->finalize<T>(fop, thingKind, thingSize);
580         size_t nfree = thingsPerArena - nmarked;
581 
582         if (nmarked)
583             dest.insertAt(aheader, nfree);
584         else if (keepArenas == ArenaLists::KEEP_ARENAS)
585             aheader->chunk()->recycleArena(aheader, dest, thingKind, thingsPerArena);
586         else
587             fop->runtime()->gc.releaseArena(aheader, maybeLock.ref());
588 
589         budget.step(thingsPerArena);
590         if (budget.isOverBudget())
591             return false;
592     }
593 
594     return true;
595 }
596 
597 /*
598  * Finalize the list. On return, the resulting list's cursor points to the first non-empty
599  * arena in the list (which may be null if all arenas are full).
600  */
601 static bool
602 FinalizeArenas(FreeOp* fop,
603                ArenaHeader** src,
604                SortedArenaList& dest,
605                AllocKind thingKind,
606                SliceBudget& budget,
607                ArenaLists::KeepArenasEnum keepArenas)
608 {
609     switch (thingKind) {
610       case AllocKind::FUNCTION:
611       case AllocKind::FUNCTION_EXTENDED:
612       case AllocKind::OBJECT0:
613       case AllocKind::OBJECT0_BACKGROUND:
614       case AllocKind::OBJECT2:
615       case AllocKind::OBJECT2_BACKGROUND:
616       case AllocKind::OBJECT4:
617       case AllocKind::OBJECT4_BACKGROUND:
618       case AllocKind::OBJECT8:
619       case AllocKind::OBJECT8_BACKGROUND:
620       case AllocKind::OBJECT12:
621       case AllocKind::OBJECT12_BACKGROUND:
622       case AllocKind::OBJECT16:
623       case AllocKind::OBJECT16_BACKGROUND:
624         return FinalizeTypedArenas<JSObject>(fop, src, dest, thingKind, budget, keepArenas);
625       case AllocKind::SCRIPT:
626         return FinalizeTypedArenas<JSScript>(fop, src, dest, thingKind, budget, keepArenas);
627       case AllocKind::LAZY_SCRIPT:
628         return FinalizeTypedArenas<LazyScript>(fop, src, dest, thingKind, budget, keepArenas);
629       case AllocKind::SHAPE:
630         return FinalizeTypedArenas<Shape>(fop, src, dest, thingKind, budget, keepArenas);
631       case AllocKind::ACCESSOR_SHAPE:
632         return FinalizeTypedArenas<AccessorShape>(fop, src, dest, thingKind, budget, keepArenas);
633       case AllocKind::BASE_SHAPE:
634         return FinalizeTypedArenas<BaseShape>(fop, src, dest, thingKind, budget, keepArenas);
635       case AllocKind::OBJECT_GROUP:
636         return FinalizeTypedArenas<ObjectGroup>(fop, src, dest, thingKind, budget, keepArenas);
637       case AllocKind::STRING:
638         return FinalizeTypedArenas<JSString>(fop, src, dest, thingKind, budget, keepArenas);
639       case AllocKind::FAT_INLINE_STRING:
640         return FinalizeTypedArenas<JSFatInlineString>(fop, src, dest, thingKind, budget, keepArenas);
641       case AllocKind::EXTERNAL_STRING:
642         return FinalizeTypedArenas<JSExternalString>(fop, src, dest, thingKind, budget, keepArenas);
643       case AllocKind::FAT_INLINE_ATOM:
644         return FinalizeTypedArenas<js::FatInlineAtom>(fop, src, dest, thingKind, budget, keepArenas);
645       case AllocKind::ATOM:
646         return FinalizeTypedArenas<js::NormalAtom>(fop, src, dest, thingKind, budget, keepArenas);
647       case AllocKind::SYMBOL:
648         return FinalizeTypedArenas<JS::Symbol>(fop, src, dest, thingKind, budget, keepArenas);
649       case AllocKind::JITCODE:
650         return FinalizeTypedArenas<jit::JitCode>(fop, src, dest, thingKind, budget, keepArenas);
651       default:
652         MOZ_CRASH("Invalid alloc kind");
653     }
654 }
655 
656 Chunk*
657 ChunkPool::pop()
658 {
659     MOZ_ASSERT(bool(head_) == bool(count_));
660     if (!count_)
661         return nullptr;
662     return remove(head_);
663 }
664 
665 void
666 ChunkPool::push(Chunk* chunk)
667 {
668     MOZ_ASSERT(!chunk->info.next);
669     MOZ_ASSERT(!chunk->info.prev);
670 
671     chunk->info.age = 0;
672     chunk->info.next = head_;
673     if (head_)
674         head_->info.prev = chunk;
675     head_ = chunk;
676     ++count_;
677 
678     MOZ_ASSERT(verify());
679 }
680 
681 Chunk*
682 ChunkPool::remove(Chunk* chunk)
683 {
684     MOZ_ASSERT(count_ > 0);
685     MOZ_ASSERT(contains(chunk));
686 
687     if (head_ == chunk)
688         head_ = chunk->info.next;
689     if (chunk->info.prev)
690         chunk->info.prev->info.next = chunk->info.next;
691     if (chunk->info.next)
692         chunk->info.next->info.prev = chunk->info.prev;
693     chunk->info.next = chunk->info.prev = nullptr;
694     --count_;
695 
696     MOZ_ASSERT(verify());
697     return chunk;
698 }
699 
700 #ifdef DEBUG
701 bool
702 ChunkPool::contains(Chunk* chunk) const
703 {
704     verify();
705     for (Chunk* cursor = head_; cursor; cursor = cursor->info.next) {
706         if (cursor == chunk)
707             return true;
708     }
709     return false;
710 }
711 
712 bool
713 ChunkPool::verify() const
714 {
715     MOZ_ASSERT(bool(head_) == bool(count_));
716     uint32_t count = 0;
717     for (Chunk* cursor = head_; cursor; cursor = cursor->info.next, ++count) {
718         MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
719         MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
720     }
721     MOZ_ASSERT(count_ == count);
722     return true;
723 }
724 #endif
725 
726 void
727 ChunkPool::Iter::next()
728 {
729     MOZ_ASSERT(!done());
730     current_ = current_->info.next;
731 }
732 
733 ChunkPool
734 GCRuntime::expireEmptyChunkPool(bool shrinkBuffers, const AutoLockGC& lock)
735 {
736     /*
737      * Return old empty chunks to the system while preserving the order of
738      * other chunks in the list. This way, if the GC runs several times
739      * without emptying the list, the older chunks will stay at the tail
740      * and are more likely to reach the max age.
741      */
742     MOZ_ASSERT(emptyChunks(lock).verify());
743     ChunkPool expired;
744     unsigned freeChunkCount = 0;
745     for (ChunkPool::Iter iter(emptyChunks(lock)); !iter.done();) {
746         Chunk* chunk = iter.get();
747         iter.next();
748 
749         MOZ_ASSERT(chunk->unused());
750         MOZ_ASSERT(!fullChunks(lock).contains(chunk));
751         MOZ_ASSERT(!availableChunks(lock).contains(chunk));
752         if (freeChunkCount >= tunables.maxEmptyChunkCount() ||
753             (freeChunkCount >= tunables.minEmptyChunkCount(lock) &&
754              (shrinkBuffers || chunk->info.age == MAX_EMPTY_CHUNK_AGE)))
755         {
756             emptyChunks(lock).remove(chunk);
757             prepareToFreeChunk(chunk->info);
758             expired.push(chunk);
759         } else {
760             /* Keep the chunk but increase its age. */
761             ++freeChunkCount;
762             ++chunk->info.age;
763         }
764     }
765     MOZ_ASSERT(expired.verify());
766     MOZ_ASSERT(emptyChunks(lock).verify());
767     MOZ_ASSERT(emptyChunks(lock).count() <= tunables.maxEmptyChunkCount());
768     MOZ_ASSERT_IF(shrinkBuffers, emptyChunks(lock).count() <= tunables.minEmptyChunkCount(lock));
769     return expired;
770 }
771 
772 static void
773 FreeChunkPool(JSRuntime* rt, ChunkPool& pool)
774 {
775     for (ChunkPool::Iter iter(pool); !iter.done();) {
776         Chunk* chunk = iter.get();
777         iter.next();
778         pool.remove(chunk);
779         MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
780         UnmapPages(static_cast<void*>(chunk), ChunkSize);
781     }
782     MOZ_ASSERT(pool.count() == 0);
783 }
784 
785 void
786 GCRuntime::freeEmptyChunks(JSRuntime* rt, const AutoLockGC& lock)
787 {
788     FreeChunkPool(rt, emptyChunks(lock));
789 }
790 
791 /* static */ Chunk*
792 Chunk::allocate(JSRuntime* rt)
793 {
794     Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
795     if (!chunk)
796         return nullptr;
797     chunk->init(rt);
798     rt->gc.stats.count(gcstats::STAT_NEW_CHUNK);
799     return chunk;
800 }
801 
802 inline void
803 GCRuntime::prepareToFreeChunk(ChunkInfo& info)
804 {
805     MOZ_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
806     numArenasFreeCommitted -= info.numArenasFreeCommitted;
807     stats.count(gcstats::STAT_DESTROY_CHUNK);
808 #ifdef DEBUG
809     /*
810      * Let FreeChunkPool detect a missing prepareToFreeChunk call before it
811      * frees the chunk.
812      */
813     info.numArenasFreeCommitted = 0;
814 #endif
815 }
816 
817 void Chunk::decommitAllArenas(JSRuntime* rt)
818 {
819     decommittedArenas.clear(true);
820     MarkPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
821 
822     info.freeArenasHead = nullptr;
823     info.lastDecommittedArenaOffset = 0;
824     info.numArenasFree = ArenasPerChunk;
825     info.numArenasFreeCommitted = 0;
826 }
827 
828 void
829 Chunk::init(JSRuntime* rt)
830 {
831     JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);
832 
833     /*
834      * We clear the bitmap to guard against JS::GCThingIsMarkedGray being called
835      * on uninitialized data, which would happen before the first GC cycle.
836      */
837     bitmap.clear();
838 
839     /*
840      * Decommit the arenas. We do this after poisoning so that if the OS does
841      * not have to recycle the pages, we still get the benefit of poisoning.
842      */
843     decommitAllArenas(rt);
844 
845     /* Initialize the chunk info. */
846     info.init();
847     new (&info.trailer) ChunkTrailer(rt);
848 
849     /* The rest of info fields are initialized in pickChunk. */
850 }
851 
852 /*
853  * Search for and return the next decommitted Arena. Our goal is to keep
854  * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
855  * it to the most recently freed arena when we free, and forcing it to
856  * the last alloc + 1 when we allocate.
857  */
858 uint32_t
859 Chunk::findDecommittedArenaOffset()
860 {
861     /* Note: lastDecommittedArenaOffset can be past the end of the list. */
862     for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++)
863         if (decommittedArenas.get(i))
864             return i;
865     for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++)
866         if (decommittedArenas.get(i))
867             return i;
868     MOZ_CRASH("No decommitted arenas found.");
869 }
870 
871 ArenaHeader*
872 Chunk::fetchNextDecommittedArena()
873 {
874     MOZ_ASSERT(info.numArenasFreeCommitted == 0);
875     MOZ_ASSERT(info.numArenasFree > 0);
876 
877     unsigned offset = findDecommittedArenaOffset();
878     info.lastDecommittedArenaOffset = offset + 1;
879     --info.numArenasFree;
880     decommittedArenas.unset(offset);
881 
882     Arena* arena = &arenas[offset];
883     MarkPagesInUse(arena, ArenaSize);
884     arena->aheader.setAsNotAllocated();
885 
886     return &arena->aheader;
887 }
888 
889 inline void
890 GCRuntime::updateOnFreeArenaAlloc(const ChunkInfo& info)
891 {
892     MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
893     --numArenasFreeCommitted;
894 }
895 
896 inline ArenaHeader*
897 Chunk::fetchNextFreeArena(JSRuntime* rt)
898 {
899     MOZ_ASSERT(info.numArenasFreeCommitted > 0);
900     MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
901 
902     ArenaHeader* aheader = info.freeArenasHead;
903     info.freeArenasHead = aheader->next;
904     --info.numArenasFreeCommitted;
905     --info.numArenasFree;
906     rt->gc.updateOnFreeArenaAlloc(info);
907 
908     return aheader;
909 }
910 
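// Allocate an arena from this chunk, preferring an already-committed free
// arena and recommitting a decommitted one only when none is available.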
911 ArenaHeader*
912 Chunk::allocateArena(JSRuntime* rt, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
913 {
914     ArenaHeader* aheader = info.numArenasFreeCommitted > 0
915                            ? fetchNextFreeArena(rt)
916                            : fetchNextDecommittedArena();
917     aheader->init(zone, thingKind);
918     updateChunkListAfterAlloc(rt, lock);
919     return aheader;
920 }
921 
922 inline void
923 GCRuntime::updateOnArenaFree(const ChunkInfo& info)
924 {
925     ++numArenasFreeCommitted;
926 }
927 
928 void
929 Chunk::addArenaToFreeList(JSRuntime* rt, ArenaHeader* aheader)
930 {
931     MOZ_ASSERT(!aheader->allocated());
932     aheader->next = info.freeArenasHead;
933     info.freeArenasHead = aheader;
934     ++info.numArenasFreeCommitted;
935     ++info.numArenasFree;
936     rt->gc.updateOnArenaFree(info);
937 }
938 
939 void
940 Chunk::addArenaToDecommittedList(JSRuntime* rt, const ArenaHeader* aheader)
941 {
942     ++info.numArenasFree;
943     decommittedArenas.set(Chunk::arenaIndex(aheader->arenaAddress()));
944 }
945 
946 void
947 Chunk::recycleArena(ArenaHeader* aheader, SortedArenaList& dest, AllocKind thingKind,
948                     size_t thingsPerArena)
949 {
950     aheader->getArena()->setAsFullyUnused(thingKind);
951     dest.insertAt(aheader, thingsPerArena);
952 }
953 
954 void
955 Chunk::releaseArena(JSRuntime* rt, ArenaHeader* aheader, const AutoLockGC& lock)
956 {
957     MOZ_ASSERT(aheader->allocated());
958     MOZ_ASSERT(!aheader->hasDelayedMarking);
959 
960     aheader->setAsNotAllocated();
961     addArenaToFreeList(rt, aheader);
962     updateChunkListAfterFree(rt, lock);
963 }
964 
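// Take one committed free arena from this chunk and return its pages to the
// OS, dropping the GC lock around the system call. Returns whether the pages
// were successfully marked unused.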
965 bool
966 Chunk::decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock)
967 {
968     MOZ_ASSERT(info.numArenasFreeCommitted > 0);
969     ArenaHeader* aheader = fetchNextFreeArena(rt);
970     updateChunkListAfterAlloc(rt, lock);
971 
972     bool ok;
973     {
974         AutoUnlockGC unlock(lock);
975         ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
976     }
977 
978     if (ok)
979         addArenaToDecommittedList(rt, aheader);
980     else
981         addArenaToFreeList(rt, aheader);
982     updateChunkListAfterFree(rt, lock);
983 
984     return ok;
985 }
986 
987 void
988 Chunk::decommitAllArenasWithoutUnlocking(const AutoLockGC& lock)
989 {
990     for (size_t i = 0; i < ArenasPerChunk; ++i) {
991         if (decommittedArenas.get(i) || arenas[i].aheader.allocated())
992             continue;
993 
994         if (MarkPagesUnused(&arenas[i], ArenaSize)) {
995             info.numArenasFreeCommitted--;
996             decommittedArenas.set(i);
997         }
998     }
999 }
1000 
1001 void
1002 Chunk::updateChunkListAfterAlloc(JSRuntime* rt, const AutoLockGC& lock)
1003 {
1004     if (MOZ_UNLIKELY(!hasAvailableArenas())) {
1005         rt->gc.availableChunks(lock).remove(this);
1006         rt->gc.fullChunks(lock).push(this);
1007     }
1008 }
1009 
1010 void
1011 Chunk::updateChunkListAfterFree(JSRuntime* rt, const AutoLockGC& lock)
1012 {
1013     if (info.numArenasFree == 1) {
1014         rt->gc.fullChunks(lock).remove(this);
1015         rt->gc.availableChunks(lock).push(this);
1016     } else if (!unused()) {
1017         MOZ_ASSERT(!rt->gc.fullChunks(lock).contains(this));
1018         MOZ_ASSERT(rt->gc.availableChunks(lock).contains(this));
1019         MOZ_ASSERT(!rt->gc.emptyChunks(lock).contains(this));
1020     } else {
1021         MOZ_ASSERT(unused());
1022         rt->gc.availableChunks(lock).remove(this);
1023         decommitAllArenas(rt);
1024         rt->gc.emptyChunks(lock).push(this);
1025     }
1026 }
1027 
1028 inline bool
1029 GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const
1030 {
1031     // To minimize memory waste, we do not want to run the background chunk
1032     // allocation if we already have some empty chunks or when the runtime has
1033     // a small heap size (and therefore likely has a small growth rate).
1034     return allocTask.enabled() &&
1035            emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
1036            (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
1037 }
1038 
1039 void
1040 GCRuntime::startBackgroundAllocTaskIfIdle()
1041 {
1042     AutoLockHelperThreadState helperLock;
1043     if (allocTask.isRunning())
1044         return;
1045 
1046     // Join the previous invocation of the task. This will return immediately
1047     // if the thread has never been started.
1048     allocTask.joinWithLockHeld();
1049     allocTask.startWithLockHeld();
1050 }
1051 
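// Return a chunk with space for at least one more arena: reuse an available
// chunk if there is one, otherwise take an empty chunk or allocate a fresh
// chunk from the OS, possibly kicking off background chunk allocation.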
1052 Chunk*
1053 GCRuntime::pickChunk(const AutoLockGC& lock,
1054                      AutoMaybeStartBackgroundAllocation& maybeStartBackgroundAllocation)
1055 {
1056     if (availableChunks(lock).count())
1057         return availableChunks(lock).head();
1058 
1059     Chunk* chunk = emptyChunks(lock).pop();
1060     if (!chunk) {
1061         chunk = Chunk::allocate(rt);
1062         if (!chunk)
1063             return nullptr;
1064         MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
1065     }
1066 
1067     MOZ_ASSERT(chunk->unused());
1068     MOZ_ASSERT(!fullChunks(lock).contains(chunk));
1069 
1070     if (wantBackgroundAllocation(lock))
1071         maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt);
1072 
1073     chunkAllocationSinceLastGC = true;
1074 
1075     availableChunks(lock).push(chunk);
1076 
1077     return chunk;
1078 }
1079 
1080 ArenaHeader*
1081 GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
1082 {
1083     MOZ_ASSERT(chunk->hasAvailableArenas());
1084 
1085     // Fail the allocation if we are over our heap size limits.
1086     if (!rt->isHeapMinorCollecting() &&
1087         !isHeapCompacting() &&
1088         usage.gcBytes() >= tunables.gcMaxBytes())
1089     {
1090         return nullptr;
1091     }
1092 
1093     ArenaHeader* aheader = chunk->allocateArena(rt, zone, thingKind, lock);
1094     zone->usage.addGCArena();
1095 
1096     // Trigger an incremental slice if needed.
1097     if (!rt->isHeapMinorCollecting() && !isHeapCompacting())
1098         maybeAllocTriggerZoneGC(zone, lock);
1099 
1100     return aheader;
1101 }
1102 
1103 void
1104 GCRuntime::releaseArena(ArenaHeader* aheader, const AutoLockGC& lock)
1105 {
1106     aheader->zone->usage.removeGCArena();
1107     if (isBackgroundSweeping())
1108         aheader->zone->threshold.updateForRemovedArena(tunables);
1109     return aheader->chunk()->releaseArena(rt, aheader, lock);
1110 }
1111 
1112 GCRuntime::GCRuntime(JSRuntime* rt) :
1113     rt(rt),
1114     systemZone(nullptr),
1115     nursery(rt),
1116     storeBuffer(rt, nursery),
1117     stats(rt),
1118     marker(rt),
1119     usage(nullptr),
1120     mMemProfiler(rt),
1121     maxMallocBytes(0),
1122     nextCellUniqueId_(LargestTaggedNullCellPointer + 1), // Ensure disjoint from null tagged pointers.
1123     numArenasFreeCommitted(0),
1124     verifyPreData(nullptr),
1125     chunkAllocationSinceLastGC(false),
1126     nextFullGCTime(0),
1127     lastGCTime(PRMJ_Now()),
1128     mode(JSGC_MODE_INCREMENTAL),
1129     numActiveZoneIters(0),
1130     decommitThreshold(32 * 1024 * 1024),
1131     cleanUpEverything(false),
1132     grayBufferState(GCRuntime::GrayBufferState::Unused),
1133     grayBitsValid(false),
1134     majorGCTriggerReason(JS::gcreason::NO_REASON),
1135     minorGCTriggerReason(JS::gcreason::NO_REASON),
1136     fullGCForAtomsRequested_(false),
1137     minorGCNumber(0),
1138     majorGCNumber(0),
1139     jitReleaseNumber(0),
1140     number(0),
1141     startNumber(0),
1142     isFull(false),
1143 #ifdef DEBUG
1144     disableStrictProxyCheckingCount(0),
1145 #endif
1146     incrementalState(gc::NO_INCREMENTAL),
1147     lastMarkSlice(false),
1148     sweepOnBackgroundThread(false),
1149     foundBlackGrayEdges(false),
1150     freeLifoAlloc(JSRuntime::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
1151     zoneGroupIndex(0),
1152     zoneGroups(nullptr),
1153     currentZoneGroup(nullptr),
1154     sweepZone(nullptr),
1155     sweepKindIndex(0),
1156     abortSweepAfterCurrentGroup(false),
1157     arenasAllocatedDuringSweep(nullptr),
1158     startedCompacting(false),
1159     relocatedArenasToRelease(nullptr),
1160 #ifdef JS_GC_MARKING_VALIDATION
1161     markingValidator(nullptr),
1162 #endif
1163     interFrameGC(false),
1164     defaultTimeBudget_(SliceBudget::UnlimitedTimeBudget),
1165     incrementalAllowed(true),
1166     generationalDisabled(0),
1167     compactingEnabled(true),
1168     compactingDisabledCount(0),
1169     manipulatingDeadZones(false),
1170     objectsMarkedInDeadZones(0),
1171     poked(false),
1172 #ifdef JS_GC_ZEAL
1173     zealMode(0),
1174     zealFrequency(0),
1175     nextScheduled(0),
1176     deterministicOnly(false),
1177     incrementalLimit(0),
1178 #endif
1179     validate(true),
1180     fullCompartmentChecks(false),
1181     mallocBytesUntilGC(0),
1182     mallocGCTriggered(false),
1183     alwaysPreserveCode(false),
1184 #ifdef DEBUG
1185     inUnsafeRegion(0),
1186     noGCOrAllocationCheck(0),
1187 #endif
1188     lock(nullptr),
1189     allocTask(rt, emptyChunks_),
1190     helperState(rt)
1191 {
1192     setGCMode(JSGC_MODE_GLOBAL);
1193 }
1194 
1195 #ifdef JS_GC_ZEAL
1196 
1197 void
1198 GCRuntime::getZeal(uint8_t* zeal, uint32_t* frequency, uint32_t* scheduled)
1199 {
1200     *zeal = zealMode;
1201     *frequency = zealFrequency;
1202     *scheduled = nextScheduled;
1203 }
1204 
1205 const char* gc::ZealModeHelpText =
1206     "  Specifies how zealous the garbage collector should be. Values for level:\n"
1207     "    0: Normal amount of collection\n"
1208     "    1: Collect when roots are added or removed\n"
1209     "    2: Collect every N allocations (default: 100)\n"
1210     "    3: Collect when the window paints (browser only)\n"
1211     "    4: Verify pre write barriers between instructions\n"
1212     "    5: Verify pre write barriers between paints\n"
1213     "    6: Verify stack rooting\n"
1214     "    7: Collect the nursery every N nursery allocations\n"
1215     "    8: Incremental GC in two slices: 1) mark roots 2) finish collection\n"
1216     "    9: Incremental GC in two slices: 1) mark all 2) new marking and finish\n"
1217     "   10: Incremental GC in multiple slices\n"
1218     "   11: unused\n"
1219     "   12: unused\n"
1220     "   13: Check internal hashtables on minor GC\n"
1221     "   14: Perform a shrinking collection every N allocations\n";
1222 
1223 void
1224 GCRuntime::setZeal(uint8_t zeal, uint32_t frequency)
1225 {
1226     if (verifyPreData)
1227         VerifyBarriers(rt, PreBarrierVerifier);
1228 
1229     if (zealMode == ZealGenerationalGCValue) {
1230         evictNursery(JS::gcreason::DEBUG_GC);
1231         nursery.leaveZealMode();
1232     }
1233 
1234     if (zeal == ZealGenerationalGCValue)
1235         nursery.enterZealMode();
1236 
1237     bool schedule = zeal >= js::gc::ZealAllocValue;
1238     zealMode = zeal;
1239     zealFrequency = frequency;
1240     nextScheduled = schedule ? frequency : 0;
1241 }
1242 
1243 void
1244 GCRuntime::setNextScheduled(uint32_t count)
1245 {
1246     nextScheduled = count;
1247 }
1248 
1249 bool
1250 GCRuntime::parseAndSetZeal(const char* str)
1251 {
1252     int zeal = -1;
1253     int frequency = -1;
1254 
1255     if (isdigit(str[0])) {
1256         zeal = atoi(str);
1257 
1258         const char* p = strchr(str, ',');
1259         if (!p)
1260             frequency = JS_DEFAULT_ZEAL_FREQ;
1261         else
1262             frequency = atoi(p + 1);
1263     }
1264 
1265     if (zeal < 0 || zeal > ZealLimit || frequency <= 0) {
1266         fprintf(stderr, "Format: JS_GC_ZEAL=level[,N]\n");
1267         fputs(ZealModeHelpText, stderr);
1268         return false;
1269     }
1270 
1271     setZeal(zeal, frequency);
1272     return true;
1273 }
1274 
1275 #endif
1276 
1277 /*
1278  * Lifetime in number of major GCs for type sets attached to scripts containing
1279  * observed types.
1280  */
1281 static const uint64_t JIT_SCRIPT_RELEASE_TYPES_PERIOD = 20;
1282 
1283 bool
1284 GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
1285 {
1286     InitMemorySubsystem();
1287 
1288     lock = PR_NewLock();
1289     if (!lock)
1290         return false;
1291 
1292     if (!rootsHash.init(256))
1293         return false;
1294 
1295     if (!helperState.init())
1296         return false;
1297 
1298     /*
1299      * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
1300      * for default backward API compatibility.
1301      */
1302     AutoLockGC lock(rt);
1303     tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock);
1304     setMaxMallocBytes(maxbytes);
1305 
1306     const char* size = getenv("JSGC_MARK_STACK_LIMIT");
1307     if (size)
1308         setMarkStackLimit(atoi(size), lock);
1309 
1310     jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
1311 
1312     if (!nursery.init(maxNurseryBytes))
1313         return false;
1314 
1315     if (!nursery.isEnabled()) {
1316         MOZ_ASSERT(nursery.nurserySize() == 0);
1317         ++rt->gc.generationalDisabled;
1318     } else {
1319         MOZ_ASSERT(nursery.nurserySize() > 0);
1320         if (!storeBuffer.enable())
1321             return false;
1322     }
1323 
1324 #ifdef JS_GC_ZEAL
1325     const char* zealSpec = getenv("JS_GC_ZEAL");
1326     if (zealSpec && zealSpec[0] && !parseAndSetZeal(zealSpec))
1327         return false;
1328 #endif
1329 
1330     if (!InitTrace(*this))
1331         return false;
1332 
1333     if (!marker.init(mode))
1334         return false;
1335 
1336     return true;
1337 }
1338 
1339 void
1340 GCRuntime::finish()
1341 {
1342     /* Wait for the nursery sweeping to end. */
1343     if (rt->gc.nursery.isEnabled())
1344         rt->gc.nursery.waitBackgroundFreeEnd();
1345 
1346     /*
1347      * Wait until the background finalization and allocation stops and the
1348      * helper thread shuts down before we forcefully release any remaining GC
1349      * memory.
1350      */
1351     helperState.finish();
1352     allocTask.cancel(GCParallelTask::CancelAndWait);
1353 
1354 #ifdef JS_GC_ZEAL
1355     /* Free memory associated with GC verification. */
1356     finishVerifier();
1357 #endif
1358 
1359     /* Delete all remaining zones. */
1360     if (rt->gcInitialized) {
1361         AutoSetThreadIsSweeping threadIsSweeping;
1362         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
1363             for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
1364                 js_delete(comp.get());
1365             js_delete(zone.get());
1366         }
1367     }
1368 
1369     zones.clear();
1370 
1371     FreeChunkPool(rt, fullChunks_);
1372     FreeChunkPool(rt, availableChunks_);
1373     FreeChunkPool(rt, emptyChunks_);
1374 
1375     if (lock) {
1376         PR_DestroyLock(lock);
1377         lock = nullptr;
1378     }
1379 
1380     FinishTrace();
1381 }
1382 
1383 template <typename T>
1384 static void
1385 FinishPersistentRootedChain(mozilla::LinkedList<PersistentRooted<T>>& list)
1386 {
1387     while (!list.isEmpty())
1388         list.getFirst()->reset();
1389 }
1390 
1391 void
1392 js::gc::FinishPersistentRootedChains(RootLists& roots)
1393 {
1394     FinishPersistentRootedChain(roots.getPersistentRootedList<JSObject*>());
1395     FinishPersistentRootedChain(roots.getPersistentRootedList<JSScript*>());
1396     FinishPersistentRootedChain(roots.getPersistentRootedList<JSString*>());
1397     FinishPersistentRootedChain(roots.getPersistentRootedList<jsid>());
1398     FinishPersistentRootedChain(roots.getPersistentRootedList<Value>());
1399     FinishPersistentRootedChain(roots.heapRoots_[THING_ROOT_TRACEABLE]);
1400 }
1401 
1402 void
1403 GCRuntime::finishRoots()
1404 {
1405     if (rootsHash.initialized())
1406         rootsHash.clear();
1407 
1408     FinishPersistentRootedChains(rt->mainThread.roots);
1409 }
1410 
1411 void
1412 GCRuntime::setParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock)
1413 {
1414     switch (key) {
1415       case JSGC_MAX_MALLOC_BYTES:
1416         setMaxMallocBytes(value);
1417         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
1418             zone->setGCMaxMallocBytes(maxMallocBytesAllocated() * 0.9);
1419         break;
1420       case JSGC_SLICE_TIME_BUDGET:
1421         defaultTimeBudget_ = value ? value : SliceBudget::UnlimitedTimeBudget;
1422         break;
1423       case JSGC_MARK_STACK_LIMIT:
1424         setMarkStackLimit(value, lock);
1425         break;
1426       case JSGC_DECOMMIT_THRESHOLD:
1427         decommitThreshold = value * 1024 * 1024;
1428         break;
1429       case JSGC_MODE:
1430         mode = JSGCMode(value);
1431         MOZ_ASSERT(mode == JSGC_MODE_GLOBAL ||
1432                    mode == JSGC_MODE_COMPARTMENT ||
1433                    mode == JSGC_MODE_INCREMENTAL);
1434         break;
1435       case JSGC_COMPACTING_ENABLED:
1436         compactingEnabled = value != 0;
1437         break;
1438       default:
1439         tunables.setParameter(key, value, lock);
1440         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
1441             zone->threshold.updateAfterGC(zone->usage.gcBytes(), GC_NORMAL, tunables,
1442                                           schedulingState, lock);
1443         }
1444     }
1445 }
1446 
1447 void
1448 GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value, const AutoLockGC& lock)
1449 {
1450     switch (key) {
1451       case JSGC_MAX_BYTES:
1452         gcMaxBytes_ = value;
1453         break;
1454       case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
1455         highFrequencyThresholdUsec_ = value * PRMJ_USEC_PER_MSEC;
1456         break;
1457       case JSGC_HIGH_FREQUENCY_LOW_LIMIT:
1458         highFrequencyLowLimitBytes_ = value * 1024 * 1024;
1459         if (highFrequencyLowLimitBytes_ >= highFrequencyHighLimitBytes_)
1460             highFrequencyHighLimitBytes_ = highFrequencyLowLimitBytes_ + 1;
1461         MOZ_ASSERT(highFrequencyHighLimitBytes_ > highFrequencyLowLimitBytes_);
1462         break;
1463       case JSGC_HIGH_FREQUENCY_HIGH_LIMIT:
1464         MOZ_ASSERT(value > 0);
1465         highFrequencyHighLimitBytes_ = value * 1024 * 1024;
1466         if (highFrequencyHighLimitBytes_ <= highFrequencyLowLimitBytes_)
1467             highFrequencyLowLimitBytes_ = highFrequencyHighLimitBytes_ - 1;
1468         MOZ_ASSERT(highFrequencyHighLimitBytes_ > highFrequencyLowLimitBytes_);
1469         break;
1470       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX:
1471         highFrequencyHeapGrowthMax_ = value / 100.0;
1472         MOZ_ASSERT(highFrequencyHeapGrowthMax_ / 0.85 > 1.0);
1473         break;
1474       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN:
1475         highFrequencyHeapGrowthMin_ = value / 100.0;
1476         MOZ_ASSERT(highFrequencyHeapGrowthMin_ / 0.85 > 1.0);
1477         break;
1478       case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
1479         lowFrequencyHeapGrowth_ = value / 100.0;
1480         MOZ_ASSERT(lowFrequencyHeapGrowth_ / 0.9 > 1.0);
1481         break;
1482       case JSGC_DYNAMIC_HEAP_GROWTH:
1483         dynamicHeapGrowthEnabled_ = value != 0;
1484         break;
1485       case JSGC_DYNAMIC_MARK_SLICE:
1486         dynamicMarkSliceEnabled_ = value != 0;
1487         break;
1488       case JSGC_ALLOCATION_THRESHOLD:
1489         gcZoneAllocThresholdBase_ = value * 1024 * 1024;
1490         break;
1491       case JSGC_MIN_EMPTY_CHUNK_COUNT:
1492         minEmptyChunkCount_ = value;
1493         if (minEmptyChunkCount_ > maxEmptyChunkCount_)
1494             maxEmptyChunkCount_ = minEmptyChunkCount_;
1495         MOZ_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
1496         break;
1497       case JSGC_MAX_EMPTY_CHUNK_COUNT:
1498         maxEmptyChunkCount_ = value;
1499         if (minEmptyChunkCount_ > maxEmptyChunkCount_)
1500             minEmptyChunkCount_ = maxEmptyChunkCount_;
1501         MOZ_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
1502         break;
1503       default:
1504         MOZ_CRASH("Unknown GC parameter.");
1505     }
1506 }
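// A short note on units, inferred from the conversions above: the byte-sized
// tunables (JSGC_HIGH_FREQUENCY_LOW_LIMIT, JSGC_HIGH_FREQUENCY_HIGH_LIMIT and
// JSGC_ALLOCATION_THRESHOLD) are supplied in MiB and stored in bytes, while
// the heap-growth tunables are supplied as percentages and stored as ratios.
// For example (assumed value, for illustration only), passing 150 for
// JSGC_LOW_FREQUENCY_HEAP_GROWTH stores 1.5, i.e. the heap may grow to 150%
// of its post-GC size; getParameter() below converts back the same way.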
1507 
1508 uint32_t
1509 GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock)
1510 {
1511     switch (key) {
1512       case JSGC_MAX_BYTES:
1513         return uint32_t(tunables.gcMaxBytes());
1514       case JSGC_MAX_MALLOC_BYTES:
1515         return maxMallocBytes;
1516       case JSGC_BYTES:
1517         return uint32_t(usage.gcBytes());
1518       case JSGC_MODE:
1519         return uint32_t(mode);
1520       case JSGC_UNUSED_CHUNKS:
1521         return uint32_t(emptyChunks(lock).count());
1522       case JSGC_TOTAL_CHUNKS:
1523         return uint32_t(fullChunks(lock).count() +
1524                         availableChunks(lock).count() +
1525                         emptyChunks(lock).count());
1526       case JSGC_SLICE_TIME_BUDGET:
1527         if (defaultTimeBudget_ == SliceBudget::UnlimitedTimeBudget) {
1528             return 0;
1529         } else {
1530             MOZ_RELEASE_ASSERT(defaultTimeBudget_ >= 0);
1531             MOZ_RELEASE_ASSERT(defaultTimeBudget_ < UINT32_MAX);
1532             return uint32_t(defaultTimeBudget_);
1533         }
1534       case JSGC_MARK_STACK_LIMIT:
1535         return marker.maxCapacity();
1536       case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
1537         return tunables.highFrequencyThresholdUsec();
1538       case JSGC_HIGH_FREQUENCY_LOW_LIMIT:
1539         return tunables.highFrequencyLowLimitBytes() / 1024 / 1024;
1540       case JSGC_HIGH_FREQUENCY_HIGH_LIMIT:
1541         return tunables.highFrequencyHighLimitBytes() / 1024 / 1024;
1542       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX:
1543         return uint32_t(tunables.highFrequencyHeapGrowthMax() * 100);
1544       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN:
1545         return uint32_t(tunables.highFrequencyHeapGrowthMin() * 100);
1546       case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
1547         return uint32_t(tunables.lowFrequencyHeapGrowth() * 100);
1548       case JSGC_DYNAMIC_HEAP_GROWTH:
1549         return tunables.isDynamicHeapGrowthEnabled();
1550       case JSGC_DYNAMIC_MARK_SLICE:
1551         return tunables.isDynamicMarkSliceEnabled();
1552       case JSGC_ALLOCATION_THRESHOLD:
1553         return tunables.gcZoneAllocThresholdBase() / 1024 / 1024;
1554       case JSGC_MIN_EMPTY_CHUNK_COUNT:
1555         return tunables.minEmptyChunkCount(lock);
1556       case JSGC_MAX_EMPTY_CHUNK_COUNT:
1557         return tunables.maxEmptyChunkCount();
1558       case JSGC_COMPACTING_ENABLED:
1559         return compactingEnabled;
1560       default:
1561         MOZ_ASSERT(key == JSGC_NUMBER);
1562         return uint32_t(number);
1563     }
1564 }
1565 
1566 void
1567 GCRuntime::setMarkStackLimit(size_t limit, AutoLockGC& lock)
1568 {
1569     MOZ_ASSERT(!rt->isHeapBusy());
1570     AutoUnlockGC unlock(lock);
1571     AutoStopVerifyingBarriers pauseVerification(rt, false);
1572     marker.setMaxCapacity(limit);
1573 }
1574 
1575 bool
1576 GCRuntime::addBlackRootsTracer(JSTraceDataOp traceOp, void* data)
1577 {
1578     AssertHeapIsIdle(rt);
1579     return !!blackRootTracers.append(Callback<JSTraceDataOp>(traceOp, data));
1580 }
1581 
1582 void
1583 GCRuntime::removeBlackRootsTracer(JSTraceDataOp traceOp, void* data)
1584 {
1585     // Can be called from finalizers
1586     for (size_t i = 0; i < blackRootTracers.length(); i++) {
1587         Callback<JSTraceDataOp>* e = &blackRootTracers[i];
1588         if (e->op == traceOp && e->data == data) {
1589             blackRootTracers.erase(e);
1590         }
1591     }
1592 }
1593 
1594 void
1595 GCRuntime::setGrayRootsTracer(JSTraceDataOp traceOp, void* data)
1596 {
1597     AssertHeapIsIdle(rt);
1598     grayRootTracer.op = traceOp;
1599     grayRootTracer.data = data;
1600 }
1601 
1602 void
1603 GCRuntime::setGCCallback(JSGCCallback callback, void* data)
1604 {
1605     gcCallback.op = callback;
1606     gcCallback.data = data;
1607 }
1608 
1609 void
1610 GCRuntime::callGCCallback(JSGCStatus status) const
1611 {
1612     if (gcCallback.op)
1613         gcCallback.op(rt, status, gcCallback.data);
1614 }
1615 
1616 namespace {
1617 
1618 class AutoNotifyGCActivity {
1619   public:
1620     explicit AutoNotifyGCActivity(GCRuntime& gc) : gc_(gc) {
1621         if (!gc_.isIncrementalGCInProgress()) {
1622             gcstats::AutoPhase ap(gc_.stats, gcstats::PHASE_GC_BEGIN);
1623             gc_.callGCCallback(JSGC_BEGIN);
1624         }
1625     }
1626     ~AutoNotifyGCActivity() {
1627         if (!gc_.isIncrementalGCInProgress()) {
1628             gcstats::AutoPhase ap(gc_.stats, gcstats::PHASE_GC_END);
1629             gc_.callGCCallback(JSGC_END);
1630         }
1631     }
1632 
1633   private:
1634     GCRuntime& gc_;
1635 };
1636 
1637 } // (anon)
1638 
1639 bool
1640 GCRuntime::addFinalizeCallback(JSFinalizeCallback callback, void* data)
1641 {
1642     return finalizeCallbacks.append(Callback<JSFinalizeCallback>(callback, data));
1643 }
1644 
1645 void
1646 GCRuntime::removeFinalizeCallback(JSFinalizeCallback callback)
1647 {
1648     for (Callback<JSFinalizeCallback>* p = finalizeCallbacks.begin();
1649          p < finalizeCallbacks.end(); p++)
1650     {
1651         if (p->op == callback) {
1652             finalizeCallbacks.erase(p);
1653             break;
1654         }
1655     }
1656 }
1657 
1658 void
1659 GCRuntime::callFinalizeCallbacks(FreeOp* fop, JSFinalizeStatus status) const
1660 {
1661     for (auto& p : finalizeCallbacks) {
1662         p.op(fop, status, !isFull, p.data);
1663     }
1664 }
1665 
1666 bool
1667 GCRuntime::addWeakPointerZoneGroupCallback(JSWeakPointerZoneGroupCallback callback, void* data)
1668 {
1669     return updateWeakPointerZoneGroupCallbacks.append(
1670             Callback<JSWeakPointerZoneGroupCallback>(callback, data));
1671 }
1672 
1673 void
1674 GCRuntime::removeWeakPointerZoneGroupCallback(JSWeakPointerZoneGroupCallback callback)
1675 {
1676     for (auto& p : updateWeakPointerZoneGroupCallbacks) {
1677         if (p.op == callback) {
1678             updateWeakPointerZoneGroupCallbacks.erase(&p);
1679             break;
1680         }
1681     }
1682 }
1683 
1684 void
1685 GCRuntime::callWeakPointerZoneGroupCallbacks() const
1686 {
1687     for (auto const& p : updateWeakPointerZoneGroupCallbacks) {
1688         p.op(rt, p.data);
1689     }
1690 }
1691 
1692 bool
1693 GCRuntime::addWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback, void* data)
1694 {
1695     return updateWeakPointerCompartmentCallbacks.append(
1696             Callback<JSWeakPointerCompartmentCallback>(callback, data));
1697 }
1698 
1699 void
1700 GCRuntime::removeWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback)
1701 {
1702     for (auto& p : updateWeakPointerCompartmentCallbacks) {
1703         if (p.op == callback) {
1704             updateWeakPointerCompartmentCallbacks.erase(&p);
1705             break;
1706         }
1707     }
1708 }
1709 
1710 void
1711 GCRuntime::callWeakPointerCompartmentCallbacks(JSCompartment* comp) const
1712 {
1713     for (auto const& p : updateWeakPointerCompartmentCallbacks) {
1714         p.op(rt, comp, p.data);
1715     }
1716 }
1717 
1718 JS::GCSliceCallback
1719 GCRuntime::setSliceCallback(JS::GCSliceCallback callback) {
1720     return stats.setSliceCallback(callback);
1721 }
1722 
1723 bool
1724 GCRuntime::addRoot(Value* vp, const char* name)
1725 {
1726     /*
1727      * Sometimes Firefox will hold weak references to objects and then convert
1728      * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
1729      * or ModifyBusyCount in workers). We need a read barrier to cover these
1730      * cases.
1731      */
1732     if (isIncrementalGCInProgress())
1733         HeapValue::writeBarrierPre(*vp);
1734 
1735     return rootsHash.put(vp, name);
1736 }
1737 
1738 void
1739 GCRuntime::removeRoot(Value* vp)
1740 {
1741     rootsHash.remove(vp);
1742     poke();
1743 }
1744 
1745 extern JS_FRIEND_API(bool)
1746 js::AddRawValueRoot(JSContext* cx, Value* vp, const char* name)
1747 {
1748     MOZ_ASSERT(vp);
1749     MOZ_ASSERT(name);
1750     bool ok = cx->runtime()->gc.addRoot(vp, name);
1751     if (!ok)
1752         JS_ReportOutOfMemory(cx);
1753     return ok;
1754 }
1755 
1756 extern JS_FRIEND_API(void)
1757 js::RemoveRawValueRoot(JSContext* cx, Value* vp)
1758 {
1759     cx->runtime()->gc.removeRoot(vp);
1760 }
1761 
1762 void
1763 GCRuntime::setMaxMallocBytes(size_t value)
1764 {
1765     /*
1766      * For compatibility, treat any value that exceeds PTRDIFF_T_MAX as
1767      * PTRDIFF_T_MAX.
1768      */
1769     maxMallocBytes = (ptrdiff_t(value) >= 0) ? value : size_t(-1) >> 1;
1770     resetMallocBytes();
1771     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
1772         zone->setGCMaxMallocBytes(value);
1773 }
1774 
1775 void
1776 GCRuntime::resetMallocBytes()
1777 {
1778     mallocBytesUntilGC = ptrdiff_t(maxMallocBytes);
1779     mallocGCTriggered = false;
1780 }
1781 
1782 void
1783 GCRuntime::updateMallocCounter(JS::Zone* zone, size_t nbytes)
1784 {
1785     mallocBytesUntilGC -= ptrdiff_t(nbytes);
1786     if (MOZ_UNLIKELY(isTooMuchMalloc()))
1787         onTooMuchMalloc();
1788     else if (zone)
1789         zone->updateMallocCounter(nbytes);
1790 }
1791 
1792 void
1793 GCRuntime::onTooMuchMalloc()
1794 {
1795     if (!mallocGCTriggered)
1796         mallocGCTriggered = triggerGC(JS::gcreason::TOO_MUCH_MALLOC);
1797 }
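// A rough sketch of how these pieces fit together, with an assumed limit used
// only for illustration: if maxMallocBytes is 128 MiB, resetMallocBytes()
// sets mallocBytesUntilGC to 128 MiB, and each updateMallocCounter(zone,
// nbytes) call counts nbytes down from that limit. Once the countdown is
// exhausted (as reported by isTooMuchMalloc(), whose definition is elsewhere),
// onTooMuchMalloc() requests at most one TOO_MUCH_MALLOC GC until the counter
// is reset again.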
1798 
1799 double
1800 ZoneHeapThreshold::allocTrigger(bool highFrequencyGC) const
1801 {
1802     return (highFrequencyGC ? 0.85 : 0.9) * gcTriggerBytes();
1803 }
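// Worked example for allocTrigger(), using an assumed trigger size for
// illustration only: if gcTriggerBytes() is 200 MB, the returned threshold is
// 0.85 * 200 MB = 170 MB while in high-frequency mode and
// 0.9 * 200 MB = 180 MB otherwise.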
1804 
1805 /* static */ double
1806 ZoneHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
1807                                                           const GCSchedulingTunables& tunables,
1808                                                           const GCSchedulingState& state)
1809 {
1810     if (!tunables.isDynamicHeapGrowthEnabled())
1811         return 3.0;
1812 
1813     // For small zones, our collection heuristics do not matter much: favor
1814     // something simple in this case.
1815     if (lastBytes < 1 * 1024 * 1024)
1816         return tunables.lowFrequencyHeapGrowth();
1817 
1818     // If GCs are not triggering in rapid succession, use a lower threshold so
1819     // that we will collect garbage sooner.
1820     if (!state.inHighFrequencyGCMode())
1821         return tunables.lowFrequencyHeapGrowth();
1822 
1823     // The heap growth factor depends on the heap size after a GC and the GC
1824     // frequency. For low frequency GCs (more than 1sec between GCs) we let
1825     // the heap grow to 150%. For high frequency GCs we let the heap grow
1826     // depending on the heap size:
1827     //   lastBytes < highFrequencyLowLimit: 300%
1828     //   lastBytes > highFrequencyHighLimit: 150%
1829     //   otherwise: linear interpolation between 300% and 150% based on lastBytes
1830 
1831     // Use shorter names to make the operation comprehensible.
1832     double minRatio = tunables.highFrequencyHeapGrowthMin();
1833     double maxRatio = tunables.highFrequencyHeapGrowthMax();
1834     double lowLimit = tunables.highFrequencyLowLimitBytes();
1835     double highLimit = tunables.highFrequencyHighLimitBytes();
1836 
1837     if (lastBytes <= lowLimit)
1838         return maxRatio;
1839 
1840     if (lastBytes >= highLimit)
1841         return minRatio;
1842 
1843     double factor = maxRatio - ((maxRatio - minRatio) * ((lastBytes - lowLimit) /
1844                                                          (highLimit - lowLimit)));
1845     MOZ_ASSERT(factor >= minRatio);
1846     MOZ_ASSERT(factor <= maxRatio);
1847     return factor;
1848 }
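// Worked example of the interpolation above, with assumed tunable values used
// only for illustration: with highFrequencyHeapGrowthMin = 1.5,
// highFrequencyHeapGrowthMax = 3.0, lowLimit = 100 MB and highLimit = 500 MB,
// a zone that ended the last GC at 300 MB gets
//   3.0 - (3.0 - 1.5) * ((300 - 100) / (500 - 100)) = 2.25,
// i.e. a growth factor exactly halfway between the two extremes, since 300 MB
// lies halfway between the limits.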
1849 
1850 /* static */ size_t
1851 ZoneHeapThreshold::computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
1852                                            JSGCInvocationKind gckind,
1853                                            const GCSchedulingTunables& tunables,
1854                                            const AutoLockGC& lock)
1855 {
1856     size_t base = gckind == GC_SHRINK
1857                 ? Max(lastBytes, tunables.minEmptyChunkCount(lock) * ChunkSize)
1858                 : Max(lastBytes, tunables.gcZoneAllocThresholdBase());
1859     double trigger = double(base) * growthFactor;
1860     return size_t(Min(double(tunables.gcMaxBytes()), trigger));
1861 }
1862 
1863 void
1864 ZoneHeapThreshold::updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
1865                                  const GCSchedulingTunables& tunables,
1866                                  const GCSchedulingState& state, const AutoLockGC& lock)
1867 {
1868     gcHeapGrowthFactor_ = computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
1869     gcTriggerBytes_ = computeZoneTriggerBytes(gcHeapGrowthFactor_, lastBytes, gckind, tunables,
1870                                               lock);
1871 }
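// Putting the two helpers together, again with assumed numbers used only for
// illustration: continuing the example above (growth factor 2.25, last heap
// size 300 MB, which is above gcZoneAllocThresholdBase), a non-shrinking GC
// sets the new trigger to min(gcMaxBytes, 300 MB * 2.25) = 675 MB, clamped to
// gcMaxBytes if that is smaller; for a shrinking GC the base comes from the
// empty-chunk reserve instead, as computeZoneTriggerBytes() shows.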
1872 
1873 void
1874 ZoneHeapThreshold::updateForRemovedArena(const GCSchedulingTunables& tunables)
1875 {
1876     size_t amount = ArenaSize * gcHeapGrowthFactor_;
1877 
1878     MOZ_ASSERT(amount > 0);
1879     MOZ_ASSERT(gcTriggerBytes_ >= amount);
1880 
1881     if (gcTriggerBytes_ - amount < tunables.gcZoneAllocThresholdBase() * gcHeapGrowthFactor_)
1882         return;
1883 
1884     gcTriggerBytes_ -= amount;
1885 }
1886 
1887 void
1888 GCMarker::delayMarkingArena(ArenaHeader* aheader)
1889 {
1890     if (aheader->hasDelayedMarking) {
1891         /* Arena already scheduled to be marked later */
1892         return;
1893     }
1894     aheader->setNextDelayedMarking(unmarkedArenaStackTop);
1895     unmarkedArenaStackTop = aheader;
1896     markLaterArenas++;
1897 }
1898 
1899 void
1900 GCMarker::delayMarkingChildren(const void* thing)
1901 {
1902     const TenuredCell* cell = TenuredCell::fromPointer(thing);
1903     cell->arenaHeader()->markOverflow = 1;
1904     delayMarkingArena(cell->arenaHeader());
1905 }
1906 
1907 inline void
1908 ArenaLists::prepareForIncrementalGC(JSRuntime* rt)
1909 {
1910     for (auto i : AllAllocKinds()) {
1911         FreeList* freeList = &freeLists[i];
1912         if (!freeList->isEmpty()) {
1913             ArenaHeader* aheader = freeList->arenaHeader();
1914             aheader->allocatedDuringIncremental = true;
1915             rt->gc.marker.delayMarkingArena(aheader);
1916         }
1917     }
1918 }
1919 
1920 /* Compacting GC */
1921 
1922 bool
1923 GCRuntime::shouldCompact()
1924 {
1925     // Compact on shrinking GC if enabled, but skip compacting in incremental
1926     // GCs if we are currently animating.
1927     return invocationKind == GC_SHRINK && isCompactingGCEnabled() &&
1928         (!isIncremental || rt->lastAnimationTime + PRMJ_USEC_PER_SEC < PRMJ_Now());
1929 }
1930 
1931 void
1932 GCRuntime::disableCompactingGC()
1933 {
1934     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1935     ++compactingDisabledCount;
1936 }
1937 
1938 void
1939 GCRuntime::enableCompactingGC()
1940 {
1941     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1942     MOZ_ASSERT(compactingDisabledCount > 0);
1943     --compactingDisabledCount;
1944 }
1945 
1946 bool
1947 GCRuntime::isCompactingGCEnabled() const
1948 {
1949     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1950     return compactingEnabled && compactingDisabledCount == 0;
1951 }
1952 
1953 AutoDisableCompactingGC::AutoDisableCompactingGC(JSRuntime* rt)
1954   : gc(rt->gc)
1955 {
1956     gc.disableCompactingGC();
1957 }
1958 
1959 AutoDisableCompactingGC::~AutoDisableCompactingGC()
1960 {
1961     gc.enableCompactingGC();
1962 }
1963 
1964 static bool
1965 CanRelocateZone(Zone* zone)
1966 {
1967     return !zone->isAtomsZone() && !zone->isSelfHostingZone();
1968 }
1969 
1970 static bool
1971 CanRelocateAllocKind(AllocKind kind)
1972 {
1973     return IsObjectAllocKind(kind);
1974 }
1975 
1976 size_t ArenaHeader::countFreeCells()
1977 {
1978     size_t count = 0;
1979     size_t thingSize = getThingSize();
1980     FreeSpan firstSpan(getFirstFreeSpan());
1981     for (const FreeSpan* span = &firstSpan; !span->isEmpty(); span = span->nextSpan())
1982         count += span->length(thingSize);
1983     return count;
1984 }
1985 
1986 size_t ArenaHeader::countUsedCells()
1987 {
1988     return Arena::thingsPerArena(getThingSize()) - countFreeCells();
1989 }
1990 
1991 ArenaHeader*
1992 ArenaList::removeRemainingArenas(ArenaHeader** arenap)
1993 {
1994     // This is only ever called to remove arenas that are after the cursor, so
1995     // we don't need to update it.
1996 #ifdef DEBUG
1997     for (ArenaHeader* arena = *arenap; arena; arena = arena->next)
1998         MOZ_ASSERT(cursorp_ != &arena->next);
1999 #endif
2000     ArenaHeader* remainingArenas = *arenap;
2001     *arenap = nullptr;
2002     check();
2003     return remainingArenas;
2004 }
2005 
2006 static bool
2007 ShouldRelocateAllArenas(JS::gcreason::Reason reason)
2008 {
2009     return reason == JS::gcreason::DEBUG_GC;
2010 }
2011 
2012 /*
2013  * Choose which arenas to relocate all cells from. Return an arena cursor that
2014  * can be passed to removeRemainingArenas().
2015  */
2016 ArenaHeader**
2017 ArenaList::pickArenasToRelocate(size_t& arenaTotalOut, size_t& relocTotalOut)
2018 {
2019     // Relocate the greatest number of arenas such that the number of used cells
2020     // in relocated arenas is less than or equal to the number of free cells in
2021     // unrelocated arenas. In other words we only relocate cells we can move
2022     // into existing arenas, and we choose the least full arenas to relocate.
2023     //
2024     // This is made easier by the fact that the arena list has been sorted in
2025     // descending order of number of used cells, so we will always relocate a
2026     // tail of the arena list. All we need to do is find the point at which to
2027     // start relocating.
2028 
2029     check();
2030 
2031     if (isCursorAtEnd())
2032         return nullptr;
2033 
2034     ArenaHeader** arenap = cursorp_;  // Next arena to consider for relocation.
2035     size_t previousFreeCells = 0;     // Count of free cells before arenap.
2036     size_t followingUsedCells = 0;    // Count of used cells after arenap.
2037     size_t fullArenaCount = 0;        // Number of full arenas (not relocated).
2038     size_t nonFullArenaCount = 0;     // Number of non-full arenas (considered for relocation).
2039     size_t arenaIndex = 0;            // Index of the next arena to consider.
2040 
2041     for (ArenaHeader* arena = head_; arena != *cursorp_; arena = arena->next)
2042         fullArenaCount++;
2043 
2044     for (ArenaHeader* arena = *cursorp_; arena; arena = arena->next) {
2045         followingUsedCells += arena->countUsedCells();
2046         nonFullArenaCount++;
2047     }
2048 
2049     mozilla::DebugOnly<size_t> lastFreeCells(0);
2050     size_t cellsPerArena = Arena::thingsPerArena((*arenap)->getThingSize());
2051 
2052     while (*arenap) {
2053         ArenaHeader* arena = *arenap;
2054         if (followingUsedCells <= previousFreeCells)
2055             break;
2056 
2057         size_t freeCells = arena->countFreeCells();
2058         size_t usedCells = cellsPerArena - freeCells;
2059         followingUsedCells -= usedCells;
2060 #ifdef DEBUG
2061         MOZ_ASSERT(freeCells >= lastFreeCells);
2062         lastFreeCells = freeCells;
2063 #endif
2064         previousFreeCells += freeCells;
2065         arenap = &arena->next;
2066         arenaIndex++;
2067     }
2068 
2069     size_t relocCount = nonFullArenaCount - arenaIndex;
2070     MOZ_ASSERT(relocCount < nonFullArenaCount);
2071     MOZ_ASSERT((relocCount == 0) == (!*arenap));
2072     arenaTotalOut += fullArenaCount + nonFullArenaCount;
2073     relocTotalOut += relocCount;
2074 
2075     return arenap;
2076 }
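// A worked example of the selection loop above, with assumed cell counts used
// only for illustration: suppose an arena holds 100 cells and the non-full
// arenas after the cursor have used-cell counts 90, 60, 30 and 10 (so 10, 40,
// 70 and 90 free cells respectively), giving followingUsedCells = 190
// initially. After keeping the first arena: followingUsedCells = 100,
// previousFreeCells = 10; after keeping the second: followingUsedCells = 40,
// previousFreeCells = 50. Now followingUsedCells <= previousFreeCells, so the
// loop stops and the last two arenas (40 used cells) are relocated into the
// 50 free cells of the arenas that stay put.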
2077 
2078 #ifdef DEBUG
2079 inline bool
2080 PtrIsInRange(const void* ptr, const void* start, size_t length)
2081 {
2082     return uintptr_t(ptr) - uintptr_t(start) < length;
2083 }
2084 #endif
2085 
2086 static TenuredCell*
2087 AllocRelocatedCell(Zone* zone, AllocKind thingKind, size_t thingSize)
2088 {
2089     AutoEnterOOMUnsafeRegion oomUnsafe;
2090     void* dstAlloc = zone->arenas.allocateFromFreeList(thingKind, thingSize);
2091     if (!dstAlloc)
2092         dstAlloc = GCRuntime::refillFreeListInGC(zone, thingKind);
2093     if (!dstAlloc) {
2094         // This can only happen in zeal mode or debug builds as we don't
2095         // otherwise relocate more cells than we have existing free space
2096         // for.
2097         oomUnsafe.crash("Could not allocate new arena while compacting");
2098     }
2099     return TenuredCell::fromPointer(dstAlloc);
2100 }
2101 
2102 static void
2103 RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind, size_t thingSize)
2104 {
2105     JS::AutoSuppressGCAnalysis nogc(zone->runtimeFromMainThread());
2106 
2107     // Allocate a new cell.
2108     MOZ_ASSERT(zone == src->zone());
2109     TenuredCell* dst = AllocRelocatedCell(zone, thingKind, thingSize);
2110 
2111     // Copy source cell contents to destination.
2112     memcpy(dst, src, thingSize);
2113 
2114     // Move any uid attached to the object.
2115     src->zone()->transferUniqueId(dst, src);
2116 
2117     if (IsObjectAllocKind(thingKind)) {
2118         JSObject* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
2119         JSObject* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
2120 
2121         if (srcObj->isNative()) {
2122             NativeObject* srcNative = &srcObj->as<NativeObject>();
2123             NativeObject* dstNative = &dstObj->as<NativeObject>();
2124 
2125             // Fixup the pointer to inline object elements if necessary.
2126             if (srcNative->hasFixedElements())
2127                 dstNative->setFixedElements();
2128 
2129             // For copy-on-write objects that own their elements, fix up the
2130             // owner pointer to point to the relocated object.
2131             if (srcNative->denseElementsAreCopyOnWrite()) {
2132                 HeapPtrNativeObject& owner = dstNative->getElementsHeader()->ownerObject();
2133                 if (owner == srcNative)
2134                     owner = dstNative;
2135             }
2136         }
2137 
2138         // Call object moved hook if present.
2139         if (JSObjectMovedOp op = srcObj->getClass()->ext.objectMovedOp)
2140             op(dstObj, srcObj);
2141 
2142         MOZ_ASSERT_IF(dstObj->isNative(),
2143                       !PtrIsInRange((const Value*)dstObj->as<NativeObject>().getDenseElements(),
2144                                     src, thingSize));
2145     }
2146 
2147     // Copy the mark bits.
2148     dst->copyMarkBitsFrom(src);
2149 
2150     // Mark source cell as forwarded and leave a pointer to the destination.
2151     RelocationOverlay* overlay = RelocationOverlay::fromCell(src);
2152     overlay->forwardTo(dst);
2153 }
2154 
2155 static void
2156 RelocateArena(ArenaHeader* aheader, SliceBudget& sliceBudget)
2157 {
2158     MOZ_ASSERT(aheader->allocated());
2159     MOZ_ASSERT(!aheader->hasDelayedMarking);
2160     MOZ_ASSERT(!aheader->markOverflow);
2161     MOZ_ASSERT(!aheader->allocatedDuringIncremental);
2162 
2163     Zone* zone = aheader->zone;
2164 
2165     AllocKind thingKind = aheader->getAllocKind();
2166     size_t thingSize = aheader->getThingSize();
2167 
2168     for (ArenaCellIterUnderFinalize i(aheader); !i.done(); i.next()) {
2169         RelocateCell(zone, i.getCell(), thingKind, thingSize);
2170         sliceBudget.step();
2171     }
2172 
2173 #ifdef DEBUG
2174     for (ArenaCellIterUnderFinalize i(aheader); !i.done(); i.next()) {
2175         TenuredCell* src = i.getCell();
2176         MOZ_ASSERT(RelocationOverlay::isCellForwarded(src));
2177         TenuredCell* dest = Forwarded(src);
2178         MOZ_ASSERT(src->isMarked(BLACK) == dest->isMarked(BLACK));
2179         MOZ_ASSERT(src->isMarked(GRAY) == dest->isMarked(GRAY));
2180     }
2181 #endif
2182 }
2183 
2184 static inline bool
2185 ShouldProtectRelocatedArenas(JS::gcreason::Reason reason)
2186 {
2187     // For zeal mode collections we don't release the relocated arenas
2188     // immediately. Instead we protect them and keep them around until the next
2189     // collection so we can catch any stray accesses to them.
2190 #ifdef DEBUG
2191     return reason == JS::gcreason::DEBUG_GC;
2192 #else
2193     return false;
2194 #endif
2195 }
2196 
2197 /*
2198  * Relocate all arenas identified by pickArenasToRelocate: for each arena,
2199  * relocate each cell within it, then add it to a list of relocated arenas.
2200  */
2201 ArenaHeader*
2202 ArenaList::relocateArenas(ArenaHeader* toRelocate, ArenaHeader* relocated, SliceBudget& sliceBudget,
2203                           gcstats::Statistics& stats)
2204 {
2205     check();
2206 
2207     while (ArenaHeader* arena = toRelocate) {
2208         toRelocate = arena->next;
2209         RelocateArena(arena, sliceBudget);
2210         // Prepend to list of relocated arenas
2211         arena->next = relocated;
2212         relocated = arena;
2213         stats.count(gcstats::STAT_ARENA_RELOCATED);
2214     }
2215 
2216     check();
2217 
2218     return relocated;
2219 }
2220 
2221 // Skip compacting zones unless we can free a certain proportion of their GC
2222 // heap memory.
2223 static const double MIN_ZONE_RECLAIM_PERCENT = 2.0;
2224 
2225 static bool
2226 IsOOMReason(JS::gcreason::Reason reason)
2227 {
2228     return reason == JS::gcreason::LAST_DITCH ||
2229            reason == JS::gcreason::MEM_PRESSURE;
2230 }
2231 
2232 static bool
2233 ShouldRelocateZone(size_t arenaCount, size_t relocCount, JS::gcreason::Reason reason)
2234 {
2235     if (relocCount == 0)
2236         return false;
2237 
2238     if (IsOOMReason(reason))
2239         return true;
2240 
2241     return (relocCount * 100.0) / arenaCount >= MIN_ZONE_RECLAIM_PERCENT;
2242 }
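// In concrete terms, with an assumed arena count used only for illustration:
// given MIN_ZONE_RECLAIM_PERCENT of 2.0, a zone with 1000 arenas is only
// compacted if at least 20 of them were picked for relocation, unless the
// collection was triggered by one of the OOM reasons above, in which case
// relocation goes ahead whenever anything was picked at all.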
2243 
2244 bool
2245 ArenaLists::relocateArenas(Zone* zone, ArenaHeader*& relocatedListOut, JS::gcreason::Reason reason,
2246                            SliceBudget& sliceBudget, gcstats::Statistics& stats)
2247 {
2248     // This is only called from the main thread while we are doing a GC, so
2249     // there is no need to lock.
2250     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
2251     MOZ_ASSERT(runtime_->gc.isHeapCompacting());
2252     MOZ_ASSERT(!runtime_->gc.isBackgroundSweeping());
2253 
2254     // Flush all the freeLists back into the arena headers
2255     purge();
2256     checkEmptyFreeLists();
2257 
2258     if (ShouldRelocateAllArenas(reason)) {
2259         zone->prepareForCompacting();
2260         for (auto i : AllAllocKinds()) {
2261             if (CanRelocateAllocKind(i)) {
2262                 ArenaList& al = arenaLists[i];
2263                 ArenaHeader* allArenas = al.head();
2264                 al.clear();
2265                 relocatedListOut = al.relocateArenas(allArenas, relocatedListOut, sliceBudget, stats);
2266             }
2267         }
2268     } else {
2269         size_t arenaCount = 0;
2270         size_t relocCount = 0;
2271         AllAllocKindArray<ArenaHeader**> toRelocate;
2272 
2273         for (auto i : AllAllocKinds()) {
2274             toRelocate[i] = nullptr;
2275             if (CanRelocateAllocKind(i))
2276                 toRelocate[i] = arenaLists[i].pickArenasToRelocate(arenaCount, relocCount);
2277         }
2278 
2279         if (!ShouldRelocateZone(arenaCount, relocCount, reason))
2280             return false;
2281 
2282         zone->prepareForCompacting();
2283         for (auto i : AllAllocKinds()) {
2284             if (toRelocate[i]) {
2285                 ArenaList& al = arenaLists[i];
2286                 ArenaHeader* arenas = al.removeRemainingArenas(toRelocate[i]);
2287                 relocatedListOut = al.relocateArenas(arenas, relocatedListOut, sliceBudget, stats);
2288             }
2289         }
2290     }
2291 
2292     // When we allocate new locations for cells, we use
2293     // allocateFromFreeList(). Reset the free list again so that
2294     // AutoCopyFreeListToArenasForGC doesn't complain that the free lists are
2295     // different now.
2296     purge();
2297     checkEmptyFreeLists();
2298 
2299     return true;
2300 }
2301 
2302 bool
2303 GCRuntime::relocateArenas(Zone* zone, JS::gcreason::Reason reason, ArenaHeader*& relocatedListOut,
2304                           SliceBudget& sliceBudget)
2305 {
2306     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT_MOVE);
2307 
2308     MOZ_ASSERT(!zone->isPreservingCode());
2309     MOZ_ASSERT(CanRelocateZone(zone));
2310 
2311     jit::StopAllOffThreadCompilations(zone);
2312 
2313     if (!zone->arenas.relocateArenas(zone, relocatedListOut, reason, sliceBudget, stats))
2314         return false;
2315 
2316 #ifdef DEBUG
2317     // Check that we did as much compaction as we should have. There
2318     // should always be less than one arena's worth of free cells.
2319     for (auto i : AllAllocKinds()) {
2320         size_t thingsPerArena = Arena::thingsPerArena(Arena::thingSize(i));
2321         if (CanRelocateAllocKind(i)) {
2322             ArenaList& al = zone->arenas.arenaLists[i];
2323             size_t freeCells = 0;
2324             for (ArenaHeader* arena = al.arenaAfterCursor(); arena; arena = arena->next)
2325                 freeCells += arena->countFreeCells();
2326             MOZ_ASSERT(freeCells < thingsPerArena);
2327         }
2328     }
2329 #endif
2330 
2331     return true;
2332 }
2333 
2334 void
2335 MovingTracer::onObjectEdge(JSObject** objp)
2336 {
2337     JSObject* obj = *objp;
2338     if (obj->runtimeFromAnyThread() == runtime() && IsForwarded(obj))
2339         *objp = Forwarded(obj);
2340 }
2341 
2342 void
2343 Zone::prepareForCompacting()
2344 {
2345     FreeOp* fop = runtimeFromMainThread()->defaultFreeOp();
2346     discardJitCode(fop);
2347 }
2348 
2349 void
2350 GCRuntime::sweepTypesAfterCompacting(Zone* zone)
2351 {
2352     FreeOp* fop = rt->defaultFreeOp();
2353     zone->beginSweepTypes(fop, rt->gc.releaseObservedTypes && !zone->isPreservingCode());
2354 
2355     AutoClearTypeInferenceStateOnOOM oom(zone);
2356 
2357     for (ZoneCellIterUnderGC i(zone, AllocKind::SCRIPT); !i.done(); i.next()) {
2358         JSScript* script = i.get<JSScript>();
2359         script->maybeSweepTypes(&oom);
2360     }
2361 
2362     for (ZoneCellIterUnderGC i(zone, AllocKind::OBJECT_GROUP); !i.done(); i.next()) {
2363         ObjectGroup* group = i.get<ObjectGroup>();
2364         group->maybeSweep(&oom);
2365     }
2366 
2367     zone->types.endSweep(rt);
2368 }
2369 
2370 void
2371 GCRuntime::sweepZoneAfterCompacting(Zone* zone)
2372 {
2373     MOZ_ASSERT(zone->isCollecting());
2374     FreeOp* fop = rt->defaultFreeOp();
2375     sweepTypesAfterCompacting(zone);
2376     zone->sweepBreakpoints(fop);
2377     zone->sweepWeakMaps();
2378 
2379     for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
2380         c->sweepInnerViews();
2381         c->sweepBaseShapeTable();
2382         c->sweepInitialShapeTable();
2383         c->objectGroups.sweep(fop);
2384         c->sweepRegExps();
2385         c->sweepSavedStacks();
2386         c->sweepGlobalObject(fop);
2387         c->sweepObjectPendingMetadata();
2388         c->sweepSelfHostingScriptSource();
2389         c->sweepDebugScopes();
2390         c->sweepJitCompartment(fop);
2391         c->sweepNativeIterators();
2392         c->sweepTemplateObjects();
2393     }
2394 }
2395 
2396 template <typename T>
2397 static void
2398 UpdateCellPointersTyped(MovingTracer* trc, ArenaHeader* arena, JS::TraceKind traceKind)
2399 {
2400     for (ArenaCellIterUnderGC i(arena); !i.done(); i.next()) {
2401         T* cell = reinterpret_cast<T*>(i.getCell());
2402         cell->fixupAfterMovingGC();
2403         TraceChildren(trc, cell, traceKind);
2404     }
2405 }
2406 
2407 /*
2408  * Update the internal pointers for all cells in an arena.
2409  */
2410 static void
2411 UpdateCellPointers(MovingTracer* trc, ArenaHeader* arena)
2412 {
2413     AllocKind kind = arena->getAllocKind();
2414     JS::TraceKind traceKind = MapAllocToTraceKind(kind);
2415 
2416     switch (kind) {
2417       case AllocKind::FUNCTION:
2418       case AllocKind::FUNCTION_EXTENDED:
2419       case AllocKind::OBJECT0:
2420       case AllocKind::OBJECT0_BACKGROUND:
2421       case AllocKind::OBJECT2:
2422       case AllocKind::OBJECT2_BACKGROUND:
2423       case AllocKind::OBJECT4:
2424       case AllocKind::OBJECT4_BACKGROUND:
2425       case AllocKind::OBJECT8:
2426       case AllocKind::OBJECT8_BACKGROUND:
2427       case AllocKind::OBJECT12:
2428       case AllocKind::OBJECT12_BACKGROUND:
2429       case AllocKind::OBJECT16:
2430       case AllocKind::OBJECT16_BACKGROUND:
2431         UpdateCellPointersTyped<JSObject>(trc, arena, traceKind);
2432         return;
2433       case AllocKind::SCRIPT:
2434         UpdateCellPointersTyped<JSScript>(trc, arena, traceKind);
2435         return;
2436       case AllocKind::LAZY_SCRIPT:
2437         UpdateCellPointersTyped<LazyScript>(trc, arena, traceKind);
2438         return;
2439       case AllocKind::SHAPE:
2440         UpdateCellPointersTyped<Shape>(trc, arena, traceKind);
2441         return;
2442       case AllocKind::ACCESSOR_SHAPE:
2443         UpdateCellPointersTyped<AccessorShape>(trc, arena, traceKind);
2444         return;
2445       case AllocKind::BASE_SHAPE:
2446         UpdateCellPointersTyped<BaseShape>(trc, arena, traceKind);
2447         return;
2448       case AllocKind::OBJECT_GROUP:
2449         UpdateCellPointersTyped<ObjectGroup>(trc, arena, traceKind);
2450         return;
2451       case AllocKind::JITCODE:
2452         UpdateCellPointersTyped<jit::JitCode>(trc, arena, traceKind);
2453         return;
2454       default:
2455         MOZ_CRASH("Invalid alloc kind for UpdateCellPointers");
2456     }
2457 }
2458 
2459 namespace js {
2460 namespace gc {
2461 
2462 struct ArenasToUpdate
2463 {
2464     enum KindsToUpdate {
2465         FOREGROUND = 1,
2466         BACKGROUND = 2,
2467         ALL = FOREGROUND | BACKGROUND
2468     };
2469     ArenasToUpdate(Zone* zone, KindsToUpdate kinds);
2470     bool done() { return kind == AllocKind::LIMIT; }
2471     ArenaHeader* getArenasToUpdate(AutoLockHelperThreadState& lock, unsigned max);
2472 
2473   private:
2474     KindsToUpdate kinds; // Selects which thing kinds to iterate
2475     Zone* zone;          // Zone to process
2476     AllocKind kind;      // Current alloc kind to process
2477     ArenaHeader* arena;  // Next arena to process
2478 
2479     AllocKind nextAllocKind(AllocKind i) { return AllocKind(uint8_t(i) + 1); }
2480     bool shouldProcessKind(AllocKind kind);
2481     ArenaHeader* next(AutoLockHelperThreadState& lock);
2482 };
2483 
2484 bool ArenasToUpdate::shouldProcessKind(AllocKind kind)
2485 {
2486     MOZ_ASSERT(IsValidAllocKind(kind));
2487 
2488     // GC things that do not contain JSObject pointers don't need updating.
2489     if (kind == AllocKind::FAT_INLINE_STRING ||
2490         kind == AllocKind::STRING ||
2491         kind == AllocKind::EXTERNAL_STRING ||
2492         kind == AllocKind::FAT_INLINE_ATOM ||
2493         kind == AllocKind::ATOM ||
2494         kind == AllocKind::SYMBOL)
2495     {
2496         return false;
2497     }
2498 
2499     // We try to update as many GC things in parallel as we can, but there are
2500     // kinds for which this might not be safe:
2501     //  - we assume JSObjects that are foreground finalized are not safe to
2502     //    update in parallel
2503     //  - updating a shape touches child shapes in fixupShapeTreeAfterMovingGC()
2504     if (js::gc::IsBackgroundFinalized(kind) &&
2505         kind != AllocKind::SHAPE &&
2506         kind != AllocKind::ACCESSOR_SHAPE)
2507     {
2508         return (kinds & BACKGROUND) != 0;
2509     } else {
2510         return (kinds & FOREGROUND) != 0;
2511     }
2512 }
2513 
2514 ArenasToUpdate::ArenasToUpdate(Zone* zone, KindsToUpdate kinds)
2515   : kinds(kinds), zone(zone), kind(AllocKind::FIRST), arena(nullptr)
2516 {
2517     MOZ_ASSERT(zone->isGCCompacting());
2518     MOZ_ASSERT(kinds && !(kinds & ~ALL));
2519 }
2520 
2521 ArenaHeader*
2522 ArenasToUpdate::next(AutoLockHelperThreadState& lock)
2523 {
2524     // Find the next arena to update.
2525     //
2526     // This iterates through the GC thing kinds filtered by shouldProcessKind(),
2527     // and then through the arenas of that kind.  All state is held in the
2528     // object and we just return when we find an arena.
2529 
2530     for (; kind < AllocKind::LIMIT; kind = nextAllocKind(kind)) {
2531         if (shouldProcessKind(kind)) {
2532             if (!arena)
2533                 arena = zone->arenas.getFirstArena(kind);
2534             else
2535                 arena = arena->next;
2536             if (arena)
2537                 return arena;
2538         }
2539     }
2540 
2541     MOZ_ASSERT(!arena);
2542     MOZ_ASSERT(done());
2543     return nullptr;
2544 }
2545 
2546 ArenaHeader*
2547 ArenasToUpdate::getArenasToUpdate(AutoLockHelperThreadState& lock, unsigned count)
2548 {
2549     if (done())
2550         return nullptr;
2551 
2552     ArenaHeader* head = nullptr;
2553     ArenaHeader* tail = nullptr;
2554 
2555     for (unsigned i = 0; i < count; ++i) {
2556         ArenaHeader* arena = next(lock);
2557         if (!arena)
2558             break;
2559 
2560         if (tail)
2561             tail->setNextArenaToUpdate(arena);
2562         else
2563             head = arena;
2564         tail = arena;
2565     }
2566 
2567     return head;
2568 }
2569 
2570 struct UpdateCellPointersTask : public GCParallelTask
2571 {
2572     // Number of arenas to update in one block.
2573 #ifdef DEBUG
2574     static const unsigned ArenasToProcess = 16;
2575 #else
2576     static const unsigned ArenasToProcess = 256;
2577 #endif
2578 
2579     UpdateCellPointersTask() : rt_(nullptr), source_(nullptr), arenaList_(nullptr) {}
2580     void init(JSRuntime* rt, ArenasToUpdate* source, AutoLockHelperThreadState& lock);
2581     ~UpdateCellPointersTask() override { join(); }
2582 
2583   private:
2584     JSRuntime* rt_;
2585     ArenasToUpdate* source_;
2586     ArenaHeader* arenaList_;
2587 
2588     virtual void run() override;
2589     void getArenasToUpdate(AutoLockHelperThreadState& lock);
2590     void updateArenas();
2591 };
2592 
2593 void
2594 UpdateCellPointersTask::init(JSRuntime* rt, ArenasToUpdate* source, AutoLockHelperThreadState& lock)
2595 {
2596     rt_ = rt;
2597     source_ = source;
2598     getArenasToUpdate(lock);
2599 }
2600 
2601 void
2602 UpdateCellPointersTask::getArenasToUpdate(AutoLockHelperThreadState& lock)
2603 {
2604     arenaList_ = source_->getArenasToUpdate(lock, ArenasToProcess);
2605 }
2606 
2607 void
2608 UpdateCellPointersTask::updateArenas()
2609 {
2610     MovingTracer trc(rt_);
2611     for (ArenaHeader* arena = arenaList_;
2612          arena;
2613          arena = arena->getNextArenaToUpdateAndUnlink())
2614     {
2615         UpdateCellPointers(&trc, arena);
2616     }
2617     arenaList_ = nullptr;
2618 }
2619 
2620 /* virtual */ void
2621 UpdateCellPointersTask::run()
2622 {
2623     MOZ_ASSERT(!HelperThreadState().isLocked());
2624     while (arenaList_) {
2625         updateArenas();
2626         {
2627             AutoLockHelperThreadState lock;
2628             getArenasToUpdate(lock);
2629         }
2630     }
2631 }
2632 
2633 } // namespace gc
2634 } // namespace js
2635 
2636 void
2637 GCRuntime::updateAllCellPointersParallel(MovingTracer* trc, Zone* zone)
2638 {
2639     AutoDisableProxyCheck noProxyCheck(rt); // These checks assert when run in parallel.
2640 
2641     const size_t minTasks = 2;
2642     const size_t maxTasks = 8;
2643     size_t targetTaskCount = HelperThreadState().cpuCount / 2;
2644     size_t taskCount = Min(Max(targetTaskCount, minTasks), maxTasks);
2645     UpdateCellPointersTask bgTasks[maxTasks];
2646     UpdateCellPointersTask fgTask;
2647 
2648     ArenasToUpdate fgArenas(zone, ArenasToUpdate::FOREGROUND);
2649     ArenasToUpdate bgArenas(zone, ArenasToUpdate::BACKGROUND);
2650 
2651     unsigned tasksStarted = 0;
2652     {
2653         AutoLockHelperThreadState lock;
2654         unsigned i;
2655         for (i = 0; i < taskCount && !bgArenas.done(); ++i) {
2656             bgTasks[i].init(rt, &bgArenas, lock);
2657             startTask(bgTasks[i], gcstats::PHASE_COMPACT_UPDATE_CELLS);
2658         }
2659         tasksStarted = i;
2660 
2661         fgTask.init(rt, &fgArenas, lock);
2662     }
2663 
2664     fgTask.runFromMainThread(rt);
2665 
2666     {
2667         AutoLockHelperThreadState lock;
2668         for (unsigned i = 0; i < tasksStarted; ++i)
2669             joinTask(bgTasks[i], gcstats::PHASE_COMPACT_UPDATE_CELLS);
2670     }
2671 }
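// A quick sketch of the task sizing above, with an assumed CPU count used only
// for illustration: on a machine reporting 8 CPUs, targetTaskCount is 4, which
// already lies within [minTasks, maxTasks] = [2, 8], so up to 4 background
// tasks are started (fewer if the background arenas run out first) while the
// foreground arenas are processed by fgTask on the main thread.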
2672 
2673 void
2674 GCRuntime::updateAllCellPointersSerial(MovingTracer* trc, Zone* zone)
2675 {
2676     UpdateCellPointersTask task;
2677     {
2678         AutoLockHelperThreadState lock;
2679         ArenasToUpdate allArenas(zone, ArenasToUpdate::ALL);
2680         task.init(rt, &allArenas, lock);
2681     }
2682     task.runFromMainThread(rt);
2683 }
2684 
2685 /*
2686  * Update pointers to relocated cells by doing a full heap traversal and sweep.
2687  *
2688  * The latter is necessary to update weak references which are not marked as
2689  * part of the traversal.
2690  */
2691 void
2692 GCRuntime::updatePointersToRelocatedCells(Zone* zone)
2693 {
2694     MOZ_ASSERT(zone->isGCCompacting());
2695     MOZ_ASSERT(rt->currentThreadHasExclusiveAccess());
2696 
2697     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT_UPDATE);
2698     MovingTracer trc(rt);
2699 
2700     // Fixup compartment global pointers as these get accessed during marking.
2701     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
2702         comp->fixupAfterMovingGC();
2703     JSCompartment::fixupCrossCompartmentWrappersAfterMovingGC(&trc);
2704 
2705     // Mark roots to update them.
2706     {
2707         markRuntime(&trc, MarkRuntime);
2708 
2709         gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
2710         Debugger::markAll(&trc);
2711         Debugger::markIncomingCrossCompartmentEdges(&trc);
2712 
2713         WeakMapBase::markAll(zone, &trc);
2714         for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
2715             c->trace(&trc);
2716             if (c->watchpointMap)
2717                 c->watchpointMap->markAll(&trc);
2718         }
2719 
2720         // Mark all gray roots, making sure we call the trace callback to get the
2721         // current set.
2722         if (JSTraceDataOp op = grayRootTracer.op)
2723             (*op)(&trc, grayRootTracer.data);
2724     }
2725 
2726     // Sweep everything to fix up weak pointers
2727     WatchpointMap::sweepAll(rt);
2728     Debugger::sweepAll(rt->defaultFreeOp());
2729     jit::JitRuntime::SweepJitcodeGlobalTable(rt);
2730     rt->gc.sweepZoneAfterCompacting(zone);
2731 
2732     // Type inference may put more blocks here to free.
2733     freeLifoAlloc.freeAll();
2734 
2735     // Clear runtime caches that can contain cell pointers.
2736     // TODO: Should possibly just call purgeRuntime() here.
2737     rt->newObjectCache.purge();
2738     rt->nativeIterCache.purge();
2739 
2740     // Call callbacks to get the rest of the system to fixup other untraced pointers.
2741     callWeakPointerZoneGroupCallbacks();
2742     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
2743         callWeakPointerCompartmentCallbacks(comp);
2744 
2745     // Finally, iterate through all cells that can contain JSObject pointers to
2746     // update them. Since updating each cell is independent we try to
2747     // parallelize this as much as possible.
2748     if (CanUseExtraThreads())
2749         updateAllCellPointersParallel(&trc, zone);
2750     else
2751         updateAllCellPointersSerial(&trc, zone);
2752 }
2753 
2754 void
2755 GCRuntime::protectAndHoldArenas(ArenaHeader* arenaList)
2756 {
2757     for (ArenaHeader* arena = arenaList; arena; ) {
2758         MOZ_ASSERT(arena->allocated());
2759         ArenaHeader* next = arena->next;
2760         if (!next) {
2761             // Prepend to hold list before we protect the memory.
2762             arena->next = relocatedArenasToRelease;
2763             relocatedArenasToRelease = arenaList;
2764         }
2765         ProtectPages(arena, ArenaSize);
2766         arena = next;
2767     }
2768 }
2769 
2770 void
2771 GCRuntime::unprotectHeldRelocatedArenas()
2772 {
2773     for (ArenaHeader* arena = relocatedArenasToRelease; arena; arena = arena->next) {
2774         UnprotectPages(arena, ArenaSize);
2775         MOZ_ASSERT(arena->allocated());
2776     }
2777 }
2778 
2779 void
2780 GCRuntime::releaseRelocatedArenas(ArenaHeader* arenaList)
2781 {
2782     AutoLockGC lock(rt);
2783     releaseRelocatedArenasWithoutUnlocking(arenaList, lock);
2784     expireChunksAndArenas(true, lock);
2785 }
2786 
2787 void
2788 GCRuntime::releaseRelocatedArenasWithoutUnlocking(ArenaHeader* arenaList, const AutoLockGC& lock)
2789 {
2790     // Release the relocated arenas, now containing only forwarding pointers
2791     unsigned count = 0;
2792     while (arenaList) {
2793         ArenaHeader* aheader = arenaList;
2794         arenaList = arenaList->next;
2795 
2796         // Clear the mark bits
2797         aheader->unmarkAll();
2798 
2799         // Mark arena as empty
2800         AllocKind thingKind = aheader->getAllocKind();
2801         size_t thingSize = aheader->getThingSize();
2802         Arena* arena = aheader->getArena();
2803         FreeSpan fullSpan;
2804         fullSpan.initFinal(arena->thingsStart(thingKind), arena->thingsEnd() - thingSize, thingSize);
2805         aheader->setFirstFreeSpan(&fullSpan);
2806 
2807 #if defined(JS_CRASH_DIAGNOSTICS) || defined(JS_GC_ZEAL)
2808         JS_POISON(reinterpret_cast<void*>(arena->thingsStart(thingKind)),
2809                   JS_MOVED_TENURED_PATTERN, Arena::thingsSpan(thingSize));
2810 #endif
2811 
2812         releaseArena(aheader, lock);
2813         ++count;
2814     }
2815 }
2816 
2817 // In debug mode we don't always release relocated arenas straight away.
2818 // Instead we sometimes protect them and hold onto them until the next GC
2819 // sweep phase so we can catch any pointers to them that didn't get forwarded.
2820 
2821 void
2822 GCRuntime::releaseHeldRelocatedArenas()
2823 {
2824 #ifdef DEBUG
2825     unprotectHeldRelocatedArenas();
2826     releaseRelocatedArenas(relocatedArenasToRelease);
2827     relocatedArenasToRelease = nullptr;
2828 #endif
2829 }
2830 
2831 void
2832 GCRuntime::releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock)
2833 {
2834 #ifdef DEBUG
2835     unprotectHeldRelocatedArenas();
2836     releaseRelocatedArenasWithoutUnlocking(relocatedArenasToRelease, lock);
2837     relocatedArenasToRelease = nullptr;
2838 #endif
2839 }
2840 
2841 void
2842 ReleaseArenaList(JSRuntime* rt, ArenaHeader* aheader, const AutoLockGC& lock)
2843 {
2844     ArenaHeader* next;
2845     for (; aheader; aheader = next) {
2846         next = aheader->next;
2847         rt->gc.releaseArena(aheader, lock);
2848     }
2849 }
2850 
2851 ArenaLists::~ArenaLists()
2852 {
2853     AutoLockGC lock(runtime_);
2854 
2855     for (auto i : AllAllocKinds()) {
2856         /*
2857          * We can only call this during the shutdown after the last GC when
2858          * the background finalization is disabled.
2859          */
2860         MOZ_ASSERT(backgroundFinalizeState[i] == BFS_DONE);
2861         ReleaseArenaList(runtime_, arenaLists[i].head(), lock);
2862     }
2863     ReleaseArenaList(runtime_, incrementalSweptArenas.head(), lock);
2864 
2865     for (auto i : ObjectAllocKinds())
2866         ReleaseArenaList(runtime_, savedObjectArenas[i].head(), lock);
2867     ReleaseArenaList(runtime_, savedEmptyObjectArenas, lock);
2868 }
2869 
2870 void
2871 ArenaLists::finalizeNow(FreeOp* fop, const FinalizePhase& phase)
2872 {
2873     gcstats::AutoPhase ap(fop->runtime()->gc.stats, phase.statsPhase);
2874     for (unsigned i = 0; i < phase.length; ++i)
2875         finalizeNow(fop, phase.kinds[i], RELEASE_ARENAS, nullptr);
2876 }
2877 
2878 void
2879 ArenaLists::finalizeNow(FreeOp* fop, AllocKind thingKind, KeepArenasEnum keepArenas, ArenaHeader** empty)
2880 {
2881     MOZ_ASSERT(!IsBackgroundFinalized(thingKind));
2882     forceFinalizeNow(fop, thingKind, keepArenas, empty);
2883 }
2884 
2885 void
2886 ArenaLists::forceFinalizeNow(FreeOp* fop, AllocKind thingKind, KeepArenasEnum keepArenas, ArenaHeader** empty)
2887 {
2888     MOZ_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
2889 
2890     ArenaHeader* arenas = arenaLists[thingKind].head();
2891     if (!arenas)
2892         return;
2893     arenaLists[thingKind].clear();
2894 
2895     size_t thingsPerArena = Arena::thingsPerArena(Arena::thingSize(thingKind));
2896     SortedArenaList finalizedSorted(thingsPerArena);
2897 
2898     auto unlimited = SliceBudget::unlimited();
2899     FinalizeArenas(fop, &arenas, finalizedSorted, thingKind, unlimited, keepArenas);
2900     MOZ_ASSERT(!arenas);
2901 
2902     if (empty) {
2903         MOZ_ASSERT(keepArenas == KEEP_ARENAS);
2904         finalizedSorted.extractEmpty(empty);
2905     }
2906 
2907     arenaLists[thingKind] = finalizedSorted.toArenaList();
2908 }
2909 
2910 void
2911 ArenaLists::queueForForegroundSweep(FreeOp* fop, const FinalizePhase& phase)
2912 {
2913     gcstats::AutoPhase ap(fop->runtime()->gc.stats, phase.statsPhase);
2914     for (unsigned i = 0; i < phase.length; ++i)
2915         queueForForegroundSweep(fop, phase.kinds[i]);
2916 }
2917 
2918 void
2919 ArenaLists::queueForForegroundSweep(FreeOp* fop, AllocKind thingKind)
2920 {
2921     MOZ_ASSERT(!IsBackgroundFinalized(thingKind));
2922     MOZ_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
2923     MOZ_ASSERT(!arenaListsToSweep[thingKind]);
2924 
2925     arenaListsToSweep[thingKind] = arenaLists[thingKind].head();
2926     arenaLists[thingKind].clear();
2927 }
2928 
2929 void
2930 ArenaLists::queueForBackgroundSweep(FreeOp* fop, const FinalizePhase& phase)
2931 {
2932     gcstats::AutoPhase ap(fop->runtime()->gc.stats, phase.statsPhase);
2933     for (unsigned i = 0; i < phase.length; ++i)
2934         queueForBackgroundSweep(fop, phase.kinds[i]);
2935 }
2936 
2937 inline void
2938 ArenaLists::queueForBackgroundSweep(FreeOp* fop, AllocKind thingKind)
2939 {
2940     MOZ_ASSERT(IsBackgroundFinalized(thingKind));
2941 
2942     ArenaList* al = &arenaLists[thingKind];
2943     if (al->isEmpty()) {
2944         MOZ_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
2945         return;
2946     }
2947 
2948     MOZ_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
2949 
2950     arenaListsToSweep[thingKind] = al->head();
2951     al->clear();
2952     backgroundFinalizeState[thingKind] = BFS_RUN;
2953 }
2954 
2955 /*static*/ void
2956 ArenaLists::backgroundFinalize(FreeOp* fop, ArenaHeader* listHead, ArenaHeader** empty)
2957 {
2958     MOZ_ASSERT(listHead);
2959     MOZ_ASSERT(empty);
2960 
2961     AllocKind thingKind = listHead->getAllocKind();
2962     Zone* zone = listHead->zone;
2963 
2964     size_t thingsPerArena = Arena::thingsPerArena(Arena::thingSize(thingKind));
2965     SortedArenaList finalizedSorted(thingsPerArena);
2966 
2967     auto unlimited = SliceBudget::unlimited();
2968     FinalizeArenas(fop, &listHead, finalizedSorted, thingKind, unlimited, KEEP_ARENAS);
2969     MOZ_ASSERT(!listHead);
2970 
2971     finalizedSorted.extractEmpty(empty);
2972 
2973     // When arenas are queued for background finalization, all arenas are moved
2974     // to arenaListsToSweep[], leaving the arenaLists[] empty. However, new
2975     // arenas may be allocated before background finalization finishes; now that
2976     // finalization is complete, we want to merge these lists back together.
2977     ArenaLists* lists = &zone->arenas;
2978     ArenaList* al = &lists->arenaLists[thingKind];
2979 
2980     // Flatten |finalizedSorted| into a regular ArenaList.
2981     ArenaList finalized = finalizedSorted.toArenaList();
2982 
2983     // We must take the GC lock to be able to safely modify the ArenaList;
2984     // however, this does not by itself make the changes visible to all threads,
2985     // as not all threads take the GC lock to read the ArenaLists.
2986     // That safety is provided by the ReleaseAcquire memory ordering of the
2987     // background finalize state, which we explicitly set as the final step.
2988     {
2989         AutoLockGC lock(fop->runtime());
2990         MOZ_ASSERT(lists->backgroundFinalizeState[thingKind] == BFS_RUN);
2991 
2992         // Join |al| and |finalized| into a single list.
2993         *al = finalized.insertListWithCursorAtEnd(*al);
2994 
2995         lists->arenaListsToSweep[thingKind] = nullptr;
2996     }
2997 
2998     lists->backgroundFinalizeState[thingKind] = BFS_DONE;
2999 }
3000 
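// The locking comment in backgroundFinalize() above describes a standard
// publication pattern: the list is rebuilt under the lock, and the final
// ReleaseAcquire store to the finalize state is what makes those writes
// visible to readers that never take the lock. A minimal stand-alone sketch
// of the same pattern using std::atomic (the names below are illustrative
// only, not part of this file):
//
//     #include <atomic>
//
//     struct Published {
//         int data = 0;                       // plain field, written first
//         std::atomic<bool> done{false};      // plays the role of BFS_DONE
//
//         void producer() {
//             data = 42;                      // rebuild the "list"
//             done.store(true, std::memory_order_release);   // publish it
//         }
//
//         int consumer() {
//             // A reader that observes done == true is also guaranteed to
//             // see data == 42, even though it never took any lock.
//             return done.load(std::memory_order_acquire) ? data : -1;
//         }
//     };
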
3001 void
3002 ArenaLists::queueForegroundObjectsForSweep(FreeOp* fop)
3003 {
3004     gcstats::AutoPhase ap(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_OBJECT);
3005 
3006 #ifdef DEBUG
3007     for (auto i : ObjectAllocKinds()) { // Braces needed to appease MSVC 2013.
3008         MOZ_ASSERT(savedObjectArenas[i].isEmpty());
3009     }
3010     MOZ_ASSERT(savedEmptyObjectArenas == nullptr);
3011 #endif
3012 
3013     // Foreground finalized objects must be finalized at the beginning of the
3014     // sweep phase, before control can return to the mutator. Otherwise,
3015     // mutator behavior could resurrect certain objects whose references the
3016     // finalizer would have erased.
3017     finalizeNow(fop, AllocKind::OBJECT0, KEEP_ARENAS, &savedEmptyObjectArenas);
3018     finalizeNow(fop, AllocKind::OBJECT2, KEEP_ARENAS, &savedEmptyObjectArenas);
3019     finalizeNow(fop, AllocKind::OBJECT4, KEEP_ARENAS, &savedEmptyObjectArenas);
3020     finalizeNow(fop, AllocKind::OBJECT8, KEEP_ARENAS, &savedEmptyObjectArenas);
3021     finalizeNow(fop, AllocKind::OBJECT12, KEEP_ARENAS, &savedEmptyObjectArenas);
3022     finalizeNow(fop, AllocKind::OBJECT16, KEEP_ARENAS, &savedEmptyObjectArenas);
3023 
3024     // Prevent the arenas from having new objects allocated into them. We need
3025     // to know which objects are marked while we incrementally sweep dead
3026     // references from type information.
3027     savedObjectArenas[AllocKind::OBJECT0] = arenaLists[AllocKind::OBJECT0].copyAndClear();
3028     savedObjectArenas[AllocKind::OBJECT2] = arenaLists[AllocKind::OBJECT2].copyAndClear();
3029     savedObjectArenas[AllocKind::OBJECT4] = arenaLists[AllocKind::OBJECT4].copyAndClear();
3030     savedObjectArenas[AllocKind::OBJECT8] = arenaLists[AllocKind::OBJECT8].copyAndClear();
3031     savedObjectArenas[AllocKind::OBJECT12] = arenaLists[AllocKind::OBJECT12].copyAndClear();
3032     savedObjectArenas[AllocKind::OBJECT16] = arenaLists[AllocKind::OBJECT16].copyAndClear();
3033 }
3034 
3035 void
3036 ArenaLists::mergeForegroundSweptObjectArenas()
3037 {
3038     AutoLockGC lock(runtime_);
3039     ReleaseArenaList(runtime_, savedEmptyObjectArenas, lock);
3040     savedEmptyObjectArenas = nullptr;
3041 
3042     mergeSweptArenas(AllocKind::OBJECT0);
3043     mergeSweptArenas(AllocKind::OBJECT2);
3044     mergeSweptArenas(AllocKind::OBJECT4);
3045     mergeSweptArenas(AllocKind::OBJECT8);
3046     mergeSweptArenas(AllocKind::OBJECT12);
3047     mergeSweptArenas(AllocKind::OBJECT16);
3048 }
3049 
3050 inline void
3051 ArenaLists::mergeSweptArenas(AllocKind thingKind)
3052 {
3053     ArenaList* al = &arenaLists[thingKind];
3054     ArenaList* saved = &savedObjectArenas[thingKind];
3055 
3056     *al = saved->insertListWithCursorAtEnd(*al);
3057     saved->clear();
3058 }
3059 
3060 void
3061 ArenaLists::queueForegroundThingsForSweep(FreeOp* fop)
3062 {
3063     gcShapeArenasToUpdate = arenaListsToSweep[AllocKind::SHAPE];
3064     gcAccessorShapeArenasToUpdate = arenaListsToSweep[AllocKind::ACCESSOR_SHAPE];
3065     gcObjectGroupArenasToUpdate = arenaListsToSweep[AllocKind::OBJECT_GROUP];
3066     gcScriptArenasToUpdate = arenaListsToSweep[AllocKind::SCRIPT];
3067 }
3068 
3069 /* static */ void*
3070 GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)
3071 {
3072     /*
3073      * Called by compacting GC to refill a free list while we are in a GC.
3074      */
3075 
3076     MOZ_ASSERT(zone->arenas.freeLists[thingKind].isEmpty());
3077     mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
3078     MOZ_ASSERT(rt->isHeapMajorCollecting());
3079     MOZ_ASSERT(!rt->gc.isBackgroundSweeping());
3080 
3081     AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
3082     return zone->arenas.allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
3083 }
3084 
3085 SliceBudget::SliceBudget()
3086   : timeBudget(UnlimitedTimeBudget), workBudget(UnlimitedWorkBudget)
3087 {
3088     makeUnlimited();
3089 }
3090 
3091 SliceBudget::SliceBudget(TimeBudget time)
3092   : timeBudget(time), workBudget(UnlimitedWorkBudget)
3093 {
3094     if (time.budget < 0) {
3095         makeUnlimited();
3096     } else {
3097         // Note: TimeBudget(0) is equivalent to WorkBudget(CounterReset).
3098         deadline = PRMJ_Now() + time.budget * PRMJ_USEC_PER_MSEC;
3099         counter = CounterReset;
3100     }
3101 }
3102 
3103 SliceBudget::SliceBudget(WorkBudget work)
3104   : timeBudget(UnlimitedTimeBudget), workBudget(work)
3105 {
3106     if (work.budget < 0) {
3107         makeUnlimited();
3108     } else {
3109         deadline = 0;
3110         counter = work.budget;
3111     }
3112 }
3113 
3114 int
3115 SliceBudget::describe(char* buffer, size_t maxlen) const
3116 {
3117     if (isUnlimited())
3118         return JS_snprintf(buffer, maxlen, "unlimited");
3119     else if (isWorkBudget())
3120         return JS_snprintf(buffer, maxlen, "work(%lld)", workBudget.budget);
3121     else
3122         return JS_snprintf(buffer, maxlen, "%lldms", timeBudget.budget);
3123 }
3124 
3125 bool
3126 SliceBudget::checkOverBudget()
3127 {
3128     bool over = PRMJ_Now() >= deadline;
3129     if (!over)
3130         counter = CounterReset;
3131     return over;
3132 }
3133 
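// A sketch of how these budgets are typically constructed by callers
// (illustrative only; TimeBudget and WorkBudget are assumed to be thin
// wrappers around a signed integer budget, as the constructors above imply):
//
//     SliceBudget unlimited = SliceBudget::unlimited();  // never over budget
//     SliceBudget timed{TimeBudget(10)};      // stop ~10 ms after slice start
//     SliceBudget counted{WorkBudget(1000)};  // stop after ~1000 work items
//
// For a time budget, |counter| counts down from CounterReset between clock
// reads, so the comparatively expensive PRMJ_Now() call in checkOverBudget()
// only happens once every CounterReset steps of work.
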
3134 void
3135 js::MarkCompartmentActive(InterpreterFrame* fp)
3136 {
3137     fp->script()->compartment()->zone()->active = true;
3138 }
3139 
3140 void
3141 GCRuntime::requestMajorGC(JS::gcreason::Reason reason)
3142 {
3143     if (majorGCRequested())
3144         return;
3145 
3146     majorGCTriggerReason = reason;
3147     rt->requestInterrupt(JSRuntime::RequestInterruptUrgent);
3148 }
3149 
3150 void
3151 GCRuntime::requestMinorGC(JS::gcreason::Reason reason)
3152 {
3153     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3154     if (minorGCRequested())
3155         return;
3156 
3157     minorGCTriggerReason = reason;
3158     rt->requestInterrupt(JSRuntime::RequestInterruptUrgent);
3159 }
3160 
3161 bool
3162 GCRuntime::triggerGC(JS::gcreason::Reason reason)
3163 {
3164     /*
3165      * Don't trigger GCs if this is being called off the main thread from
3166      * onTooMuchMalloc().
3167      */
3168     if (!CurrentThreadCanAccessRuntime(rt))
3169         return false;
3170 
3171     /* GC is already running. */
3172     if (rt->isHeapCollecting())
3173         return false;
3174 
3175     JS::PrepareForFullGC(rt);
3176     requestMajorGC(reason);
3177     return true;
3178 }
3179 
3180 void
3181 GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock)
3182 {
3183     size_t usedBytes = zone->usage.gcBytes();
3184     size_t thresholdBytes = zone->threshold.gcTriggerBytes();
3185     size_t igcThresholdBytes = thresholdBytes * tunables.zoneAllocThresholdFactor();
3186 
3187     if (usedBytes >= thresholdBytes) {
3188         // The threshold has been surpassed; immediately trigger a GC,
3189         // which will be done non-incrementally.
3190         triggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
3191     } else if (usedBytes >= igcThresholdBytes) {
3192         // Reduce the delay to the start of the next incremental slice.
3193         if (zone->gcDelayBytes < ArenaSize)
3194             zone->gcDelayBytes = 0;
3195         else
3196             zone->gcDelayBytes -= ArenaSize;
3197 
3198         if (!zone->gcDelayBytes) {
3199             // Start or continue an in-progress incremental GC. We do this
3200             // to try to avoid performing non-incremental GCs on zones
3201             // that allocate a lot of data, even when incremental slices
3202             // can't be triggered via scheduling in the event loop.
3203             triggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
3204 
3205             // Delay the next slice until a certain amount of allocation
3206             // has been performed.
3207             zone->gcDelayBytes = tunables.zoneAllocDelayBytes();
3208         }
3209     }
3210 }
3211 
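// A worked example of the two thresholds above (the numbers are illustrative):
// with gcTriggerBytes at 30 MB and a zoneAllocThresholdFactor of 0.9, the
// incremental threshold is 27 MB. Below 27 MB nothing happens. Between 27 MB
// and 30 MB, each time this check runs gcDelayBytes shrinks by ArenaSize, and
// whenever it reaches zero an incremental zone GC is requested and the delay
// is reset to zoneAllocDelayBytes(). At or above 30 MB a non-incremental zone
// GC is triggered immediately.
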
3212 bool
3213 GCRuntime::triggerZoneGC(Zone* zone, JS::gcreason::Reason reason)
3214 {
3215     /* Zones in use by a thread with an exclusive context can't be collected. */
3216     if (!CurrentThreadCanAccessRuntime(rt)) {
3217         MOZ_ASSERT(zone->usedByExclusiveThread || zone->isAtomsZone());
3218         return false;
3219     }
3220 
3221     /* GC is already running. */
3222     if (rt->isHeapCollecting())
3223         return false;
3224 
3225 #ifdef JS_GC_ZEAL
3226     if (zealMode == ZealAllocValue) {
3227         triggerGC(reason);
3228         return true;
3229     }
3230 #endif
3231 
3232     if (zone->isAtomsZone()) {
3233         /* We can't do a zone GC of the atoms compartment. */
3234         if (rt->keepAtoms()) {
3235             /* Skip the GC and retrigger it later, since the atoms zone won't
3236              * be collected while keepAtoms is true. */
3237             fullGCForAtomsRequested_ = true;
3238             return false;
3239         }
3240         triggerGC(reason);
3241         return true;
3242     }
3243 
3244     PrepareZoneForGC(zone);
3245     requestMajorGC(reason);
3246     return true;
3247 }
3248 
3249 bool
3250 GCRuntime::maybeGC(Zone* zone)
3251 {
3252     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3253 
3254 #ifdef JS_GC_ZEAL
3255     if (zealMode == ZealAllocValue || zealMode == ZealPokeValue) {
3256         JS::PrepareForFullGC(rt);
3257         gc(GC_NORMAL, JS::gcreason::DEBUG_GC);
3258         return true;
3259     }
3260 #endif
3261 
3262     if (gcIfRequested())
3263         return true;
3264 
3265     if (zone->usage.gcBytes() > 1024 * 1024 &&
3266         zone->usage.gcBytes() >= zone->threshold.allocTrigger(schedulingState.inHighFrequencyGCMode()) &&
3267         !isIncrementalGCInProgress() &&
3268         !isBackgroundSweeping())
3269     {
3270         PrepareZoneForGC(zone);
3271         startGC(GC_NORMAL, JS::gcreason::EAGER_ALLOC_TRIGGER);
3272         return true;
3273     }
3274 
3275     return false;
3276 }
3277 
3278 void
3279 GCRuntime::maybePeriodicFullGC()
3280 {
3281     /*
3282      * Trigger a periodic full GC.
3283      *
3284      * This is a source of non-determinism, but is not called from the shell.
3285      *
3286      * Access to the counters and, on 32-bit platforms, setting nextFullGCTime
3287      * below is not atomic, so a race condition could trigger or suppress the
3288      * GC. We tolerate this.
3289      */
3290 #ifndef JS_MORE_DETERMINISTIC
3291     int64_t now = PRMJ_Now();
3292     if (nextFullGCTime && nextFullGCTime <= now && !isIncrementalGCInProgress()) {
3293         if (chunkAllocationSinceLastGC ||
3294             numArenasFreeCommitted > decommitThreshold)
3295         {
3296             JS::PrepareForFullGC(rt);
3297             startGC(GC_SHRINK, JS::gcreason::PERIODIC_FULL_GC);
3298         } else {
3299             nextFullGCTime = now + GC_IDLE_FULL_SPAN;
3300         }
3301     }
3302 #endif
3303 }
3304 
3305 // Do all possible decommit immediately from the current thread without
3306 // releasing the GC lock or allocating any memory.
3307 void
3308 GCRuntime::decommitAllWithoutUnlocking(const AutoLockGC& lock)
3309 {
3310     MOZ_ASSERT(emptyChunks(lock).count() == 0);
3311     for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done(); chunk.next())
3312         chunk->decommitAllArenasWithoutUnlocking(lock);
3313     MOZ_ASSERT(availableChunks(lock).verify());
3314 }
3315 
3316 void
3317 GCRuntime::decommitArenas(AutoLockGC& lock)
3318 {
3319     // Verify that all entries in the empty chunks pool are decommitted.
3320     for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next())
3321         MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
3322 
3323     // Build a Vector of all current available Chunks. Since we release the
3324     // gc lock while doing the decommit syscall, it is dangerous to iterate
3325     // the available list directly, as concurrent operations can modify it.
3326     mozilla::Vector<Chunk*> toDecommit;
3327     MOZ_ASSERT(availableChunks(lock).verify());
3328     for (ChunkPool::Iter iter(availableChunks(lock)); !iter.done(); iter.next()) {
3329         if (!toDecommit.append(iter.get())) {
3330             // The OOM handler does a full, immediate decommit, so there is
3331             // nothing more to do here in any case.
3332             return onOutOfMallocMemory(lock);
3333         }
3334     }
3335 
3336     // Start at the tail and stop before the first chunk: we allocate from the
3337     // head and don't want to thrash with the mutator.
3338     for (size_t i = toDecommit.length(); i > 1; --i) {
3339         Chunk* chunk = toDecommit[i - 1];
3340         MOZ_ASSERT(chunk);
3341 
3342         // The arena list is not doubly-linked, so we have to work in the free
3343         // list order and not in the natural order.
3344         while (chunk->info.numArenasFreeCommitted) {
3345             bool ok = chunk->decommitOneFreeArena(rt, lock);
3346 
3347             // FIXME Bug 1095620: add cancellation support when this becomes
3348             // a ParallelTask.
3349             if (/* cancel_ || */ !ok)
3350                 return;
3351         }
3352     }
3353     MOZ_ASSERT(availableChunks(lock).verify());
3354 }
3355 
3356 void
3357 GCRuntime::expireChunksAndArenas(bool shouldShrink, AutoLockGC& lock)
3358 {
3359     ChunkPool toFree = expireEmptyChunkPool(shouldShrink, lock);
3360     if (toFree.count()) {
3361         AutoUnlockGC unlock(lock);
3362         FreeChunkPool(rt, toFree);
3363     }
3364 
3365     if (shouldShrink)
3366         decommitArenas(lock);
3367 }
3368 
3369 void
3370 GCRuntime::sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks, ThreadType threadType)
3371 {
3372     freeBlocks.freeAll();
3373 
3374     if (zones.isEmpty())
3375         return;
3376 
3377     // We must finalize thing kinds in the order specified by BackgroundFinalizePhases.
3378     ArenaHeader* emptyArenas = nullptr;
3379     FreeOp fop(rt, threadType);
3380     for (unsigned phase = 0 ; phase < ArrayLength(BackgroundFinalizePhases) ; ++phase) {
3381         for (Zone* zone = zones.front(); zone; zone = zone->nextZone()) {
3382             for (unsigned index = 0 ; index < BackgroundFinalizePhases[phase].length ; ++index) {
3383                 AllocKind kind = BackgroundFinalizePhases[phase].kinds[index];
3384                 ArenaHeader* arenas = zone->arenas.arenaListsToSweep[kind];
3385                 if (arenas)
3386                     ArenaLists::backgroundFinalize(&fop, arenas, &emptyArenas);
3387             }
3388         }
3389     }
3390 
3391     AutoLockGC lock(rt);
3392     ReleaseArenaList(rt, emptyArenas, lock);
3393     while (!zones.isEmpty())
3394         zones.removeFront();
3395 }
3396 
3397 void
3398 GCRuntime::assertBackgroundSweepingFinished()
3399 {
3400 #ifdef DEBUG
3401     MOZ_ASSERT(backgroundSweepZones.isEmpty());
3402     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
3403         for (auto i : AllAllocKinds()) {
3404             MOZ_ASSERT(!zone->arenas.arenaListsToSweep[i]);
3405             MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(i));
3406         }
3407     }
3408     MOZ_ASSERT(freeLifoAlloc.computedSizeOfExcludingThis() == 0);
3409 #endif
3410 }
3411 
3412 unsigned
3413 js::GetCPUCount()
3414 {
3415     static unsigned ncpus = 0;
3416     if (ncpus == 0) {
3417 # ifdef XP_WIN
3418         SYSTEM_INFO sysinfo;
3419         GetSystemInfo(&sysinfo);
3420         ncpus = unsigned(sysinfo.dwNumberOfProcessors);
3421 # else
3422         long n = sysconf(_SC_NPROCESSORS_ONLN);
3423         ncpus = (n > 0) ? unsigned(n) : 1;
3424 # endif
3425     }
3426     return ncpus;
3427 }
3428 
3429 bool
3430 GCHelperState::init()
3431 {
3432     if (!(done = PR_NewCondVar(rt->gc.lock)))
3433         return false;
3434 
3435     return true;
3436 }
3437 
3438 void
3439 GCHelperState::finish()
3440 {
3441     if (!rt->gc.lock) {
3442         MOZ_ASSERT(state_ == IDLE);
3443         return;
3444     }
3445 
3446     // Wait for any lingering background sweeping to finish.
3447     waitBackgroundSweepEnd();
3448 
3449     if (done)
3450         PR_DestroyCondVar(done);
3451 }
3452 
3453 GCHelperState::State
3454 GCHelperState::state()
3455 {
3456     MOZ_ASSERT(rt->gc.currentThreadOwnsGCLock());
3457     return state_;
3458 }
3459 
3460 void
3461 GCHelperState::setState(State state)
3462 {
3463     MOZ_ASSERT(rt->gc.currentThreadOwnsGCLock());
3464     state_ = state;
3465 }
3466 
3467 void
3468 GCHelperState::startBackgroundThread(State newState)
3469 {
3470     MOZ_ASSERT(!thread && state() == IDLE && newState != IDLE);
3471     setState(newState);
3472 
3473     {
3474         AutoEnterOOMUnsafeRegion noOOM;
3475         if (!HelperThreadState().gcHelperWorklist().append(this))
3476             noOOM.crash("Could not add to pending GC helpers list");
3477     }
3478 
3479     HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER);
3480 }
3481 
3482 void
3483 GCHelperState::waitForBackgroundThread()
3484 {
3485     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3486 
3487 #ifdef DEBUG
3488     rt->gc.lockOwner.value = nullptr;
3489 #endif
3490     PR_WaitCondVar(done, PR_INTERVAL_NO_TIMEOUT);
3491 #ifdef DEBUG
3492     rt->gc.lockOwner.value = PR_GetCurrentThread();
3493 #endif
3494 }
3495 
3496 void
3497 GCHelperState::work()
3498 {
3499     MOZ_ASSERT(CanUseExtraThreads());
3500 
3501     AutoLockGC lock(rt);
3502 
3503     MOZ_ASSERT(!thread);
3504     thread = PR_GetCurrentThread();
3505 
3506     TraceLoggerThread* logger = TraceLoggerForCurrentThread();
3507 
3508     switch (state()) {
3509 
3510       case IDLE:
3511         MOZ_CRASH("GC helper triggered on idle state");
3512         break;
3513 
3514       case SWEEPING: {
3515         AutoTraceLog logSweeping(logger, TraceLogger_GCSweeping);
3516         doSweep(lock);
3517         MOZ_ASSERT(state() == SWEEPING);
3518         break;
3519       }
3520 
3521     }
3522 
3523     setState(IDLE);
3524     thread = nullptr;
3525 
3526     PR_NotifyAllCondVar(done);
3527 }
3528 
3529 BackgroundAllocTask::BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool)
3530   : runtime(rt),
3531     chunkPool_(pool),
3532     enabled_(CanUseExtraThreads() && GetCPUCount() >= 2)
3533 {
3534 }
3535 
3536 /* virtual */ void
3537 BackgroundAllocTask::run()
3538 {
3539     TraceLoggerThread* logger = TraceLoggerForCurrentThread();
3540     AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);
3541 
3542     AutoLockGC lock(runtime);
3543     while (!cancel_ && runtime->gc.wantBackgroundAllocation(lock)) {
3544         Chunk* chunk;
3545         {
3546             AutoUnlockGC unlock(lock);
3547             chunk = Chunk::allocate(runtime);
3548             if (!chunk)
3549                 break;
3550         }
3551         chunkPool_.push(chunk);
3552     }
3553 }
3554 
3555 void
3556 GCRuntime::queueZonesForBackgroundSweep(ZoneList& zones)
3557 {
3558     AutoLockHelperThreadState helperLock;
3559     AutoLockGC lock(rt);
3560     backgroundSweepZones.transferFrom(zones);
3561     helperState.maybeStartBackgroundSweep(lock);
3562 }
3563 
3564 void
3565 GCRuntime::freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo)
3566 {
3567     MOZ_ASSERT(rt->isHeapBusy());
3568     AutoLockGC lock(rt);
3569     freeLifoAlloc.transferUnusedFrom(lifo);
3570 }
3571 
3572 void
3573 GCRuntime::freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo)
3574 {
3575     MOZ_ASSERT(rt->isHeapBusy());
3576     AutoLockGC lock(rt);
3577     freeLifoAlloc.transferFrom(lifo);
3578 }
3579 
3580 void
3581 GCHelperState::maybeStartBackgroundSweep(const AutoLockGC& lock)
3582 {
3583     MOZ_ASSERT(CanUseExtraThreads());
3584 
3585     if (state() == IDLE)
3586         startBackgroundThread(SWEEPING);
3587 }
3588 
3589 void
3590 GCHelperState::startBackgroundShrink(const AutoLockGC& lock)
3591 {
3592     MOZ_ASSERT(CanUseExtraThreads());
3593     switch (state()) {
3594       case IDLE:
3595         shrinkFlag = true;
3596         startBackgroundThread(SWEEPING);
3597         break;
3598       case SWEEPING:
3599         shrinkFlag = true;
3600         break;
3601       default:
3602         MOZ_CRASH("Invalid GC helper thread state.");
3603     }
3604 }
3605 
3606 void
3607 GCHelperState::waitBackgroundSweepEnd()
3608 {
3609     AutoLockGC lock(rt);
3610     while (state() == SWEEPING)
3611         waitForBackgroundThread();
3612     if (!rt->gc.isIncrementalGCInProgress())
3613         rt->gc.assertBackgroundSweepingFinished();
3614 }
3615 
3616 void
3617 GCHelperState::doSweep(AutoLockGC& lock)
3618 {
3619     // The main thread may call queueZonesForBackgroundSweep() or
3620     // ShrinkGCBuffers() while this is running, so we must check that there is
3621     // no more work to do before exiting.
3622 
3623     do {
3624         while (!rt->gc.backgroundSweepZones.isEmpty()) {
3625             AutoSetThreadIsSweeping threadIsSweeping;
3626 
3627             ZoneList zones;
3628             zones.transferFrom(rt->gc.backgroundSweepZones);
3629             LifoAlloc freeLifoAlloc(JSRuntime::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
3630             freeLifoAlloc.transferFrom(&rt->gc.freeLifoAlloc);
3631 
3632             AutoUnlockGC unlock(lock);
3633             rt->gc.sweepBackgroundThings(zones, freeLifoAlloc, BackgroundThread);
3634         }
3635 
3636         bool shrinking = shrinkFlag;
3637         shrinkFlag = false;
3638         rt->gc.expireChunksAndArenas(shrinking, lock);
3639     } while (!rt->gc.backgroundSweepZones.isEmpty() || shrinkFlag);
3640 }
3641 
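// The shape of doSweep() above matters: the main thread can queue more zones
// or set shrinkFlag at any point while the helper has dropped the lock, so
// the helper re-checks both under the lock and only goes idle once they are
// really empty. A self-contained sketch of that "drain until truly empty"
// loop using standard library types (illustrative only, not the runtime's
// own locking primitives):
//
//     #include <deque>
//     #include <mutex>
//
//     static std::mutex lock;
//     static std::deque<int> work;          // refilled by the main thread
//     static bool shrinkRequested = false;  // may be set while sweeping
//
//     static void sweepOne(int) {}          // stands in for sweeping a zone
//     static void expire(bool) {}           // stands in for expireChunksAndArenas
//
//     void helperDrain() {
//         std::unique_lock<std::mutex> guard(lock);
//         do {
//             while (!work.empty()) {
//                 int item = work.front();
//                 work.pop_front();
//                 guard.unlock();           // the slow part runs unlocked
//                 sweepOne(item);
//                 guard.lock();
//             }
//             bool shrink = shrinkRequested;
//             shrinkRequested = false;
//             expire(shrink);
//         } while (!work.empty() || shrinkRequested);
//     }
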
3642 bool
3643 GCHelperState::onBackgroundThread()
3644 {
3645     return PR_GetCurrentThread() == thread;
3646 }
3647 
3648 bool
3649 GCRuntime::shouldReleaseObservedTypes()
3650 {
3651     bool releaseTypes = false;
3652 
3653 #ifdef JS_GC_ZEAL
3654     if (zealMode != 0)
3655         releaseTypes = true;
3656 #endif
3657 
3658     /* We may miss the exact target GC due to resets. */
3659     if (majorGCNumber >= jitReleaseNumber)
3660         releaseTypes = true;
3661 
3662     if (releaseTypes)
3663         jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
3664 
3665     return releaseTypes;
3666 }
3667 
3668 struct IsAboutToBeFinalizedFunctor {
3669     template <typename T> bool operator()(Cell** t) {
3670         mozilla::DebugOnly<const Cell*> prior = *t;
3671         bool result = IsAboutToBeFinalizedUnbarriered(reinterpret_cast<T**>(t));
3672         // Sweep should not have to deal with moved pointers, since moving GC
3673         // handles updating the UID table manually.
3674         MOZ_ASSERT(*t == prior);
3675         return result;
3676     }
3677 };
3678 
3679 /* static */ bool
3680 UniqueIdGCPolicy::needsSweep(Cell** cell, uint64_t*)
3681 {
3682     return DispatchTraceKindTyped(IsAboutToBeFinalizedFunctor(), (*cell)->getTraceKind(), cell);
3683 }
3684 
3685 void
3686 JS::Zone::sweepUniqueIds(js::FreeOp* fop)
3687 {
3688     uniqueIds_.sweep();
3689 }
3690 
3691 /*
3692  * It's simpler if we preserve the invariant that every zone has at least one
3693  * compartment. If we know we're deleting the entire zone, then
3694  * SweepCompartments is allowed to delete all compartments. In this case,
3695  * |keepAtleastOne| is false. If some objects remain in the zone so that it
3696  * cannot be deleted, then we set |keepAtleastOne| to true, which prohibits
3697  * SweepCompartments from deleting every compartment. Instead, it preserves an
3698  * arbitrary compartment in the zone.
3699  */
3700 void
3701 Zone::sweepCompartments(FreeOp* fop, bool keepAtleastOne, bool destroyingRuntime)
3702 {
3703     JSRuntime* rt = runtimeFromMainThread();
3704     JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
3705 
3706     JSCompartment** read = compartments.begin();
3707     JSCompartment** end = compartments.end();
3708     JSCompartment** write = read;
3709     bool foundOne = false;
3710     while (read < end) {
3711         JSCompartment* comp = *read++;
3712         MOZ_ASSERT(!rt->isAtomsCompartment(comp));
3713 
3714         /*
3715          * Don't delete the last compartment if all the ones before it were
3716          * deleted and keepAtleastOne is true.
3717          */
3718         bool dontDelete = read == end && !foundOne && keepAtleastOne;
3719         if ((!comp->marked && !dontDelete) || destroyingRuntime) {
3720             if (callback)
3721                 callback(fop, comp);
3722             if (comp->principals())
3723                 JS_DropPrincipals(rt, comp->principals());
3724             js_delete(comp);
3725         } else {
3726             *write++ = comp;
3727             foundOne = true;
3728         }
3729     }
3730     compartments.resize(write - compartments.begin());
3731     MOZ_ASSERT_IF(keepAtleastOne, !compartments.empty());
3732 }
3733 
3734 void
3735 GCRuntime::sweepZones(FreeOp* fop, bool destroyingRuntime)
3736 {
3737     MOZ_ASSERT_IF(destroyingRuntime, rt->gc.numActiveZoneIters == 0);
3738     if (rt->gc.numActiveZoneIters)
3739         return;
3740 
3741     AutoLockGC lock(rt); // Avoid race with background sweeping.
3742 
3743     JSZoneCallback callback = rt->destroyZoneCallback;
3744 
3745     /* Skip the atomsCompartment zone. */
3746     Zone** read = zones.begin() + 1;
3747     Zone** end = zones.end();
3748     Zone** write = read;
3749     MOZ_ASSERT(zones.length() >= 1);
3750     MOZ_ASSERT(zones[0]->isAtomsZone());
3751 
3752     while (read < end) {
3753         Zone* zone = *read++;
3754 
3755         if (zone->wasGCStarted()) {
3756             if ((!zone->isQueuedForBackgroundSweep() &&
3757                  zone->arenas.arenaListsAreEmpty() &&
3758                  !zone->hasMarkedCompartments()) || destroyingRuntime)
3759             {
3760                 zone->arenas.checkEmptyFreeLists();
3761                 AutoUnlockGC unlock(lock);
3762 
3763                 if (callback)
3764                     callback(zone);
3765                 zone->sweepCompartments(fop, false, destroyingRuntime);
3766                 MOZ_ASSERT(zone->compartments.empty());
3767                 fop->delete_(zone);
3768                 continue;
3769             }
3770             zone->sweepCompartments(fop, true, destroyingRuntime);
3771         }
3772         *write++ = zone;
3773     }
3774     zones.resize(write - zones.begin());
3775 }
3776 
3777 void
3778 GCRuntime::purgeRuntime()
3779 {
3780     for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
3781         comp->purge();
3782 
3783     freeUnusedLifoBlocksAfterSweeping(&rt->tempLifoAlloc);
3784 
3785     rt->interpreterStack().purge(rt);
3786     rt->gsnCache.purge();
3787     rt->scopeCoordinateNameCache.purge();
3788     rt->newObjectCache.purge();
3789     rt->nativeIterCache.purge();
3790     rt->uncompressedSourceCache.purge();
3791     rt->evalCache.clear();
3792 
3793     if (!rt->hasActiveCompilations())
3794         rt->parseMapPool().purgeAll();
3795 }
3796 
3797 bool
3798 GCRuntime::shouldPreserveJITCode(JSCompartment* comp, int64_t currentTime,
3799                                  JS::gcreason::Reason reason, bool canAllocateMoreCode)
3800 {
3801     if (cleanUpEverything)
3802         return false;
3803     if (!canAllocateMoreCode)
3804         return false;
3805 
3806     if (alwaysPreserveCode)
3807         return true;
3808     if (comp->preserveJitCode())
3809         return true;
3810     if (comp->lastAnimationTime + PRMJ_USEC_PER_SEC >= currentTime)
3811         return true;
3812     if (reason == JS::gcreason::DEBUG_GC)
3813         return true;
3814 
3815     return false;
3816 }
3817 
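// The animation test above is simple microsecond arithmetic: PRMJ_USEC_PER_SEC
// is one second expressed in microseconds, so JIT code is preserved whenever
// the compartment has animated within roughly the last second. For example,
// with currentTime == 5,000,000 us, any lastAnimationTime >= 4,000,000 us
// keeps the code alive.
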
3818 #ifdef DEBUG
3819 class CompartmentCheckTracer : public JS::CallbackTracer
3820 {
3821     void onChild(const JS::GCCellPtr& thing) override;
3822 
3823   public:
3824     explicit CompartmentCheckTracer(JSRuntime* rt)
3825       : JS::CallbackTracer(rt), src(nullptr), zone(nullptr), compartment(nullptr)
3826     {}
3827 
3828     Cell* src;
3829     JS::TraceKind srcKind;
3830     Zone* zone;
3831     JSCompartment* compartment;
3832 };
3833 
3834 static bool
3835 InCrossCompartmentMap(JSObject* src, Cell* dst, JS::TraceKind dstKind)
3836 {
3837     JSCompartment* srccomp = src->compartment();
3838 
3839     if (dstKind == JS::TraceKind::Object) {
3840         Value key = ObjectValue(*static_cast<JSObject*>(dst));
3841         if (WrapperMap::Ptr p = srccomp->lookupWrapper(key)) {
3842             if (*p->value().unsafeGet() == ObjectValue(*src))
3843                 return true;
3844         }
3845     }
3846 
3847     /*
3848      * If the cross-compartment edge is caused by the debugger, then we don't
3849      * know the right hashtable key, so we have to iterate.
3850      */
3851     for (JSCompartment::WrapperEnum e(srccomp); !e.empty(); e.popFront()) {
3852         if (e.front().key().wrapped == dst && ToMarkable(e.front().value()) == src)
3853             return true;
3854     }
3855 
3856     return false;
3857 }
3858 
3859 struct MaybeCompartmentFunctor {
3860     template <typename T> JSCompartment* operator()(T* t) { return t->maybeCompartment(); }
3861 };
3862 
3863 void
3864 CompartmentCheckTracer::onChild(const JS::GCCellPtr& thing)
3865 {
3866     TenuredCell* tenured = TenuredCell::fromPointer(thing.asCell());
3867 
3868     JSCompartment* comp = DispatchTyped(MaybeCompartmentFunctor(), thing);
3869     if (comp && compartment) {
3870         MOZ_ASSERT(comp == compartment || runtime()->isAtomsCompartment(comp) ||
3871                    (srcKind == JS::TraceKind::Object &&
3872                     InCrossCompartmentMap(static_cast<JSObject*>(src), tenured, thing.kind())));
3873     } else {
3874         MOZ_ASSERT(tenured->zone() == zone || tenured->zone()->isAtomsZone());
3875     }
3876 }
3877 
3878 void
3879 GCRuntime::checkForCompartmentMismatches()
3880 {
3881     if (disableStrictProxyCheckingCount)
3882         return;
3883 
3884     CompartmentCheckTracer trc(rt);
3885     for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
3886         trc.zone = zone;
3887         for (auto thingKind : AllAllocKinds()) {
3888             for (ZoneCellIterUnderGC i(zone, thingKind); !i.done(); i.next()) {
3889                 trc.src = i.getCell();
3890                 trc.srcKind = MapAllocToTraceKind(thingKind);
3891                 trc.compartment = DispatchTraceKindTyped(MaybeCompartmentFunctor(),
3892                                                          trc.src, trc.srcKind);
3893                 js::TraceChildren(&trc, trc.src, trc.srcKind);
3894             }
3895         }
3896     }
3897 }
3898 #endif
3899 
3900 static void
3901 RelazifyFunctions(Zone* zone, AllocKind kind)
3902 {
3903     MOZ_ASSERT(kind == AllocKind::FUNCTION ||
3904                kind == AllocKind::FUNCTION_EXTENDED);
3905 
3906     JSRuntime* rt = zone->runtimeFromMainThread();
3907 
3908     for (ZoneCellIterUnderGC i(zone, kind); !i.done(); i.next()) {
3909         JSFunction* fun = &i.get<JSObject>()->as<JSFunction>();
3910         if (fun->hasScript())
3911             fun->maybeRelazify(rt);
3912     }
3913 }
3914 
3915 bool
3916 GCRuntime::beginMarkPhase(JS::gcreason::Reason reason)
3917 {
3918     int64_t currentTime = PRMJ_Now();
3919 
3920 #ifdef DEBUG
3921     if (fullCompartmentChecks)
3922         checkForCompartmentMismatches();
3923 #endif
3924 
3925     isFull = true;
3926     bool any = false;
3927 
3928     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
3929         /* Assert that zone state is as we expect */
3930         MOZ_ASSERT(!zone->isCollecting());
3931         MOZ_ASSERT(!zone->compartments.empty());
3932 #ifdef DEBUG
3933         for (auto i : AllAllocKinds()) { // Braces needed to appease MSVC 2013.
3934             MOZ_ASSERT(!zone->arenas.arenaListsToSweep[i]);
3935         }
3936 #endif
3937 
3938         /* Set up which zones will be collected. */
3939         if (zone->isGCScheduled()) {
3940             if (!zone->isAtomsZone()) {
3941                 any = true;
3942                 zone->setGCState(Zone::Mark);
3943             }
3944         } else {
3945             isFull = false;
3946         }
3947 
3948         zone->setPreservingCode(false);
3949     }
3950 
3951     // Discard JIT code more aggressively if the process is approaching its
3952     // executable code limit.
3953     bool canAllocateMoreCode = jit::CanLikelyAllocateMoreExecutableMemory();
3954 
3955     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) {
3956         c->marked = false;
3957         c->scheduledForDestruction = false;
3958         c->maybeAlive = false;
3959         if (shouldPreserveJITCode(c, currentTime, reason, canAllocateMoreCode))
3960             c->zone()->setPreservingCode(true);
3961     }
3962 
3963     if (!rt->gc.cleanUpEverything && canAllocateMoreCode) {
3964         if (JSCompartment* comp = jit::TopmostIonActivationCompartment(rt))
3965             comp->zone()->setPreservingCode(true);
3966     }
3967 
3968     /*
3969      * Atoms are not in the cross-compartment map. So if there are any
3970      * zones that are not being collected, we are not allowed to collect
3971      * atoms. Otherwise, the non-collected zones could contain pointers
3972      * to atoms that we would miss.
3973      *
3974      * keepAtoms() will only change on the main thread, which we are currently
3975      * on. If the value of keepAtoms() changes between GC slices, then we'll
3976      * cancel the incremental GC. See IsIncrementalGCSafe.
3977      */
3978     if (isFull && !rt->keepAtoms()) {
3979         Zone* atomsZone = rt->atomsCompartment()->zone();
3980         if (atomsZone->isGCScheduled()) {
3981             MOZ_ASSERT(!atomsZone->isCollecting());
3982             atomsZone->setGCState(Zone::Mark);
3983             any = true;
3984         }
3985     }
3986 
3987     /* Check that at least one zone is scheduled for collection. */
3988     if (!any)
3989         return false;
3990 
3991     /*
3992      * At the end of each incremental slice, we call prepareForIncrementalGC,
3993      * which marks objects in all arenas that we're currently allocating
3994      * into. This can cause leaks if unreachable objects are in these
3995      * arenas. This purge call ensures that we only mark arenas that have had
3996      * allocations after the incremental GC started.
3997      */
3998     if (isIncremental) {
3999         for (GCZonesIter zone(rt); !zone.done(); zone.next())
4000             zone->arenas.purge();
4001     }
4002 
4003     MemProfiler::MarkTenuredStart(rt);
4004     marker.start();
4005     GCMarker* gcmarker = &marker;
4006 
4007     /* For non-incremental GC the following sweep discards the jit code. */
4008     if (isIncremental) {
4009         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4010             gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_DISCARD_CODE);
4011             zone->discardJitCode(rt->defaultFreeOp());
4012         }
4013     }
4014 
4015     /*
4016      * Relazify functions after discarding JIT code (we can't relazify
4017      * functions with JIT code) and before the actual mark phase, so that
4018      * the current GC can collect the JSScripts we're unlinking here.
4019      */
4020     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4021         gcstats::AutoPhase ap(stats, gcstats::PHASE_RELAZIFY_FUNCTIONS);
4022         RelazifyFunctions(zone, AllocKind::FUNCTION);
4023         RelazifyFunctions(zone, AllocKind::FUNCTION_EXTENDED);
4024     }
4025 
4026     startNumber = number;
4027 
4028     /*
4029      * We must purge the runtime at the beginning of an incremental GC. The
4030      * danger if we purge later is that the snapshot invariant of incremental
4031      * GC will be broken, as follows. If some object is reachable only through
4032      * some cache (say the dtoaCache) then it will not be part of the snapshot.
4033      * If we purge after root marking, then the mutator could obtain a pointer
4034      * to the object and start using it. This object might never be marked, so
4035      * a GC hazard would exist.
4036      */
4037     {
4038         gcstats::AutoPhase ap(stats, gcstats::PHASE_PURGE);
4039         purgeRuntime();
4040     }
4041 
4042     /*
4043      * Mark phase.
4044      */
4045     gcstats::AutoPhase ap1(stats, gcstats::PHASE_MARK);
4046 
4047     {
4048         gcstats::AutoPhase ap(stats, gcstats::PHASE_UNMARK);
4049 
4050         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4051             /* Unmark everything in the zones being collected. */
4052             zone->arenas.unmarkAll();
4053         }
4054 
4055         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4056             /* Unmark all weak maps in the zones being collected. */
4057             WeakMapBase::unmarkZone(zone);
4058         }
4059 
4060         if (isFull)
4061             UnmarkScriptData(rt);
4062     }
4063 
4064     markRuntime(gcmarker, MarkRuntime);
4065 
4066     gcstats::AutoPhase ap2(stats, gcstats::PHASE_MARK_ROOTS);
4067 
4068     if (isIncremental) {
4069         gcstats::AutoPhase ap3(stats, gcstats::PHASE_BUFFER_GRAY_ROOTS);
4070         bufferGrayRoots();
4071     }
4072 
4073     markCompartments();
4074 
4075     foundBlackGrayEdges = false;
4076 
4077     return true;
4078 }
4079 
4080 void
4081 GCRuntime::markCompartments()
4082 {
4083     gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_COMPARTMENTS);
4084 
4085     /*
4086      * This code ensures that if a compartment is "dead", then it will be
4087      * collected in this GC. A compartment is considered dead if its maybeAlive
4088      * flag is false. The maybeAlive flag is set if:
4089      *   (1) the compartment has incoming cross-compartment edges, or
4090      *   (2) an object in the compartment was marked during root marking, either
4091      *       as a black root or a gray root.
4092      * If maybeAlive is false, then we set the scheduledForDestruction flag.
4093      * At the end of the GC, we look for compartments where
4094      * scheduledForDestruction is true. These are compartments that were somehow
4095      * "revived" during the incremental GC. If any are found, we do a special,
4096      * non-incremental GC of those compartments to try to collect them.
4097      *
4098      * Compartments can be revived for a variety of reasons. One reason is bug
4099      * 811587, where a reflector that was dead can be revived by DOM code that
4100      * still refers to the underlying DOM node.
4101      *
4102      * Read barriers and allocations can also cause revival. This might happen
4103      * during a function like JS_TransplantObject, which iterates over all
4104      * compartments, live or dead, and operates on their objects. See bug 803376
4105      * for details on this problem. To avoid the problem, we try to avoid
4106      * allocation and read barriers during JS_TransplantObject and the like.
4107      */
4108 
4109     /* Set the maybeAlive flag based on cross-compartment edges. */
4110     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
4111         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
4112             const CrossCompartmentKey& key = e.front().key();
4113             JSCompartment* dest;
4114             switch (key.kind) {
4115               case CrossCompartmentKey::ObjectWrapper:
4116               case CrossCompartmentKey::DebuggerObject:
4117               case CrossCompartmentKey::DebuggerSource:
4118               case CrossCompartmentKey::DebuggerEnvironment:
4119                 dest = static_cast<JSObject*>(key.wrapped)->compartment();
4120                 break;
4121               case CrossCompartmentKey::DebuggerScript:
4122                 dest = static_cast<JSScript*>(key.wrapped)->compartment();
4123                 break;
4124               default:
4125                 dest = nullptr;
4126                 break;
4127             }
4128             if (dest)
4129                 dest->maybeAlive = true;
4130         }
4131     }
4132 
4133     /*
4134      * For black roots, code in gc/Marking.cpp will already have set maybeAlive
4135      * during MarkRuntime.
4136      */
4137 
4138     for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
4139         if (!c->maybeAlive && !rt->isAtomsCompartment(c))
4140             c->scheduledForDestruction = true;
4141     }
4142 }
4143 
4144 template <class ZoneIterT>
4145 void
4146 GCRuntime::markWeakReferences(gcstats::Phase phase)
4147 {
4148     MOZ_ASSERT(marker.isDrained());
4149 
4150     gcstats::AutoPhase ap1(stats, phase);
4151 
4152     marker.enterWeakMarkingMode();
4153 
4154     // TODO bug 1167452: Make weak marking incremental
4155     SliceBudget budget = SliceBudget::unlimited();
4156     marker.drainMarkStack(budget);
4157 
4158     for (;;) {
4159         bool markedAny = false;
4160         if (!marker.isWeakMarkingTracer()) {
4161             for (ZoneIterT zone(rt); !zone.done(); zone.next())
4162                 markedAny |= WeakMapBase::markZoneIteratively(zone, &marker);
4163         }
4164         for (CompartmentsIterT<ZoneIterT> c(rt); !c.done(); c.next()) {
4165             if (c->watchpointMap)
4166                 markedAny |= c->watchpointMap->markIteratively(&marker);
4167         }
4168         markedAny |= Debugger::markAllIteratively(&marker);
4169         markedAny |= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker);
4170 
4171         if (!markedAny)
4172             break;
4173 
4174         auto unlimited = SliceBudget::unlimited();
4175         marker.drainMarkStack(unlimited);
4176     }
4177     MOZ_ASSERT(marker.isDrained());
4178 
4179     marker.leaveWeakMarkingMode();
4180 }
4181 
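// The loop in markWeakReferences() above is a fixed-point computation:
// marking a weak map value can make further keys reachable, which can mark
// further values, so iteration repeats until a full pass marks nothing new.
// The same shape in miniature, where a value becomes live only once its key
// is live (an illustrative sketch, not the marker's real data structures):
//
//     #include <map>
//     #include <set>
//
//     std::set<int> live = {1};
//     std::map<int, int> weak = {{1, 2}, {2, 3}, {5, 6}};
//     bool markedAny;
//     do {
//         markedAny = false;
//         for (const auto& entry : weak) {
//             if (live.count(entry.first) && !live.count(entry.second)) {
//                 live.insert(entry.second);  // 2, then 3, become live; 6 never does
//                 markedAny = true;
//             }
//         }
//     } while (markedAny);   // stop once a full pass adds nothing, as above
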
4182 void
4183 GCRuntime::markWeakReferencesInCurrentGroup(gcstats::Phase phase)
4184 {
4185     markWeakReferences<GCZoneGroupIter>(phase);
4186 }
4187 
4188 template <class ZoneIterT, class CompartmentIterT>
4189 void
4190 GCRuntime::markGrayReferences(gcstats::Phase phase)
4191 {
4192     gcstats::AutoPhase ap(stats, phase);
4193     if (hasBufferedGrayRoots()) {
4194         for (ZoneIterT zone(rt); !zone.done(); zone.next())
4195             markBufferedGrayRoots(zone);
4196     } else {
4197         MOZ_ASSERT(!isIncremental);
4198         if (JSTraceDataOp op = grayRootTracer.op)
4199             (*op)(&marker, grayRootTracer.data);
4200     }
4201     auto unlimited = SliceBudget::unlimited();
4202     marker.drainMarkStack(unlimited);
4203 }
4204 
4205 void
4206 GCRuntime::markGrayReferencesInCurrentGroup(gcstats::Phase phase)
4207 {
4208     markGrayReferences<GCZoneGroupIter, GCCompartmentGroupIter>(phase);
4209 }
4210 
4211 void
4212 GCRuntime::markAllWeakReferences(gcstats::Phase phase)
4213 {
4214     markWeakReferences<GCZonesIter>(phase);
4215 }
4216 
4217 void
4218 GCRuntime::markAllGrayReferences(gcstats::Phase phase)
4219 {
4220     markGrayReferences<GCZonesIter, GCCompartmentsIter>(phase);
4221 }
4222 
4223 #ifdef DEBUG
4224 
4225 struct GCChunkHasher {
4226     typedef gc::Chunk* Lookup;
4227 
4228     /*
4229      * Strip zeros for better distribution after multiplying by the golden
4230      * ratio.
4231      */
4232     static HashNumber hash(gc::Chunk* chunk) {
4233         MOZ_ASSERT(!(uintptr_t(chunk) & gc::ChunkMask));
4234         return HashNumber(uintptr_t(chunk) >> gc::ChunkShift);
4235     }
4236 
4237     static bool match(gc::Chunk* k, gc::Chunk* l) {
4238         MOZ_ASSERT(!(uintptr_t(k) & gc::ChunkMask));
4239         MOZ_ASSERT(!(uintptr_t(l) & gc::ChunkMask));
4240         return k == l;
4241     }
4242 };
4243 
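// Why the shift in GCChunkHasher::hash() matters: chunks are ChunkSize
// aligned, so the low ChunkShift bits of every chunk pointer are zero and
// would otherwise contribute nothing to the hash. For example, assuming 1 MiB
// chunks (ChunkShift == 20), the addresses 0x7f0000000000 and 0x7f0000100000
// hash to the consecutive values 0x7f000000 and 0x7f000001 instead of two
// keys that differ only above twenty trailing zero bits.
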
4244 class js::gc::MarkingValidator
4245 {
4246   public:
4247     explicit MarkingValidator(GCRuntime* gc);
4248     ~MarkingValidator();
4249     void nonIncrementalMark();
4250     void validate();
4251 
4252   private:
4253     GCRuntime* gc;
4254     bool initialized;
4255 
4256     typedef HashMap<Chunk*, ChunkBitmap*, GCChunkHasher, SystemAllocPolicy> BitmapMap;
4257     BitmapMap map;
4258 };
4259 
4260 #endif // DEBUG
4261 
4262 #ifdef JS_GC_MARKING_VALIDATION
4263 
4264 js::gc::MarkingValidator::MarkingValidator(GCRuntime* gc)
4265   : gc(gc),
4266     initialized(false)
4267 {}
4268 
4269 js::gc::MarkingValidator::~MarkingValidator()
4270 {
4271     if (!map.initialized())
4272         return;
4273 
4274     for (BitmapMap::Range r(map.all()); !r.empty(); r.popFront())
4275         js_delete(r.front().value());
4276 }
4277 
4278 void
4279 js::gc::MarkingValidator::nonIncrementalMark()
4280 {
4281     /*
4282      * Perform a non-incremental mark for all collecting zones and record
4283      * the results for later comparison.
4284      *
4285      * Currently this does not validate gray marking.
4286      */
4287 
4288     if (!map.init())
4289         return;
4290 
4291     JSRuntime* runtime = gc->rt;
4292     GCMarker* gcmarker = &gc->marker;
4293 
4294     gc->waitBackgroundSweepEnd();
4295 
4296     /* Save existing mark bits. */
4297     for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
4298         ChunkBitmap* bitmap = &chunk->bitmap;
4299         ChunkBitmap* entry = js_new<ChunkBitmap>();
4300         if (!entry)
4301             return;
4302 
4303         memcpy((void*)entry->bitmap, (void*)bitmap->bitmap, sizeof(bitmap->bitmap));
4304         if (!map.putNew(chunk, entry))
4305             return;
4306     }
4307 
4308     /*
4309      * Temporarily clear the weakmaps' mark flags for the compartments we are
4310      * collecting.
4311      */
4312 
4313     WeakMapSet markedWeakMaps;
4314     if (!markedWeakMaps.init())
4315         return;
4316 
4317     /*
4318      * For saving, smush all of the keys into one big table and split them back
4319      * up into per-zone tables when restoring.
4320      */
4321     gc::WeakKeyTable savedWeakKeys(SystemAllocPolicy(), runtime->randomHashCodeScrambler());
4322     if (!savedWeakKeys.init())
4323         return;
4324 
4325     for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
4326         if (!WeakMapBase::saveZoneMarkedWeakMaps(zone, markedWeakMaps))
4327             return;
4328 
4329         for (gc::WeakKeyTable::Range r = zone->gcWeakKeys.all(); !r.empty(); r.popFront()) {
4330             AutoEnterOOMUnsafeRegion oomUnsafe;
4331             if (!savedWeakKeys.put(Move(r.front().key), Move(r.front().value)))
4332                 oomUnsafe.crash("saving weak keys table for validator");
4333         }
4334 
4335         zone->gcWeakKeys.clear();
4336     }
4337 
4338     /*
4339      * After this point, the function should run to completion, so we shouldn't
4340      * do anything fallible.
4341      */
4342     initialized = true;
4343 
4344     /* Re-do all the marking, but non-incrementally. */
4345     js::gc::State state = gc->incrementalState;
4346     gc->incrementalState = MARK_ROOTS;
4347 
4348     {
4349         gcstats::AutoPhase ap(gc->stats, gcstats::PHASE_MARK);
4350 
4351         {
4352             gcstats::AutoPhase ap(gc->stats, gcstats::PHASE_UNMARK);
4353 
4354             for (GCZonesIter zone(runtime); !zone.done(); zone.next())
4355                 WeakMapBase::unmarkZone(zone);
4356 
4357             MOZ_ASSERT(gcmarker->isDrained());
4358             gcmarker->reset();
4359 
4360             for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next())
4361                 chunk->bitmap.clear();
4362         }
4363 
4364         gc->markRuntime(gcmarker, GCRuntime::MarkRuntime);
4365 
4366         auto unlimited = SliceBudget::unlimited();
4367         gc->incrementalState = MARK;
4368         gc->marker.drainMarkStack(unlimited);
4369     }
4370 
4371     gc->incrementalState = SWEEP;
4372     {
4373         gcstats::AutoPhase ap1(gc->stats, gcstats::PHASE_SWEEP);
4374         gcstats::AutoPhase ap2(gc->stats, gcstats::PHASE_SWEEP_MARK);
4375 
4376         gc->markAllWeakReferences(gcstats::PHASE_SWEEP_MARK_WEAK);
4377 
4378         /* Update zone state for gray marking. */
4379         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
4380             MOZ_ASSERT(zone->isGCMarkingBlack());
4381             zone->setGCState(Zone::MarkGray);
4382         }
4383         gc->marker.setMarkColorGray();
4384 
4385         gc->markAllGrayReferences(gcstats::PHASE_SWEEP_MARK_GRAY);
4386         gc->markAllWeakReferences(gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
4387 
4388         /* Restore zone state. */
4389         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
4390             MOZ_ASSERT(zone->isGCMarkingGray());
4391             zone->setGCState(Zone::Mark);
4392         }
4393         MOZ_ASSERT(gc->marker.isDrained());
4394         gc->marker.setMarkColorBlack();
4395     }
4396 
4397     /* Take a copy of the non-incremental mark state and restore the original. */
4398     for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
4399         ChunkBitmap* bitmap = &chunk->bitmap;
4400         ChunkBitmap* entry = map.lookup(chunk)->value();
4401         Swap(*entry, *bitmap);
4402     }
4403 
4404     for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
4405         WeakMapBase::unmarkZone(zone);
4406         zone->gcWeakKeys.clear();
4407     }
4408 
4409     WeakMapBase::restoreMarkedWeakMaps(markedWeakMaps);
4410 
4411     for (gc::WeakKeyTable::Range r = savedWeakKeys.all(); !r.empty(); r.popFront()) {
4412         AutoEnterOOMUnsafeRegion oomUnsafe;
4413         Zone* zone = gc::TenuredCell::fromPointer(r.front().key.asCell())->zone();
4414         if (!zone->gcWeakKeys.put(Move(r.front().key), Move(r.front().value)))
4415             oomUnsafe.crash("restoring weak keys table for validator");
4416     }
4417 
4418     gc->incrementalState = state;
4419 }
4420 
4421 void
4422 js::gc::MarkingValidator::validate()
4423 {
4424     /*
4425      * Validates the incremental marking for a single compartment by comparing
4426      * the mark bits to those previously recorded for a non-incremental mark.
4427      */
4428 
4429     if (!initialized)
4430         return;
4431 
4432     gc->waitBackgroundSweepEnd();
4433 
4434     for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
4435         BitmapMap::Ptr ptr = map.lookup(chunk);
4436         if (!ptr)
4437             continue;  /* Allocated after we did the non-incremental mark. */
4438 
4439         ChunkBitmap* bitmap = ptr->value();
4440         ChunkBitmap* incBitmap = &chunk->bitmap;
4441 
4442         for (size_t i = 0; i < ArenasPerChunk; i++) {
4443             if (chunk->decommittedArenas.get(i))
4444                 continue;
4445             Arena* arena = &chunk->arenas[i];
4446             if (!arena->aheader.allocated())
4447                 continue;
4448             if (!arena->aheader.zone->isGCSweeping())
4449                 continue;
4450             if (arena->aheader.allocatedDuringIncremental)
4451                 continue;
4452 
4453             AllocKind kind = arena->aheader.getAllocKind();
4454             uintptr_t thing = arena->thingsStart(kind);
4455             uintptr_t end = arena->thingsEnd();
4456             while (thing < end) {
4457                 Cell* cell = (Cell*)thing;
4458 
4459                 /*
4460                  * If a non-incremental GC wouldn't have collected a cell, then
4461                  * an incremental GC won't collect it.
4462                  */
4463                 MOZ_ASSERT_IF(bitmap->isMarked(cell, BLACK), incBitmap->isMarked(cell, BLACK));
4464 
4465                 /*
4466                  * If the cycle collector isn't allowed to collect an object
4467                  * after a non-incremental GC has run, then it isn't allowed to
4468                  * collect it after an incremental GC.
4469                  */
4470                 MOZ_ASSERT_IF(!bitmap->isMarked(cell, GRAY), !incBitmap->isMarked(cell, GRAY));
4471 
4472                 thing += Arena::thingSize(kind);
4473             }
4474         }
4475     }
4476 }
4477 
4478 #endif // JS_GC_MARKING_VALIDATION
4479 
4480 void
4481 GCRuntime::computeNonIncrementalMarkingForValidation()
4482 {
4483 #ifdef JS_GC_MARKING_VALIDATION
4484     MOZ_ASSERT(!markingValidator);
4485     if (isIncremental && validate)
4486         markingValidator = js_new<MarkingValidator>(this);
4487     if (markingValidator)
4488         markingValidator->nonIncrementalMark();
4489 #endif
4490 }
4491 
4492 void
4493 GCRuntime::validateIncrementalMarking()
4494 {
4495 #ifdef JS_GC_MARKING_VALIDATION
4496     if (markingValidator)
4497         markingValidator->validate();
4498 #endif
4499 }
4500 
4501 void
4502 GCRuntime::finishMarkingValidation()
4503 {
4504 #ifdef JS_GC_MARKING_VALIDATION
4505     js_delete(markingValidator);
4506     markingValidator = nullptr;
4507 #endif
4508 }
4509 
4510 static void
4511 DropStringWrappers(JSRuntime* rt)
4512 {
4513     /*
4514      * String "wrappers" are dropped on GC because their presence would require
4515      * us to sweep the wrappers in all compartments every time we sweep a
4516      * compartment group.
4517      */
4518     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
4519         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
4520             if (e.front().key().kind == CrossCompartmentKey::StringWrapper)
4521                 e.removeFront();
4522         }
4523     }
4524 }
4525 
4526 /*
4527  * Group zones that must be swept at the same time.
4528  *
4529  * If compartment A has an edge to an unmarked object in compartment B, then we
4530  * must not sweep A in a later slice than we sweep B. That's because a write
4531  * barrier in A could lead to the unmarked object in B becoming marked.
4532  * However, if we had already swept that object, we would be in trouble.
4533  *
4534  * If we consider these dependencies as a graph, then all the compartments in
4535  * any strongly-connected component of this graph must be swept in the same
4536  * slice.
4537  *
4538  * Tarjan's algorithm is used to calculate the components.
4539  */
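
/*
 * Illustrative sketch (not part of the engine, and only a rough outline of
 * the real API used in findZoneGroups() below): each zone is added to a
 * ComponentFinder, outgoing edges are gathered via findOutgoingEdges(), and
 * the finder returns the strongly-connected components as a linked list of
 * sweep groups.
 *
 *   ComponentFinder<Zone> finder(stackLimit);
 *   for (GCZonesIter zone(rt); !zone.done(); zone.next())
 *       finder.addNode(zone);                  // edges gathered via callback
 *   Zone* groups = finder.getResultsList();    // groups linked by nextGroup()
 *   for (Zone* group = groups; group; group = group->nextGroup())
 *       sweepGroupInOneSlice(group);           // hypothetical helper
 */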
4540 
4541 void
4542 JSCompartment::findOutgoingEdges(ComponentFinder<JS::Zone>& finder)
4543 {
4544     for (js::WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
4545         CrossCompartmentKey::Kind kind = e.front().key().kind;
4546         MOZ_ASSERT(kind != CrossCompartmentKey::StringWrapper);
4547         TenuredCell& other = e.front().key().wrapped->asTenured();
4548         if (kind == CrossCompartmentKey::ObjectWrapper) {
4549             /*
4550              * Add an edge to the wrapped object's compartment if the wrapped
4551              * object is not marked black, to ensure that the wrapper's
4552              * compartment is not swept after the wrapped object's compartment.
4553              */
4554             if (!other.isMarked(BLACK) || other.isMarked(GRAY)) {
4555                 JS::Zone* w = other.zone();
4556                 if (w->isGCMarking())
4557                     finder.addEdgeTo(w);
4558             }
4559         } else {
4560             MOZ_ASSERT(kind == CrossCompartmentKey::DebuggerScript ||
4561                        kind == CrossCompartmentKey::DebuggerSource ||
4562                        kind == CrossCompartmentKey::DebuggerObject ||
4563                        kind == CrossCompartmentKey::DebuggerEnvironment);
4564             /*
4565              * Add edge for debugger object wrappers, to ensure (in conjunction
4566              * with call to Debugger::findCompartmentEdges below) that debugger
4567              * and debuggee objects are always swept in the same group.
4568              */
4569             JS::Zone* w = other.zone();
4570             if (w->isGCMarking())
4571                 finder.addEdgeTo(w);
4572         }
4573     }
4574 }
4575 
4576 bool
4577 JSCompartment::findDeadProxyZoneEdges(bool* foundAny)
4578 {
4579     // As an optimization, return whether any dead proxy objects are found in
4580     // this compartment so that if a zone has none, its cross compartment
4581     // wrappers do not need to be scanned.
4582     *foundAny = false;
4583     for (js::WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
4584         Value value = e.front().value().get();
4585         if (value.isObject()) {
4586             if (IsDeadProxyObject(&value.toObject())) {
4587                 *foundAny = true;
4588                 Zone* wrappedZone = static_cast<JSObject*>(e.front().key().wrapped)->zone();
4589                 if (!wrappedZone->isGCMarking())
4590                     continue;
4591                 if (!wrappedZone->gcZoneGroupEdges.put(zone()))
4592                     return false;
4593             }
4594         }
4595     }
4596 
4597     return true;
4598 }
4599 
4600 void
4601 Zone::findOutgoingEdges(ComponentFinder<JS::Zone>& finder)
4602 {
4603     /*
4604      * Any compartment may have a pointer to an atom in the atoms
4605      * compartment, and these aren't in the cross compartment map.
4606      */
4607     JSRuntime* rt = runtimeFromMainThread();
4608     if (rt->atomsCompartment()->zone()->isGCMarking())
4609         finder.addEdgeTo(rt->atomsCompartment()->zone());
4610 
4611     for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
4612         comp->findOutgoingEdges(finder);
4613 
4614     for (ZoneSet::Range r = gcZoneGroupEdges.all(); !r.empty(); r.popFront()) {
4615         if (r.front()->isGCMarking())
4616             finder.addEdgeTo(r.front());
4617     }
4618 
4619     Debugger::findZoneEdges(this, finder);
4620 }
4621 
4622 bool
4623 GCRuntime::findInterZoneEdges()
4624 {
4625     /*
4626      * Weakmaps which have keys with delegates in a different zone introduce the
4627      * need for zone edges from the delegate's zone to the weakmap zone.
4628      *
4629      * Since the edges point into and not away from the zone the weakmap is in,
4630      * we must find these edges in advance and store them in a set on the Zone.
4631      * If we run out of memory, we fall back to sweeping everything in one
4632      * group.
4633      */
4634 
4635     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4636         if (!WeakMapBase::findInterZoneEdges(zone))
4637             return false;
4638     }
4639 
4640     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4641         if (zone->hasDeadProxies) {
4642             bool foundInZone = false;
4643             for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
4644                 bool foundInCompartment = false;
4645                 if (!comp->findDeadProxyZoneEdges(&foundInCompartment))
4646                     return false;
4647                 foundInZone = foundInZone || foundInCompartment;
4648             }
4649             if (!foundInZone)
4650                 zone->hasDeadProxies = false;
4651         }
4652     }
4653 
4654     return true;
4655 }
4656 
4657 void
4658 GCRuntime::findZoneGroups()
4659 {
4660 #ifdef DEBUG
4661     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
4662         MOZ_ASSERT(zone->gcZoneGroupEdges.empty());
4663 #endif
4664 
4665     ComponentFinder<Zone> finder(rt->mainThread.nativeStackLimit[StackForSystemCode]);
4666     if (!isIncremental || !findInterZoneEdges())
4667         finder.useOneComponent();
4668 
4669     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4670         MOZ_ASSERT(zone->isGCMarking());
4671         finder.addNode(zone);
4672     }
4673     zoneGroups = finder.getResultsList();
4674     currentZoneGroup = zoneGroups;
4675     zoneGroupIndex = 0;
4676 
4677     for (GCZonesIter zone(rt); !zone.done(); zone.next())
4678         zone->gcZoneGroupEdges.clear();
4679 
4680 #ifdef DEBUG
4681     for (Zone* head = currentZoneGroup; head; head = head->nextGroup()) {
4682         for (Zone* zone = head; zone; zone = zone->nextNodeInGroup())
4683             MOZ_ASSERT(zone->isGCMarking());
4684     }
4685 
4686     MOZ_ASSERT_IF(!isIncremental, !currentZoneGroup->nextGroup());
4687     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
4688         MOZ_ASSERT(zone->gcZoneGroupEdges.empty());
4689 #endif
4690 }
4691 
4692 static void
4693 ResetGrayList(JSCompartment* comp);
4694 
4695 void
4696 GCRuntime::getNextZoneGroup()
4697 {
4698     currentZoneGroup = currentZoneGroup->nextGroup();
4699     ++zoneGroupIndex;
4700     if (!currentZoneGroup) {
4701         abortSweepAfterCurrentGroup = false;
4702         return;
4703     }
4704 
4705     for (Zone* zone = currentZoneGroup; zone; zone = zone->nextNodeInGroup()) {
4706         MOZ_ASSERT(zone->isGCMarking());
4707         MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
4708     }
4709 
4710     if (!isIncremental)
4711         ComponentFinder<Zone>::mergeGroups(currentZoneGroup);
4712 
4713     if (abortSweepAfterCurrentGroup) {
4714         MOZ_ASSERT(!isIncremental);
4715         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4716             MOZ_ASSERT(!zone->gcNextGraphComponent);
4717             MOZ_ASSERT(zone->isGCMarking());
4718             zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
4719             zone->setGCState(Zone::NoGC);
4720             zone->gcGrayRoots.clearAndFree();
4721         }
4722 
4723         for (GCCompartmentGroupIter comp(rt); !comp.done(); comp.next())
4724             ResetGrayList(comp);
4725 
4726         abortSweepAfterCurrentGroup = false;
4727         currentZoneGroup = nullptr;
4728     }
4729 }
4730 
4731 /*
4732  * Gray marking:
4733  *
4734  * At the end of collection, anything reachable from a gray root that has not
4735  * otherwise been marked black must be marked gray.
4736  *
4737  * This means that when marking things gray we must not allow marking to leave
4738  * the current compartment group, as that could result in things being marked
4739  * gray when they might subsequently be marked black.  To achieve this, when we
4740  * find a cross compartment pointer we don't mark the referent but add it to a
4741  * singly-linked list of incoming gray pointers that is stored with each
4742  * compartment.
4743  *
4744  * The list head is stored in JSCompartment::gcIncomingGrayPointers and contains
4745  * cross compartment wrapper objects. The next pointer is stored in the second
4746  * extra slot of the cross compartment wrapper.
4747  *
4748  * The list is created during gray marking when one of the
4749  * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
4750  * current compartment group.  This calls DelayCrossCompartmentGrayMarking to
4751  * push the referring object onto the list.
4752  *
4753  * The list is traversed and then unlinked in
4754  * MarkIncomingCrossCompartmentPointers.
4755  */
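
/*
 * A minimal sketch of the list manipulation described above (illustrative
 * only; the real logic lives in DelayCrossCompartmentGrayMarking() and
 * RemoveFromGrayList() below). The list is threaded through an extra slot of
 * each cross compartment wrapper, so prepending a wrapper just stashes the
 * old list head in that slot:
 *
 *   unsigned slot = ProxyObject::grayLinkExtraSlot(wrapper);
 *   if (GetProxyExtra(wrapper, slot).isUndefined()) {
 *       SetProxyExtra(wrapper, slot,
 *                     ObjectOrNullValue(comp->gcIncomingGrayPointers));
 *       comp->gcIncomingGrayPointers = wrapper;    // wrapper is the new head
 *   }
 */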
4756 
4757 static bool
4758 IsGrayListObject(JSObject* obj)
4759 {
4760     MOZ_ASSERT(obj);
4761     return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
4762 }
4763 
4764 /* static */ unsigned
4765 ProxyObject::grayLinkExtraSlot(JSObject* obj)
4766 {
4767     MOZ_ASSERT(IsGrayListObject(obj));
4768     return 1;
4769 }
4770 
4771 #ifdef DEBUG
4772 static void
4773 AssertNotOnGrayList(JSObject* obj)
4774 {
4775     MOZ_ASSERT_IF(IsGrayListObject(obj),
4776                   GetProxyExtra(obj, ProxyObject::grayLinkExtraSlot(obj)).isUndefined());
4777 }
4778 #endif
4779 
4780 static JSObject*
4781 CrossCompartmentPointerReferent(JSObject* obj)
4782 {
4783     MOZ_ASSERT(IsGrayListObject(obj));
4784     return &obj->as<ProxyObject>().private_().toObject();
4785 }
4786 
4787 static JSObject*
4788 NextIncomingCrossCompartmentPointer(JSObject* prev, bool unlink)
4789 {
4790     unsigned slot = ProxyObject::grayLinkExtraSlot(prev);
4791     JSObject* next = GetProxyExtra(prev, slot).toObjectOrNull();
4792     MOZ_ASSERT_IF(next, IsGrayListObject(next));
4793 
4794     if (unlink)
4795         SetProxyExtra(prev, slot, UndefinedValue());
4796 
4797     return next;
4798 }
4799 
4800 void
4801 js::DelayCrossCompartmentGrayMarking(JSObject* src)
4802 {
4803     MOZ_ASSERT(IsGrayListObject(src));
4804 
4805     /* Called from MarkCrossCompartmentXXX functions. */
4806     unsigned slot = ProxyObject::grayLinkExtraSlot(src);
4807     JSObject* dest = CrossCompartmentPointerReferent(src);
4808     JSCompartment* comp = dest->compartment();
4809 
4810     if (GetProxyExtra(src, slot).isUndefined()) {
4811         SetProxyExtra(src, slot, ObjectOrNullValue(comp->gcIncomingGrayPointers));
4812         comp->gcIncomingGrayPointers = src;
4813     } else {
4814         MOZ_ASSERT(GetProxyExtra(src, slot).isObjectOrNull());
4815     }
4816 
4817 #ifdef DEBUG
4818     /*
4819      * Assert that the object is in our list, also walking the list to check its
4820      * integrity.
4821      */
4822     JSObject* obj = comp->gcIncomingGrayPointers;
4823     bool found = false;
4824     while (obj) {
4825         if (obj == src)
4826             found = true;
4827         obj = NextIncomingCrossCompartmentPointer(obj, false);
4828     }
4829     MOZ_ASSERT(found);
4830 #endif
4831 }
4832 
4833 static void
4834 MarkIncomingCrossCompartmentPointers(JSRuntime* rt, const uint32_t color)
4835 {
4836     MOZ_ASSERT(color == BLACK || color == GRAY);
4837 
4838     static const gcstats::Phase statsPhases[] = {
4839         gcstats::PHASE_SWEEP_MARK_INCOMING_BLACK,
4840         gcstats::PHASE_SWEEP_MARK_INCOMING_GRAY
4841     };
4842     gcstats::AutoPhase ap1(rt->gc.stats, statsPhases[color]);
4843 
4844     bool unlinkList = color == GRAY;
4845 
4846     for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
4847         MOZ_ASSERT_IF(color == GRAY, c->zone()->isGCMarkingGray());
4848         MOZ_ASSERT_IF(color == BLACK, c->zone()->isGCMarkingBlack());
4849         MOZ_ASSERT_IF(c->gcIncomingGrayPointers, IsGrayListObject(c->gcIncomingGrayPointers));
4850 
4851         for (JSObject* src = c->gcIncomingGrayPointers;
4852              src;
4853              src = NextIncomingCrossCompartmentPointer(src, unlinkList))
4854         {
4855             JSObject* dst = CrossCompartmentPointerReferent(src);
4856             MOZ_ASSERT(dst->compartment() == c);
4857 
4858             if (color == GRAY) {
4859                 if (IsMarkedUnbarriered(rt, &src) && src->asTenured().isMarked(GRAY))
4860                     TraceManuallyBarrieredEdge(&rt->gc.marker, &dst,
4861                                                "cross-compartment gray pointer");
4862             } else {
4863                 if (IsMarkedUnbarriered(rt, &src) && !src->asTenured().isMarked(GRAY))
4864                     TraceManuallyBarrieredEdge(&rt->gc.marker, &dst,
4865                                                "cross-compartment black pointer");
4866             }
4867         }
4868 
4869         if (unlinkList)
4870             c->gcIncomingGrayPointers = nullptr;
4871     }
4872 
4873     auto unlimited = SliceBudget::unlimited();
4874     rt->gc.marker.drainMarkStack(unlimited);
4875 }
4876 
4877 static bool
4878 RemoveFromGrayList(JSObject* wrapper)
4879 {
4880     if (!IsGrayListObject(wrapper))
4881         return false;
4882 
4883     unsigned slot = ProxyObject::grayLinkExtraSlot(wrapper);
4884     if (GetProxyExtra(wrapper, slot).isUndefined())
4885         return false;  /* Not on our list. */
4886 
4887     JSObject* tail = GetProxyExtra(wrapper, slot).toObjectOrNull();
4888     SetProxyExtra(wrapper, slot, UndefinedValue());
4889 
4890     JSCompartment* comp = CrossCompartmentPointerReferent(wrapper)->compartment();
4891     JSObject* obj = comp->gcIncomingGrayPointers;
4892     if (obj == wrapper) {
4893         comp->gcIncomingGrayPointers = tail;
4894         return true;
4895     }
4896 
4897     while (obj) {
4898         unsigned slot = ProxyObject::grayLinkExtraSlot(obj);
4899         JSObject* next = GetProxyExtra(obj, slot).toObjectOrNull();
4900         if (next == wrapper) {
4901             SetProxyExtra(obj, slot, ObjectOrNullValue(tail));
4902             return true;
4903         }
4904         obj = next;
4905     }
4906 
4907     MOZ_CRASH("object not found in gray link list");
4908 }
4909 
4910 static void
4911 ResetGrayList(JSCompartment* comp)
4912 {
4913     JSObject* src = comp->gcIncomingGrayPointers;
4914     while (src)
4915         src = NextIncomingCrossCompartmentPointer(src, true);
4916     comp->gcIncomingGrayPointers = nullptr;
4917 }
4918 
4919 void
4920 js::NotifyGCNukeWrapper(JSObject* obj)
4921 {
4922     /*
4923      * References to the target of the wrapper are being removed, so we no
4924      * longer have to remember to mark it.
4925      */
4926     RemoveFromGrayList(obj);
4927 
4928     obj->zone()->hasDeadProxies = true;
4929 }
4930 
4931 enum {
4932     JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
4933     JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
4934 };
4935 
4936 unsigned
4937 js::NotifyGCPreSwap(JSObject* a, JSObject* b)
4938 {
4939     /*
4940      * Two objects in the same compartment are about to have their contents
4941      * swapped.  If either of them is in our gray pointer list, we remove it
4942      * from the list and return a bitset indicating what happened.
4943      */
4944     return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
4945            (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
4946 }
4947 
4948 void
4949 js::NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags)
4950 {
4951     /*
4952      * Two objects in the same compartment have had their contents swapped.  If
4953      * either of them was in our gray pointer list, we re-add it here.
4954      */
4955     if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED)
4956         DelayCrossCompartmentGrayMarking(b);
4957     if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED)
4958         DelayCrossCompartmentGrayMarking(a);
4959 }
4960 
4961 void
4962 GCRuntime::endMarkingZoneGroup()
4963 {
4964     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_MARK);
4965 
4966     /*
4967      * Mark any incoming black pointers from previously swept compartments
4968      * whose referents are not marked. This can occur when gray cells become
4969      * black by the action of UnmarkGray.
4970      */
4971     MarkIncomingCrossCompartmentPointers(rt, BLACK);
4972     markWeakReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_WEAK);
4973 
4974     /*
4975      * Change state of current group to MarkGray to restrict marking to this
4976      * group.  Note that there may be pointers to the atoms compartment, and
4977      * these will be marked through, as they are not marked with
4978      * MarkCrossCompartmentXXX.
4979      */
4980     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4981         MOZ_ASSERT(zone->isGCMarkingBlack());
4982         zone->setGCState(Zone::MarkGray);
4983     }
4984     marker.setMarkColorGray();
4985 
4986     /* Mark incoming gray pointers from previously swept compartments. */
4987     MarkIncomingCrossCompartmentPointers(rt, GRAY);
4988 
4989     /* Mark gray roots and mark transitively inside the current compartment group. */
4990     markGrayReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_GRAY);
4991     markWeakReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
4992 
4993     /* Restore marking state. */
4994     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4995         MOZ_ASSERT(zone->isGCMarkingGray());
4996         zone->setGCState(Zone::Mark);
4997     }
4998     MOZ_ASSERT(marker.isDrained());
4999     marker.setMarkColorBlack();
5000 }
5001 
5002 class GCSweepTask : public GCParallelTask
5003 {
5004     virtual void runFromHelperThread() override {
5005         AutoSetThreadIsSweeping threadIsSweeping;
5006         GCParallelTask::runFromHelperThread();
5007     }
5008   protected:
5009     JSRuntime* runtime;
5010   public:
5011     explicit GCSweepTask(JSRuntime* rt) : runtime(rt) {}
5012 };
5013 
5014 #define MAKE_GC_SWEEP_TASK(name)                                              \
5015     class name : public GCSweepTask {                                         \
5016         virtual void run() override;                                          \
5017       public:                                                                 \
5018         explicit name (JSRuntime* rt) : GCSweepTask(rt) {}                    \
5019     }
5020 MAKE_GC_SWEEP_TASK(SweepAtomsTask);
5021 MAKE_GC_SWEEP_TASK(SweepInnerViewsTask);
5022 MAKE_GC_SWEEP_TASK(SweepCCWrappersTask);
5023 MAKE_GC_SWEEP_TASK(SweepBaseShapesTask);
5024 MAKE_GC_SWEEP_TASK(SweepInitialShapesTask);
5025 MAKE_GC_SWEEP_TASK(SweepObjectGroupsTask);
5026 MAKE_GC_SWEEP_TASK(SweepRegExpsTask);
5027 MAKE_GC_SWEEP_TASK(SweepMiscTask);
5028 #undef MAKE_GC_SWEEP_TASK
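
/*
 * For reference, a MAKE_GC_SWEEP_TASK(SweepAtomsTask) invocation above expands
 * to roughly the following class (shown here as a comment only):
 *
 *   class SweepAtomsTask : public GCSweepTask {
 *       virtual void run() override;
 *     public:
 *       explicit SweepAtomsTask(JSRuntime* rt) : GCSweepTask(rt) {}
 *   };
 */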
5029 
5030 /* virtual */ void
5031 SweepAtomsTask::run()
5032 {
5033     runtime->sweepAtoms();
5034 }
5035 
5036 /* virtual */ void
5037 SweepInnerViewsTask::run()
5038 {
5039     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
5040         c->sweepInnerViews();
5041 }
5042 
5043 /* virtual */ void
5044 SweepCCWrappersTask::run()
5045 {
5046     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
5047         c->sweepCrossCompartmentWrappers();
5048 }
5049 
5050 /* virtual */ void
5051 SweepBaseShapesTask::run()
5052 {
5053     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
5054         c->sweepBaseShapeTable();
5055 }
5056 
5057 /* virtual */ void
5058 SweepInitialShapesTask::run()
5059 {
5060     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
5061         c->sweepInitialShapeTable();
5062 }
5063 
5064 /* virtual */ void
5065 SweepObjectGroupsTask::run()
5066 {
5067     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
5068         c->objectGroups.sweep(runtime->defaultFreeOp());
5069 }
5070 
5071 /* virtual */ void
5072 SweepRegExpsTask::run()
5073 {
5074     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
5075         c->sweepRegExps();
5076 }
5077 
5078 /* virtual */ void
5079 SweepMiscTask::run()
5080 {
5081     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next()) {
5082         c->sweepSavedStacks();
5083         c->sweepSelfHostingScriptSource();
5084         c->sweepNativeIterators();
5085     }
5086 }
5087 
5088 void
5089 GCRuntime::startTask(GCParallelTask& task, gcstats::Phase phase)
5090 {
5091     MOZ_ASSERT(HelperThreadState().isLocked());
5092     if (!task.startWithLockHeld()) {
5093         AutoUnlockHelperThreadState unlock;
5094         gcstats::AutoPhase ap(stats, phase);
5095         task.runFromMainThread(rt);
5096     }
5097 }
5098 
5099 void
5100 GCRuntime::joinTask(GCParallelTask& task, gcstats::Phase phase)
5101 {
5102     gcstats::AutoPhase ap(stats, task, phase);
5103     task.joinWithLockHeld();
5104 }
5105 
5106 void
5107 GCRuntime::beginSweepingZoneGroup()
5108 {
5109     /*
5110      * Begin sweeping the group of zones in currentZoneGroup,
5111      * performing actions that must be done before yielding to caller.
5112      */
5113 
5114     bool sweepingAtoms = false;
5115     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5116         /* Set the GC state to sweeping. */
5117         MOZ_ASSERT(zone->isGCMarking());
5118         zone->setGCState(Zone::Sweep);
5119 
5120         /* Purge the ArenaLists before sweeping. */
5121         zone->arenas.purge();
5122 
5123         if (zone->isAtomsZone())
5124             sweepingAtoms = true;
5125 
5126         if (rt->sweepZoneCallback)
5127             rt->sweepZoneCallback(zone);
5128 
5129         zone->gcLastZoneGroupIndex = zoneGroupIndex;
5130     }
5131 
5132     validateIncrementalMarking();
5133 
5134     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5135         /* Clear all weakrefs that point to unmarked things. */
5136         for (auto edge : zone->gcWeakRefs) {
5137             /* Edges may be present multiple times, so may already be nulled. */
5138             if (*edge && IsAboutToBeFinalizedDuringSweep(**edge))
5139                 *edge = nullptr;
5140         }
5141         zone->gcWeakRefs.clear();
5142 
5143         /* No need to look up any more weakmap keys from this zone group. */
5144         zone->gcWeakKeys.clear();
5145     }
5146 
5147     FreeOp fop(rt);
5148     SweepAtomsTask sweepAtomsTask(rt);
5149     SweepInnerViewsTask sweepInnerViewsTask(rt);
5150     SweepCCWrappersTask sweepCCWrappersTask(rt);
5151     SweepBaseShapesTask sweepBaseShapesTask(rt);
5152     SweepInitialShapesTask sweepInitialShapesTask(rt);
5153     SweepObjectGroupsTask sweepObjectGroupsTask(rt);
5154     SweepRegExpsTask sweepRegExpsTask(rt);
5155     SweepMiscTask sweepMiscTask(rt);
5156 
5157     {
5158         gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_START);
5159         callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_START);
5160         {
5161             gcstats::AutoPhase ap2(stats, gcstats::PHASE_WEAK_ZONEGROUP_CALLBACK);
5162             callWeakPointerZoneGroupCallbacks();
5163         }
5164         {
5165             gcstats::AutoPhase ap2(stats, gcstats::PHASE_WEAK_COMPARTMENT_CALLBACK);
5166             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5167                 for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
5168                     callWeakPointerCompartmentCallbacks(comp);
5169             }
5170         }
5171     }
5172 
5173     if (sweepingAtoms) {
5174         AutoLockHelperThreadState helperLock;
5175         startTask(sweepAtomsTask, gcstats::PHASE_SWEEP_ATOMS);
5176     }
5177 
5178     {
5179         gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
5180         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5181 
5182         {
5183             AutoLockHelperThreadState helperLock;
5184             startTask(sweepInnerViewsTask, gcstats::PHASE_SWEEP_INNER_VIEWS);
5185             startTask(sweepCCWrappersTask, gcstats::PHASE_SWEEP_CC_WRAPPER);
5186             startTask(sweepBaseShapesTask, gcstats::PHASE_SWEEP_BASE_SHAPE);
5187             startTask(sweepInitialShapesTask, gcstats::PHASE_SWEEP_INITIAL_SHAPE);
5188             startTask(sweepObjectGroupsTask, gcstats::PHASE_SWEEP_TYPE_OBJECT);
5189             startTask(sweepRegExpsTask, gcstats::PHASE_SWEEP_REGEXP);
5190             startTask(sweepMiscTask, gcstats::PHASE_SWEEP_MISC);
5191         }
5192 
5193         // The remaining sweeping work runs on the main thread, in parallel
5194         // with the helper-thread tasks started above, until we join below.
5195         {
5196             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_MISC);
5197 
5198             for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
5199                 c->sweepGlobalObject(&fop);
5200                 c->sweepObjectPendingMetadata();
5201                 c->sweepDebugScopes();
5202                 c->sweepJitCompartment(&fop);
5203                 c->sweepTemplateObjects();
5204             }
5205 
5206             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5207                 zone->sweepWeakMaps();
5208 
5209             // Bug 1071218: the following two methods have not yet been
5210             // refactored to work on a single zone-group at once.
5211 
5212             // Collect watch points associated with unreachable objects.
5213             WatchpointMap::sweepAll(rt);
5214 
5215             // Detach unreachable debuggers and global objects from each other.
5216             Debugger::sweepAll(&fop);
5217 
5218             // Sweep entries containing about-to-be-finalized JitCode and
5219             // update relocated TypeSet::Types inside the JitcodeGlobalTable.
5220             jit::JitRuntime::SweepJitcodeGlobalTable(rt);
5221         }
5222 
5223         {
5224             gcstats::AutoPhase apdc(stats, gcstats::PHASE_SWEEP_DISCARD_CODE);
5225             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5226                 zone->discardJitCode(&fop);
5227             }
5228         }
5229 
5230         {
5231             gcstats::AutoPhase ap1(stats, gcstats::PHASE_SWEEP_TYPES);
5232             gcstats::AutoPhase ap2(stats, gcstats::PHASE_SWEEP_TYPES_BEGIN);
5233             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5234                 zone->beginSweepTypes(&fop, releaseObservedTypes && !zone->isPreservingCode());
5235             }
5236         }
5237 
5238         {
5239             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_BREAKPOINT);
5240             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5241                 zone->sweepBreakpoints(&fop);
5242             }
5243         }
5244 
5245         {
5246             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_BREAKPOINT);
5247             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5248                 zone->sweepUniqueIds(&fop);
5249         }
5250     }
5251 
5252     if (sweepingAtoms) {
5253         gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_SYMBOL_REGISTRY);
5254         rt->symbolRegistry().sweep();
5255     }
5256 
5257     // Rejoin our off-main-thread tasks.
5258     if (sweepingAtoms) {
5259         AutoLockHelperThreadState helperLock;
5260         joinTask(sweepAtomsTask, gcstats::PHASE_SWEEP_ATOMS);
5261     }
5262 
5263     {
5264         gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
5265         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5266 
5267         AutoLockHelperThreadState helperLock;
5268         joinTask(sweepInnerViewsTask, gcstats::PHASE_SWEEP_INNER_VIEWS);
5269         joinTask(sweepCCWrappersTask, gcstats::PHASE_SWEEP_CC_WRAPPER);
5270         joinTask(sweepBaseShapesTask, gcstats::PHASE_SWEEP_BASE_SHAPE);
5271         joinTask(sweepInitialShapesTask, gcstats::PHASE_SWEEP_INITIAL_SHAPE);
5272         joinTask(sweepObjectGroupsTask, gcstats::PHASE_SWEEP_TYPE_OBJECT);
5273         joinTask(sweepRegExpsTask, gcstats::PHASE_SWEEP_REGEXP);
5274         joinTask(sweepMiscTask, gcstats::PHASE_SWEEP_MISC);
5275     }
5276 
5277     /*
5278      * Queue all GC things in all zones for sweeping, either in the
5279      * foreground or on the background thread.
5280      *
5281      * Note that order is important here for the background case.
5282      *
5283      * Objects are finalized immediately but this may change in the future.
5284      */
5285 
5286     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5287         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5288         zone->arenas.queueForegroundObjectsForSweep(&fop);
5289     }
5290     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5291         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5292         for (unsigned i = 0; i < ArrayLength(IncrementalFinalizePhases); ++i)
5293             zone->arenas.queueForForegroundSweep(&fop, IncrementalFinalizePhases[i]);
5294     }
5295     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5296         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5297         for (unsigned i = 0; i < ArrayLength(BackgroundFinalizePhases); ++i)
5298             zone->arenas.queueForBackgroundSweep(&fop, BackgroundFinalizePhases[i]);
5299     }
5300     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5301         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5302         zone->arenas.queueForegroundThingsForSweep(&fop);
5303     }
5304 
5305     sweepingTypes = true;
5306 
5307     finalizePhase = 0;
5308     sweepZone = currentZoneGroup;
5309     sweepKindIndex = 0;
5310 
5311     {
5312         gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_END);
5313         callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_END);
5314     }
5315 }
5316 
5317 void
5318 GCRuntime::endSweepingZoneGroup()
5319 {
5320     /* Update the GC state for zones we have swept. */
5321     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5322         MOZ_ASSERT(zone->isGCSweeping());
5323         AutoLockGC lock(rt);
5324         zone->setGCState(Zone::Finished);
5325         zone->threshold.updateAfterGC(zone->usage.gcBytes(), invocationKind, tunables,
5326                                       schedulingState, lock);
5327     }
5328 
5329     /* Start background thread to sweep zones if required. */
5330     ZoneList zones;
5331     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5332         zones.append(zone);
5333     if (sweepOnBackgroundThread)
5334         queueZonesForBackgroundSweep(zones);
5335     else
5336         sweepBackgroundThings(zones, freeLifoAlloc, MainThread);
5337 
5338     /* Reset the list of arenas marked as being allocated during sweep phase. */
5339     while (ArenaHeader* arena = arenasAllocatedDuringSweep) {
5340         arenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
5341         arena->unsetAllocDuringSweep();
5342     }
5343 }
5344 
5345 void
5346 GCRuntime::beginSweepPhase(bool destroyingRuntime)
5347 {
5348     /*
5349      * Sweep phase.
5350      *
5351      * Finalize as we sweep, outside of lock but with rt->isHeapBusy()
5352      * true so that any attempt to allocate a GC-thing from a finalizer will
5353      * fail, rather than nest badly and leave the unmarked newborn to be swept.
5354      */
5355 
5356     MOZ_ASSERT(!abortSweepAfterCurrentGroup);
5357 
5358     AutoSetThreadIsSweeping threadIsSweeping;
5359 
5360     releaseHeldRelocatedArenas();
5361 
5362     computeNonIncrementalMarkingForValidation();
5363 
5364     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
5365 
5366     sweepOnBackgroundThread =
5367         !destroyingRuntime && !TraceEnabled() && CanUseExtraThreads();
5368 
5369     releaseObservedTypes = shouldReleaseObservedTypes();
5370 
5371 #ifdef DEBUG
5372     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
5373         MOZ_ASSERT(!c->gcIncomingGrayPointers);
5374         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
5375             if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
5376                 AssertNotOnGrayList(&e.front().value().unbarrieredGet().toObject());
5377         }
5378     }
5379 #endif
5380 
5381     DropStringWrappers(rt);
5382 
5383     findZoneGroups();
5384     endMarkingZoneGroup();
5385     beginSweepingZoneGroup();
5386 }
5387 
5388 bool
5389 ArenaLists::foregroundFinalize(FreeOp* fop, AllocKind thingKind, SliceBudget& sliceBudget,
5390                                SortedArenaList& sweepList)
5391 {
5392     if (!arenaListsToSweep[thingKind] && incrementalSweptArenas.isEmpty())
5393         return true;
5394 
5395     if (!FinalizeArenas(fop, &arenaListsToSweep[thingKind], sweepList,
5396                         thingKind, sliceBudget, RELEASE_ARENAS))
5397     {
5398         incrementalSweptArenaKind = thingKind;
5399         incrementalSweptArenas = sweepList.toArenaList();
5400         return false;
5401     }
5402 
5403     // Clear any previous incremental sweep state we may have saved.
5404     incrementalSweptArenas.clear();
5405 
5406     // Join |arenaLists[thingKind]| and |sweepList| into a single list.
5407     ArenaList finalized = sweepList.toArenaList();
5408     arenaLists[thingKind] =
5409         finalized.insertListWithCursorAtEnd(arenaLists[thingKind]);
5410 
5411     return true;
5412 }
5413 
5414 GCRuntime::IncrementalProgress
5415 GCRuntime::drainMarkStack(SliceBudget& sliceBudget, gcstats::Phase phase)
5416 {
5417     /* Run a marking slice and return whether the stack is now empty. */
5418     gcstats::AutoPhase ap(stats, phase);
5419     return marker.drainMarkStack(sliceBudget) ? Finished : NotFinished;
5420 }
5421 
5422 static void
5423 SweepThing(Shape* shape)
5424 {
5425     if (!shape->isMarked())
5426         shape->sweep();
5427 }
5428 
5429 static void
5430 SweepThing(JSScript* script, AutoClearTypeInferenceStateOnOOM* oom)
5431 {
5432     script->maybeSweepTypes(oom);
5433 }
5434 
5435 static void
5436 SweepThing(ObjectGroup* group, AutoClearTypeInferenceStateOnOOM* oom)
5437 {
5438     group->maybeSweep(oom);
5439 }
5440 
5441 template <typename T, typename... Args>
5442 static bool
5443 SweepArenaList(ArenaHeader** arenasToSweep, SliceBudget& sliceBudget, Args... args)
5444 {
5445     while (ArenaHeader* arena = *arenasToSweep) {
5446         for (ArenaCellIterUnderGC i(arena); !i.done(); i.next())
5447             SweepThing(i.get<T>(), args...);
5448 
5449         *arenasToSweep = (*arenasToSweep)->next;
5450         AllocKind kind = MapTypeToFinalizeKind<T>::kind;
5451         sliceBudget.step(Arena::thingsPerArena(Arena::thingSize(kind)));
5452         if (sliceBudget.isOverBudget())
5453             return false;
5454     }
5455 
5456     return true;
5457 }
5458 
5459 GCRuntime::IncrementalProgress
5460 GCRuntime::sweepPhase(SliceBudget& sliceBudget)
5461 {
5462     AutoSetThreadIsSweeping threadIsSweeping;
5463 
5464     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
5465     FreeOp fop(rt);
5466 
5467     if (drainMarkStack(sliceBudget, gcstats::PHASE_SWEEP_MARK) == NotFinished)
5468         return NotFinished;
5469 
5470 
5471     for (;;) {
5472         // Sweep dead type information stored in scripts and object groups, but
5473         // don't finalize them yet. We have to sweep dead information from both
5474         // live and dead scripts and object groups, so that no dead references
5475         // remain in them. Type inference can end up crawling these zones
5476         // again, such as for TypeCompartment::markSetsUnknown, and if this
5477         // happens after sweeping for the zone group finishes we won't be able
5478         // to determine which things in the zone are live.
5479         if (sweepingTypes) {
5480             gcstats::AutoPhase ap1(stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
5481             gcstats::AutoPhase ap2(stats, gcstats::PHASE_SWEEP_TYPES);
5482 
5483             for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
5484                 ArenaLists& al = sweepZone->arenas;
5485 
5486                 AutoClearTypeInferenceStateOnOOM oom(sweepZone);
5487 
5488                 if (!SweepArenaList<JSScript>(&al.gcScriptArenasToUpdate, sliceBudget, &oom))
5489                     return NotFinished;
5490 
5491                 if (!SweepArenaList<ObjectGroup>(
5492                         &al.gcObjectGroupArenasToUpdate, sliceBudget, &oom))
5493                 {
5494                     return NotFinished;
5495                 }
5496 
5497                 // Finish sweeping type information in the zone.
5498                 {
5499                     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_TYPES_END);
5500                     sweepZone->types.endSweep(rt);
5501                 }
5502 
5503                 // Foreground finalized objects have already been finalized,
5504                 // and now their arenas can be reclaimed by freeing empty ones
5505                 // and making non-empty ones available for allocation.
5506                 al.mergeForegroundSweptObjectArenas();
5507             }
5508 
5509             sweepZone = currentZoneGroup;
5510             sweepingTypes = false;
5511         }
5512 
5513         /* Finalize foreground finalized things. */
5514         for (; finalizePhase < ArrayLength(IncrementalFinalizePhases) ; ++finalizePhase) {
5515             gcstats::AutoPhase ap(stats, IncrementalFinalizePhases[finalizePhase].statsPhase);
5516 
5517             for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
5518                 Zone* zone = sweepZone;
5519 
5520                 while (sweepKindIndex < IncrementalFinalizePhases[finalizePhase].length) {
5521                     AllocKind kind = IncrementalFinalizePhases[finalizePhase].kinds[sweepKindIndex];
5522 
5523                     /* Set the number of things per arena for this AllocKind. */
5524                     size_t thingsPerArena = Arena::thingsPerArena(Arena::thingSize(kind));
5525                     incrementalSweepList.setThingsPerArena(thingsPerArena);
5526 
5527                     if (!zone->arenas.foregroundFinalize(&fop, kind, sliceBudget,
5528                                                          incrementalSweepList))
5529                         return NotFinished;
5530 
5531                     /* Reset the slots of the sweep list that we used. */
5532                     incrementalSweepList.reset(thingsPerArena);
5533 
5534                     ++sweepKindIndex;
5535                 }
5536                 sweepKindIndex = 0;
5537             }
5538             sweepZone = currentZoneGroup;
5539         }
5540 
5541         /* Remove dead shapes from the shape tree, but don't finalize them yet. */
5542         {
5543             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_SHAPE);
5544 
5545             for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
5546                 ArenaLists& al = sweepZone->arenas;
5547 
5548                 if (!SweepArenaList<Shape>(&al.gcShapeArenasToUpdate, sliceBudget))
5549                     return NotFinished;
5550 
5551                 if (!SweepArenaList<AccessorShape>(&al.gcAccessorShapeArenasToUpdate, sliceBudget))
5552                     return NotFinished;
5553             }
5554         }
5555 
5556         endSweepingZoneGroup();
5557         getNextZoneGroup();
5558         if (!currentZoneGroup)
5559             return Finished;
5560 
5561         endMarkingZoneGroup();
5562         beginSweepingZoneGroup();
5563     }
5564 }
5565 
5566 void
5567 GCRuntime::endSweepPhase(bool destroyingRuntime)
5568 {
5569     AutoSetThreadIsSweeping threadIsSweeping;
5570 
5571     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
5572     FreeOp fop(rt);
5573 
5574     MOZ_ASSERT_IF(destroyingRuntime, !sweepOnBackgroundThread);
5575 
5576     /*
5577      * Recalculate whether GC was full or not as this may have changed due to
5578      * newly created zones.  Can only change from full to not full.
5579      */
5580     if (isFull) {
5581         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5582             if (!zone->isCollecting()) {
5583                 isFull = false;
5584                 break;
5585             }
5586         }
5587     }
5588 
5589     /*
5590      * If we found any black->gray edges during marking, we completely clear the
5591      * mark bits of all uncollected zones, or if a reset has occurred, zones that
5592      * will no longer be collected. This is safe, although it may
5593      * prevent the cycle collector from collecting some dead objects.
5594      */
5595     if (foundBlackGrayEdges) {
5596         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5597             if (!zone->isCollecting())
5598                 zone->arenas.unmarkAll();
5599         }
5600     }
5601 
5602     {
5603         gcstats::AutoPhase ap(stats, gcstats::PHASE_DESTROY);
5604 
5605         /*
5606          * Sweep script filenames after sweeping functions in the generic loop
5607          * above. In this way when a scripted function's finalizer destroys the
5608          * script and calls rt->destroyScriptHook, the hook can still access the
5609          * script's filename. See bug 323267.
5610          */
5611         if (isFull)
5612             SweepScriptData(rt);
5613 
5614         /* Clear out any small pools that we're hanging on to. */
5615         if (jit::JitRuntime* jitRuntime = rt->jitRuntime())
5616             jitRuntime->execAlloc().purge();
5617 
5618         /*
5619          * This removes compartments from rt->compartment, so we do it last to make
5620          * sure we don't miss sweeping any compartments.
5621          */
5622         if (!destroyingRuntime)
5623             sweepZones(&fop, destroyingRuntime);
5624     }
5625 
5626     {
5627         gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_END);
5628         callFinalizeCallbacks(&fop, JSFINALIZE_COLLECTION_END);
5629 
5630         /* If we finished a full GC, then the gray bits are correct. */
5631         if (isFull)
5632             grayBitsValid = true;
5633     }
5634 
5635     /* If not sweeping on background thread then we must do it here. */
5636     if (!sweepOnBackgroundThread) {
5637         gcstats::AutoPhase ap(stats, gcstats::PHASE_DESTROY);
5638 
5639         assertBackgroundSweepingFinished();
5640 
5641         /*
5642          * Destroy arenas after we finished the sweeping so finalizers can
5643          * safely use IsAboutToBeFinalized(). This is done on the
5644          * GCHelperState if possible. We acquire the lock only because
5645          * Expire needs to unlock it for other callers.
5646          */
5647         {
5648             AutoLockGC lock(rt);
5649             expireChunksAndArenas(invocationKind == GC_SHRINK, lock);
5650         }
5651 
5652         /* Ensure the compartments get swept if it's the last GC. */
5653         if (destroyingRuntime)
5654             sweepZones(&fop, destroyingRuntime);
5655     }
5656 
5657     finishMarkingValidation();
5658 
5659 #ifdef DEBUG
5660     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5661         for (auto i : AllAllocKinds()) {
5662             MOZ_ASSERT_IF(!IsBackgroundFinalized(i) ||
5663                           !sweepOnBackgroundThread,
5664                           !zone->arenas.arenaListsToSweep[i]);
5665         }
5666     }
5667 
5668     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
5669         MOZ_ASSERT(!c->gcIncomingGrayPointers);
5670 
5671         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
5672             if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
5673                 AssertNotOnGrayList(&e.front().value().unbarrieredGet().toObject());
5674         }
5675     }
5676 #endif
5677 }
5678 
5679 GCRuntime::IncrementalProgress
5680 GCRuntime::beginCompactPhase()
5681 {
5682     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT);
5683 
5684     if (isIncremental) {
5685         // Poll for end of background sweeping
5686         AutoLockGC lock(rt);
5687         if (isBackgroundSweeping())
5688             return NotFinished;
5689     } else {
5690         waitBackgroundSweepEnd();
5691     }
5692 
5693     MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
5694     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
5695         if (CanRelocateZone(zone))
5696             zonesToMaybeCompact.append(zone);
5697     }
5698 
5699     MOZ_ASSERT(!relocatedArenasToRelease);
5700     startedCompacting = true;
5701     return Finished;
5702 }
5703 
5704 GCRuntime::IncrementalProgress
5705 GCRuntime::compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget)
5706 {
5707     MOZ_ASSERT(rt->gc.nursery.isEmpty());
5708     assertBackgroundSweepingFinished();
5709     MOZ_ASSERT(startedCompacting);
5710 
5711     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT);
5712 
5713     while (!zonesToMaybeCompact.isEmpty()) {
5714         Zone* zone = zonesToMaybeCompact.front();
5715         MOZ_ASSERT(zone->isGCFinished());
5716         ArenaHeader* relocatedArenas = nullptr;
5717         if (relocateArenas(zone, reason, relocatedArenas, sliceBudget)) {
5718             zone->setGCState(Zone::Compact);
5719             updatePointersToRelocatedCells(zone);
5720             zone->setGCState(Zone::Finished);
5721         }
5722         if (ShouldProtectRelocatedArenas(reason))
5723             protectAndHoldArenas(relocatedArenas);
5724         else
5725             releaseRelocatedArenas(relocatedArenas);
5726         zonesToMaybeCompact.removeFront();
5727         if (sliceBudget.isOverBudget())
5728             break;
5729     }
5730 
5731 #ifdef DEBUG
5732     CheckHashTablesAfterMovingGC(rt);
5733 #endif
5734 
5735     return zonesToMaybeCompact.isEmpty() ? Finished : NotFinished;
5736 }
5737 
5738 void
5739 GCRuntime::endCompactPhase(JS::gcreason::Reason reason)
5740 {
5741     startedCompacting = false;
5742 }
5743 
5744 void
5745 GCRuntime::finishCollection(JS::gcreason::Reason reason)
5746 {
5747     MOZ_ASSERT(marker.isDrained());
5748     marker.stop();
5749     clearBufferedGrayRoots();
5750     MemProfiler::SweepTenured(rt);
5751 
5752     uint64_t currentTime = PRMJ_Now();
5753     schedulingState.updateHighFrequencyMode(lastGCTime, currentTime, tunables);
5754 
5755     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5756         if (zone->isCollecting()) {
5757             MOZ_ASSERT(zone->isGCFinished());
5758             zone->setGCState(Zone::NoGC);
5759             zone->active = false;
5760         }
5761 
5762         MOZ_ASSERT(!zone->isCollecting());
5763         MOZ_ASSERT(!zone->wasGCStarted());
5764     }
5765 
5766     MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
5767 
5768     if (invocationKind == GC_SHRINK) {
5769         // Ensure excess chunks are returned to the system and free arenas
5770         // decommitted.
5771         shrinkBuffers();
5772     }
5773 
5774     lastGCTime = currentTime;
5775 
5776     // If this is an OOM GC reason, wait on the background sweeping thread
5777     // before returning to ensure that we free as much as possible. If this is
5778     // a zeal-triggered GC, we want to ensure that the mutator can continue
5779     // allocating on the same pages to reduce fragmentation.
5780     if (IsOOMReason(reason) || reason == JS::gcreason::DEBUG_GC) {
5781         gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
5782         rt->gc.waitBackgroundSweepOrAllocEnd();
5783     }
5784 }
5785 
5786 static const char*
5787 HeapStateToLabel(JS::HeapState heapState)
5788 {
5789     switch (heapState) {
5790       case JS::HeapState::MinorCollecting:
5791         return "js::Nursery::collect";
5792       case JS::HeapState::MajorCollecting:
5793         return "js::GCRuntime::collect";
5794       case JS::HeapState::Tracing:
5795         return "JS_IterateCompartments";
5796       case JS::HeapState::Idle:
5797         MOZ_CRASH("Should never have an Idle heap state when pushing GC pseudo frames!");
5798     }
5799     MOZ_ASSERT_UNREACHABLE("Should have exhausted every JS::HeapState variant!");
5800     return nullptr;
5801 }
5802 
5803 /* Start a new heap session. */
5804 AutoTraceSession::AutoTraceSession(JSRuntime* rt, JS::HeapState heapState)
5805   : lock(rt),
5806     runtime(rt),
5807     prevState(rt->heapState_),
5808     pseudoFrame(rt, HeapStateToLabel(heapState), ProfileEntry::Category::GC)
5809 {
5810     MOZ_ASSERT(rt->heapState_ == JS::HeapState::Idle);
5811     MOZ_ASSERT(heapState != JS::HeapState::Idle);
5812     MOZ_ASSERT_IF(heapState == JS::HeapState::MajorCollecting, rt->gc.nursery.isEmpty());
5813 
5814     // Threads with an exclusive context can hit refillFreeList while holding
5815     // the exclusive access lock. To avoid deadlocking when we try to acquire
5816     // this lock during GC and the other thread is waiting, make sure we hold
5817     // the exclusive access lock during GC sessions.
5818     MOZ_ASSERT(rt->currentThreadHasExclusiveAccess());
5819 
5820     if (rt->exclusiveThreadsPresent()) {
5821         // Lock the helper thread state when changing the heap state in the
5822         // presence of exclusive threads, to avoid racing with refillFreeList.
5823         AutoLockHelperThreadState lock;
5824         rt->heapState_ = heapState;
5825     } else {
5826         rt->heapState_ = heapState;
5827     }
5828 }
5829 
5830 AutoTraceSession::~AutoTraceSession()
5831 {
5832     MOZ_ASSERT(runtime->isHeapBusy());
5833 
5834     if (runtime->exclusiveThreadsPresent()) {
5835         AutoLockHelperThreadState lock;
5836         runtime->heapState_ = prevState;
5837 
5838         // Notify any helper threads waiting for the trace session to end.
5839         HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER);
5840     } else {
5841         runtime->heapState_ = prevState;
5842     }
5843 }
5844 
5845 AutoCopyFreeListToArenas::AutoCopyFreeListToArenas(JSRuntime* rt, ZoneSelector selector)
5846   : runtime(rt),
5847     selector(selector)
5848 {
5849     for (ZonesIter zone(rt, selector); !zone.done(); zone.next())
5850         zone->arenas.copyFreeListsToArenas();
5851 }
5852 
5853 AutoCopyFreeListToArenas::~AutoCopyFreeListToArenas()
5854 {
5855     for (ZonesIter zone(runtime, selector); !zone.done(); zone.next())
5856         zone->arenas.clearFreeListsInArenas();
5857 }
5858 
5859 class AutoCopyFreeListToArenasForGC
5860 {
5861     JSRuntime* runtime;
5862 
5863   public:
5864     explicit AutoCopyFreeListToArenasForGC(JSRuntime* rt) : runtime(rt) {
5865         MOZ_ASSERT(rt->currentThreadHasExclusiveAccess());
5866         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
5867             zone->arenas.copyFreeListsToArenas();
5868     }
5869     ~AutoCopyFreeListToArenasForGC() {
5870         for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next())
5871             zone->arenas.clearFreeListsInArenas();
5872     }
5873 };
5874 
5875 void
5876 GCRuntime::resetIncrementalGC(const char* reason)
5877 {
5878     switch (incrementalState) {
5879       case NO_INCREMENTAL:
5880         return;
5881 
5882       case MARK: {
5883         /* Cancel any ongoing marking. */
5884         AutoCopyFreeListToArenasForGC copy(rt);
5885 
5886         marker.reset();
5887         marker.stop();
5888         clearBufferedGrayRoots();
5889 
5890         for (GCCompartmentsIter c(rt); !c.done(); c.next())
5891             ResetGrayList(c);
5892 
5893         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
5894             MOZ_ASSERT(zone->isGCMarking());
5895             zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
5896             zone->setGCState(Zone::NoGC);
5897         }
5898 
5899         freeLifoAlloc.freeAll();
5900 
5901         incrementalState = NO_INCREMENTAL;
5902 
5903         MOZ_ASSERT(!marker.shouldCheckCompartments());
5904 
5905         break;
5906       }
5907 
5908       case SWEEP: {
5909         marker.reset();
5910 
5911         for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
5912             c->scheduledForDestruction = false;
5913 
5914         /* Finish sweeping the current zone group, then abort. */
5915         abortSweepAfterCurrentGroup = true;
5916 
5917         /* Don't perform any compaction after sweeping. */
5918         bool wasCompacting = isCompacting;
5919         isCompacting = false;
5920 
5921         auto unlimited = SliceBudget::unlimited();
5922         incrementalCollectSlice(unlimited, JS::gcreason::RESET);
5923 
5924         isCompacting = wasCompacting;
5925 
5926         {
5927             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
5928             rt->gc.waitBackgroundSweepOrAllocEnd();
5929         }
5930         break;
5931       }
5932 
5933       case COMPACT: {
5934         {
5935             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
5936             rt->gc.waitBackgroundSweepOrAllocEnd();
5937         }
5938 
5939         bool wasCompacting = isCompacting;
5940 
5941         isCompacting = true;
5942         startedCompacting = true;
5943         zonesToMaybeCompact.clear();
5944 
5945         auto unlimited = SliceBudget::unlimited();
5946         incrementalCollectSlice(unlimited, JS::gcreason::RESET);
5947 
5948         isCompacting = wasCompacting;
5949         break;
5950       }
5951 
5952       default:
5953         MOZ_CRASH("Invalid incremental GC state");
5954     }
5955 
5956     stats.reset(reason);
5957 
5958 #ifdef DEBUG
5959     assertBackgroundSweepingFinished();
5960     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5961         MOZ_ASSERT(!zone->isCollecting());
5962         MOZ_ASSERT(!zone->needsIncrementalBarrier());
5963         MOZ_ASSERT(!zone->isOnList());
5964     }
5965     MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
5966     MOZ_ASSERT(incrementalState == NO_INCREMENTAL);
5967 #endif
5968 }
5969 
5970 namespace {
5971 
5972 class AutoGCSlice {
5973   public:
5974     explicit AutoGCSlice(JSRuntime* rt);
5975     ~AutoGCSlice();
5976 
5977   private:
5978     JSRuntime* runtime;
5979 };
5980 
5981 } /* anonymous namespace */
5982 
5983 AutoGCSlice::AutoGCSlice(JSRuntime* rt)
5984   : runtime(rt)
5985 {
5986     /*
5987      * During incremental GC, the compartment's active flag determines whether
5988      * there are stack frames active for any of its scripts. Normally this flag
5989      * is set at the beginning of the mark phase. During incremental GC, we also
5990      * set it at the start of every phase.
5991      */
5992     for (ActivationIterator iter(rt); !iter.done(); ++iter)
5993         iter->compartment()->zone()->active = true;
5994 
5995     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
5996         /*
5997          * Clear needsIncrementalBarrier early so we don't do any write
5998          * barriers during GC. We don't need to update the Ion barriers (which
5999          * is expensive) because Ion code doesn't run during GC. If need be,
6000          * we'll update the Ion barriers in ~AutoGCSlice.
6001          */
6002         if (zone->isGCMarking()) {
6003             MOZ_ASSERT(zone->needsIncrementalBarrier());
6004             zone->setNeedsIncrementalBarrier(false, Zone::DontUpdateJit);
6005         } else {
6006             MOZ_ASSERT(!zone->needsIncrementalBarrier());
6007         }
6008     }
6009 }
6010 
6011 AutoGCSlice::~AutoGCSlice()
6012 {
6013     /* We can't use GCZonesIter if this is the end of the last slice. */
6014     for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
6015         if (zone->isGCMarking()) {
6016             zone->setNeedsIncrementalBarrier(true, Zone::UpdateJit);
6017             zone->arenas.prepareForIncrementalGC(runtime);
6018         } else {
6019             zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
6020         }
6021     }
6022 }
6023 
6024 void
6025 GCRuntime::pushZealSelectedObjects()
6026 {
6027 #ifdef JS_GC_ZEAL
6028     /* Push selected objects onto the mark stack and clear the list. */
6029     for (JSObject** obj = selectedForMarking.begin(); obj != selectedForMarking.end(); obj++)
6030         TraceManuallyBarrieredEdge(&marker, obj, "selected obj");
6031 #endif
6032 }
6033 
6034 static bool
6035 IsShutdownGC(JS::gcreason::Reason reason)
6036 {
6037     return reason == JS::gcreason::SHUTDOWN_CC || reason == JS::gcreason::DESTROY_RUNTIME;
6038 }
6039 
6040 static bool
6041 ShouldCleanUpEverything(JS::gcreason::Reason reason, JSGCInvocationKind gckind)
6042 {
6043     // During shutdown, we must clean everything up, for the sake of leak
6044     // detection. When a runtime has no contexts, or we're doing a GC before a
6045     // shutdown CC, those are strong indications that we're shutting down.
6046     return IsShutdownGC(reason) || gckind == GC_SHRINK;
6047 }
6048 
6049 void
6050 GCRuntime::incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason)
6051 {
6052     MOZ_ASSERT(rt->currentThreadHasExclusiveAccess());
6053 
6054     AutoCopyFreeListToArenasForGC copy(rt);
6055     AutoGCSlice slice(rt);
6056 
6057     bool destroyingRuntime = (reason == JS::gcreason::DESTROY_RUNTIME);
6058 
6059     gc::State initialState = incrementalState;
6060 
6061     int zeal = 0;
6062 #ifdef JS_GC_ZEAL
6063     if (reason == JS::gcreason::DEBUG_GC && !budget.isUnlimited()) {
6064         /*
6065          * Do the incremental collection type specified by zeal mode if the
6066          * collection was triggered by runDebugGC() and incremental GC has not
6067          * been cancelled by resetIncrementalGC().
6068          */
6069         zeal = zealMode;
6070     }
6071 #endif
6072 
6073     MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
6074     isIncremental = !budget.isUnlimited();
6075 
6076     if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) {
6077         /*
6078          * Yielding between slices occurs at predetermined points in these modes;
6079          * the budget is not used.
6080          */
6081         budget.makeUnlimited();
6082     }
6083 
6084     switch (incrementalState) {
6085       case NO_INCREMENTAL:
6086         initialReason = reason;
6087         cleanUpEverything = ShouldCleanUpEverything(reason, invocationKind);
6088         isCompacting = shouldCompact();
6089         lastMarkSlice = false;
6090 
6091         incrementalState = MARK_ROOTS;
6092         /* fall through */
6093 
6094       case MARK_ROOTS:
6095         if (!beginMarkPhase(reason)) {
6096             incrementalState = NO_INCREMENTAL;
6097             return;
6098         }
6099 
6100         if (!destroyingRuntime)
6101             pushZealSelectedObjects();
6102 
6103         incrementalState = MARK;
6104 
6105         if (isIncremental && zeal == ZealIncrementalRootsThenFinish)
6106             break;
6107 
6108         /* fall through */
6109 
6110       case MARK:
6111         AutoGCRooter::traceAllWrappers(&marker);
6112 
6113         /* If we needed delayed marking for gray roots, then collect until done. */
6114         if (!hasBufferedGrayRoots()) {
6115             budget.makeUnlimited();
6116             isIncremental = false;
6117         }
6118 
6119         if (drainMarkStack(budget, gcstats::PHASE_MARK) == NotFinished)
6120             break;
6121 
6122         MOZ_ASSERT(marker.isDrained());
6123 
6124         if (!lastMarkSlice && isIncremental &&
6125             ((initialState == MARK && zeal != ZealIncrementalRootsThenFinish) ||
6126              zeal == ZealIncrementalMarkAllThenFinish))
6127         {
6128             /*
6129              * Yield with the aim of starting the sweep in the next
6130              * slice.  We will need to mark anything new on the stack
6131              * when we resume, so we stay in MARK state.
6132              */
6133             lastMarkSlice = true;
6134             break;
6135         }
6136 
6137         incrementalState = SWEEP;
6138 
6139         /*
6140          * This runs to completion, but we don't continue if the budget is
6141          * now exhausted.
6142          */
6143         beginSweepPhase(destroyingRuntime);
6144         if (budget.isOverBudget())
6145             break;
6146 
6147         /*
6148          * Always yield here when running in incremental multi-slice zeal
6149          * mode, so RunDebugGC can reset the slice budget.
6150          */
6151         if (isIncremental && zeal == ZealIncrementalMultipleSlices)
6152             break;
6153 
6154         /* fall through */
6155 
6156       case SWEEP:
6157         if (sweepPhase(budget) == NotFinished)
6158             break;
6159 
6160         endSweepPhase(destroyingRuntime);
6161 
6162         incrementalState = COMPACT;
6163         MOZ_ASSERT(!startedCompacting);
6164 
6165         /* Yield before compacting since it is not incremental. */
6166         if (isCompacting && isIncremental)
6167             break;
6168 
6169       case COMPACT:
6170         if (isCompacting) {
6171             if (!startedCompacting && beginCompactPhase() == NotFinished)
6172                 break;
6173 
6174             if (compactPhase(reason, budget) == NotFinished)
6175                 break;
6176 
6177             endCompactPhase(reason);
6178         }
6179 
6180         finishCollection(reason);
6181 
6182         incrementalState = NO_INCREMENTAL;
6183         break;
6184 
6185       default:
6186         MOZ_ASSERT(false);
6187     }
6188 }
6189 
6190 IncrementalSafety
6191 gc::IsIncrementalGCSafe(JSRuntime* rt)
6192 {
6193     MOZ_ASSERT(!rt->mainThread.suppressGC);
6194 
6195     if (rt->keepAtoms())
6196         return IncrementalSafety::Unsafe("keepAtoms set");
6197 
6198     if (!rt->gc.isIncrementalGCAllowed())
6199         return IncrementalSafety::Unsafe("incremental permanently disabled");
6200 
6201     return IncrementalSafety::Safe();
6202 }
6203 
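// Decide whether the next slice can run incrementally. If incremental GC is
// unsafe or disabled, any in-progress incremental collection is reset and the
// budget is made unlimited; per-zone allocation and malloc triggers can also
// force a non-incremental slice without resetting.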
6204 void
6205 GCRuntime::budgetIncrementalGC(SliceBudget& budget)
6206 {
6207     IncrementalSafety safe = IsIncrementalGCSafe(rt);
6208     if (!safe) {
6209         resetIncrementalGC(safe.reason());
6210         budget.makeUnlimited();
6211         stats.nonincremental(safe.reason());
6212         return;
6213     }
6214 
6215     if (mode != JSGC_MODE_INCREMENTAL) {
6216         resetIncrementalGC("GC mode change");
6217         budget.makeUnlimited();
6218         stats.nonincremental("GC mode");
6219         return;
6220     }
6221 
6222     if (isTooMuchMalloc()) {
6223         budget.makeUnlimited();
6224         stats.nonincremental("malloc bytes trigger");
6225     }
6226 
6227     bool reset = false;
6228     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
6229         if (zone->usage.gcBytes() >= zone->threshold.gcTriggerBytes()) {
6230             budget.makeUnlimited();
6231             stats.nonincremental("allocation trigger");
6232         }
6233 
6234         if (isIncrementalGCInProgress() && zone->isGCScheduled() != zone->wasGCStarted())
6235             reset = true;
6236 
6237         if (zone->isTooMuchMalloc()) {
6238             budget.makeUnlimited();
6239             stats.nonincremental("malloc bytes trigger");
6240         }
6241     }
6242 
6243     if (reset)
6244         resetIncrementalGC("zone change");
6245 }
6246 
6247 namespace {
6248 
6249 class AutoScheduleZonesForGC
6250 {
6251     JSRuntime* rt_;
6252 
6253   public:
6254     explicit AutoScheduleZonesForGC(JSRuntime* rt) : rt_(rt) {
6255         for (ZonesIter zone(rt_, WithAtoms); !zone.done(); zone.next()) {
6256             if (rt->gc.gcMode() == JSGC_MODE_GLOBAL)
6257                 zone->scheduleGC();
6258 
6259             /* This is a heuristic to avoid resets. */
6260             if (rt->gc.isIncrementalGCInProgress() && zone->needsIncrementalBarrier())
6261                 zone->scheduleGC();
6262 
6263             /* This is a heuristic to reduce the total number of collections. */
6264             if (zone->usage.gcBytes() >=
6265                 zone->threshold.allocTrigger(rt->gc.schedulingState.inHighFrequencyGCMode()))
6266             {
6267                 zone->scheduleGC();
6268             }
6269         }
6270     }
6271 
6272     ~AutoScheduleZonesForGC() {
6273         for (ZonesIter zone(rt_, WithAtoms); !zone.done(); zone.next())
6274             zone->unscheduleGC();
6275     }
6276 };
6277 
6278 } /* anonymous namespace */
6279 
6280 /*
6281  * Run one GC "cycle" (either a slice of incremental GC or an entire
6282  * non-incremental GC). We disable inlining to ensure that the bottom of the
6283  * stack with possible GC roots recorded in MarkRuntime excludes any pointers we
6284  * use during the marking implementation.
6285  *
6286  * Returns true if we "reset" an existing incremental GC, which would force us
6287  * to run another cycle.
6288  */
6289 MOZ_NEVER_INLINE bool
6290 GCRuntime::gcCycle(bool nonincrementalByAPI, SliceBudget& budget, JS::gcreason::Reason reason)
6291 {
6292     AutoNotifyGCActivity notify(*this);
6293 
6294     evictNursery(reason);
6295 
6296     AutoTraceSession session(rt, JS::HeapState::MajorCollecting);
6297 
6298     majorGCTriggerReason = JS::gcreason::NO_REASON;
6299     interFrameGC = true;
6300 
6301     number++;
6302     if (!isIncrementalGCInProgress())
6303         incMajorGcNumber();
6304 
6305     // It's ok if threads other than the main thread have suppressGC set, as
6306     // they are operating on zones which will not be collected from here.
6307     MOZ_ASSERT(!rt->mainThread.suppressGC);
6308 
6309     // Assert if this is a GC unsafe region.
6310     JS::AutoAssertOnGC::VerifyIsSafeToGC(rt);
6311 
6312     {
6313         gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
6314 
6315         // As we are about to clear the mark bits, wait for background
6316         // finalization to finish. We only need to wait on the first slice.
6317         if (!isIncrementalGCInProgress())
6318             waitBackgroundSweepEnd();
6319 
6320         // We must also wait for background allocation to finish so we can
6321         // avoid taking the GC lock when manipulating the chunks during the GC.
6322         // The background alloc task can run between slices, so we must wait
6323         // for it at the start of every slice.
6324         allocTask.cancel(GCParallelTask::CancelAndWait);
6325     }
6326 
6327     State prevState = incrementalState;
6328 
6329     if (nonincrementalByAPI) {
6330         // Reset any in progress incremental GC if this was triggered via the
6331         // API. This isn't required for correctness, but sometimes during tests
6332         // the caller expects this GC to collect certain objects, and we need
6333         // to make sure to collect everything possible.
6334         if (reason != JS::gcreason::ALLOC_TRIGGER)
6335             resetIncrementalGC("requested");
6336 
6337         stats.nonincremental("requested");
6338         budget.makeUnlimited();
6339     } else {
6340         budgetIncrementalGC(budget);
6341     }
6342 
6343     /* The GC was reset, so we need a do-over. */
6344     if (prevState != NO_INCREMENTAL && !isIncrementalGCInProgress())
6345         return true;
6346 
6347     TraceMajorGCStart();
6348 
6349     incrementalCollectSlice(budget, reason);
6350 
6351 #ifndef JS_MORE_DETERMINISTIC
6352     nextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
6353 #endif
6354 
6355     chunkAllocationSinceLastGC = false;
6356 
6357 #ifdef JS_GC_ZEAL
6358     /* Keeping these around after a GC is dangerous. */
6359     clearSelectedForMarking();
6360 #endif
6361 
6362     /* Clear gcMallocBytes for all zones. */
6363     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
6364         zone->resetGCMallocBytes();
6365 
6366     resetMallocBytes();
6367 
6368     TraceMajorGCEnd();
6369 
6370     return false;
6371 }
6372 
6373 #ifdef JS_GC_ZEAL
6374 static bool
6375 IsDeterministicGCReason(JS::gcreason::Reason reason)
6376 {
6377     if (reason > JS::gcreason::DEBUG_GC &&
6378         reason != JS::gcreason::CC_FORCED && reason != JS::gcreason::SHUTDOWN_CC)
6379     {
6380         return false;
6381     }
6382 
6383     if (reason == JS::gcreason::EAGER_ALLOC_TRIGGER)
6384         return false;
6385 
6386     return true;
6387 }
6388 #endif
6389 
6390 gcstats::ZoneGCStats
6391 GCRuntime::scanZonesBeforeGC()
6392 {
6393     gcstats::ZoneGCStats zoneStats;
6394     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
6395         zoneStats.zoneCount++;
6396         if (zone->isGCScheduled()) {
6397             zoneStats.collectedZoneCount++;
6398             zoneStats.collectedCompartmentCount += zone->compartments.length();
6399         }
6400     }
6401 
6402     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next())
6403         zoneStats.compartmentCount++;
6404 
6405     return zoneStats;
6406 }
6407 
6408 void
6409 GCRuntime::checkCanCallAPI()
6410 {
6411     JS_AbortIfWrongThread(rt);
6412 
6413     /* If we attempt to invoke the GC while we are running in the GC, assert. */
6414     MOZ_RELEASE_ASSERT(!rt->isHeapBusy());
6415 
6416     /* The engine never locks across anything that could GC. */
6417     MOZ_ASSERT(!rt->currentThreadHasExclusiveAccess());
6418 
6419     MOZ_ASSERT(isAllocAllowed());
6420 }
6421 
6422 bool
6423 GCRuntime::checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason)
6424 {
6425     if (rt->mainThread.suppressGC)
6426         return false;
6427 
6428 #ifdef JS_GC_ZEAL
6429     if (deterministicOnly && !IsDeterministicGCReason(reason))
6430         return false;
6431 #endif
6432 
6433     return true;
6434 }
6435 
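// Main entry point for major GC requests. Performs the per-request checks
// above, then runs gcCycle() repeatedly until no further cycle is needed,
// e.g. after an incremental collection was reset or when a shutdown GC keeps
// reviving compartments that were thought to be dead.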
6436 void
6437 GCRuntime::collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::Reason reason)
6438 {
6439     // Checks run for each request, even if we do not actually GC.
6440     checkCanCallAPI();
6441 
6442     // Check if we are allowed to GC at this time before proceeding.
6443     if (!checkIfGCAllowedInCurrentState(reason))
6444         return;
6445 
6446     AutoTraceLog logGC(TraceLoggerForMainThread(rt), TraceLogger_GC);
6447     AutoStopVerifyingBarriers av(rt, IsShutdownGC(reason));
6448     AutoEnqueuePendingParseTasksAfterGC aept(*this);
6449     AutoScheduleZonesForGC asz(rt);
6450     gcstats::AutoGCSlice agc(stats, scanZonesBeforeGC(), invocationKind, budget, reason);
6451 
6452     bool repeat = false;
6453     do {
6454         poked = false;
6455         bool wasReset = gcCycle(nonincrementalByAPI, budget, reason);
6456 
6457         /* Need to re-schedule all zones for GC. */
6458         if (poked && cleanUpEverything)
6459             JS::PrepareForFullGC(rt);
6460 
6461         /*
6462          * This code makes an extra effort to collect compartments that we
6463          * thought were dead at the start of the GC. See the large comment in
6464          * beginMarkPhase.
6465          */
6466         bool repeatForDeadZone = false;
6467         if (!nonincrementalByAPI && !isIncrementalGCInProgress()) {
6468             for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
6469                 if (c->scheduledForDestruction) {
6470                     nonincrementalByAPI = true;
6471                     repeatForDeadZone = true;
6472                     reason = JS::gcreason::COMPARTMENT_REVIVED;
6473                     c->zone()->scheduleGC();
6474                 }
6475             }
6476         }
6477 
6478         /*
6479          * If we reset an existing GC, we need to start a new one. Also, we
6480          * repeat GCs that happen during shutdown (the gcShouldCleanUpEverything
6481          * case) until we can be sure that no additional garbage is created
6482          * (which typically happens if roots are dropped during finalizers).
6483          */
6484         repeat = (poked && cleanUpEverything) || wasReset || repeatForDeadZone;
6485     } while (repeat);
6486 }
6487 
6488 js::AutoEnqueuePendingParseTasksAfterGC::~AutoEnqueuePendingParseTasksAfterGC()
6489 {
6490     if (!gc_.isIncrementalGCInProgress())
6491         EnqueuePendingParseTasksAfterGC(gc_.rt);
6492 }
6493 
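// Pick a time budget for a slice when the caller does not supply one. Except
// for allocation-triggered slices, the default slice time is scaled up by
// IGC_MARK_SLICE_MULTIPLIER when we are in high-frequency GC mode and dynamic
// mark slices are enabled.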
6494 SliceBudget
6495 GCRuntime::defaultBudget(JS::gcreason::Reason reason, int64_t millis)
6496 {
6497     if (millis == 0) {
6498         if (reason == JS::gcreason::ALLOC_TRIGGER)
6499             millis = defaultSliceBudget();
6500         else if (schedulingState.inHighFrequencyGCMode() && tunables.isDynamicMarkSliceEnabled())
6501             millis = defaultSliceBudget() * IGC_MARK_SLICE_MULTIPLIER;
6502         else
6503             millis = defaultSliceBudget();
6504     }
6505 
6506     return SliceBudget(TimeBudget(millis));
6507 }
6508 
6509 void
6510 GCRuntime::gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason)
6511 {
6512     invocationKind = gckind;
6513     collect(true, SliceBudget::unlimited(), reason);
6514 }
6515 
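// Begin a new incremental collection with the given invocation kind; the
// collection is then driven forward by later gcSlice() calls, or completed
// eagerly via finishGC().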
6516 void
6517 GCRuntime::startGC(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis)
6518 {
6519     MOZ_ASSERT(!isIncrementalGCInProgress());
6520     invocationKind = gckind;
6521     collect(false, defaultBudget(reason, millis), reason);
6522 }
6523 
6524 void
6525 GCRuntime::gcSlice(JS::gcreason::Reason reason, int64_t millis)
6526 {
6527     MOZ_ASSERT(isIncrementalGCInProgress());
6528     collect(false, defaultBudget(reason, millis), reason);
6529 }
6530 
6531 void
6532 GCRuntime::finishGC(JS::gcreason::Reason reason)
6533 {
6534     MOZ_ASSERT(isIncrementalGCInProgress());
6535 
6536     // If we're not collecting because we're out of memory then skip the
6537     // compacting phase if we need to finish an ongoing incremental GC
6538     // non-incrementally to avoid janking the browser.
6539     if (!IsOOMReason(initialReason)) {
6540         if (incrementalState == COMPACT) {
6541             abortGC();
6542             return;
6543         }
6544 
6545         isCompacting = false;
6546     }
6547 
6548     collect(false, SliceBudget::unlimited(), reason);
6549 }
6550 
6551 void
6552 GCRuntime::abortGC()
6553 {
6554     checkCanCallAPI();
6555     MOZ_ASSERT(!rt->mainThread.suppressGC);
6556 
6557     AutoStopVerifyingBarriers av(rt, false);
6558 
6559     gcstats::AutoGCSlice agc(stats, scanZonesBeforeGC(), invocationKind,
6560                              SliceBudget::unlimited(), JS::gcreason::ABORT_GC);
6561 
6562     evictNursery(JS::gcreason::ABORT_GC);
6563     AutoTraceSession session(rt, JS::HeapState::MajorCollecting);
6564 
6565     number++;
6566     resetIncrementalGC("abort");
6567 }
6568 
6569 void
6570 GCRuntime::notifyDidPaint()
6571 {
6572     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
6573 
6574 #ifdef JS_GC_ZEAL
6575     if (zealMode == ZealFrameVerifierPreValue) {
6576         verifyPreBarriers();
6577         return;
6578     }
6579 
6580     if (zealMode == ZealFrameGCValue) {
6581         JS::PrepareForFullGC(rt);
6582         gc(GC_NORMAL, JS::gcreason::REFRESH_FRAME);
6583         return;
6584     }
6585 #endif
6586 
6587     if (isIncrementalGCInProgress() && !interFrameGC) {
6588         JS::PrepareForIncrementalGC(rt);
6589         gcSlice(JS::gcreason::REFRESH_FRAME);
6590     }
6591 
6592     interFrameGC = false;
6593 }
6594 
6595 static bool
6596 ZonesSelected(JSRuntime* rt)
6597 {
6598     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
6599         if (zone->isGCScheduled())
6600             return true;
6601     }
6602     return false;
6603 }
6604 
6605 void
6606 GCRuntime::startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget)
6607 {
6608     MOZ_ASSERT(!isIncrementalGCInProgress());
6609     if (!ZonesSelected(rt))
6610         JS::PrepareForFullGC(rt);
6611     invocationKind = gckind;
6612     collect(false, budget, JS::gcreason::DEBUG_GC);
6613 }
6614 
6615 void
6616 GCRuntime::debugGCSlice(SliceBudget& budget)
6617 {
6618     MOZ_ASSERT(isIncrementalGCInProgress());
6619     if (!ZonesSelected(rt))
6620         JS::PrepareForIncrementalGC(rt);
6621     collect(false, budget, JS::gcreason::DEBUG_GC);
6622 }
6623 
6624 /* Schedule a full GC unless a zone will already be collected. */
6625 void
6626 js::PrepareForDebugGC(JSRuntime* rt)
6627 {
6628     if (!ZonesSelected(rt))
6629         JS::PrepareForFullGC(rt);
6630 }
6631 
6632 JS_PUBLIC_API(void)
6633 JS::ShrinkGCBuffers(JSRuntime* rt)
6634 {
6635     MOZ_ASSERT(!rt->isHeapBusy());
6636     rt->gc.shrinkBuffers();
6637 }
6638 
6639 void
6640 GCRuntime::shrinkBuffers()
6641 {
6642     AutoLockHelperThreadState helperLock;
6643     AutoLockGC lock(rt);
6644 
6645     if (CanUseExtraThreads())
6646         helperState.startBackgroundShrink(lock);
6647     else
6648         expireChunksAndArenas(true, lock);
6649 }
6650 
6651 void
6652 GCRuntime::onOutOfMallocMemory()
6653 {
6654     // Stop allocating new chunks.
6655     allocTask.cancel(GCParallelTask::CancelAndWait);
6656 
6657     // Wait for background free of nursery huge slots to finish.
6658     nursery.waitBackgroundFreeEnd();
6659 
6660     AutoLockGC lock(rt);
6661     onOutOfMallocMemory(lock);
6662 }
6663 
6664 void
6665 GCRuntime::onOutOfMallocMemory(const AutoLockGC& lock)
6666 {
6667     // Release any relocated arenas we may be holding on to, without releasing
6668     // the GC lock.
6669     releaseHeldRelocatedArenasWithoutUnlocking(lock);
6670 
6671     // Throw away any excess chunks we have lying around.
6672     freeEmptyChunks(rt, lock);
6673 
6674     // Immediately decommit as many arenas as possible in the hopes that this
6675     // might let the OS scrape together enough pages to satisfy the failing
6676     // malloc request.
6677     decommitAllWithoutUnlocking(lock);
6678 }
6679 
6680 void
6681 GCRuntime::minorGCImpl(JS::gcreason::Reason reason, Nursery::ObjectGroupList* pretenureGroups)
6682 {
6683     minorGCTriggerReason = JS::gcreason::NO_REASON;
6684     TraceLoggerThread* logger = TraceLoggerForMainThread(rt);
6685     AutoTraceLog logMinorGC(logger, TraceLogger_MinorGC);
6686     nursery.collect(rt, reason, pretenureGroups);
6687     MOZ_ASSERT_IF(!rt->mainThread.suppressGC, nursery.isEmpty());
6688 }
6689 
6690 // An alternative to the runtime-taking form that also allows marking object
6691 // groups as needing pretenuring.
6692 void
6693 GCRuntime::minorGC(JSContext* cx, JS::gcreason::Reason reason)
6694 {
6695     gcstats::AutoPhase ap(stats, gcstats::PHASE_MINOR_GC);
6696 
6697     Nursery::ObjectGroupList pretenureGroups;
6698     minorGCImpl(reason, &pretenureGroups);
6699     for (size_t i = 0; i < pretenureGroups.length(); i++) {
6700         if (pretenureGroups[i]->canPreTenure())
6701             pretenureGroups[i]->setShouldPreTenure(cx);
6702     }
6703 }
6704 
6705 void
6706 GCRuntime::clearPostBarrierCallbacks()
6707 {
6708     if (storeBuffer.hasPostBarrierCallbacks())
6709         evictNursery();
6710 }
6711 
6712 void
6713 GCRuntime::disableGenerationalGC()
6714 {
6715     if (isGenerationalGCEnabled()) {
6716         minorGC(JS::gcreason::API);
6717         nursery.disable();
6718         storeBuffer.disable();
6719     }
6720     ++rt->gc.generationalDisabled;
6721 }
6722 
6723 void
6724 GCRuntime::enableGenerationalGC()
6725 {
6726     MOZ_ASSERT(generationalDisabled > 0);
6727     --generationalDisabled;
6728     if (generationalDisabled == 0) {
6729         nursery.enable();
6730         storeBuffer.enable();
6731     }
6732 }
6733 
6734 bool
6735 GCRuntime::gcIfRequested(JSContext* cx /* = nullptr */)
6736 {
6737     // This method returns whether a major GC was performed.
6738 
6739     if (minorGCRequested()) {
6740         if (cx)
6741             minorGC(cx, minorGCTriggerReason);
6742         else
6743             minorGC(minorGCTriggerReason);
6744     }
6745 
6746     if (majorGCRequested()) {
6747         if (!isIncrementalGCInProgress())
6748             startGC(GC_NORMAL, majorGCTriggerReason);
6749         else
6750             gcSlice(majorGCTriggerReason);
6751         return true;
6752     }
6753 
6754     return false;
6755 }
6756 
6757 AutoFinishGC::AutoFinishGC(JSRuntime* rt)
6758 {
6759     if (JS::IsIncrementalGCInProgress(rt)) {
6760         JS::PrepareForIncrementalGC(rt);
6761         JS::FinishIncrementalGC(rt, JS::gcreason::API);
6762     }
6763 
6764     rt->gc.waitBackgroundSweepEnd();
6765     rt->gc.nursery.waitBackgroundFreeEnd();
6766 }
6767 
6768 AutoPrepareForTracing::AutoPrepareForTracing(JSRuntime* rt, ZoneSelector selector)
6769   : finish(rt),
6770     session(rt),
6771     copy(rt, selector)
6772 {
6773 }
6774 
6775 JSCompartment*
6776 js::NewCompartment(JSContext* cx, Zone* zone, JSPrincipals* principals,
6777                    const JS::CompartmentOptions& options)
6778 {
6779     JSRuntime* rt = cx->runtime();
6780     JS_AbortIfWrongThread(rt);
6781 
6782     ScopedJSDeletePtr<Zone> zoneHolder;
6783     if (!zone) {
6784         zone = cx->new_<Zone>(rt);
6785         if (!zone)
6786             return nullptr;
6787 
6788         zoneHolder.reset(zone);
6789 
6790         const JSPrincipals* trusted = rt->trustedPrincipals();
6791         bool isSystem = principals && principals == trusted;
6792         if (!zone->init(isSystem)) {
6793             ReportOutOfMemory(cx);
6794             return nullptr;
6795         }
6796     }
6797 
6798     ScopedJSDeletePtr<JSCompartment> compartment(cx->new_<JSCompartment>(zone, options));
6799     if (!compartment || !compartment->init(cx))
6800         return nullptr;
6801 
6802     // Set up the principals.
6803     JS_SetCompartmentPrincipals(compartment, principals);
6804 
6805     AutoLockGC lock(rt);
6806 
6807     if (!zone->compartments.append(compartment.get())) {
6808         ReportOutOfMemory(cx);
6809         return nullptr;
6810     }
6811 
6812     if (zoneHolder && !rt->gc.zones.append(zone)) {
6813         ReportOutOfMemory(cx);
6814         return nullptr;
6815     }
6816 
6817     zoneHolder.forget();
6818     return compartment.forget();
6819 }
6820 
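// Merge the contents of |source| into |target|, rewriting compartment and zone
// pointers as it goes. This is used to adopt the results of off-thread work
// into their destination compartment; see the references to
// handleParseWorkload below.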
6821 void
6822 gc::MergeCompartments(JSCompartment* source, JSCompartment* target)
6823 {
6824     // The source compartment must be specifically flagged as mergeable.  This
6825     // also implies that the compartment is not visible to the debugger.
6826     MOZ_ASSERT(source->options_.mergeable());
6827 
6828     MOZ_ASSERT(source->addonId == target->addonId);
6829 
6830     JSRuntime* rt = source->runtimeFromMainThread();
6831 
6832     AutoPrepareForTracing prepare(rt, SkipAtoms);
6833 
6834     // Cleanup tables and other state in the source compartment that will be
6835     // meaningless after merging into the target compartment.
6836 
6837     source->clearTables();
6838     source->unsetIsDebuggee();
6839 
6840     // The delazification flag indicates the presence of LazyScripts in a
6841     // compartment for the Debugger API, so if the source compartment created
6842     // LazyScripts, the flag must be propagated to the target compartment.
6843     if (source->needsDelazificationForDebugger())
6844         target->scheduleDelazificationForDebugger();
6845 
6846     // Release any relocated arenas which we may be holding on to as they might
6847     // be in the source zone.
6848     rt->gc.releaseHeldRelocatedArenas();
6849 
6850     // Fixup compartment pointers in source to refer to target, and make sure
6851     // type information generations are in sync.
6852 
6853     // Get the static global lexical scope of the target compartment. Static
6854     // scopes need to be fixed up below.
6855     RootedObject targetStaticGlobalLexicalScope(rt);
6856     targetStaticGlobalLexicalScope = &target->maybeGlobal()->lexicalScope().staticBlock();
6857 
6858     for (ZoneCellIter iter(source->zone(), AllocKind::SCRIPT); !iter.done(); iter.next()) {
6859         JSScript* script = iter.get<JSScript>();
6860         MOZ_ASSERT(script->compartment() == source);
6861         script->compartment_ = target;
6862         script->setTypesGeneration(target->zone()->types.generation);
6863 
6864         // See warning in handleParseWorkload. If we start optimizing global
6865         // lexicals, we would need to merge the contents of the static global
6866         // lexical scope.
6867         if (JSObject* enclosing = script->enclosingStaticScope()) {
6868             if (IsStaticGlobalLexicalScope(enclosing))
6869                 script->fixEnclosingStaticGlobalLexicalScope();
6870         }
6871 
6872         if (script->hasBlockScopes()) {
6873             BlockScopeArray* scopes = script->blockScopes();
6874             for (uint32_t i = 0; i < scopes->length; i++) {
6875                 uint32_t scopeIndex = scopes->vector[i].index;
6876                 if (scopeIndex != BlockScopeNote::NoBlockScopeIndex) {
6877                     ScopeObject* scope = &script->getObject(scopeIndex)->as<ScopeObject>();
6878                     MOZ_ASSERT(!IsStaticGlobalLexicalScope(scope));
6879                     JSObject* enclosing = &scope->enclosingScope();
6880                     if (IsStaticGlobalLexicalScope(enclosing))
6881                         scope->setEnclosingScope(targetStaticGlobalLexicalScope);
6882                 }
6883             }
6884         }
6885     }
6886 
6887     for (ZoneCellIter iter(source->zone(), AllocKind::BASE_SHAPE); !iter.done(); iter.next()) {
6888         BaseShape* base = iter.get<BaseShape>();
6889         MOZ_ASSERT(base->compartment() == source);
6890         base->compartment_ = target;
6891     }
6892 
6893     for (ZoneCellIter iter(source->zone(), AllocKind::OBJECT_GROUP); !iter.done(); iter.next()) {
6894         ObjectGroup* group = iter.get<ObjectGroup>();
6895         group->setGeneration(target->zone()->types.generation);
6896         group->compartment_ = target;
6897 
6898         // Remove any unboxed layouts from the list in the off thread
6899         // compartment. These do not need to be reinserted in the target
6900         // compartment's list, as the list is not required to be complete.
6901         if (UnboxedLayout* layout = group->maybeUnboxedLayoutDontCheckGeneration())
6902             layout->detachFromCompartment();
6903     }
6904 
6905     // Fixup zone pointers in source's zone to refer to target's zone.
6906 
6907     for (auto thingKind : AllAllocKinds()) {
6908         for (ArenaIter aiter(source->zone(), thingKind); !aiter.done(); aiter.next()) {
6909             ArenaHeader* aheader = aiter.get();
6910             aheader->zone = target->zone();
6911         }
6912     }
6913 
6914     // After fixing JSFunctions' compartments, we can fix LazyScripts'
6915     // enclosing scopes.
6916     for (ZoneCellIter iter(source->zone(), AllocKind::LAZY_SCRIPT); !iter.done(); iter.next()) {
6917         LazyScript* lazy = iter.get<LazyScript>();
6918         MOZ_ASSERT(lazy->functionNonDelazifying()->compartment() == target);
6919 
6920         // See warning in handleParseWorkload. If we start optimizing global
6921         // lexicals, we would need to merge the contents of the static global
6922         // lexical scope.
6923         if (JSObject* enclosing = lazy->enclosingScope()) {
6924             if (IsStaticGlobalLexicalScope(enclosing))
6925                 lazy->fixEnclosingStaticGlobalLexicalScope();
6926         }
6927     }
6928 
6929     // The source should be the only compartment in its zone.
6930     for (CompartmentsInZoneIter c(source->zone()); !c.done(); c.next())
6931         MOZ_ASSERT(c.get() == source);
6932 
6933     // Merge the allocator in source's zone into target's zone.
6934     target->zone()->arenas.adoptArenas(rt, &source->zone()->arenas);
6935     target->zone()->usage.adopt(source->zone()->usage);
6936 
6937     // Merge other info in source's zone into target's zone.
6938     target->zone()->types.typeLifoAlloc.transferFrom(&source->zone()->types.typeLifoAlloc);
6939 
6940     // Ensure that we did not create any UIDs when running off-thread.
6941     source->zone()->assertNoUniqueIdsInZone();
6942 }
6943 
6944 void
6945 GCRuntime::runDebugGC()
6946 {
6947 #ifdef JS_GC_ZEAL
6948     int type = zealMode;
6949 
6950     if (rt->mainThread.suppressGC)
6951         return;
6952 
6953     if (type == js::gc::ZealGenerationalGCValue)
6954         return minorGC(JS::gcreason::DEBUG_GC);
6955 
6956     PrepareForDebugGC(rt);
6957 
6958     auto budget = SliceBudget::unlimited();
6959     if (type == ZealIncrementalRootsThenFinish ||
6960         type == ZealIncrementalMarkAllThenFinish ||
6961         type == ZealIncrementalMultipleSlices)
6962     {
6963         js::gc::State initialState = incrementalState;
6964         if (type == ZealIncrementalMultipleSlices) {
6965             /*
6966              * Start with a small slice limit and double it every slice. This
6967              * ensures that we get multiple slices, and that the collection
6968              * runs to completion.
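             *
             * For example, with an assumed zealFrequency of 100, successive
             * slices get work budgets of 50, 100, 200, 400, ... until the
             * collection finishes; the limit is reset to half of zealFrequency
             * again below once the collection reaches the sweep or compact
             * phase.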
6969              */
6970             if (!isIncrementalGCInProgress())
6971                 incrementalLimit = zealFrequency / 2;
6972             else
6973                 incrementalLimit *= 2;
6974             budget = SliceBudget(WorkBudget(incrementalLimit));
6975         } else {
6976             // This triggers incremental GC but is actually ignored by incrementalCollectSlice.
6977             budget = SliceBudget(WorkBudget(1));
6978         }
6979 
6980         if (!isIncrementalGCInProgress())
6981             invocationKind = GC_SHRINK;
6982         collect(false, budget, JS::gcreason::DEBUG_GC);
6983 
6984         /*
6985          * For multi-slice zeal, reset the slice size when we get to the sweep
6986          * or compact phases.
6987          */
6988         if (type == ZealIncrementalMultipleSlices) {
6989             if ((initialState == MARK && incrementalState == SWEEP) ||
6990                 (initialState == SWEEP && incrementalState == COMPACT))
6991             {
6992                 incrementalLimit = zealFrequency / 2;
6993             }
6994         }
6995     } else if (type == ZealCompactValue) {
6996         gc(GC_SHRINK, JS::gcreason::DEBUG_GC);
6997     } else {
6998         gc(GC_NORMAL, JS::gcreason::DEBUG_GC);
6999     }
7000 
7001 #endif
7002 }
7003 
7004 void
7005 GCRuntime::setValidate(bool enabled)
7006 {
7007     MOZ_ASSERT(!rt->isHeapMajorCollecting());
7008     validate = enabled;
7009 }
7010 
7011 void
7012 GCRuntime::setFullCompartmentChecks(bool enabled)
7013 {
7014     MOZ_ASSERT(!rt->isHeapMajorCollecting());
7015     fullCompartmentChecks = enabled;
7016 }
7017 
7018 #ifdef JS_GC_ZEAL
7019 bool
7020 GCRuntime::selectForMarking(JSObject* object)
7021 {
7022     MOZ_ASSERT(!rt->isHeapMajorCollecting());
7023     return selectedForMarking.append(object);
7024 }
7025 
7026 void
7027 GCRuntime::clearSelectedForMarking()
7028 {
7029     selectedForMarking.clearAndFree();
7030 }
7031 
7032 void
7033 GCRuntime::setDeterministic(bool enabled)
7034 {
7035     MOZ_ASSERT(!rt->isHeapMajorCollecting());
7036     deterministicOnly = enabled;
7037 }
7038 #endif
7039 
7040 #ifdef DEBUG
7041 
7042 /* Should only be called manually under gdb */
7043 void PreventGCDuringInteractiveDebug()
7044 {
7045     TlsPerThreadData.get()->suppressGC++;
7046 }
7047 
7048 #endif
7049 
7050 void
7051 js::ReleaseAllJITCode(FreeOp* fop)
7052 {
7053     /*
7054      * Scripts can entrain nursery things, inserting references to the script
7055      * into the store buffer. Clear the store buffer before discarding scripts.
7056      */
7057     fop->runtime()->gc.evictNursery();
7058 
7059     for (ZonesIter zone(fop->runtime(), SkipAtoms); !zone.done(); zone.next()) {
7060         if (!zone->jitZone())
7061             continue;
7062 
7063 #ifdef DEBUG
7064         /* Assert no baseline scripts are marked as active. */
7065         for (ZoneCellIter i(zone, AllocKind::SCRIPT); !i.done(); i.next()) {
7066             JSScript* script = i.get<JSScript>();
7067             MOZ_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
7068         }
7069 #endif
7070 
7071         /* Mark baseline scripts on the stack as active. */
7072         jit::MarkActiveBaselineScripts(zone);
7073 
7074         jit::InvalidateAll(fop, zone);
7075 
7076         for (ZoneCellIter i(zone, AllocKind::SCRIPT); !i.done(); i.next()) {
7077             JSScript* script = i.get<JSScript>();
7078             jit::FinishInvalidation(fop, script);
7079 
7080             /*
7081              * Discard baseline script if it's not marked as active. Note that
7082              * this also resets the active flag.
7083              */
7084             jit::FinishDiscardBaselineScript(fop, script);
7085         }
7086 
7087         zone->jitZone()->optimizedStubSpace()->free();
7088     }
7089 }
7090 
7091 void
7092 js::PurgeJITCaches(Zone* zone)
7093 {
7094     for (ZoneCellIterUnderGC i(zone, AllocKind::SCRIPT); !i.done(); i.next()) {
7095         JSScript* script = i.get<JSScript>();
7096 
7097         /* Discard Ion caches. */
7098         jit::PurgeCaches(script);
7099     }
7100 }
7101 
7102 void
7103 ArenaLists::normalizeBackgroundFinalizeState(AllocKind thingKind)
7104 {
7105     ArenaLists::BackgroundFinalizeState* bfs = &backgroundFinalizeState[thingKind];
7106     switch (*bfs) {
7107       case BFS_DONE:
7108         break;
7109       default:
7110         MOZ_ASSERT(!"Background finalization in progress, but it should not be.");
7111         break;
7112     }
7113 }
7114 
7115 void
7116 ArenaLists::adoptArenas(JSRuntime* rt, ArenaLists* fromArenaLists)
7117 {
7118     // GC should be inactive, but still take the lock as a kind of read fence.
7119     AutoLockGC lock(rt);
7120 
7121     fromArenaLists->purge();
7122 
7123     for (auto thingKind : AllAllocKinds()) {
7124         // When we enter a parallel section, we join the background
7125         // thread, and we do not run GC while in the parallel section,
7126         // so no finalizer should be active!
7127         normalizeBackgroundFinalizeState(thingKind);
7128         fromArenaLists->normalizeBackgroundFinalizeState(thingKind);
7129 
7130         ArenaList* fromList = &fromArenaLists->arenaLists[thingKind];
7131         ArenaList* toList = &arenaLists[thingKind];
7132         fromList->check();
7133         toList->check();
7134         ArenaHeader* next;
7135         for (ArenaHeader* fromHeader = fromList->head(); fromHeader; fromHeader = next) {
7136             // Copy fromHeader->next before releasing/reinserting.
7137             next = fromHeader->next;
7138 
7139             MOZ_ASSERT(!fromHeader->isEmpty());
7140             toList->insertAtCursor(fromHeader);
7141         }
7142         fromList->clear();
7143         toList->check();
7144     }
7145 }
7146 
7147 bool
7148 ArenaLists::containsArena(JSRuntime* rt, ArenaHeader* needle)
7149 {
7150     AutoLockGC lock(rt);
7151     ArenaList& list = arenaLists[needle->getAllocKind()];
7152     for (ArenaHeader* aheader = list.head(); aheader; aheader = aheader->next) {
7153         if (aheader == needle)
7154             return true;
7155     }
7156     return false;
7157 }
7158 
7159 
7160 AutoSuppressGC::AutoSuppressGC(ExclusiveContext* cx)
7161   : suppressGC_(cx->perThreadData->suppressGC)
7162 {
7163     suppressGC_++;
7164 }
7165 
7166 AutoSuppressGC::AutoSuppressGC(JSCompartment* comp)
7167   : suppressGC_(comp->runtimeFromMainThread()->mainThread.suppressGC)
7168 {
7169     suppressGC_++;
7170 }
7171 
7172 AutoSuppressGC::AutoSuppressGC(JSRuntime* rt)
7173   : suppressGC_(rt->mainThread.suppressGC)
7174 {
7175     suppressGC_++;
7176 }
7177 
7178 bool
7179 js::UninlinedIsInsideNursery(const gc::Cell* cell)
7180 {
7181     return IsInsideNursery(cell);
7182 }
7183 
7184 #ifdef DEBUG
7185 AutoDisableProxyCheck::AutoDisableProxyCheck(JSRuntime* rt)
7186   : gc(rt->gc)
7187 {
7188     gc.disableStrictProxyChecking();
7189 }
7190 
7191 AutoDisableProxyCheck::~AutoDisableProxyCheck()
7192 {
7193     gc.enableStrictProxyChecking();
7194 }
7195 
7196 JS_FRIEND_API(void)
7197 JS::AssertGCThingMustBeTenured(JSObject* obj)
7198 {
7199     MOZ_ASSERT(obj->isTenured() &&
7200                (!IsNurseryAllocable(obj->asTenured().getAllocKind()) || obj->getClass()->finalize));
7201 }
7202 
7203 JS_FRIEND_API(void)
7204 JS::AssertGCThingIsNotAnObjectSubclass(Cell* cell)
7205 {
7206     MOZ_ASSERT(cell);
7207     MOZ_ASSERT(cell->getTraceKind() != JS::TraceKind::Object);
7208 }
7209 
7210 JS_FRIEND_API(void)
7211 js::gc::AssertGCThingHasType(js::gc::Cell* cell, JS::TraceKind kind)
7212 {
7213     if (!cell)
7214         MOZ_ASSERT(kind == JS::TraceKind::Null);
7215     else if (IsInsideNursery(cell))
7216         MOZ_ASSERT(kind == JS::TraceKind::Object);
7217     else
7218         MOZ_ASSERT(MapAllocToTraceKind(cell->asTenured().getAllocKind()) == kind);
7219 }
7220 
7221 JS_PUBLIC_API(size_t)
7222 JS::GetGCNumber()
7223 {
7224     JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
7225     if (!rt)
7226         return 0;
7227     return rt->gc.gcNumber();
7228 }
7229 #endif
7230 
7231 #ifdef DEBUG
7232 JS::AutoAssertOnGC::AutoAssertOnGC()
7233   : gc(nullptr), gcNumber(0)
7234 {
7235     js::PerThreadData* data = js::TlsPerThreadData.get();
7236     if (data) {
7237         /*
7238          * GCs from off-thread will always assert, so off-thread is implicitly
7239          * AutoAssertOnGC. We still need to allow AutoAssertOnGC to be used in
7240          * code that works from both threads, however. We also use this to
7241          * annotate the off thread run loops.
7242          */
7243         JSRuntime* runtime = data->runtimeIfOnOwnerThread();
7244         if (runtime) {
7245             gc = &runtime->gc;
7246             gcNumber = gc->gcNumber();
7247             gc->enterUnsafeRegion();
7248         }
7249     }
7250 }
7251 
7252 JS::AutoAssertOnGC::AutoAssertOnGC(JSRuntime* rt)
7253   : gc(&rt->gc), gcNumber(rt->gc.gcNumber())
7254 {
7255     gc->enterUnsafeRegion();
7256 }
7257 
7258 JS::AutoAssertOnGC::~AutoAssertOnGC()
7259 {
7260     if (gc) {
7261         gc->leaveUnsafeRegion();
7262 
7263         /*
7264          * The following backstop assertion should never fire: if we bumped the
7265          * gcNumber, we should have asserted because inUnsafeRegion was true.
7266          */
7267         MOZ_ASSERT(gcNumber == gc->gcNumber(), "GC ran inside an AutoAssertOnGC scope.");
7268     }
7269 }
7270 
7271 /* static */ void
7272 JS::AutoAssertOnGC::VerifyIsSafeToGC(JSRuntime* rt)
7273 {
7274     if (rt->gc.isInsideUnsafeRegion())
7275         MOZ_CRASH("[AutoAssertOnGC] possible GC in GC-unsafe region");
7276 }
7277 
7278 JS::AutoAssertNoAlloc::AutoAssertNoAlloc(JSRuntime* rt)
7279   : gc(nullptr)
7280 {
7281     disallowAlloc(rt);
7282 }
7283 
7284 void JS::AutoAssertNoAlloc::disallowAlloc(JSRuntime* rt)
7285 {
7286     MOZ_ASSERT(!gc);
7287     gc = &rt->gc;
7288     gc->disallowAlloc();
7289 }
7290 
7291 JS::AutoAssertNoAlloc::~AutoAssertNoAlloc()
7292 {
7293     if (gc)
7294         gc->allowAlloc();
7295 }
7296 #endif
7297 
7298 JS::AutoAssertGCCallback::AutoAssertGCCallback(JSObject* obj)
7299   : AutoSuppressGCAnalysis()
7300 {
7301     MOZ_ASSERT(obj->runtimeFromMainThread()->isHeapCollecting());
7302 }
7303 
7304 JS_FRIEND_API(const char*)
7305 JS::GCTraceKindToAscii(JS::TraceKind kind)
7306 {
7307     switch(kind) {
7308 #define MAP_NAME(name, _0, _1) case JS::TraceKind::name: return #name;
7309 JS_FOR_EACH_TRACEKIND(MAP_NAME);
7310 #undef MAP_NAME
7311       default: return "Invalid";
7312     }
7313 }
7314 
7315 JS::GCCellPtr::GCCellPtr(const Value& v)
7316   : ptr(0)
7317 {
7318     if (v.isString())
7319         ptr = checkedCast(v.toString(), JS::TraceKind::String);
7320     else if (v.isObject())
7321         ptr = checkedCast(&v.toObject(), JS::TraceKind::Object);
7322     else if (v.isSymbol())
7323         ptr = checkedCast(v.toSymbol(), JS::TraceKind::Symbol);
7324     else
7325         ptr = checkedCast(nullptr, JS::TraceKind::Null);
7326 }
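
// Illustrative sketch (not engine code): a GCCellPtr built from a Value can be
// passed to the generic pre-barrier entry point defined later in this file.
// Here |v| is assumed to be a live JS::Value in the caller's scope.
//
//     JS::GCCellPtr cell(v);
//     if (cell)
//         JS::IncrementalReferenceBarrier(cell);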
7327 
7328 JS::TraceKind
7329 JS::GCCellPtr::outOfLineKind() const
7330 {
7331     MOZ_ASSERT((ptr & OutOfLineTraceKindMask) == OutOfLineTraceKindMask);
7332     MOZ_ASSERT(asCell()->isTenured());
7333     return MapAllocToTraceKind(asCell()->asTenured().getAllocKind());
7334 }
7335 
7336 bool
7337 JS::GCCellPtr::mayBeOwnedByOtherRuntime() const
7338 {
7339     return (is<JSString>() && as<JSString>().isPermanentAtom()) ||
7340            (is<Symbol>() && as<Symbol>().isWellKnownSymbol());
7341 }
7342 
7343 #ifdef JSGC_HASH_TABLE_CHECKS
7344 void
7345 js::gc::CheckHashTablesAfterMovingGC(JSRuntime* rt)
7346 {
7347     /*
7348      * Check that internal hash tables no longer have any pointers to things
7349      * that have been moved.
7350      */
7351     for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
7352         zone->checkUniqueIdTableAfterMovingGC();
7353     }
7354     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
7355         c->objectGroups.checkTablesAfterMovingGC();
7356         c->checkInitialShapesTableAfterMovingGC();
7357         c->checkWrapperMapAfterMovingGC();
7358         c->checkBaseShapeTableAfterMovingGC();
7359         if (c->debugScopes)
7360             c->debugScopes->checkHashTablesAfterMovingGC(rt);
7361     }
7362 }
7363 #endif
7364 
7365 JS_PUBLIC_API(void)
7366 JS::PrepareZoneForGC(Zone* zone)
7367 {
7368     zone->scheduleGC();
7369 }
7370 
7371 JS_PUBLIC_API(void)
7372 JS::PrepareForFullGC(JSRuntime* rt)
7373 {
7374     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
7375         zone->scheduleGC();
7376 }
7377 
7378 JS_PUBLIC_API(void)
7379 JS::PrepareForIncrementalGC(JSRuntime* rt)
7380 {
7381     if (!JS::IsIncrementalGCInProgress(rt))
7382         return;
7383 
7384     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
7385         if (zone->wasGCStarted())
7386             PrepareZoneForGC(zone);
7387     }
7388 }
7389 
7390 JS_PUBLIC_API(bool)
7391 JS::IsGCScheduled(JSRuntime* rt)
7392 {
7393     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
7394         if (zone->isGCScheduled())
7395             return true;
7396     }
7397 
7398     return false;
7399 }
7400 
7401 JS_PUBLIC_API(void)
7402 JS::SkipZoneForGC(Zone* zone)
7403 {
7404     zone->unscheduleGC();
7405 }
7406 
7407 JS_PUBLIC_API(void)
7408 JS::GCForReason(JSRuntime* rt, JSGCInvocationKind gckind, gcreason::Reason reason)
7409 {
7410     MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
7411     rt->gc.gc(gckind, reason);
7412 }
7413 
7414 JS_PUBLIC_API(void)
7415 JS::StartIncrementalGC(JSRuntime* rt, JSGCInvocationKind gckind, gcreason::Reason reason, int64_t millis)
7416 {
7417     MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
7418     rt->gc.startGC(gckind, reason, millis);
7419 }
7420 
7421 JS_PUBLIC_API(void)
7422 JS::IncrementalGCSlice(JSRuntime* rt, gcreason::Reason reason, int64_t millis)
7423 {
7424     rt->gc.gcSlice(reason, millis);
7425 }
7426 
7427 JS_PUBLIC_API(void)
7428 JS::FinishIncrementalGC(JSRuntime* rt, gcreason::Reason reason)
7429 {
7430     rt->gc.finishGC(reason);
7431 }
7432 
7433 JS_PUBLIC_API(void)
7434 JS::AbortIncrementalGC(JSRuntime* rt)
7435 {
7436     rt->gc.abortGC();
7437 }
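
/*
 * Illustrative sketch (not engine code): how an embedder might drive an
 * incremental collection using the entry points above. The 10 ms slice budget
 * and the use of gcreason::API are arbitrary choices for the example.
 *
 *     JS::PrepareForFullGC(rt);
 *     JS::StartIncrementalGC(rt, GC_NORMAL, JS::gcreason::API, 10);
 *     while (JS::IsIncrementalGCInProgress(rt)) {
 *         // ... let the mutator run for a while ...
 *         JS::PrepareForIncrementalGC(rt);
 *         JS::IncrementalGCSlice(rt, JS::gcreason::API, 10);
 *     }
 */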
7438 
7439 char16_t*
7440 JS::GCDescription::formatSliceMessage(JSRuntime* rt) const
7441 {
7442     UniqueChars cstr = rt->gc.stats.formatCompactSliceMessage();
7443 
7444     size_t nchars = strlen(cstr.get());
7445     UniquePtr<char16_t, JS::FreePolicy> out(js_pod_malloc<char16_t>(nchars + 1));
7446     if (!out)
7447         return nullptr;
7448     out.get()[nchars] = 0;
7449 
7450     CopyAndInflateChars(out.get(), cstr.get(), nchars);
7451     return out.release();
7452 }
7453 
7454 char16_t*
7455 JS::GCDescription::formatSummaryMessage(JSRuntime* rt) const
7456 {
7457     UniqueChars cstr = rt->gc.stats.formatCompactSummaryMessage();
7458 
7459     size_t nchars = strlen(cstr.get());
7460     UniquePtr<char16_t, JS::FreePolicy> out(js_pod_malloc<char16_t>(nchars + 1));
7461     if (!out)
7462         return nullptr;
7463     out.get()[nchars] = 0;
7464 
7465     CopyAndInflateChars(out.get(), cstr.get(), nchars);
7466     return out.release();
7467 }
7468 
7469 JS::dbg::GarbageCollectionEvent::Ptr
7470 JS::GCDescription::toGCEvent(JSRuntime* rt) const
7471 {
7472     return JS::dbg::GarbageCollectionEvent::Create(rt, rt->gc.stats, rt->gc.majorGCCount());
7473 }
7474 
7475 char16_t*
7476 JS::GCDescription::formatJSON(JSRuntime* rt, uint64_t timestamp) const
7477 {
7478     UniqueChars cstr = rt->gc.stats.formatJsonMessage(timestamp);
7479 
7480     size_t nchars = strlen(cstr.get());
7481     UniquePtr<char16_t, JS::FreePolicy> out(js_pod_malloc<char16_t>(nchars + 1));
7482     if (!out)
7483         return nullptr;
7484     out.get()[nchars] = 0;
7485 
7486     CopyAndInflateChars(out.get(), cstr.get(), nchars);
7487     return out.release();
7488 }
7489 
7490 JS_PUBLIC_API(JS::GCSliceCallback)
7491 JS::SetGCSliceCallback(JSRuntime* rt, GCSliceCallback callback)
7492 {
7493     return rt->gc.setSliceCallback(callback);
7494 }
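
/*
 * Illustrative sketch, assuming the GCSliceCallback signature declared in
 * js/GCAPI.h (runtime, progress, description): a slice callback can use the
 * GCDescription formatters defined above to report collector activity.
 * MySliceCallback is a hypothetical embedder function.
 *
 *   void MySliceCallback(JSRuntime* rt, JS::GCProgress progress,
 *                        const JS::GCDescription& desc)
 *   {
 *       if (progress == JS::GC_CYCLE_END) {
 *           char16_t* msg = desc.formatSummaryMessage(rt);
 *           // ... report msg somewhere, then release it with js_free(msg) ...
 *       }
 *   }
 *
 *   JS::SetGCSliceCallback(rt, MySliceCallback);
 */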
7495 
7496 JS_PUBLIC_API(void)
7497 JS::DisableIncrementalGC(JSRuntime* rt)
7498 {
7499     rt->gc.disallowIncrementalGC();
7500 }
7501 
7502 JS_PUBLIC_API(bool)
7503 JS::IsIncrementalGCEnabled(JSRuntime* rt)
7504 {
7505     return rt->gc.isIncrementalGCEnabled();
7506 }
7507 
7508 JS_PUBLIC_API(bool)
7509 JS::IsIncrementalGCInProgress(JSRuntime* rt)
7510 {
7511     return rt->gc.isIncrementalGCInProgress() && !rt->gc.isVerifyPreBarriersEnabled();
7512 }
7513 
7514 JS_PUBLIC_API(bool)
7515 JS::IsIncrementalBarrierNeeded(JSRuntime* rt)
7516 {
7517     if (rt->isHeapBusy())
7518         return false;
7519 
7520     auto state = rt->gc.state();
7521     return state != gc::NO_INCREMENTAL && state <= gc::SWEEP;
7522 }
7523 
7524 JS_PUBLIC_API(bool)
7525 JS::IsIncrementalBarrierNeeded(JSContext* cx)
7526 {
7527     return IsIncrementalBarrierNeeded(cx->runtime());
7528 }
7529 
7530 struct IncrementalReferenceBarrierFunctor {
7531     template <typename T> void operator()(T* t) { T::writeBarrierPre(t); }
7532 };
7533 
7534 JS_PUBLIC_API(void)
7535 JS::IncrementalReferenceBarrier(GCCellPtr thing)
7536 {
7537     if (!thing)
7538         return;
7539 
7540     DispatchTyped(IncrementalReferenceBarrierFunctor(), thing);
7541 }
7542 
7543 JS_PUBLIC_API(void)
7544 JS::IncrementalValueBarrier(const Value& v)
7545 {
7546     js::HeapValue::writeBarrierPre(v);
7547 }
7548 
7549 JS_PUBLIC_API(void)
7550 JS::IncrementalObjectBarrier(JSObject* obj)
7551 {
7552     if (!obj)
7553         return;
7554 
7555     MOZ_ASSERT(!obj->zone()->runtimeFromMainThread()->isHeapMajorCollecting());
7556 
7557     JSObject::writeBarrierPre(obj);
7558 }
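
/*
 * Illustrative sketch (not part of the engine): an embedder that stores a
 * bare JSObject* outside the GC heap must apply the pre-write barrier to the
 * old value before overwriting the edge, so incremental marking does not miss
 * objects reachable only through the overwritten pointer. SetCachedObject is
 * a hypothetical helper.
 *
 *   void SetCachedObject(JSRuntime* rt, JSObject** slot, JSObject* newObj)
 *   {
 *       if (JS::IsIncrementalBarrierNeeded(rt))
 *           JS::IncrementalObjectBarrier(*slot);  // barrier on the old value
 *       *slot = newObj;
 *   }
 */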
7559 
7560 JS_PUBLIC_API(bool)
7561 JS::WasIncrementalGC(JSRuntime* rt)
7562 {
7563     return rt->gc.isIncrementalGc();
7564 }
7565 
7566 JS::AutoDisableGenerationalGC::AutoDisableGenerationalGC(JSRuntime* rt)
7567   : gc(&rt->gc)
7568 {
7569     gc->disableGenerationalGC();
7570 }
7571 
7572 JS::AutoDisableGenerationalGC::~AutoDisableGenerationalGC()
7573 {
7574     gc->enableGenerationalGC();
7575 }
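
/*
 * Illustrative sketch: generational (nursery) GC can be suppressed over a
 * dynamic scope with the RAII guard above; it is re-enabled when the guard is
 * destroyed.
 *
 *   {
 *       JS::AutoDisableGenerationalGC noNursery(rt);
 *       // ... code that must run without generational GC ...
 *   }  // generational GC enabled again here
 */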
7576 
7577 JS_PUBLIC_API(bool)
7578 JS::IsGenerationalGCEnabled(JSRuntime* rt)
7579 {
7580     return rt->gc.isGenerationalGCEnabled();
7581 }
7582 
7583 uint64_t
7584 js::gc::NextCellUniqueId(JSRuntime* rt)
7585 {
7586     return rt->gc.nextCellUniqueId();
7587 }
7588 
7589 namespace js {
7590 namespace gc {
7591 namespace MemInfo {
7592 
7593 static bool
7594 GCBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7595 {
7596     CallArgs args = CallArgsFromVp(argc, vp);
7597     args.rval().setNumber(double(cx->runtime()->gc.usage.gcBytes()));
7598     return true;
7599 }
7600 
7601 static bool
7602 GCMaxBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7603 {
7604     CallArgs args = CallArgsFromVp(argc, vp);
7605     args.rval().setNumber(double(cx->runtime()->gc.tunables.gcMaxBytes()));
7606     return true;
7607 }
7608 
7609 static bool
7610 MallocBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7611 {
7612     CallArgs args = CallArgsFromVp(argc, vp);
7613     args.rval().setNumber(double(cx->runtime()->gc.getMallocBytes()));
7614     return true;
7615 }
7616 
7617 static bool
7618 MaxMallocGetter(JSContext* cx, unsigned argc, Value* vp)
7619 {
7620     CallArgs args = CallArgsFromVp(argc, vp);
7621     args.rval().setNumber(double(cx->runtime()->gc.maxMallocBytesAllocated()));
7622     return true;
7623 }
7624 
7625 static bool
7626 GCHighFreqGetter(JSContext* cx, unsigned argc, Value* vp)
7627 {
7628     CallArgs args = CallArgsFromVp(argc, vp);
7629     args.rval().setBoolean(cx->runtime()->gc.schedulingState.inHighFrequencyGCMode());
7630     return true;
7631 }
7632 
7633 static bool
7634 GCNumberGetter(JSContext* cx, unsigned argc, Value* vp)
7635 {
7636     CallArgs args = CallArgsFromVp(argc, vp);
7637     args.rval().setNumber(double(cx->runtime()->gc.gcNumber()));
7638     return true;
7639 }
7640 
7641 static bool
7642 MajorGCCountGetter(JSContext* cx, unsigned argc, Value* vp)
7643 {
7644     CallArgs args = CallArgsFromVp(argc, vp);
7645     args.rval().setNumber(double(cx->runtime()->gc.majorGCCount()));
7646     return true;
7647 }
7648 
7649 static bool
7650 MinorGCCountGetter(JSContext* cx, unsigned argc, Value* vp)
7651 {
7652     CallArgs args = CallArgsFromVp(argc, vp);
7653     args.rval().setNumber(double(cx->runtime()->gc.minorGCCount()));
7654     return true;
7655 }
7656 
7657 static bool
7658 ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7659 {
7660     CallArgs args = CallArgsFromVp(argc, vp);
7661     args.rval().setNumber(double(cx->zone()->usage.gcBytes()));
7662     return true;
7663 }
7664 
7665 static bool
7666 ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7667 {
7668     CallArgs args = CallArgsFromVp(argc, vp);
7669     args.rval().setNumber(double(cx->zone()->threshold.gcTriggerBytes()));
7670     return true;
7671 }
7672 
7673 static bool
7674 ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp)
7675 {
7676     CallArgs args = CallArgsFromVp(argc, vp);
7677     args.rval().setNumber(double(cx->zone()->threshold.allocTrigger(cx->runtime()->gc.schedulingState.inHighFrequencyGCMode())));
7678     return true;
7679 }
7680 
7681 static bool
7682 ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7683 {
7684     CallArgs args = CallArgsFromVp(argc, vp);
7685     args.rval().setNumber(double(cx->zone()->gcMallocBytes));
7686     return true;
7687 }
7688 
7689 static bool
7690 ZoneMaxMallocGetter(JSContext* cx, unsigned argc, Value* vp)
7691 {
7692     CallArgs args = CallArgsFromVp(argc, vp);
7693     args.rval().setNumber(double(cx->zone()->gcMaxMallocBytes));
7694     return true;
7695 }
7696 
7697 static bool
7698 ZoneGCDelayBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7699 {
7700     CallArgs args = CallArgsFromVp(argc, vp);
7701     args.rval().setNumber(double(cx->zone()->gcDelayBytes));
7702     return true;
7703 }
7704 
7705 static bool
7706 ZoneGCHeapGrowthFactorGetter(JSContext* cx, unsigned argc, Value* vp)
7707 {
7708     CallArgs args = CallArgsFromVp(argc, vp);
7709     args.rval().setNumber(cx->zone()->threshold.gcHeapGrowthFactor());
7710     return true;
7711 }
7712 
7713 static bool
7714 ZoneGCNumberGetter(JSContext* cx, unsigned argc, Value* vp)
7715 {
7716     CallArgs args = CallArgsFromVp(argc, vp);
7717     args.rval().setNumber(double(cx->zone()->gcNumber()));
7718     return true;
7719 }
7720 
7721 #ifdef JS_MORE_DETERMINISTIC
7722 static bool
7723 DummyGetter(JSContext* cx, unsigned argc, Value* vp)
7724 {
7725     CallArgs args = CallArgsFromVp(argc, vp);
7726     args.rval().setUndefined();
7727     return true;
7728 }
7729 #endif
7730 
7731 } /* namespace MemInfo */
7732 
7733 JSObject*
7734 NewMemoryInfoObject(JSContext* cx)
7735 {
7736     RootedObject obj(cx, JS_NewObject(cx, nullptr));
    if (!obj)
        return nullptr;
7737 
7738     using namespace MemInfo;
7739     struct NamedGetter {
7740         const char* name;
7741         JSNative getter;
7742     } getters[] = {
7743         { "gcBytes", GCBytesGetter },
7744         { "gcMaxBytes", GCMaxBytesGetter },
7745         { "mallocBytesRemaining", MallocBytesGetter },
7746         { "maxMalloc", MaxMallocGetter },
7747         { "gcIsHighFrequencyMode", GCHighFreqGetter },
7748         { "gcNumber", GCNumberGetter },
7749         { "majorGCCount", MajorGCCountGetter },
7750         { "minorGCCount", MinorGCCountGetter }
7751     };
7752 
7753     for (auto pair : getters) {
7754 #ifdef JS_MORE_DETERMINISTIC
7755         JSNative getter = DummyGetter;
7756 #else
7757         JSNative getter = pair.getter;
7758 #endif
7759         if (!JS_DefineProperty(cx, obj, pair.name, UndefinedHandleValue,
7760                                JSPROP_ENUMERATE | JSPROP_SHARED,
7761                                getter, nullptr))
7762         {
7763             return nullptr;
7764         }
7765     }
7766 
7767     RootedObject zoneObj(cx, JS_NewObject(cx, nullptr));
7768     if (!zoneObj)
7769         return nullptr;
7770 
7771     if (!JS_DefineProperty(cx, obj, "zone", zoneObj, JSPROP_ENUMERATE))
7772         return nullptr;
7773 
7774     struct NamedZoneGetter {
7775         const char* name;
7776         JSNative getter;
7777     } zoneGetters[] = {
7778         { "gcBytes", ZoneGCBytesGetter },
7779         { "gcTriggerBytes", ZoneGCTriggerBytesGetter },
7780         { "gcAllocTrigger", ZoneGCAllocTriggerGetter },
7781         { "mallocBytesRemaining", ZoneMallocBytesGetter },
7782         { "maxMalloc", ZoneMaxMallocGetter },
7783         { "delayBytes", ZoneGCDelayBytesGetter },
7784         { "heapGrowthFactor", ZoneGCHeapGrowthFactorGetter },
7785         { "gcNumber", ZoneGCNumberGetter }
7786     };
7787 
7788     for (auto pair : zoneGetters) {
7789 #ifdef JS_MORE_DETERMINISTIC
7790         JSNative getter = DummyGetter;
7791 #else
7792         JSNative getter = pair.getter;
7793 #endif
7794         if (!JS_DefineProperty(cx, zoneObj, pair.name, UndefinedHandleValue,
7795                                JSPROP_ENUMERATE | JSPROP_SHARED,
7796                                getter, nullptr))
7797         {
7798             return nullptr;
7799         }
7800     }
7801 
7802     return obj;
7803 }
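
/*
 * For reference, the object built above has the following shape (property
 * names taken from the getter tables; in JS_MORE_DETERMINISTIC builds every
 * property reads as undefined):
 *
 *   {
 *     gcBytes, gcMaxBytes, mallocBytesRemaining, maxMalloc,
 *     gcIsHighFrequencyMode, gcNumber, majorGCCount, minorGCCount,
 *     zone: { gcBytes, gcTriggerBytes, gcAllocTrigger, mallocBytesRemaining,
 *             maxMalloc, delayBytes, heapGrowthFactor, gcNumber }
 *   }
 */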
7804 
7805 } /* namespace gc */
7806 } /* namespace js */
7807