1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2  * vim: set ts=8 sts=4 et sw=4 tw=99:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 /*
8  * This code implements an incremental mark-and-sweep garbage collector, with
9  * most sweeping carried out in the background on a parallel thread.
10  *
11  * Full vs. zone GC
12  * ----------------
13  *
14  * The collector can collect all zones at once, or a subset. These types of
15  * collection are referred to as a full GC and a zone GC respectively.
16  *
17  * The atoms zone is only collected in a full GC since objects in any zone may
18  * have pointers to atoms, and these are not recorded in the cross compartment
19  * pointer map. Also, the atoms zone is not collected if any thread has an
20  * AutoKeepAtoms instance on the stack, or there are any exclusive threads using
21  * the runtime.
22  *
23  * It is possible for an incremental collection that started out as a full GC to
24  * become a zone GC if new zones are created during the course of the
25  * collection.
26  *
27  * Incremental collection
28  * ----------------------
29  *
30  * For a collection to be carried out incrementally the following conditions
31  * must be met:
32  *  - the collection must be run by calling js::GCSlice() rather than js::GC()
33  *  - the GC mode must have been set to JSGC_MODE_INCREMENTAL with
34  *    JS_SetGCParameter()
35  *  - no thread may have an AutoKeepAtoms instance on the stack
36  *
37  * The last condition is an engine-internal mechanism to ensure that incremental
38  * collection is not carried out without the correct barriers being implemented.
39  * For more information see 'Incremental marking' below.
40  *
41  * If the collection is not incremental, all foreground activity happens inside
42  * a single call to GC() or GCSlice(). However the collection is not complete
43  * until the background sweeping activity has finished.
44  *
45  * An incremental collection proceeds as a series of slices, interleaved with
46  * mutator activity, i.e. running JavaScript code. Slices are limited by a time
47  * budget. The slice finishes as soon as possible after the requested time has
48  * passed.
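 *
 * For example (a sketch, with illustrative names, of the pattern used by the
 * finalization loops later in this file), incremental work consults its
 * SliceBudget as it proceeds:
 *
 *   while (thereIsMoreWork()) {
 *       doSomeWork();
 *       budget.step(amountOfWorkDone);
 *       if (budget.isOverBudget())
 *           return NotFinished;   // end this slice; resume in the next one
 *   }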
49  *
50  * Collector states
51  * ----------------
52  *
53  * The collector proceeds through the following states, the current state being
54  * held in JSRuntime::gcIncrementalState:
55  *
56  *  - MarkRoots  - marks the stack and other roots
57  *  - Mark       - incrementally marks reachable things
58  *  - Sweep      - sweeps zones in groups and continues marking unswept zones
59  *  - Finalize   - performs background finalization, concurrent with mutator
60  *  - Compact    - incrementally compacts by zone
61  *  - Decommit   - performs background decommit and chunk removal
62  *
 * The MarkRoots activity always takes place in the first slice. The next two
 * states, Mark and Sweep, can each take place over one or more slices.
 *
 * In other words, an incremental collection proceeds like this:
67  *
68  * Slice 1:   MarkRoots:  Roots pushed onto the mark stack.
69  *            Mark:       The mark stack is processed by popping an element,
70  *                        marking it, and pushing its children.
71  *
72  *          ... JS code runs ...
73  *
74  * Slice 2:   Mark:       More mark stack processing.
75  *
76  *          ... JS code runs ...
77  *
78  * Slice n-1: Mark:       More mark stack processing.
79  *
80  *          ... JS code runs ...
81  *
82  * Slice n:   Mark:       Mark stack is completely drained.
83  *            Sweep:      Select first group of zones to sweep and sweep them.
84  *
85  *          ... JS code runs ...
86  *
87  * Slice n+1: Sweep:      Mark objects in unswept zones that were newly
88  *                        identified as alive (see below). Then sweep more zone
89  *                        groups.
90  *
91  *          ... JS code runs ...
92  *
93  * Slice n+2: Sweep:      Mark objects in unswept zones that were newly
94  *                        identified as alive. Then sweep more zone groups.
95  *
96  *          ... JS code runs ...
97  *
98  * Slice m:   Sweep:      Sweeping is finished, and background sweeping
99  *                        started on the helper thread.
100  *
101  *          ... JS code runs, remaining sweeping done on background thread ...
102  *
103  * When background sweeping finishes the GC is complete.
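 *
 * As pseudocode (names here are illustrative, not the real driver API), the
 * whole sequence is:
 *
 *   gcSlice(budget);                      // first slice: MarkRoots + Mark
 *   while (!incrementalCollectionDone()) {
 *       runSomeJS();                      // mutator runs between slices
 *       gcSlice(budget);                  // Mark, then Sweep and later phases
 *   }
 *   // ... background sweeping finishes on the helper thread ...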
104  *
105  * Incremental marking
106  * -------------------
107  *
108  * Incremental collection requires close collaboration with the mutator (i.e.,
109  * JS code) to guarantee correctness.
110  *
111  *  - During an incremental GC, if a memory location (except a root) is written
112  *    to, then the value it previously held must be marked. Write barriers
113  *    ensure this.
114  *
115  *  - Any object that is allocated during incremental GC must start out marked.
116  *
117  *  - Roots are marked in the first slice and hence don't need write barriers.
118  *    Roots are things like the C stack and the VM stack.
119  *
120  * The problem that write barriers solve is that between slices the mutator can
121  * change the object graph. We must ensure that it cannot do this in such a way
122  * that makes us fail to mark a reachable object (marking an unreachable object
123  * is tolerable).
124  *
125  * We use a snapshot-at-the-beginning algorithm to do this. This means that we
126  * promise to mark at least everything that is reachable at the beginning of
127  * collection. To implement it we mark the old contents of every non-root memory
128  * location written to by the mutator while the collection is in progress, using
129  * write barriers. This is described in gc/Barrier.h.
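 *
 * Conceptually (illustrative pseudocode only; the real, templated barriers
 * live in gc/Barrier.h), a pre-write barrier looks like:
 *
 *   void writeBarrierPre(Cell* oldValue) {
 *       if (incrementalMarkingInProgress() && oldValue)
 *           mark(oldValue);   // preserve the snapshot's view of the graph
 *   }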
130  *
131  * Incremental sweeping
132  * --------------------
133  *
134  * Sweeping is difficult to do incrementally because object finalizers must be
135  * run at the start of sweeping, before any mutator code runs. The reason is
136  * that some objects use their finalizers to remove themselves from caches. If
137  * mutator code was allowed to run after the start of sweeping, it could observe
138  * the state of the cache and create a new reference to an object that was just
139  * about to be destroyed.
140  *
 * Sweeping all finalizable objects in one go would introduce long pauses, so
 * instead sweeping is broken up into groups of zones. Zones that are not yet
 * being swept are still marked, so the issue above does not apply.
144  *
 * The order of sweeping is restricted by cross compartment pointers. For
 * example, say that object |a| from zone A points to object |b| in zone B and
147  * neither object was marked when we transitioned to the Sweep phase. Imagine we
148  * sweep B first and then return to the mutator. It's possible that the mutator
149  * could cause |a| to become alive through a read barrier (perhaps it was a
150  * shape that was accessed via a shape table). Then we would need to mark |b|,
151  * which |a| points to, but |b| has already been swept.
152  *
153  * So if there is such a pointer then marking of zone B must not finish before
154  * marking of zone A.  Pointers which form a cycle between zones therefore
155  * restrict those zones to being swept at the same time, and these are found
156  * using Tarjan's algorithm for finding the strongly connected components of a
157  * graph.
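 *
 * Sketched as pseudocode (the real implementation uses gc/FindSCCs.h):
 *
 *   for each cross compartment pointer from zone A to zone B:
 *       add edge A -> B to the zone graph
 *   sweepGroups = stronglyConnectedComponents(zoneGraph)   // Tarjan
 *
 * Zones that participate in a pointer cycle end up in the same component and
 * are swept together; the remaining edges constrain the order in which the
 * groups are swept.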
158  *
159  * GC things without finalizers, and things with finalizers that are able to run
160  * in the background, are swept on the background thread. This accounts for most
161  * of the sweeping work.
162  *
163  * Reset
164  * -----
165  *
166  * During incremental collection it is possible, although unlikely, for
167  * conditions to change such that incremental collection is no longer safe. In
168  * this case, the collection is 'reset' by ResetIncrementalGC(). If we are in
169  * the mark state, this just stops marking, but if we have started sweeping
170  * already, we continue until we have swept the current zone group. Following a
171  * reset, a new non-incremental collection is started.
172  *
173  * Compacting GC
174  * -------------
175  *
176  * Compacting GC happens at the end of a major GC as part of the last slice.
177  * There are three parts:
178  *
179  *  - Arenas are selected for compaction.
180  *  - The contents of those arenas are moved to new arenas.
181  *  - All references to moved things are updated.
182  */
183 
184 #include "jsgcinlines.h"
185 
186 #include "mozilla/ArrayUtils.h"
187 #include "mozilla/DebugOnly.h"
188 #include "mozilla/MacroForEach.h"
189 #include "mozilla/MemoryReporting.h"
190 #include "mozilla/Move.h"
191 #include "mozilla/ScopeExit.h"
192 
193 #include <ctype.h>
194 #include <string.h>
195 #ifndef XP_WIN
196 # include <sys/mman.h>
197 # include <unistd.h>
198 #endif
199 
200 #include "jsapi.h"
201 #include "jsatom.h"
202 #include "jscntxt.h"
203 #include "jscompartment.h"
204 #include "jsfriendapi.h"
205 #include "jsobj.h"
206 #include "jsprf.h"
207 #include "jsscript.h"
208 #include "jstypes.h"
209 #include "jsutil.h"
210 #include "jswatchpoint.h"
211 #include "jsweakmap.h"
212 #ifdef XP_WIN
213 # include "jswin.h"
214 #endif
215 
216 #include "gc/FindSCCs.h"
217 #include "gc/GCInternals.h"
218 #include "gc/GCTrace.h"
219 #include "gc/Marking.h"
220 #include "gc/Memory.h"
221 #include "gc/Policy.h"
222 #include "jit/BaselineJIT.h"
223 #include "jit/IonCode.h"
224 #include "jit/JitcodeMap.h"
225 #include "js/SliceBudget.h"
226 #include "proxy/DeadObjectProxy.h"
227 #include "vm/Debugger.h"
228 #include "vm/ProxyObject.h"
229 #include "vm/Shape.h"
230 #include "vm/SPSProfiler.h"
231 #include "vm/String.h"
232 #include "vm/Symbol.h"
233 #include "vm/Time.h"
234 #include "vm/TraceLogging.h"
235 #include "vm/WrapperObject.h"
236 
237 #include "jsobjinlines.h"
238 #include "jsscriptinlines.h"
239 
240 #include "vm/Stack-inl.h"
241 #include "vm/String-inl.h"
242 
243 using namespace js;
244 using namespace js::gc;
245 
246 using mozilla::ArrayLength;
247 using mozilla::Get;
248 using mozilla::HashCodeScrambler;
249 using mozilla::Maybe;
250 using mozilla::Swap;
251 
252 using JS::AutoGCRooter;
253 
254 /* Increase the IGC marking slice time if we are in highFrequencyGC mode. */
255 static const int IGC_MARK_SLICE_MULTIPLIER = 2;
256 
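// Map from a number of fixed slots to the smallest object AllocKind that can
// hold them; for example, slotsToThingKind[5] is AllocKind::OBJECT8.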
257 const AllocKind gc::slotsToThingKind[] = {
258     /*  0 */ AllocKind::OBJECT0,  AllocKind::OBJECT2,  AllocKind::OBJECT2,  AllocKind::OBJECT4,
259     /*  4 */ AllocKind::OBJECT4,  AllocKind::OBJECT8,  AllocKind::OBJECT8,  AllocKind::OBJECT8,
260     /*  8 */ AllocKind::OBJECT8,  AllocKind::OBJECT12, AllocKind::OBJECT12, AllocKind::OBJECT12,
261     /* 12 */ AllocKind::OBJECT12, AllocKind::OBJECT16, AllocKind::OBJECT16, AllocKind::OBJECT16,
262     /* 16 */ AllocKind::OBJECT16
263 };
264 
265 static_assert(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
266               "We have defined a slot count for each kind.");
267 
268 #define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType) \
269     static_assert(sizeof(sizedType) >= SortedArenaList::MinThingSize, \
270                   #sizedType " is smaller than SortedArenaList::MinThingSize!"); \
271     static_assert(sizeof(sizedType) >= sizeof(FreeSpan), \
272                   #sizedType " is smaller than FreeSpan"); \
273     static_assert(sizeof(sizedType) % CellSize == 0, \
274                   "Size of " #sizedType " is not a multiple of CellSize");
275 FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
276 #undef CHECK_THING_SIZE
277 
278 const uint32_t Arena::ThingSizes[] = {
279 #define EXPAND_THING_SIZE(allocKind, traceKind, type, sizedType) \
280     sizeof(sizedType),
281 FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
282 #undef EXPAND_THING_SIZE
283 };
284 
285 FreeSpan ArenaLists::placeholder;
289 
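// OFFSET computes where the first thing of a given type starts within an
// arena: the unusable remainder, (ArenaSize - ArenaHeaderSize) % sizeof(type),
// is placed directly after the arena header so that the last thing ends flush
// with the end of the arena. COUNT (below) gives the matching per-arena
// capacity. As a purely illustrative example, if ArenaSize - ArenaHeaderSize
// were 4080 and sizeof(type) were 32, the offset would be ArenaHeaderSize + 16
// and the count would be 127.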
290 #define OFFSET(type) uint32_t(ArenaHeaderSize + (ArenaSize - ArenaHeaderSize) % sizeof(type))
291 
292 const uint32_t Arena::FirstThingOffsets[] = {
293 #define EXPAND_FIRST_THING_OFFSET(allocKind, traceKind, type, sizedType) \
294     OFFSET(sizedType),
295 FOR_EACH_ALLOCKIND(EXPAND_FIRST_THING_OFFSET)
296 #undef EXPAND_FIRST_THING_OFFSET
297 };
298 
299 #undef OFFSET
300 
301 #define COUNT(type) uint32_t((ArenaSize - ArenaHeaderSize) / sizeof(type))
302 
303 const uint32_t Arena::ThingsPerArena[] = {
304 #define EXPAND_THINGS_PER_ARENA(allocKind, traceKind, type, sizedType) \
305     COUNT(sizedType),
306 FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
307 #undef EXPAND_THINGS_PER_ARENA
308 };
309 
310 #undef COUNT
311 
312 struct js::gc::FinalizePhase
313 {
314     gcstats::Phase statsPhase;
315     AllocKinds kinds;
316 };
317 
318 /*
 * Finalization order for GC things swept incrementally on the main thread.
320  */
321 static const FinalizePhase IncrementalFinalizePhases[] = {
322     {
323         gcstats::PHASE_SWEEP_STRING, {
324             AllocKind::EXTERNAL_STRING
325         }
326     },
327     {
328         gcstats::PHASE_SWEEP_SCRIPT, {
329             AllocKind::SCRIPT
330         }
331     },
332     {
333         gcstats::PHASE_SWEEP_JITCODE, {
334             AllocKind::JITCODE
335         }
336     }
337 };
338 
339 /*
340  * Finalization order for GC things swept on the background thread.
341  */
342 static const FinalizePhase BackgroundFinalizePhases[] = {
343     {
344         gcstats::PHASE_SWEEP_SCRIPT, {
345             AllocKind::LAZY_SCRIPT
346         }
347     },
348     {
349         gcstats::PHASE_SWEEP_OBJECT, {
350             AllocKind::FUNCTION,
351             AllocKind::FUNCTION_EXTENDED,
352             AllocKind::OBJECT0_BACKGROUND,
353             AllocKind::OBJECT2_BACKGROUND,
354             AllocKind::OBJECT4_BACKGROUND,
355             AllocKind::OBJECT8_BACKGROUND,
356             AllocKind::OBJECT12_BACKGROUND,
357             AllocKind::OBJECT16_BACKGROUND
358         }
359     },
360     {
361         gcstats::PHASE_SWEEP_SCOPE, {
362             AllocKind::SCOPE
363         }
364     },
365     {
366         gcstats::PHASE_SWEEP_STRING, {
367             AllocKind::FAT_INLINE_STRING,
368             AllocKind::STRING,
369             AllocKind::FAT_INLINE_ATOM,
370             AllocKind::ATOM,
371             AllocKind::SYMBOL
372         }
373     },
374     {
375         gcstats::PHASE_SWEEP_SHAPE, {
376             AllocKind::SHAPE,
377             AllocKind::ACCESSOR_SHAPE,
378             AllocKind::BASE_SHAPE,
379             AllocKind::OBJECT_GROUP
380         }
381     }
382 };
383 
384 template<>
385 JSObject*
386 ArenaCellIterImpl::get<JSObject>() const
387 {
388     MOZ_ASSERT(!done());
389     return reinterpret_cast<JSObject*>(getCell());
390 }
391 
392 void
393 Arena::unmarkAll()
394 {
395     uintptr_t* word = chunk()->bitmap.arenaBits(this);
396     memset(word, 0, ArenaBitmapWords * sizeof(uintptr_t));
397 }
398 
399 /* static */ void
400 Arena::staticAsserts()
401 {
402     static_assert(size_t(AllocKind::LIMIT) <= 255,
403                   "We must be able to fit the allockind into uint8_t.");
404     static_assert(JS_ARRAY_LENGTH(ThingSizes) == size_t(AllocKind::LIMIT),
405                   "We haven't defined all thing sizes.");
406     static_assert(JS_ARRAY_LENGTH(FirstThingOffsets) == size_t(AllocKind::LIMIT),
407                   "We haven't defined all offsets.");
408     static_assert(JS_ARRAY_LENGTH(ThingsPerArena) == size_t(AllocKind::LIMIT),
409                   "We haven't defined all counts.");
410 }
411 
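/*
 * Sweep the dead things in this arena, rebuilding its free span list, and
 * return the number of things that remain marked. If nothing is marked, the
 * caller releases or recycles the arena as appropriate.
 */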
412 template<typename T>
413 inline size_t
414 Arena::finalize(FreeOp* fop, AllocKind thingKind, size_t thingSize)
415 {
416     /* Enforce requirements on size of T. */
417     MOZ_ASSERT(thingSize % CellSize == 0);
418     MOZ_ASSERT(thingSize <= 255);
419 
420     MOZ_ASSERT(allocated());
421     MOZ_ASSERT(thingKind == getAllocKind());
422     MOZ_ASSERT(thingSize == getThingSize());
423     MOZ_ASSERT(!hasDelayedMarking);
424     MOZ_ASSERT(!markOverflow);
425     MOZ_ASSERT(!allocatedDuringIncremental);
426 
427     uint_fast16_t firstThing = firstThingOffset(thingKind);
428     uint_fast16_t firstThingOrSuccessorOfLastMarkedThing = firstThing;
429     uint_fast16_t lastThing = ArenaSize - thingSize;
430 
431     FreeSpan newListHead;
432     FreeSpan* newListTail = &newListHead;
433     size_t nmarked = 0;
434 
435     if (MOZ_UNLIKELY(MemProfiler::enabled())) {
436         for (ArenaCellIterUnderFinalize i(this); !i.done(); i.next()) {
437             T* t = i.get<T>();
438             if (t->asTenured().isMarked())
439                 MemProfiler::MarkTenured(reinterpret_cast<void*>(t));
440         }
441     }
442 
443     for (ArenaCellIterUnderFinalize i(this); !i.done(); i.next()) {
444         T* t = i.get<T>();
445         if (t->asTenured().isMarked()) {
446             uint_fast16_t thing = uintptr_t(t) & ArenaMask;
447             if (thing != firstThingOrSuccessorOfLastMarkedThing) {
448                 // We just finished passing over one or more free things,
449                 // so record a new FreeSpan.
450                 newListTail->initBounds(firstThingOrSuccessorOfLastMarkedThing,
451                                         thing - thingSize, this);
452                 newListTail = newListTail->nextSpanUnchecked(this);
453             }
454             firstThingOrSuccessorOfLastMarkedThing = thing + thingSize;
455             nmarked++;
456         } else {
457             t->finalize(fop);
458             JS_POISON(t, JS_SWEPT_TENURED_PATTERN, thingSize);
459             TraceTenuredFinalize(t);
460         }
461     }
462 
463     if (nmarked == 0) {
464         // Do nothing. The caller will update the arena appropriately.
465         MOZ_ASSERT(newListTail == &newListHead);
466         JS_EXTRA_POISON(data, JS_SWEPT_TENURED_PATTERN, sizeof(data));
467         return nmarked;
468     }
469 
470     MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing != firstThing);
471     uint_fast16_t lastMarkedThing = firstThingOrSuccessorOfLastMarkedThing - thingSize;
472     if (lastThing == lastMarkedThing) {
473         // If the last thing was marked, we will have already set the bounds of
474         // the final span, and we just need to terminate the list.
475         newListTail->initAsEmpty();
476     } else {
477         // Otherwise, end the list with a span that covers the final stretch of free things.
478         newListTail->initFinal(firstThingOrSuccessorOfLastMarkedThing, lastThing, this);
479     }
480 
481     firstFreeSpan = newListHead;
482 #ifdef DEBUG
483     size_t nfree = numFreeThings(thingSize);
484     MOZ_ASSERT(nfree + nmarked == thingsPerArena(thingKind));
485 #endif
486     return nmarked;
487 }
488 
// Finalize arenas from the |src| list. Arenas that still contain marked things
// are inserted into the appropriate destination size bins; empty arenas are
// recycled into |dest| if |keepArenas| is KEEP_ARENAS and released otherwise.
492 template<typename T>
493 static inline bool
494 FinalizeTypedArenas(FreeOp* fop,
495                     Arena** src,
496                     SortedArenaList& dest,
497                     AllocKind thingKind,
498                     SliceBudget& budget,
499                     ArenaLists::KeepArenasEnum keepArenas)
500 {
501     // When operating in the foreground, take the lock at the top.
502     Maybe<AutoLockGC> maybeLock;
503     if (fop->onMainThread())
504         maybeLock.emplace(fop->runtime());
505 
506     // During background sweeping free arenas are released later on in
507     // sweepBackgroundThings().
508     MOZ_ASSERT_IF(!fop->onMainThread(), keepArenas == ArenaLists::KEEP_ARENAS);
509 
510     size_t thingSize = Arena::thingSize(thingKind);
511     size_t thingsPerArena = Arena::thingsPerArena(thingKind);
512 
513     while (Arena* arena = *src) {
514         *src = arena->next;
515         size_t nmarked = arena->finalize<T>(fop, thingKind, thingSize);
516         size_t nfree = thingsPerArena - nmarked;
517 
518         if (nmarked)
519             dest.insertAt(arena, nfree);
520         else if (keepArenas == ArenaLists::KEEP_ARENAS)
521             arena->chunk()->recycleArena(arena, dest, thingsPerArena);
522         else
523             fop->runtime()->gc.releaseArena(arena, maybeLock.ref());
524 
525         budget.step(thingsPerArena);
526         if (budget.isOverBudget())
527             return false;
528     }
529 
530     return true;
531 }
532 
/*
 * Finalize arenas from the |src| list, dispatching on |thingKind| to the
 * appropriately typed FinalizeTypedArenas. Returns false if the slice budget
 * was exhausted before the whole list was processed.
 */
537 static bool
538 FinalizeArenas(FreeOp* fop,
539                Arena** src,
540                SortedArenaList& dest,
541                AllocKind thingKind,
542                SliceBudget& budget,
543                ArenaLists::KeepArenasEnum keepArenas)
544 {
545     switch (thingKind) {
546 #define EXPAND_CASE(allocKind, traceKind, type, sizedType) \
547       case AllocKind::allocKind: \
548         return FinalizeTypedArenas<type>(fop, src, dest, thingKind, budget, keepArenas);
549 FOR_EACH_ALLOCKIND(EXPAND_CASE)
550 #undef EXPAND_CASE
551 
552       default:
553         MOZ_CRASH("Invalid alloc kind");
554     }
555 }
556 
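// ChunkPool is an intrusive doubly-linked list of chunks, threaded through
// each chunk's info.next and info.prev fields. pop(), push() and remove() are
// all O(1); count_ mirrors the list length and is checked by verify() in
// debug builds.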
557 Chunk*
558 ChunkPool::pop()
559 {
560     MOZ_ASSERT(bool(head_) == bool(count_));
561     if (!count_)
562         return nullptr;
563     return remove(head_);
564 }
565 
566 void
567 ChunkPool::push(Chunk* chunk)
568 {
569     MOZ_ASSERT(!chunk->info.next);
570     MOZ_ASSERT(!chunk->info.prev);
571 
572     chunk->info.next = head_;
573     if (head_)
574         head_->info.prev = chunk;
575     head_ = chunk;
576     ++count_;
577 
578     MOZ_ASSERT(verify());
579 }
580 
581 Chunk*
582 ChunkPool::remove(Chunk* chunk)
583 {
584     MOZ_ASSERT(count_ > 0);
585     MOZ_ASSERT(contains(chunk));
586 
587     if (head_ == chunk)
588         head_ = chunk->info.next;
589     if (chunk->info.prev)
590         chunk->info.prev->info.next = chunk->info.next;
591     if (chunk->info.next)
592         chunk->info.next->info.prev = chunk->info.prev;
593     chunk->info.next = chunk->info.prev = nullptr;
594     --count_;
595 
596     MOZ_ASSERT(verify());
597     return chunk;
598 }
599 
600 #ifdef DEBUG
601 bool
602 ChunkPool::contains(Chunk* chunk) const
603 {
604     verify();
605     for (Chunk* cursor = head_; cursor; cursor = cursor->info.next) {
606         if (cursor == chunk)
607             return true;
608     }
609     return false;
610 }
611 
612 bool
613 ChunkPool::verify() const
614 {
615     MOZ_ASSERT(bool(head_) == bool(count_));
616     uint32_t count = 0;
617     for (Chunk* cursor = head_; cursor; cursor = cursor->info.next, ++count) {
618         MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
619         MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
620     }
621     MOZ_ASSERT(count_ == count);
622     return true;
623 }
624 #endif
625 
626 void
627 ChunkPool::Iter::next()
628 {
629     MOZ_ASSERT(!done());
630     current_ = current_->info.next;
631 }
632 
633 ChunkPool
634 GCRuntime::expireEmptyChunkPool(const AutoLockGC& lock)
635 {
636     MOZ_ASSERT(emptyChunks(lock).verify());
637     MOZ_ASSERT(tunables.minEmptyChunkCount(lock) <= tunables.maxEmptyChunkCount());
638 
639     ChunkPool expired;
640     while (emptyChunks(lock).count() > tunables.minEmptyChunkCount(lock)) {
641         Chunk* chunk = emptyChunks(lock).pop();
642         prepareToFreeChunk(chunk->info);
643         expired.push(chunk);
644     }
645 
646     MOZ_ASSERT(expired.verify());
647     MOZ_ASSERT(emptyChunks(lock).verify());
648     MOZ_ASSERT(emptyChunks(lock).count() <= tunables.maxEmptyChunkCount());
649     MOZ_ASSERT(emptyChunks(lock).count() <= tunables.minEmptyChunkCount(lock));
650     return expired;
651 }
652 
653 static void
654 FreeChunkPool(JSRuntime* rt, ChunkPool& pool)
655 {
656     for (ChunkPool::Iter iter(pool); !iter.done();) {
657         Chunk* chunk = iter.get();
658         iter.next();
659         pool.remove(chunk);
660         MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
661         UnmapPages(static_cast<void*>(chunk), ChunkSize);
662     }
663     MOZ_ASSERT(pool.count() == 0);
664 }
665 
666 void
667 GCRuntime::freeEmptyChunks(JSRuntime* rt, const AutoLockGC& lock)
668 {
669     FreeChunkPool(rt, emptyChunks(lock));
670 }
671 
672 inline void
673 GCRuntime::prepareToFreeChunk(ChunkInfo& info)
674 {
675     MOZ_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
676     numArenasFreeCommitted -= info.numArenasFreeCommitted;
677     stats.count(gcstats::STAT_DESTROY_CHUNK);
678 #ifdef DEBUG
    /*
     * Let FreeChunkPool detect a missing prepareToFreeChunk call before it
     * frees the chunk.
     */
683     info.numArenasFreeCommitted = 0;
684 #endif
685 }
686 
687 inline void
688 GCRuntime::updateOnArenaFree(const ChunkInfo& info)
689 {
690     ++numArenasFreeCommitted;
691 }
692 
693 void
694 Chunk::addArenaToFreeList(JSRuntime* rt, Arena* arena)
695 {
696     MOZ_ASSERT(!arena->allocated());
697     arena->next = info.freeArenasHead;
698     info.freeArenasHead = arena;
699     ++info.numArenasFreeCommitted;
700     ++info.numArenasFree;
701     rt->gc.updateOnArenaFree(info);
702 }
703 
704 void
705 Chunk::addArenaToDecommittedList(JSRuntime* rt, const Arena* arena)
706 {
707     ++info.numArenasFree;
708     decommittedArenas.set(Chunk::arenaIndex(arena->address()));
709 }
710 
711 void
712 Chunk::recycleArena(Arena* arena, SortedArenaList& dest, size_t thingsPerArena)
713 {
714     arena->setAsFullyUnused();
715     dest.insertAt(arena, thingsPerArena);
716 }
717 
718 void
719 Chunk::releaseArena(JSRuntime* rt, Arena* arena, const AutoLockGC& lock)
720 {
721     MOZ_ASSERT(arena->allocated());
722     MOZ_ASSERT(!arena->hasDelayedMarking);
723 
724     arena->setAsNotAllocated();
725     addArenaToFreeList(rt, arena);
726     updateChunkListAfterFree(rt, lock);
727 }
728 
729 bool
730 Chunk::decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock)
731 {
732     MOZ_ASSERT(info.numArenasFreeCommitted > 0);
733     Arena* arena = fetchNextFreeArena(rt);
734     updateChunkListAfterAlloc(rt, lock);
735 
736     bool ok;
737     {
738         AutoUnlockGC unlock(lock);
739         ok = MarkPagesUnused(arena, ArenaSize);
740     }
741 
742     if (ok)
743         addArenaToDecommittedList(rt, arena);
744     else
745         addArenaToFreeList(rt, arena);
746     updateChunkListAfterFree(rt, lock);
747 
748     return ok;
749 }
750 
751 void
752 Chunk::decommitAllArenasWithoutUnlocking(const AutoLockGC& lock)
753 {
754     for (size_t i = 0; i < ArenasPerChunk; ++i) {
755         if (decommittedArenas.get(i) || arenas[i].allocated())
756             continue;
757 
758         if (MarkPagesUnused(&arenas[i], ArenaSize)) {
759             info.numArenasFreeCommitted--;
760             decommittedArenas.set(i);
761         }
762     }
763 }
764 
765 void
766 Chunk::updateChunkListAfterAlloc(JSRuntime* rt, const AutoLockGC& lock)
767 {
768     if (MOZ_UNLIKELY(!hasAvailableArenas())) {
769         rt->gc.availableChunks(lock).remove(this);
770         rt->gc.fullChunks(lock).push(this);
771     }
772 }
773 
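// Freeing an arena may move this chunk between the GC's chunk pools: when its
// first free arena appears it moves from the full list to the available list,
// and once every arena is free its arenas are decommitted and the chunk is
// handed back via recycleChunk().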
774 void
775 Chunk::updateChunkListAfterFree(JSRuntime* rt, const AutoLockGC& lock)
776 {
777     if (info.numArenasFree == 1) {
778         rt->gc.fullChunks(lock).remove(this);
779         rt->gc.availableChunks(lock).push(this);
780     } else if (!unused()) {
781         MOZ_ASSERT(!rt->gc.fullChunks(lock).contains(this));
782         MOZ_ASSERT(rt->gc.availableChunks(lock).contains(this));
783         MOZ_ASSERT(!rt->gc.emptyChunks(lock).contains(this));
784     } else {
785         MOZ_ASSERT(unused());
786         rt->gc.availableChunks(lock).remove(this);
787         decommitAllArenas(rt);
788         MOZ_ASSERT(info.numArenasFreeCommitted == 0);
789         rt->gc.recycleChunk(this, lock);
790     }
791 }
792 
793 void
794 GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock)
795 {
796     arena->zone->usage.removeGCArena();
797     if (isBackgroundSweeping())
798         arena->zone->threshold.updateForRemovedArena(tunables);
799     return arena->chunk()->releaseArena(rt, arena, lock);
800 }
801 
802 GCRuntime::GCRuntime(JSRuntime* rt) :
803     rt(rt),
804     systemZone(nullptr),
805     nursery(rt),
806     storeBuffer(rt, nursery),
807     stats(rt),
808     marker(rt),
809     usage(nullptr),
810     mMemProfiler(rt),
811     maxMallocBytes(0),
812     nextCellUniqueId_(LargestTaggedNullCellPointer + 1), // Ensure disjoint from null tagged pointers.
813     numArenasFreeCommitted(0),
814     verifyPreData(nullptr),
815     chunkAllocationSinceLastGC(false),
816     lastGCTime(PRMJ_Now()),
817     mode(JSGC_MODE_INCREMENTAL),
818     numActiveZoneIters(0),
819     cleanUpEverything(false),
820     grayBufferState(GCRuntime::GrayBufferState::Unused),
821     majorGCTriggerReason(JS::gcreason::NO_REASON),
822     minorGCTriggerReason(JS::gcreason::NO_REASON),
823     fullGCForAtomsRequested_(false),
824     minorGCNumber(0),
825     majorGCNumber(0),
826     jitReleaseNumber(0),
827     number(0),
828     startNumber(0),
829     isFull(false),
830 #ifdef DEBUG
831     disableStrictProxyCheckingCount(0),
832 #endif
833     incrementalState(gc::State::NotActive),
834     lastMarkSlice(false),
835     sweepOnBackgroundThread(false),
836     blocksToFreeAfterSweeping(JSRuntime::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
837     blocksToFreeAfterMinorGC(JSRuntime::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
838     zoneGroupIndex(0),
839     zoneGroups(nullptr),
840     currentZoneGroup(nullptr),
841     sweepZone(nullptr),
842     sweepKind(AllocKind::FIRST),
843     abortSweepAfterCurrentGroup(false),
844     arenasAllocatedDuringSweep(nullptr),
845     startedCompacting(false),
846     relocatedArenasToRelease(nullptr),
847 #ifdef JS_GC_ZEAL
848     markingValidator(nullptr),
849 #endif
850     interFrameGC(false),
851     defaultTimeBudget_(SliceBudget::UnlimitedTimeBudget),
852     incrementalAllowed(true),
853     generationalDisabled(0),
854     compactingEnabled(true),
855     compactingDisabledCount(0),
856     manipulatingDeadZones(false),
857     objectsMarkedInDeadZones(0),
858     poked(false),
859 #ifdef JS_GC_ZEAL
860     zealModeBits(0),
861     zealFrequency(0),
862     nextScheduled(0),
863     deterministicOnly(false),
864     incrementalLimit(0),
865 #endif
866     fullCompartmentChecks(false),
867     mallocBytesUntilGC(0),
868     mallocGCTriggered(false),
869     alwaysPreserveCode(false),
870     inUnsafeRegion(0),
871 #ifdef DEBUG
872     noGCOrAllocationCheck(0),
873     noNurseryAllocationCheck(0),
874     arenasEmptyAtShutdown(true),
875 #endif
876     lock(mutexid::GCLock),
877     allocTask(rt, emptyChunks_),
878     decommitTask(rt),
879     helperState(rt)
880 {
881     setGCMode(JSGC_MODE_GLOBAL);
882 }
883 
884 #ifdef JS_GC_ZEAL
885 
886 void
887 GCRuntime::getZealBits(uint32_t* zealBits, uint32_t* frequency, uint32_t* scheduled)
888 {
889     *zealBits = zealModeBits;
890     *frequency = zealFrequency;
891     *scheduled = nextScheduled;
892 }
893 
894 const char* gc::ZealModeHelpText =
895     "  Specifies how zealous the garbage collector should be. Some of these modes can\n"
896     "  be set simultaneously, by passing multiple level options, e.g. \"2;4\" will activate\n"
897     "  both modes 2 and 4. Modes can be specified by name or number.\n"
898     "  \n"
899     "  Values:\n"
900     "    0: (None) Normal amount of collection (resets all modes)\n"
901     "    1: (Poke) Collect when roots are added or removed\n"
    "    2: (Alloc) Collect every N allocations (default: 100)\n"
903     "    3: (FrameGC) Collect when the window paints (browser only)\n"
904     "    4: (VerifierPre) Verify pre write barriers between instructions\n"
905     "    5: (FrameVerifierPre) Verify pre write barriers between paints\n"
906     "    6: (StackRooting) Verify stack rooting\n"
907     "    7: (GenerationalGC) Collect the nursery every N nursery allocations\n"
908     "    8: (IncrementalRootsThenFinish) Incremental GC in two slices: 1) mark roots 2) finish collection\n"
909     "    9: (IncrementalMarkAllThenFinish) Incremental GC in two slices: 1) mark all 2) new marking and finish\n"
910     "   10: (IncrementalMultipleSlices) Incremental GC in multiple slices\n"
911     "   11: (IncrementalMarkingValidator) Verify incremental marking\n"
912     "   12: (ElementsBarrier) Always use the individual element post-write barrier, regardless of elements size\n"
913     "   13: (CheckHashTablesOnMinorGC) Check internal hashtables on minor GC\n"
914     "   14: (Compact) Perform a shrinking collection every N allocations\n"
915     "   15: (CheckHeapAfterGC) Walk the heap to check its integrity after every GC\n"
916     "   16: (CheckNursery) Check nursery integrity on minor GC\n";
917 
918 void
919 GCRuntime::setZeal(uint8_t zeal, uint32_t frequency)
920 {
921     MOZ_ASSERT(zeal <= unsigned(ZealMode::Limit));
922 
923     if (verifyPreData)
924         VerifyBarriers(rt, PreBarrierVerifier);
925 
926     if (zeal == 0 && hasZealMode(ZealMode::GenerationalGC)) {
927         evictNursery(JS::gcreason::DEBUG_GC);
928         nursery.leaveZealMode();
929     }
930 
931     ZealMode zealMode = ZealMode(zeal);
932     if (zealMode == ZealMode::GenerationalGC)
933         nursery.enterZealMode();
934 
935     // Zeal modes 8-10 are mutually exclusive. If we're setting one of those,
936     // we first reset all of them.
937     if (zealMode >= ZealMode::IncrementalRootsThenFinish &&
938         zealMode <= ZealMode::IncrementalMultipleSlices)
939     {
940         clearZealMode(ZealMode::IncrementalRootsThenFinish);
941         clearZealMode(ZealMode::IncrementalMarkAllThenFinish);
942         clearZealMode(ZealMode::IncrementalMultipleSlices);
943     }
944 
945     bool schedule = zealMode >= ZealMode::Alloc;
946     if (zeal != 0)
947         zealModeBits |= 1 << unsigned(zeal);
948     else
949         zealModeBits = 0;
950     zealFrequency = frequency;
951     nextScheduled = schedule ? frequency : 0;
952 }
953 
954 void
955 GCRuntime::setNextScheduled(uint32_t count)
956 {
957     nextScheduled = count;
958 }
959 
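// Parse a zeal specification of the form "level(;level)*[,N]". Levels may be
// given by number or by name (see ZealModeHelpText); for example, "2;4,100"
// enables modes 2 and 4 with a frequency of 100.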
960 bool
961 GCRuntime::parseAndSetZeal(const char* str)
962 {
963     int frequency = -1;
964     bool foundFrequency = false;
965     mozilla::Vector<int, 0, SystemAllocPolicy> zeals;
966 
967     static const struct {
968         const char* const zealMode;
969         size_t length;
970         uint32_t zeal;
971     } zealModes[] = {
972 #define ZEAL_MODE(name, value) {#name, sizeof(#name) - 1, value},
973         JS_FOR_EACH_ZEAL_MODE(ZEAL_MODE)
974 #undef ZEAL_MODE
975         {"None", 4, 0}
976     };
977 
978     do {
979         int zeal = -1;
980 
981         const char* p = nullptr;
982         if (isdigit(str[0])) {
983             zeal = atoi(str);
984 
985             size_t offset = strspn(str, "0123456789");
986             p = str + offset;
987         } else {
988             for (auto z : zealModes) {
989                 if (!strncmp(str, z.zealMode, z.length)) {
990                     zeal = z.zeal;
991                     p = str + z.length;
992                     break;
993                 }
994             }
995         }
996         if (p) {
997             if (!*p || *p == ';') {
998                 frequency = JS_DEFAULT_ZEAL_FREQ;
999             } else if (*p == ',') {
1000                 frequency = atoi(p + 1);
1001                 foundFrequency = true;
1002             }
1003         }
1004 
1005         if (zeal < 0 || zeal > int(ZealMode::Limit) || frequency <= 0) {
1006             fprintf(stderr, "Format: JS_GC_ZEAL=level(;level)*[,N]\n");
1007             fputs(ZealModeHelpText, stderr);
1008             return false;
1009         }
1010 
1011         if (!zeals.emplaceBack(zeal)) {
1012             return false;
1013         }
1014     } while (!foundFrequency &&
1015              (str = strchr(str, ';')) != nullptr &&
1016              str++);
1017 
1018     for (auto z : zeals)
1019         setZeal(z, frequency);
1020     return true;
1021 }
1022 
1023 #endif
1024 
1025 /*
1026  * Lifetime in number of major GCs for type sets attached to scripts containing
1027  * observed types.
1028  */
1029 static const uint64_t JIT_SCRIPT_RELEASE_TYPES_PERIOD = 20;
1030 
1031 bool
1032 GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
1033 {
1034     InitMemorySubsystem();
1035 
1036     if (!rootsHash.init(256))
1037         return false;
1038 
1039     {
1040         AutoLockGC lock(rt);
1041 
1042         /*
1043          * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
1044          * for default backward API compatibility.
1045          */
1046         MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
1047         setMaxMallocBytes(maxbytes);
1048 
1049         const char* size = getenv("JSGC_MARK_STACK_LIMIT");
1050         if (size)
1051             setMarkStackLimit(atoi(size), lock);
1052 
1053         jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
1054 
1055         if (!nursery.init(maxNurseryBytes, lock))
1056             return false;
1057 
1058         if (!nursery.isEnabled()) {
1059             MOZ_ASSERT(nursery.nurserySize() == 0);
1060             ++rt->gc.generationalDisabled;
1061         } else {
1062             MOZ_ASSERT(nursery.nurserySize() > 0);
1063         }
1064     }
1065 
1066 #ifdef JS_GC_ZEAL
1067     const char* zealSpec = getenv("JS_GC_ZEAL");
1068     if (zealSpec && zealSpec[0] && !parseAndSetZeal(zealSpec))
1069         return false;
1070 #endif
1071 
1072     if (!InitTrace(*this))
1073         return false;
1074 
1075     if (!marker.init(mode))
1076         return false;
1077 
1078     return true;
1079 }
1080 
1081 void
1082 GCRuntime::finish()
1083 {
1084     /* Wait for the nursery sweeping to end. */
1085     if (nursery.isEnabled())
1086         nursery.waitBackgroundFreeEnd();
1087 
1088     /*
1089      * Wait until the background finalization and allocation stops and the
1090      * helper thread shuts down before we forcefully release any remaining GC
1091      * memory.
1092      */
1093     helperState.finish();
1094     allocTask.cancel(GCParallelTask::CancelAndWait);
1095     decommitTask.cancel(GCParallelTask::CancelAndWait);
1096 
1097 #ifdef JS_GC_ZEAL
1098     /* Free memory associated with GC verification. */
1099     finishVerifier();
1100 #endif
1101 
1102     /* Delete all remaining zones. */
1103     if (rt->gcInitialized) {
1104         AutoSetThreadIsSweeping threadIsSweeping;
1105         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
1106             for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
1107                 js_delete(comp.get());
1108             js_delete(zone.get());
1109         }
1110     }
1111 
1112     zones.clear();
1113 
1114     FreeChunkPool(rt, fullChunks_);
1115     FreeChunkPool(rt, availableChunks_);
1116     FreeChunkPool(rt, emptyChunks_);
1117 
1118     FinishTrace();
1119 
1120     nursery.printTotalProfileTimes();
1121     stats.printTotalProfileTimes();
1122 }
1123 
1124 bool
1125 GCRuntime::setParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock)
1126 {
1127     switch (key) {
1128       case JSGC_MAX_MALLOC_BYTES:
1129         setMaxMallocBytes(value);
1130         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
1131             zone->setGCMaxMallocBytes(maxMallocBytesAllocated() * 0.9);
1132         break;
1133       case JSGC_SLICE_TIME_BUDGET:
1134         defaultTimeBudget_ = value ? value : SliceBudget::UnlimitedTimeBudget;
1135         break;
1136       case JSGC_MARK_STACK_LIMIT:
1137         if (value == 0)
1138             return false;
1139         setMarkStackLimit(value, lock);
1140         break;
1141       case JSGC_MODE:
1142         if (mode != JSGC_MODE_GLOBAL &&
1143             mode != JSGC_MODE_ZONE &&
1144             mode != JSGC_MODE_INCREMENTAL)
1145         {
1146             return false;
1147         }
1148         mode = JSGCMode(value);
1149         break;
1150       case JSGC_COMPACTING_ENABLED:
1151         compactingEnabled = value != 0;
1152         break;
1153       default:
1154         if (!tunables.setParameter(key, value, lock))
1155             return false;
1156         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
1157             zone->threshold.updateAfterGC(zone->usage.gcBytes(), GC_NORMAL, tunables,
1158                                           schedulingState, lock);
1159         }
1160     }
1161 
1162     return true;
1163 }
1164 
1165 bool
1166 GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value, const AutoLockGC& lock)
1167 {
    // Limit the heap growth factor to one hundred times the current heap size.
1169     const double MaxHeapGrowthFactor = 100;
1170 
1171     switch(key) {
1172       case JSGC_MAX_BYTES:
1173         gcMaxBytes_ = value;
1174         break;
1175       case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
1176         highFrequencyThresholdUsec_ = value * PRMJ_USEC_PER_MSEC;
1177         break;
1178       case JSGC_HIGH_FREQUENCY_LOW_LIMIT: {
1179         uint64_t newLimit = (uint64_t)value * 1024 * 1024;
1180         if (newLimit == UINT64_MAX)
1181             return false;
1182         highFrequencyLowLimitBytes_ = newLimit;
1183         if (highFrequencyLowLimitBytes_ >= highFrequencyHighLimitBytes_)
1184             highFrequencyHighLimitBytes_ = highFrequencyLowLimitBytes_ + 1;
1185         MOZ_ASSERT(highFrequencyHighLimitBytes_ > highFrequencyLowLimitBytes_);
1186         break;
1187       }
1188       case JSGC_HIGH_FREQUENCY_HIGH_LIMIT: {
1189         uint64_t newLimit = (uint64_t)value * 1024 * 1024;
1190         if (newLimit == 0)
1191             return false;
1192         highFrequencyHighLimitBytes_ = newLimit;
1193         if (highFrequencyHighLimitBytes_ <= highFrequencyLowLimitBytes_)
1194             highFrequencyLowLimitBytes_ = highFrequencyHighLimitBytes_ - 1;
1195         MOZ_ASSERT(highFrequencyHighLimitBytes_ > highFrequencyLowLimitBytes_);
1196         break;
1197       }
1198       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX: {
1199         double newGrowth = value / 100.0;
1200         if (newGrowth <= 0.85 || newGrowth > MaxHeapGrowthFactor)
1201             return false;
1202         highFrequencyHeapGrowthMax_ = newGrowth;
1203         MOZ_ASSERT(highFrequencyHeapGrowthMax_ / 0.85 > 1.0);
1204         break;
1205       }
1206       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN: {
1207         double newGrowth = value / 100.0;
1208         if (newGrowth <= 0.85 || newGrowth > MaxHeapGrowthFactor)
1209             return false;
1210         highFrequencyHeapGrowthMin_ = newGrowth;
1211         MOZ_ASSERT(highFrequencyHeapGrowthMin_ / 0.85 > 1.0);
1212         break;
1213       }
1214       case JSGC_LOW_FREQUENCY_HEAP_GROWTH: {
1215         double newGrowth = value / 100.0;
1216         if (newGrowth <= 0.9 || newGrowth > MaxHeapGrowthFactor)
1217             return false;
1218         lowFrequencyHeapGrowth_ = newGrowth;
1219         MOZ_ASSERT(lowFrequencyHeapGrowth_ / 0.9 > 1.0);
1220         break;
1221       }
1222       case JSGC_DYNAMIC_HEAP_GROWTH:
1223         dynamicHeapGrowthEnabled_ = value != 0;
1224         break;
1225       case JSGC_DYNAMIC_MARK_SLICE:
1226         dynamicMarkSliceEnabled_ = value != 0;
1227         break;
1228       case JSGC_ALLOCATION_THRESHOLD:
1229         gcZoneAllocThresholdBase_ = value * 1024 * 1024;
1230         break;
1231       case JSGC_MIN_EMPTY_CHUNK_COUNT:
1232         minEmptyChunkCount_ = value;
1233         if (minEmptyChunkCount_ > maxEmptyChunkCount_)
1234             maxEmptyChunkCount_ = minEmptyChunkCount_;
1235         MOZ_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
1236         break;
1237       case JSGC_MAX_EMPTY_CHUNK_COUNT:
1238         maxEmptyChunkCount_ = value;
1239         if (minEmptyChunkCount_ > maxEmptyChunkCount_)
1240             minEmptyChunkCount_ = maxEmptyChunkCount_;
1241         MOZ_ASSERT(maxEmptyChunkCount_ >= minEmptyChunkCount_);
1242         break;
1243       case JSGC_REFRESH_FRAME_SLICES_ENABLED:
1244         refreshFrameSlicesEnabled_ = value != 0;
1245         break;
1246       default:
1247         MOZ_CRASH("Unknown GC parameter.");
1248     }
1249 
1250     return true;
1251 }
1252 
1253 uint32_t
1254 GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock)
1255 {
1256     switch (key) {
1257       case JSGC_MAX_BYTES:
1258         return uint32_t(tunables.gcMaxBytes());
1259       case JSGC_MAX_MALLOC_BYTES:
1260         return maxMallocBytes;
1261       case JSGC_BYTES:
1262         return uint32_t(usage.gcBytes());
1263       case JSGC_MODE:
1264         return uint32_t(mode);
1265       case JSGC_UNUSED_CHUNKS:
1266         return uint32_t(emptyChunks(lock).count());
1267       case JSGC_TOTAL_CHUNKS:
1268         return uint32_t(fullChunks(lock).count() +
1269                         availableChunks(lock).count() +
1270                         emptyChunks(lock).count());
1271       case JSGC_SLICE_TIME_BUDGET:
1272         if (defaultTimeBudget_ == SliceBudget::UnlimitedTimeBudget) {
1273             return 0;
1274         } else {
1275             MOZ_RELEASE_ASSERT(defaultTimeBudget_ >= 0);
1276             MOZ_RELEASE_ASSERT(defaultTimeBudget_ <= UINT32_MAX);
1277             return uint32_t(defaultTimeBudget_);
1278         }
1279       case JSGC_MARK_STACK_LIMIT:
1280         return marker.maxCapacity();
1281       case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
1282         return tunables.highFrequencyThresholdUsec() / PRMJ_USEC_PER_MSEC;
1283       case JSGC_HIGH_FREQUENCY_LOW_LIMIT:
1284         return tunables.highFrequencyLowLimitBytes() / 1024 / 1024;
1285       case JSGC_HIGH_FREQUENCY_HIGH_LIMIT:
1286         return tunables.highFrequencyHighLimitBytes() / 1024 / 1024;
1287       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX:
1288         return uint32_t(tunables.highFrequencyHeapGrowthMax() * 100);
1289       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN:
1290         return uint32_t(tunables.highFrequencyHeapGrowthMin() * 100);
1291       case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
1292         return uint32_t(tunables.lowFrequencyHeapGrowth() * 100);
1293       case JSGC_DYNAMIC_HEAP_GROWTH:
1294         return tunables.isDynamicHeapGrowthEnabled();
1295       case JSGC_DYNAMIC_MARK_SLICE:
1296         return tunables.isDynamicMarkSliceEnabled();
1297       case JSGC_ALLOCATION_THRESHOLD:
1298         return tunables.gcZoneAllocThresholdBase() / 1024 / 1024;
1299       case JSGC_MIN_EMPTY_CHUNK_COUNT:
1300         return tunables.minEmptyChunkCount(lock);
1301       case JSGC_MAX_EMPTY_CHUNK_COUNT:
1302         return tunables.maxEmptyChunkCount();
1303       case JSGC_COMPACTING_ENABLED:
1304         return compactingEnabled;
1305       case JSGC_REFRESH_FRAME_SLICES_ENABLED:
1306         return tunables.areRefreshFrameSlicesEnabled();
1307       default:
1308         MOZ_ASSERT(key == JSGC_NUMBER);
1309         return uint32_t(number);
1310     }
1311 }
1312 
1313 void
1314 GCRuntime::setMarkStackLimit(size_t limit, AutoLockGC& lock)
1315 {
1316     MOZ_ASSERT(!rt->isHeapBusy());
1317     AutoUnlockGC unlock(lock);
1318     AutoStopVerifyingBarriers pauseVerification(rt, false);
1319     marker.setMaxCapacity(limit);
1320 }
1321 
1322 bool
1323 GCRuntime::addBlackRootsTracer(JSTraceDataOp traceOp, void* data)
1324 {
1325     AssertHeapIsIdle(rt);
1326     return !!blackRootTracers.append(Callback<JSTraceDataOp>(traceOp, data));
1327 }
1328 
1329 void
1330 GCRuntime::removeBlackRootsTracer(JSTraceDataOp traceOp, void* data)
1331 {
1332     // Can be called from finalizers
1333     for (size_t i = 0; i < blackRootTracers.length(); i++) {
1334         Callback<JSTraceDataOp>* e = &blackRootTracers[i];
1335         if (e->op == traceOp && e->data == data) {
1336             blackRootTracers.erase(e);
1337         }
1338     }
1339 }
1340 
1341 void
1342 GCRuntime::setGrayRootsTracer(JSTraceDataOp traceOp, void* data)
1343 {
1344     AssertHeapIsIdle(rt);
1345     grayRootTracer.op = traceOp;
1346     grayRootTracer.data = data;
1347 }
1348 
1349 void
1350 GCRuntime::setGCCallback(JSGCCallback callback, void* data)
1351 {
1352     gcCallback.op = callback;
1353     gcCallback.data = data;
1354 }
1355 
1356 void
1357 GCRuntime::callGCCallback(JSGCStatus status) const
1358 {
1359     if (gcCallback.op)
1360         gcCallback.op(rt->contextFromMainThread(), status, gcCallback.data);
1361 }
1362 
1363 void
1364 GCRuntime::setObjectsTenuredCallback(JSObjectsTenuredCallback callback,
1365                                      void* data)
1366 {
1367     tenuredCallback.op = callback;
1368     tenuredCallback.data = data;
1369 }
1370 
1371 void
1372 GCRuntime::callObjectsTenuredCallback()
1373 {
1374     if (tenuredCallback.op)
1375         tenuredCallback.op(rt->contextFromMainThread(), tenuredCallback.data);
1376 }
1377 
1378 namespace {
1379 
1380 class AutoNotifyGCActivity {
1381   public:
1382     explicit AutoNotifyGCActivity(GCRuntime& gc) : gc_(gc) {
1383         if (!gc_.isIncrementalGCInProgress()) {
1384             gcstats::AutoPhase ap(gc_.stats, gcstats::PHASE_GC_BEGIN);
1385             gc_.callGCCallback(JSGC_BEGIN);
1386         }
1387     }
1388     ~AutoNotifyGCActivity() {
1389         if (!gc_.isIncrementalGCInProgress()) {
1390             gcstats::AutoPhase ap(gc_.stats, gcstats::PHASE_GC_END);
1391             gc_.callGCCallback(JSGC_END);
1392         }
1393     }
1394 
1395   private:
1396     GCRuntime& gc_;
1397 };
1398 
1399 } // (anon)
1400 
1401 bool
1402 GCRuntime::addFinalizeCallback(JSFinalizeCallback callback, void* data)
1403 {
1404     return finalizeCallbacks.append(Callback<JSFinalizeCallback>(callback, data));
1405 }
1406 
1407 void
1408 GCRuntime::removeFinalizeCallback(JSFinalizeCallback callback)
1409 {
1410     for (Callback<JSFinalizeCallback>* p = finalizeCallbacks.begin();
1411          p < finalizeCallbacks.end(); p++)
1412     {
1413         if (p->op == callback) {
1414             finalizeCallbacks.erase(p);
1415             break;
1416         }
1417     }
1418 }
1419 
1420 void
1421 GCRuntime::callFinalizeCallbacks(FreeOp* fop, JSFinalizeStatus status) const
1422 {
1423     for (auto& p : finalizeCallbacks)
1424         p.op(fop, status, !isFull, p.data);
1425 }
1426 
1427 bool
1428 GCRuntime::addWeakPointerZoneGroupCallback(JSWeakPointerZoneGroupCallback callback, void* data)
1429 {
1430     return updateWeakPointerZoneGroupCallbacks.append(
1431             Callback<JSWeakPointerZoneGroupCallback>(callback, data));
1432 }
1433 
1434 void
1435 GCRuntime::removeWeakPointerZoneGroupCallback(JSWeakPointerZoneGroupCallback callback)
1436 {
1437     for (auto& p : updateWeakPointerZoneGroupCallbacks) {
1438         if (p.op == callback) {
1439             updateWeakPointerZoneGroupCallbacks.erase(&p);
1440             break;
1441         }
1442     }
1443 }
1444 
1445 void
1446 GCRuntime::callWeakPointerZoneGroupCallbacks() const
1447 {
1448     for (auto const& p : updateWeakPointerZoneGroupCallbacks)
1449         p.op(rt->contextFromMainThread(), p.data);
1450 }
1451 
1452 bool
1453 GCRuntime::addWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback, void* data)
1454 {
1455     return updateWeakPointerCompartmentCallbacks.append(
1456             Callback<JSWeakPointerCompartmentCallback>(callback, data));
1457 }
1458 
1459 void
1460 GCRuntime::removeWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback)
1461 {
1462     for (auto& p : updateWeakPointerCompartmentCallbacks) {
1463         if (p.op == callback) {
1464             updateWeakPointerCompartmentCallbacks.erase(&p);
1465             break;
1466         }
1467     }
1468 }
1469 
1470 void
1471 GCRuntime::callWeakPointerCompartmentCallbacks(JSCompartment* comp) const
1472 {
1473     for (auto const& p : updateWeakPointerCompartmentCallbacks)
1474         p.op(rt->contextFromMainThread(), comp, p.data);
1475 }
1476 
1477 JS::GCSliceCallback
1478 GCRuntime::setSliceCallback(JS::GCSliceCallback callback) {
1479     return stats.setSliceCallback(callback);
1480 }
1481 
1482 JS::GCNurseryCollectionCallback
1483 GCRuntime::setNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback) {
1484     return stats.setNurseryCollectionCallback(callback);
1485 }
1486 
1487 JS::DoCycleCollectionCallback
1488 GCRuntime::setDoCycleCollectionCallback(JS::DoCycleCollectionCallback callback)
1489 {
1490     auto prior = gcDoCycleCollectionCallback;
1491     gcDoCycleCollectionCallback = Callback<JS::DoCycleCollectionCallback>(callback, nullptr);
1492     return prior.op;
1493 }
1494 
1495 void
1496 GCRuntime::callDoCycleCollectionCallback(JSContext* cx)
1497 {
1498     if (gcDoCycleCollectionCallback.op)
1499         gcDoCycleCollectionCallback.op(cx);
1500 }
1501 
1502 bool
1503 GCRuntime::addRoot(Value* vp, const char* name)
1504 {
1505     /*
1506      * Sometimes Firefox will hold weak references to objects and then convert
1507      * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
1508      * or ModifyBusyCount in workers). We need a read barrier to cover these
1509      * cases.
1510      */
1511     if (isIncrementalGCInProgress())
1512         GCPtrValue::writeBarrierPre(*vp);
1513 
1514     return rootsHash.put(vp, name);
1515 }
1516 
1517 void
1518 GCRuntime::removeRoot(Value* vp)
1519 {
1520     rootsHash.remove(vp);
1521     poke();
1522 }
1523 
1524 extern JS_FRIEND_API(bool)
1525 js::AddRawValueRoot(JSContext* cx, Value* vp, const char* name)
1526 {
1527     MOZ_ASSERT(vp);
1528     MOZ_ASSERT(name);
1529     bool ok = cx->runtime()->gc.addRoot(vp, name);
1530     if (!ok)
1531         JS_ReportOutOfMemory(cx);
1532     return ok;
1533 }
1534 
1535 extern JS_FRIEND_API(void)
1536 js::RemoveRawValueRoot(JSContext* cx, Value* vp)
1537 {
1538     cx->runtime()->gc.removeRoot(vp);
1539 }
1540 
1541 void
1542 GCRuntime::setMaxMallocBytes(size_t value)
1543 {
1544     /*
     * For compatibility, clamp any value that exceeds PTRDIFF_MAX to
     * PTRDIFF_MAX, since the remaining-bytes counter is tracked as a signed
     * ptrdiff_t (see resetMallocBytes).
1547      */
1548     maxMallocBytes = (ptrdiff_t(value) >= 0) ? value : size_t(-1) >> 1;
1549     resetMallocBytes();
1550     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
1551         zone->setGCMaxMallocBytes(value);
1552 }
1553 
1554 void
1555 GCRuntime::resetMallocBytes()
1556 {
1557     mallocBytesUntilGC = ptrdiff_t(maxMallocBytes);
1558     mallocGCTriggered = false;
1559 }
1560 
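/*
 * mallocBytesUntilGC counts down as malloc'd memory is reported to the GC.
 * Once the counter is exhausted, isTooMuchMalloc() reports true and
 * onTooMuchMalloc() triggers at most one TOO_MUCH_MALLOC GC until the counter
 * is reset by resetMallocBytes().
 */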
1561 void
1562 GCRuntime::updateMallocCounter(JS::Zone* zone, size_t nbytes)
1563 {
1564     mallocBytesUntilGC -= ptrdiff_t(nbytes);
1565     if (MOZ_UNLIKELY(isTooMuchMalloc()))
1566         onTooMuchMalloc();
1567     else if (zone)
1568         zone->updateMallocCounter(nbytes);
1569 }
1570 
1571 void
1572 GCRuntime::onTooMuchMalloc()
1573 {
1574     if (!mallocGCTriggered)
1575         mallocGCTriggered = triggerGC(JS::gcreason::TOO_MUCH_MALLOC);
1576 }
1577 
1578 double
1579 ZoneHeapThreshold::allocTrigger(bool highFrequencyGC) const
1580 {
1581     return (highFrequencyGC ? 0.85 : 0.9) * gcTriggerBytes();
1582 }
1583 
1584 /* static */ double
1585 ZoneHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
1586                                                           const GCSchedulingTunables& tunables,
1587                                                           const GCSchedulingState& state)
1588 {
1589     if (!tunables.isDynamicHeapGrowthEnabled())
1590         return 3.0;
1591 
1592     // For small zones, our collection heuristics do not matter much: favor
1593     // something simple in this case.
1594     if (lastBytes < 1 * 1024 * 1024)
1595         return tunables.lowFrequencyHeapGrowth();
1596 
    // If GCs are not triggering in rapid succession, use a lower threshold so
    // that we will collect garbage sooner.
1599     if (!state.inHighFrequencyGCMode())
1600         return tunables.lowFrequencyHeapGrowth();
1601 
1602     // The heap growth factor depends on the heap size after a GC and the GC
1603     // frequency. For low frequency GCs (more than 1sec between GCs) we let
1604     // the heap grow to 150%. For high frequency GCs we let the heap grow
1605     // depending on the heap size:
1606     //   lastBytes < highFrequencyLowLimit: 300%
1607     //   lastBytes > highFrequencyHighLimit: 150%
1608     //   otherwise: linear interpolation between 300% and 150% based on lastBytes
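    //
    // For example, assuming illustrative tunable values of lowLimit = 100 MB,
    // highLimit = 500 MB, maxRatio = 3.0 and minRatio = 1.5 (the real values
    // come from GCSchedulingTunables), lastBytes = 300 MB gives:
    //   factor = 3.0 - (3.0 - 1.5) * (300 - 100) / (500 - 100) = 2.25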
1609 
1610     // Use shorter names to make the operation comprehensible.
1611     double minRatio = tunables.highFrequencyHeapGrowthMin();
1612     double maxRatio = tunables.highFrequencyHeapGrowthMax();
1613     double lowLimit = tunables.highFrequencyLowLimitBytes();
1614     double highLimit = tunables.highFrequencyHighLimitBytes();
1615 
1616     if (lastBytes <= lowLimit)
1617         return maxRatio;
1618 
1619     if (lastBytes >= highLimit)
1620         return minRatio;
1621 
1622     double factor = maxRatio - ((maxRatio - minRatio) * ((lastBytes - lowLimit) /
1623                                                          (highLimit - lowLimit)));
1624     MOZ_ASSERT(factor >= minRatio);
1625     MOZ_ASSERT(factor <= maxRatio);
1626     return factor;
1627 }
1628 
1629 /* static */ size_t
1630 ZoneHeapThreshold::computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
1631                                            JSGCInvocationKind gckind,
1632                                            const GCSchedulingTunables& tunables,
1633                                            const AutoLockGC& lock)
1634 {
1635     size_t base = gckind == GC_SHRINK
1636                 ? Max(lastBytes, tunables.minEmptyChunkCount(lock) * ChunkSize)
1637                 : Max(lastBytes, tunables.gcZoneAllocThresholdBase());
1638     double trigger = double(base) * growthFactor;
1639     return size_t(Min(double(tunables.gcMaxBytes()), trigger));
1640 }
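// Example (illustrative numbers): for a non-shrinking GC where lastBytes is
// 50 MB (above gcZoneAllocThresholdBase()) and the growth factor is 2.25, the
// next trigger is Min(gcMaxBytes(), 50 MB * 2.25) = 112.5 MB, assuming
// gcMaxBytes() is larger than that.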
1641 
1642 void
1643 ZoneHeapThreshold::updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
1644                                  const GCSchedulingTunables& tunables,
1645                                  const GCSchedulingState& state, const AutoLockGC& lock)
1646 {
1647     gcHeapGrowthFactor_ = computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
1648     gcTriggerBytes_ = computeZoneTriggerBytes(gcHeapGrowthFactor_, lastBytes, gckind, tunables,
1649                                               lock);
1650 }
1651 
1652 void
1653 ZoneHeapThreshold::updateForRemovedArena(const GCSchedulingTunables& tunables)
1654 {
1655     size_t amount = ArenaSize * gcHeapGrowthFactor_;
1656     MOZ_ASSERT(amount > 0);
1657 
1658     if ((gcTriggerBytes_ < amount) ||
1659         (gcTriggerBytes_ - amount < tunables.gcZoneAllocThresholdBase() * gcHeapGrowthFactor_))
1660     {
1661         return;
1662     }
1663 
1664     gcTriggerBytes_ -= amount;
1665 }
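// Example (illustrative numbers, assuming the usual 4 KiB ArenaSize): with a
// growth factor of 2.25, each removed arena lowers gcTriggerBytes_ by
// 4096 * 2.25 = 9216 bytes, unless doing so would drop the trigger below the
// scaled gcZoneAllocThresholdBase() floor checked above.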
1666 
1667 void
1668 GCMarker::delayMarkingArena(Arena* arena)
1669 {
1670     if (arena->hasDelayedMarking) {
1671         /* Arena already scheduled to be marked later */
1672         return;
1673     }
1674     arena->setNextDelayedMarking(unmarkedArenaStackTop);
1675     unmarkedArenaStackTop = arena;
1676 #ifdef DEBUG
1677     markLaterArenas++;
1678 #endif
1679 }
1680 
1681 void
1682 GCMarker::delayMarkingChildren(const void* thing)
1683 {
1684     const TenuredCell* cell = TenuredCell::fromPointer(thing);
1685     cell->arena()->markOverflow = 1;
1686     delayMarkingArena(cell->arena());
1687 }
1688 
1689 inline void
1690 ArenaLists::prepareForIncrementalGC(JSRuntime* rt)
1691 {
1692     for (auto i : AllAllocKinds()) {
1693         FreeSpan* span = freeLists[i];
1694         if (span != &placeholder) {
1695             if (!span->isEmpty()) {
1696                 Arena* arena = span->getArena();
1697                 arena->allocatedDuringIncremental = true;
1698                 rt->gc.marker.delayMarkingArena(arena);
1699             } else {
1700                 freeLists[i] = &placeholder;
1701             }
1702         }
1703     }
1704 }
1705 
1706 /* Compacting GC */
1707 
1708 bool
1709 GCRuntime::shouldCompact()
1710 {
1711     // Compact on shrinking GC if enabled, but skip compacting in incremental
1712     // GCs if we are currently animating.
1713     return invocationKind == GC_SHRINK && isCompactingGCEnabled() &&
1714         (!isIncremental || rt->lastAnimationTime + PRMJ_USEC_PER_SEC < PRMJ_Now());
1715 }
1716 
1717 void
1718 GCRuntime::disableCompactingGC()
1719 {
1720     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1721     ++compactingDisabledCount;
1722 }
1723 
1724 void
1725 GCRuntime::enableCompactingGC()
1726 {
1727     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1728     MOZ_ASSERT(compactingDisabledCount > 0);
1729     --compactingDisabledCount;
1730 }
1731 
1732 bool
1733 GCRuntime::isCompactingGCEnabled() const
1734 {
1735     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
1736     return compactingEnabled && compactingDisabledCount == 0;
1737 }
1738 
1739 AutoDisableCompactingGC::AutoDisableCompactingGC(JSContext* cx)
1740   : gc(cx->gc)
1741 {
1742     gc.disableCompactingGC();
1743     if (gc.isIncrementalGCInProgress() && gc.isCompactingGc())
1744         FinishGC(cx);
1745 }
1746 
1747 AutoDisableCompactingGC::~AutoDisableCompactingGC()
1748 {
1749     gc.enableCompactingGC();
1750 }
1751 
1752 static bool
1753 CanRelocateZone(Zone* zone)
1754 {
1755     return !zone->isAtomsZone() && !zone->isSelfHostingZone();
1756 }
1757 
1758 static const AllocKind AllocKindsToRelocate[] = {
1759     AllocKind::FUNCTION,
1760     AllocKind::FUNCTION_EXTENDED,
1761     AllocKind::OBJECT0,
1762     AllocKind::OBJECT0_BACKGROUND,
1763     AllocKind::OBJECT2,
1764     AllocKind::OBJECT2_BACKGROUND,
1765     AllocKind::OBJECT4,
1766     AllocKind::OBJECT4_BACKGROUND,
1767     AllocKind::OBJECT8,
1768     AllocKind::OBJECT8_BACKGROUND,
1769     AllocKind::OBJECT12,
1770     AllocKind::OBJECT12_BACKGROUND,
1771     AllocKind::OBJECT16,
1772     AllocKind::OBJECT16_BACKGROUND,
1773     AllocKind::SCRIPT,
1774     AllocKind::LAZY_SCRIPT,
1775     AllocKind::SCOPE,
1776     AllocKind::SHAPE,
1777     AllocKind::ACCESSOR_SHAPE,
1778     AllocKind::BASE_SHAPE,
1779     AllocKind::FAT_INLINE_STRING,
1780     AllocKind::STRING,
1781     AllocKind::EXTERNAL_STRING,
1782     AllocKind::FAT_INLINE_ATOM,
1783     AllocKind::ATOM
1784 };
1785 
1786 Arena*
1787 ArenaList::removeRemainingArenas(Arena** arenap)
1788 {
1789     // This is only ever called to remove arenas that are after the cursor, so
1790     // we don't need to update it.
1791 #ifdef DEBUG
1792     for (Arena* arena = *arenap; arena; arena = arena->next)
1793         MOZ_ASSERT(cursorp_ != &arena->next);
1794 #endif
1795     Arena* remainingArenas = *arenap;
1796     *arenap = nullptr;
1797     check();
1798     return remainingArenas;
1799 }
1800 
1801 static bool
1802 ShouldRelocateAllArenas(JS::gcreason::Reason reason)
1803 {
1804     return reason == JS::gcreason::DEBUG_GC;
1805 }
1806 
1807 /*
1808  * Choose which arenas to relocate all cells from. Return an arena cursor that
1809  * can be passed to removeRemainingArenas().
1810  */
1811 Arena**
1812 ArenaList::pickArenasToRelocate(size_t& arenaTotalOut, size_t& relocTotalOut)
1813 {
1814     // Relocate the greatest number of arenas such that the number of used cells
1815     // in relocated arenas is less than or equal to the number of free cells in
1816     // unrelocated arenas. In other words we only relocate cells we can move
1817     // into existing arenas, and we choose the least full arenas to relocate.
1818     //
1819     // This is made easier by the fact that the arena list has been sorted in
1820     // descending order of number of used cells, so we will always relocate a
1821     // tail of the arena list. All we need to do is find the point at which to
1822     // start relocating.
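    //
    // For example (illustrative counts, assuming 10 cells per arena): if the
    // non-full arenas after the cursor hold 8, 5, 2 and 1 used cells, we keep
    // the first two (which have 2 + 5 = 7 free cells between them) and
    // relocate the last two, whose 2 + 1 = 3 used cells fit into that space.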
1823 
1824     check();
1825 
1826     if (isCursorAtEnd())
1827         return nullptr;
1828 
1829     Arena** arenap = cursorp_;     // Next arena to consider for relocation.
1830     size_t previousFreeCells = 0;  // Count of free cells before arenap.
1831     size_t followingUsedCells = 0; // Count of used cells after arenap.
1832     size_t fullArenaCount = 0;     // Number of full arenas (not relocated).
1833     size_t nonFullArenaCount = 0;  // Number of non-full arenas (considered for relocation).
1834     size_t arenaIndex = 0;         // Index of the next arena to consider.
1835 
1836     for (Arena* arena = head_; arena != *cursorp_; arena = arena->next)
1837         fullArenaCount++;
1838 
1839     for (Arena* arena = *cursorp_; arena; arena = arena->next) {
1840         followingUsedCells += arena->countUsedCells();
1841         nonFullArenaCount++;
1842     }
1843 
1844     mozilla::DebugOnly<size_t> lastFreeCells(0);
1845     size_t cellsPerArena = Arena::thingsPerArena((*arenap)->getAllocKind());
1846 
1847     while (*arenap) {
1848         Arena* arena = *arenap;
1849         if (followingUsedCells <= previousFreeCells)
1850             break;
1851 
1852         size_t freeCells = arena->countFreeCells();
1853         size_t usedCells = cellsPerArena - freeCells;
1854         followingUsedCells -= usedCells;
1855 #ifdef DEBUG
1856         MOZ_ASSERT(freeCells >= lastFreeCells);
1857         lastFreeCells = freeCells;
1858 #endif
1859         previousFreeCells += freeCells;
1860         arenap = &arena->next;
1861         arenaIndex++;
1862     }
1863 
1864     size_t relocCount = nonFullArenaCount - arenaIndex;
1865     MOZ_ASSERT(relocCount < nonFullArenaCount);
1866     MOZ_ASSERT((relocCount == 0) == (!*arenap));
1867     arenaTotalOut += fullArenaCount + nonFullArenaCount;
1868     relocTotalOut += relocCount;
1869 
1870     return arenap;
1871 }
1872 
1873 #ifdef DEBUG
1874 inline bool
1875 PtrIsInRange(const void* ptr, const void* start, size_t length)
1876 {
1877     return uintptr_t(ptr) - uintptr_t(start) < length;
1878 }
1879 #endif
1880 
1881 static TenuredCell*
1882 AllocRelocatedCell(Zone* zone, AllocKind thingKind, size_t thingSize)
1883 {
1884     AutoEnterOOMUnsafeRegion oomUnsafe;
1885     void* dstAlloc = zone->arenas.allocateFromFreeList(thingKind, thingSize);
1886     if (!dstAlloc)
1887         dstAlloc = GCRuntime::refillFreeListInGC(zone, thingKind);
1888     if (!dstAlloc) {
1889         // This can only happen in zeal mode or debug builds as we don't
1890         // otherwise relocate more cells than we have existing free space
1891         // for.
1892         oomUnsafe.crash("Could not allocate new arena while compacting");
1893     }
1894     return TenuredCell::fromPointer(dstAlloc);
1895 }
1896 
1897 static void
1898 RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind, size_t thingSize)
1899 {
1900     JS::AutoSuppressGCAnalysis nogc(zone->contextFromMainThread());
1901 
1902     // Allocate a new cell.
1903     MOZ_ASSERT(zone == src->zone());
1904     TenuredCell* dst = AllocRelocatedCell(zone, thingKind, thingSize);
1905 
1906     // Copy source cell contents to destination.
1907     memcpy(dst, src, thingSize);
1908 
1909     // Move any uid attached to the object.
1910     src->zone()->transferUniqueId(dst, src);
1911 
1912     if (IsObjectAllocKind(thingKind)) {
1913         JSObject* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
1914         JSObject* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
1915 
1916         if (srcObj->isNative()) {
1917             NativeObject* srcNative = &srcObj->as<NativeObject>();
1918             NativeObject* dstNative = &dstObj->as<NativeObject>();
1919 
1920             // Fixup the pointer to inline object elements if necessary.
1921             if (srcNative->hasFixedElements())
1922                 dstNative->setFixedElements();
1923 
1924             // For copy-on-write objects that own their elements, fix up the
1925             // owner pointer to point to the relocated object.
1926             if (srcNative->denseElementsAreCopyOnWrite()) {
1927                 GCPtrNativeObject& owner = dstNative->getElementsHeader()->ownerObject();
1928                 if (owner == srcNative)
1929                     owner = dstNative;
1930             }
1931         }
1932 
1933         // Call object moved hook if present.
1934         if (JSObjectMovedOp op = srcObj->getClass()->extObjectMovedOp())
1935             op(dstObj, srcObj);
1936 
1937         MOZ_ASSERT_IF(dstObj->isNative(),
1938                       !PtrIsInRange((const Value*)dstObj->as<NativeObject>().getDenseElements(),
1939                                     src, thingSize));
1940     }
1941 
1942     // Copy the mark bits.
1943     dst->copyMarkBitsFrom(src);
1944 
1945     // Mark source cell as forwarded and leave a pointer to the destination.
1946     RelocationOverlay* overlay = RelocationOverlay::fromCell(src);
1947     overlay->forwardTo(dst);
1948 }
1949 
1950 static void
1951 RelocateArena(Arena* arena, SliceBudget& sliceBudget)
1952 {
1953     MOZ_ASSERT(arena->allocated());
1954     MOZ_ASSERT(!arena->hasDelayedMarking);
1955     MOZ_ASSERT(!arena->markOverflow);
1956     MOZ_ASSERT(!arena->allocatedDuringIncremental);
1957     MOZ_ASSERT(arena->bufferedCells->isEmpty());
1958 
1959     Zone* zone = arena->zone;
1960 
1961     AllocKind thingKind = arena->getAllocKind();
1962     size_t thingSize = arena->getThingSize();
1963 
1964     for (ArenaCellIterUnderGC i(arena); !i.done(); i.next()) {
1965         RelocateCell(zone, i.getCell(), thingKind, thingSize);
1966         sliceBudget.step();
1967     }
1968 
1969 #ifdef DEBUG
1970     for (ArenaCellIterUnderGC i(arena); !i.done(); i.next()) {
1971         TenuredCell* src = i.getCell();
1972         MOZ_ASSERT(RelocationOverlay::isCellForwarded(src));
1973         TenuredCell* dest = Forwarded(src);
1974         MOZ_ASSERT(src->isMarked(BLACK) == dest->isMarked(BLACK));
1975         MOZ_ASSERT(src->isMarked(GRAY) == dest->isMarked(GRAY));
1976     }
1977 #endif
1978 }
1979 
1980 static inline bool
1981 ShouldProtectRelocatedArenas(JS::gcreason::Reason reason)
1982 {
1983     // For zeal mode collections we don't release the relocated arenas
1984     // immediately. Instead we protect them and keep them around until the next
1985     // collection so we can catch any stray accesses to them.
1986 #ifdef DEBUG
1987     return reason == JS::gcreason::DEBUG_GC;
1988 #else
1989     return false;
1990 #endif
1991 }
1992 
1993 /*
1994  * Relocate all arenas identified by pickArenasToRelocate: for each arena,
1995  * relocate each cell within it, then add it to a list of relocated arenas.
1996  */
1997 Arena*
1998 ArenaList::relocateArenas(Arena* toRelocate, Arena* relocated, SliceBudget& sliceBudget,
1999                           gcstats::Statistics& stats)
2000 {
2001     check();
2002 
2003     while (Arena* arena = toRelocate) {
2004         toRelocate = arena->next;
2005         RelocateArena(arena, sliceBudget);
2006         // Prepend to list of relocated arenas
2007         arena->next = relocated;
2008         relocated = arena;
2009         stats.count(gcstats::STAT_ARENA_RELOCATED);
2010     }
2011 
2012     check();
2013 
2014     return relocated;
2015 }
2016 
2017 // Skip compacting zones unless we can free a certain proportion of their GC
2018 // heap memory.
2019 static const double MIN_ZONE_RECLAIM_PERCENT = 2.0;
2020 
2021 static bool
2022 ShouldRelocateZone(size_t arenaCount, size_t relocCount, JS::gcreason::Reason reason)
2023 {
2024     if (relocCount == 0)
2025         return false;
2026 
2027     if (IsOOMReason(reason))
2028         return true;
2029 
2030     return (relocCount * 100.0) / arenaCount >= MIN_ZONE_RECLAIM_PERCENT;
2031 }
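// Example: a zone with 200 arenas of which 5 were picked for relocation gives
// 5 * 100.0 / 200 = 2.5%, which meets MIN_ZONE_RECLAIM_PERCENT, so the zone
// will be compacted (a relocCount of zero always skips relocation, and
// OOM-triggered GCs skip the percentage check, as handled above).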
2032 
2033 bool
2034 ArenaLists::relocateArenas(Zone* zone, Arena*& relocatedListOut, JS::gcreason::Reason reason,
2035                            SliceBudget& sliceBudget, gcstats::Statistics& stats)
2036 {
2037     // This is only called from the main thread while we are doing a GC, so
2038     // there is no need to lock.
2039     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
2040     MOZ_ASSERT(runtime_->gc.isHeapCompacting());
2041     MOZ_ASSERT(!runtime_->gc.isBackgroundSweeping());
2042 
2043     // Clear all the free lists.
2044     purge();
2045 
2046     if (ShouldRelocateAllArenas(reason)) {
2047         zone->prepareForCompacting();
2048         for (auto kind : AllocKindsToRelocate) {
2049             ArenaList& al = arenaLists[kind];
2050             Arena* allArenas = al.head();
2051             al.clear();
2052             relocatedListOut = al.relocateArenas(allArenas, relocatedListOut, sliceBudget, stats);
2053         }
2054     } else {
2055         size_t arenaCount = 0;
2056         size_t relocCount = 0;
2057         AllAllocKindArray<Arena**> toRelocate;
2058 
2059         for (auto kind : AllocKindsToRelocate)
2060             toRelocate[kind] = arenaLists[kind].pickArenasToRelocate(arenaCount, relocCount);
2061 
2062         if (!ShouldRelocateZone(arenaCount, relocCount, reason))
2063             return false;
2064 
2065         zone->prepareForCompacting();
2066         for (auto kind : AllocKindsToRelocate) {
2067             if (toRelocate[kind]) {
2068                 ArenaList& al = arenaLists[kind];
2069                 Arena* arenas = al.removeRemainingArenas(toRelocate[kind]);
2070                 relocatedListOut = al.relocateArenas(arenas, relocatedListOut, sliceBudget, stats);
2071             }
2072         }
2073     }
2074 
2075     return true;
2076 }
2077 
2078 bool
2079 GCRuntime::relocateArenas(Zone* zone, JS::gcreason::Reason reason, Arena*& relocatedListOut,
2080                           SliceBudget& sliceBudget)
2081 {
2082     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT_MOVE);
2083 
2084     MOZ_ASSERT(!zone->isPreservingCode());
2085     MOZ_ASSERT(CanRelocateZone(zone));
2086 
2087     js::CancelOffThreadIonCompile(rt, JS::Zone::Compact);
2088 
2089     if (!zone->arenas.relocateArenas(zone, relocatedListOut, reason, sliceBudget, stats))
2090         return false;
2091 
2092 #ifdef DEBUG
2093     // Check that we did as much compaction as we should have. There
2094     // should always be less than one arena's worth of free cells.
2095     for (auto i : AllocKindsToRelocate) {
2096         ArenaList& al = zone->arenas.arenaLists[i];
2097         size_t freeCells = 0;
2098         for (Arena* arena = al.arenaAfterCursor(); arena; arena = arena->next)
2099             freeCells += arena->countFreeCells();
2100         MOZ_ASSERT(freeCells < Arena::thingsPerArena(i));
2101     }
2102 #endif
2103 
2104     return true;
2105 }
2106 
2107 void
2108 MovingTracer::onObjectEdge(JSObject** objp)
2109 {
2110     JSObject* obj = *objp;
2111     if (obj->runtimeFromAnyThread() == runtime() && IsForwarded(obj))
2112         *objp = Forwarded(obj);
2113 }
2114 
2115 void
2116 MovingTracer::onShapeEdge(Shape** shapep)
2117 {
2118     Shape* shape = *shapep;
2119     if (shape->runtimeFromAnyThread() == runtime() && IsForwarded(shape))
2120         *shapep = Forwarded(shape);
2121 }
2122 
2123 void
2124 MovingTracer::onStringEdge(JSString** stringp)
2125 {
2126     JSString* string = *stringp;
2127     if (string->runtimeFromAnyThread() == runtime() && IsForwarded(string))
2128         *stringp = Forwarded(string);
2129 }
2130 
2131 void
2132 MovingTracer::onScriptEdge(JSScript** scriptp)
2133 {
2134     JSScript* script = *scriptp;
2135     if (script->runtimeFromAnyThread() == runtime() && IsForwarded(script))
2136         *scriptp = Forwarded(script);
2137 }
2138 
2139 void
2140 MovingTracer::onLazyScriptEdge(LazyScript** lazyp)
2141 {
2142     LazyScript* lazy = *lazyp;
2143     if (lazy->runtimeFromAnyThread() == runtime() && IsForwarded(lazy))
2144         *lazyp = Forwarded(lazy);
2145 }
2146 
2147 void
2148 MovingTracer::onBaseShapeEdge(BaseShape** basep)
2149 {
2150     BaseShape* base = *basep;
2151     if (base->runtimeFromAnyThread() == runtime() && IsForwarded(base))
2152         *basep = Forwarded(base);
2153 }
2154 
2155 void
2156 MovingTracer::onScopeEdge(Scope** scopep)
2157 {
2158     Scope* scope = *scopep;
2159     if (scope->runtimeFromAnyThread() == runtime() && IsForwarded(scope))
2160         *scopep = Forwarded(scope);
2161 }
2162 
2163 void
2164 Zone::prepareForCompacting()
2165 {
2166     FreeOp* fop = runtimeFromMainThread()->defaultFreeOp();
2167     discardJitCode(fop);
2168 }
2169 
2170 void
2171 GCRuntime::sweepTypesAfterCompacting(Zone* zone)
2172 {
2173     FreeOp* fop = rt->defaultFreeOp();
2174     zone->beginSweepTypes(fop, rt->gc.releaseObservedTypes && !zone->isPreservingCode());
2175 
2176     AutoClearTypeInferenceStateOnOOM oom(zone);
2177 
2178     for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next())
2179         script->maybeSweepTypes(&oom);
2180     for (auto group = zone->cellIter<ObjectGroup>(); !group.done(); group.next())
2181         group->maybeSweep(&oom);
2182 
2183     zone->types.endSweep(rt);
2184 }
2185 
2186 void
2187 GCRuntime::sweepZoneAfterCompacting(Zone* zone)
2188 {
2189     MOZ_ASSERT(zone->isCollecting());
2190     FreeOp* fop = rt->defaultFreeOp();
2191     sweepTypesAfterCompacting(zone);
2192     zone->sweepBreakpoints(fop);
2193     zone->sweepWeakMaps();
2194     for (auto* cache : zone->weakCaches_)
2195         cache->sweep();
2196 
2197     for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
2198         c->objectGroups.sweep(fop);
2199         c->sweepRegExps();
2200         c->sweepSavedStacks();
2201         c->sweepGlobalObject(fop);
2202         c->sweepSelfHostingScriptSource();
2203         c->sweepDebugEnvironments();
2204         c->sweepJitCompartment(fop);
2205         c->sweepNativeIterators();
2206         c->sweepTemplateObjects();
2207     }
2208 }
2209 
2210 template <typename T>
2211 static inline void
2212 UpdateCellPointers(MovingTracer* trc, T* cell)
2213 {
2214     cell->fixupAfterMovingGC();
2215     cell->traceChildren(trc);
2216 }
2217 
2218 template <typename T>
2219 static void
2220 UpdateArenaPointersTyped(MovingTracer* trc, Arena* arena, JS::TraceKind traceKind)
2221 {
2222     for (ArenaCellIterUnderGC i(arena); !i.done(); i.next())
2223         UpdateCellPointers(trc, reinterpret_cast<T*>(i.getCell()));
2224 }
2225 
2226 /*
2227  * Update the internal pointers for all cells in an arena.
2228  */
2229 static void
2230 UpdateArenaPointers(MovingTracer* trc, Arena* arena)
2231 {
2232     AllocKind kind = arena->getAllocKind();
2233 
2234     switch (kind) {
2235 #define EXPAND_CASE(allocKind, traceKind, type, sizedType) \
2236       case AllocKind::allocKind: \
2237         UpdateArenaPointersTyped<type>(trc, arena, JS::TraceKind::traceKind); \
2238         return;
2239 FOR_EACH_ALLOCKIND(EXPAND_CASE)
2240 #undef EXPAND_CASE
2241 
2242       default:
2243         MOZ_CRASH("Invalid alloc kind for UpdateArenaPointers");
2244     }
2245 }
2246 
2247 namespace js {
2248 namespace gc {
2249 
2250 struct ArenaListSegment
2251 {
2252     Arena* begin;
2253     Arena* end;
2254 };
2255 
2256 struct ArenasToUpdate
2257 {
2258     ArenasToUpdate(Zone* zone, AllocKinds kinds);
2259     bool done() { return kind == AllocKind::LIMIT; }
2260     ArenaListSegment getArenasToUpdate(AutoLockHelperThreadState& lock, unsigned maxLength);
2261 
2262   private:
2263     AllocKinds kinds;  // Selects which thing kinds to update
2264     Zone* zone;        // Zone to process
2265     AllocKind kind;    // Current alloc kind to process
2266     Arena* arena;      // Next arena to process
2267 
2268     AllocKind nextAllocKind(AllocKind i) { return AllocKind(uint8_t(i) + 1); }
2269     bool shouldProcessKind(AllocKind kind);
2270     Arena* next(AutoLockHelperThreadState& lock);
2271 };
2272 
2273 ArenasToUpdate::ArenasToUpdate(Zone* zone, AllocKinds kinds)
2274   : kinds(kinds), zone(zone), kind(AllocKind::FIRST), arena(nullptr)
2275 {
2276     MOZ_ASSERT(zone->isGCCompacting());
2277 }
2278 
2279 Arena*
2280 ArenasToUpdate::next(AutoLockHelperThreadState& lock)
2281 {
2282     // Find the next arena to update.
2283     //
2284     // This iterates through the GC thing kinds filtered by shouldProcessKind(),
2285     // and then through the arenas of that kind.  All state is held in the
2286     // object and we just return when we find an arena.
2287 
2288     for (; kind < AllocKind::LIMIT; kind = nextAllocKind(kind)) {
2289         if (kinds.contains(kind)) {
2290             if (!arena)
2291                 arena = zone->arenas.getFirstArena(kind);
2292             else
2293                 arena = arena->next;
2294             if (arena)
2295                 return arena;
2296         }
2297     }
2298 
2299     MOZ_ASSERT(!arena);
2300     MOZ_ASSERT(done());
2301     return nullptr;
2302 }
2303 
2304 ArenaListSegment
2305 ArenasToUpdate::getArenasToUpdate(AutoLockHelperThreadState& lock, unsigned maxLength)
2306 {
2307     Arena* begin = next(lock);
2308     if (!begin)
2309         return { nullptr, nullptr };
2310 
2311     Arena* last = begin;
2312     unsigned count = 1;
2313     while (last->next && count < maxLength) {
2314         last = last->next;
2315         count++;
2316     }
2317 
2318     arena = last;
2319     return { begin, last->next };
2320 }
2321 
2322 struct UpdatePointersTask : public GCParallelTask
2323 {
2324     // Maximum number of arenas to update in one block.
2325 #ifdef DEBUG
2326     static const unsigned MaxArenasToProcess = 16;
2327 #else
2328     static const unsigned MaxArenasToProcess = 256;
2329 #endif
2330 
2331     UpdatePointersTask(JSRuntime* rt, ArenasToUpdate* source, AutoLockHelperThreadState& lock)
2332       : rt_(rt), source_(source)
2333     {
2334         arenas_.begin = nullptr;
2335         arenas_.end = nullptr;
2336     }
2337 
2338     ~UpdatePointersTask() override { join(); }
2339 
2340   private:
2341     JSRuntime* rt_;
2342     ArenasToUpdate* source_;
2343     ArenaListSegment arenas_;
2344 
2345     virtual void run() override;
2346     bool getArenasToUpdate();
2347     void updateArenas();
2348 };
2349 
2350 bool
2351 UpdatePointersTask::getArenasToUpdate()
2352 {
2353     AutoLockHelperThreadState lock;
2354     arenas_ = source_->getArenasToUpdate(lock, MaxArenasToProcess);
2355     return arenas_.begin != nullptr;
2356 }
2357 
2358 void
2359 UpdatePointersTask::updateArenas()
2360 {
2361     MovingTracer trc(rt_);
2362     for (Arena* arena = arenas_.begin; arena != arenas_.end; arena = arena->next)
2363         UpdateArenaPointers(&trc, arena);
2364 }
2365 
2366 /* virtual */ void
2367 UpdatePointersTask::run()
2368 {
2369     while (getArenasToUpdate())
2370         updateArenas();
2371 }
2372 
2373 } // namespace gc
2374 } // namespace js
2375 
2376 static const size_t MinCellUpdateBackgroundTasks = 2;
2377 static const size_t MaxCellUpdateBackgroundTasks = 8;
2378 
2379 static size_t
2380 CellUpdateBackgroundTaskCount()
2381 {
2382     if (!CanUseExtraThreads())
2383         return 0;
2384 
2385     size_t targetTaskCount = HelperThreadState().cpuCount / 2;
2386     return Min(Max(targetTaskCount, MinCellUpdateBackgroundTasks), MaxCellUpdateBackgroundTasks);
2387 }
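// Example: an 8-core machine targets 8 / 2 = 4 background tasks, already
// within [MinCellUpdateBackgroundTasks, MaxCellUpdateBackgroundTasks]; a
// 2-core machine is clamped up to 2 tasks and a 32-core machine is clamped
// down to 8. With no extra threads available the count is 0 and all updates
// run on the main thread.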
2388 
2389 static bool
2390 CanUpdateKindInBackground(AllocKind kind) {
2391     // We try to update as many GC things in parallel as we can, but there are
2392     // kinds for which this might not be safe:
2393     //  - we assume JSObjects that are foreground finalized are not safe to
2394     //    update in parallel
2395     //  - updating a shape touches child shapes in fixupShapeTreeAfterMovingGC()
2396     if (!js::gc::IsBackgroundFinalized(kind) || IsShapeAllocKind(kind))
2397         return false;
2398 
2399     return true;
2400 }
2401 
2402 static AllocKinds
2403 ForegroundUpdateKinds(AllocKinds kinds)
2404 {
2405     AllocKinds result;
2406     for (AllocKind kind : kinds) {
2407         if (!CanUpdateKindInBackground(kind))
2408             result += kind;
2409     }
2410     return result;
2411 }
2412 
2413 void
2414 GCRuntime::updateTypeDescrObjects(MovingTracer* trc, Zone* zone)
2415 {
2416     zone->typeDescrObjects.sweep();
2417     for (auto r = zone->typeDescrObjects.all(); !r.empty(); r.popFront())
2418         UpdateCellPointers(trc, r.front().get());
2419 }
2420 
2421 void
2422 GCRuntime::updateCellPointers(MovingTracer* trc, Zone* zone, AllocKinds kinds, size_t bgTaskCount)
2423 {
2424     AllocKinds fgKinds = bgTaskCount == 0 ? kinds : ForegroundUpdateKinds(kinds);
2425     AllocKinds bgKinds = kinds - fgKinds;
2426 
2427     ArenasToUpdate fgArenas(zone, fgKinds);
2428     ArenasToUpdate bgArenas(zone, bgKinds);
2429     Maybe<UpdatePointersTask> fgTask;
2430     Maybe<UpdatePointersTask> bgTasks[MaxCellUpdateBackgroundTasks];
2431 
2432     size_t tasksStarted = 0;
2433 
2434     {
2435         AutoLockHelperThreadState lock;
2436 
2437         fgTask.emplace(rt, &fgArenas, lock);
2438 
2439         for (size_t i = 0; i < bgTaskCount && !bgArenas.done(); i++) {
2440             bgTasks[i].emplace(rt, &bgArenas, lock);
2441             startTask(*bgTasks[i], gcstats::PHASE_COMPACT_UPDATE_CELLS, lock);
2442             tasksStarted = i + 1;
2443         }
2444     }
2445 
2446     fgTask->runFromMainThread(rt);
2447 
2448     {
2449         AutoLockHelperThreadState lock;
2450 
2451         for (size_t i = 0; i < tasksStarted; i++)
2452             joinTask(*bgTasks[i], gcstats::PHASE_COMPACT_UPDATE_CELLS, lock);
2453     }
2454 }
2455 
2456 // After cells have been relocated any pointers to a cell's old locations must
2457 // be updated to point to the new location.  This happens by iterating through
2458 // all cells in the heap and tracing their children (non-recursively) to update
2459 // them.
2460 //
2461 // This is complicated by the fact that updating a GC thing sometimes depends on
2462 // making use of other GC things.  After a moving GC these things may not be in
2463 // a valid state since they may contain pointers which have not been updated
2464 // yet.
2465 //
2466 // The main dependencies are:
2467 //
2468 //   - Updating a JSObject makes use of its shape
2469 //   - Updating a typed object makes use of its type descriptor object
2470 //
2471 // This means we require at least three phases for update:
2472 //
2473 //  1) shapes
2474 //  2) typed object type descriptor objects
2475 //  3) all other objects
2476 //
2477 // Since we want to minimize the number of phases, we put everything else into
2478 // the first phase and label it the 'misc' phase.
2479 
2480 static const AllocKinds UpdatePhaseMisc {
2481     AllocKind::SCRIPT,
2482     AllocKind::LAZY_SCRIPT,
2483     AllocKind::BASE_SHAPE,
2484     AllocKind::SHAPE,
2485     AllocKind::ACCESSOR_SHAPE,
2486     AllocKind::OBJECT_GROUP,
2487     AllocKind::STRING,
2488     AllocKind::JITCODE,
2489     AllocKind::SCOPE
2490 };
2491 
2492 static const AllocKinds UpdatePhaseObjects {
2493     AllocKind::FUNCTION,
2494     AllocKind::FUNCTION_EXTENDED,
2495     AllocKind::OBJECT0,
2496     AllocKind::OBJECT0_BACKGROUND,
2497     AllocKind::OBJECT2,
2498     AllocKind::OBJECT2_BACKGROUND,
2499     AllocKind::OBJECT4,
2500     AllocKind::OBJECT4_BACKGROUND,
2501     AllocKind::OBJECT8,
2502     AllocKind::OBJECT8_BACKGROUND,
2503     AllocKind::OBJECT12,
2504     AllocKind::OBJECT12_BACKGROUND,
2505     AllocKind::OBJECT16,
2506     AllocKind::OBJECT16_BACKGROUND
2507 };
2508 
2509 void
2510 GCRuntime::updateAllCellPointers(MovingTracer* trc, Zone* zone)
2511 {
2512     AutoDisableProxyCheck noProxyCheck(rt); // These checks assert when run in parallel.
2513 
2514     size_t bgTaskCount = CellUpdateBackgroundTaskCount();
2515 
2516     updateCellPointers(trc, zone, UpdatePhaseMisc, bgTaskCount);
2517 
2518     // Update TypeDescrs before all other objects as typed objects access these
2519     // objects when we trace them.
2520     updateTypeDescrObjects(trc, zone);
2521 
2522     updateCellPointers(trc, zone, UpdatePhaseObjects, bgTaskCount);
2523 }
2524 
2525 /*
2526  * Update pointers to relocated cells by doing a full heap traversal and sweep.
2527  *
2528  * The latter is necessary to update weak references which are not marked as
2529  * part of the traversal.
2530  */
2531 void
2532 GCRuntime::updatePointersToRelocatedCells(Zone* zone, AutoLockForExclusiveAccess& lock)
2533 {
2534     MOZ_ASSERT(!rt->isBeingDestroyed());
2535     MOZ_ASSERT(zone->isGCCompacting());
2536 
2537     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT_UPDATE);
2538     MovingTracer trc(rt);
2539 
2540     zone->fixupAfterMovingGC();
2541 
2542     // Fixup compartment global pointers as these get accessed during marking.
2543     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
2544         comp->fixupAfterMovingGC();
2545     JSCompartment::fixupCrossCompartmentWrappersAfterMovingGC(&trc);
2546     rt->spsProfiler.fixupStringsMapAfterMovingGC();
2547 
2548     // Iterate through all cells that can contain relocatable pointers to update
2549     // them. Since updating each cell is independent we try to parallelize this
2550     // as much as possible.
2551     updateAllCellPointers(&trc, zone);
2552 
2553     // Mark roots to update them.
2554     {
2555         traceRuntimeForMajorGC(&trc, lock);
2556 
2557         gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
2558         Debugger::markAll(&trc);
2559         Debugger::markIncomingCrossCompartmentEdges(&trc);
2560 
2561         WeakMapBase::markAll(zone, &trc);
2562         for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
2563             c->trace(&trc);
2564             if (c->watchpointMap)
2565                 c->watchpointMap->markAll(&trc);
2566         }
2567 
2568         // Mark all gray roots, making sure we call the trace callback to get the
2569         // current set.
2570         if (JSTraceDataOp op = grayRootTracer.op)
2571             (*op)(&trc, grayRootTracer.data);
2572     }
2573 
2574     // Sweep everything to fix up weak pointers
2575     WatchpointMap::sweepAll(rt);
2576     Debugger::sweepAll(rt->defaultFreeOp());
2577     jit::JitRuntime::SweepJitcodeGlobalTable(rt);
2578     rt->gc.sweepZoneAfterCompacting(zone);
2579 
2580     // Type inference may put more blocks here to free.
2581     blocksToFreeAfterSweeping.freeAll();
2582 
2583     // Call callbacks to get the rest of the system to fixup other untraced pointers.
2584     callWeakPointerZoneGroupCallbacks();
2585     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
2586         callWeakPointerCompartmentCallbacks(comp);
2587     if (rt->sweepZoneCallback)
2588         rt->sweepZoneCallback(zone);
2589 }
2590 
2591 void
2592 GCRuntime::protectAndHoldArenas(Arena* arenaList)
2593 {
2594     for (Arena* arena = arenaList; arena; ) {
2595         MOZ_ASSERT(arena->allocated());
2596         Arena* next = arena->next;
2597         if (!next) {
2598             // Prepend to hold list before we protect the memory.
2599             arena->next = relocatedArenasToRelease;
2600             relocatedArenasToRelease = arenaList;
2601         }
2602         ProtectPages(arena, ArenaSize);
2603         arena = next;
2604     }
2605 }
2606 
2607 void
2608 GCRuntime::unprotectHeldRelocatedArenas()
2609 {
2610     for (Arena* arena = relocatedArenasToRelease; arena; arena = arena->next) {
2611         UnprotectPages(arena, ArenaSize);
2612         MOZ_ASSERT(arena->allocated());
2613     }
2614 }
2615 
2616 void
2617 GCRuntime::releaseRelocatedArenas(Arena* arenaList)
2618 {
2619     AutoLockGC lock(rt);
2620     releaseRelocatedArenasWithoutUnlocking(arenaList, lock);
2621 }
2622 
2623 void
2624 GCRuntime::releaseRelocatedArenasWithoutUnlocking(Arena* arenaList, const AutoLockGC& lock)
2625 {
2626     // Release the relocated arenas, now containing only forwarding pointers
2627     unsigned count = 0;
2628     while (arenaList) {
2629         Arena* arena = arenaList;
2630         arenaList = arenaList->next;
2631 
2632         // Clear the mark bits
2633         arena->unmarkAll();
2634 
2635         // Mark arena as empty
2636         arena->setAsFullyUnused();
2637 
2638 #if defined(JS_CRASH_DIAGNOSTICS) || defined(JS_GC_ZEAL)
2639         JS_POISON(reinterpret_cast<void*>(arena->thingsStart()),
2640                   JS_MOVED_TENURED_PATTERN, arena->getThingsSpan());
2641 #endif
2642 
2643         releaseArena(arena, lock);
2644         ++count;
2645     }
2646 }
2647 
2648 // In debug mode we don't always release relocated arenas straight away.
2649 // Sometimes we protect them instead and hold onto them until the next GC sweep
2650 // phase to catch any pointers to them that didn't get forwarded.
2651 
2652 void
2653 GCRuntime::releaseHeldRelocatedArenas()
2654 {
2655 #ifdef DEBUG
2656     unprotectHeldRelocatedArenas();
2657     Arena* arenas = relocatedArenasToRelease;
2658     relocatedArenasToRelease = nullptr;
2659     releaseRelocatedArenas(arenas);
2660 #endif
2661 }
2662 
2663 void
2664 GCRuntime::releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock)
2665 {
2666 #ifdef DEBUG
2667     unprotectHeldRelocatedArenas();
2668     releaseRelocatedArenasWithoutUnlocking(relocatedArenasToRelease, lock);
2669     relocatedArenasToRelease = nullptr;
2670 #endif
2671 }
2672 
2673 void
2674 ReleaseArenaList(JSRuntime* rt, Arena* arena, const AutoLockGC& lock)
2675 {
2676     Arena* next;
2677     for (; arena; arena = next) {
2678         next = arena->next;
2679         rt->gc.releaseArena(arena, lock);
2680     }
2681 }
2682 
2683 ArenaLists::~ArenaLists()
2684 {
2685     AutoLockGC lock(runtime_);
2686 
2687     for (auto i : AllAllocKinds()) {
2688         /*
2689          * We can only call this during the shutdown after the last GC when
2690          * the background finalization is disabled.
2691          */
2692         MOZ_ASSERT(backgroundFinalizeState[i] == BFS_DONE);
2693         ReleaseArenaList(runtime_, arenaLists[i].head(), lock);
2694     }
2695     ReleaseArenaList(runtime_, incrementalSweptArenas.head(), lock);
2696 
2697     for (auto i : ObjectAllocKinds())
2698         ReleaseArenaList(runtime_, savedObjectArenas[i].head(), lock);
2699     ReleaseArenaList(runtime_, savedEmptyObjectArenas, lock);
2700 }
2701 
2702 void
2703 ArenaLists::finalizeNow(FreeOp* fop, const FinalizePhase& phase)
2704 {
2705     gcstats::AutoPhase ap(fop->runtime()->gc.stats, phase.statsPhase);
2706     for (auto kind : phase.kinds)
2707         finalizeNow(fop, kind, RELEASE_ARENAS, nullptr);
2708 }
2709 
2710 void
2711 ArenaLists::finalizeNow(FreeOp* fop, AllocKind thingKind, KeepArenasEnum keepArenas, Arena** empty)
2712 {
2713     MOZ_ASSERT(!IsBackgroundFinalized(thingKind));
2714     forceFinalizeNow(fop, thingKind, keepArenas, empty);
2715 }
2716 
2717 void
2718 ArenaLists::forceFinalizeNow(FreeOp* fop, AllocKind thingKind,
2719                              KeepArenasEnum keepArenas, Arena** empty)
2720 {
2721     MOZ_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
2722 
2723     Arena* arenas = arenaLists[thingKind].head();
2724     if (!arenas)
2725         return;
2726     arenaLists[thingKind].clear();
2727 
2728     size_t thingsPerArena = Arena::thingsPerArena(thingKind);
2729     SortedArenaList finalizedSorted(thingsPerArena);
2730 
2731     auto unlimited = SliceBudget::unlimited();
2732     FinalizeArenas(fop, &arenas, finalizedSorted, thingKind, unlimited, keepArenas);
2733     MOZ_ASSERT(!arenas);
2734 
2735     if (empty) {
2736         MOZ_ASSERT(keepArenas == KEEP_ARENAS);
2737         finalizedSorted.extractEmpty(empty);
2738     }
2739 
2740     arenaLists[thingKind] = finalizedSorted.toArenaList();
2741 }
2742 
2743 void
2744 ArenaLists::queueForForegroundSweep(FreeOp* fop, const FinalizePhase& phase)
2745 {
2746     gcstats::AutoPhase ap(fop->runtime()->gc.stats, phase.statsPhase);
2747     for (auto kind : phase.kinds)
2748         queueForForegroundSweep(fop, kind);
2749 }
2750 
2751 void
2752 ArenaLists::queueForForegroundSweep(FreeOp* fop, AllocKind thingKind)
2753 {
2754     MOZ_ASSERT(!IsBackgroundFinalized(thingKind));
2755     MOZ_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
2756     MOZ_ASSERT(!arenaListsToSweep[thingKind]);
2757 
2758     arenaListsToSweep[thingKind] = arenaLists[thingKind].head();
2759     arenaLists[thingKind].clear();
2760 }
2761 
2762 void
2763 ArenaLists::queueForBackgroundSweep(FreeOp* fop, const FinalizePhase& phase)
2764 {
2765     gcstats::AutoPhase ap(fop->runtime()->gc.stats, phase.statsPhase);
2766     for (auto kind : phase.kinds)
2767         queueForBackgroundSweep(fop, kind);
2768 }
2769 
2770 inline void
2771 ArenaLists::queueForBackgroundSweep(FreeOp* fop, AllocKind thingKind)
2772 {
2773     MOZ_ASSERT(IsBackgroundFinalized(thingKind));
2774 
2775     ArenaList* al = &arenaLists[thingKind];
2776     if (al->isEmpty()) {
2777         MOZ_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
2778         return;
2779     }
2780 
2781     MOZ_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
2782 
2783     arenaListsToSweep[thingKind] = al->head();
2784     al->clear();
2785     backgroundFinalizeState[thingKind] = BFS_RUN;
2786 }
2787 
2788 /*static*/ void
2789 ArenaLists::backgroundFinalize(FreeOp* fop, Arena* listHead, Arena** empty)
2790 {
2791     MOZ_ASSERT(listHead);
2792     MOZ_ASSERT(empty);
2793 
2794     AllocKind thingKind = listHead->getAllocKind();
2795     Zone* zone = listHead->zone;
2796 
2797     size_t thingsPerArena = Arena::thingsPerArena(thingKind);
2798     SortedArenaList finalizedSorted(thingsPerArena);
2799 
2800     auto unlimited = SliceBudget::unlimited();
2801     FinalizeArenas(fop, &listHead, finalizedSorted, thingKind, unlimited, KEEP_ARENAS);
2802     MOZ_ASSERT(!listHead);
2803 
2804     finalizedSorted.extractEmpty(empty);
2805 
2806     // When arenas are queued for background finalization, all arenas are moved
2807     // to arenaListsToSweep[], leaving the arenaLists[] empty. However, new
2808     // arenas may be allocated before background finalization finishes; now that
2809     // finalization is complete, we want to merge these lists back together.
2810     ArenaLists* lists = &zone->arenas;
2811     ArenaList* al = &lists->arenaLists[thingKind];
2812 
2813     // Flatten |finalizedSorted| into a regular ArenaList.
2814     ArenaList finalized = finalizedSorted.toArenaList();
2815 
2816     // We must take the GC lock to be able to safely modify the ArenaList;
2817     // however, this does not by itself make the changes visible to all threads,
2818     // as not all threads take the GC lock to read the ArenaLists.
2819     // That safety is provided by the ReleaseAcquire memory ordering of the
2820     // background finalize state, which we explicitly set as the final step.
2821     {
2822         AutoLockGC lock(lists->runtime_);
2823         MOZ_ASSERT(lists->backgroundFinalizeState[thingKind] == BFS_RUN);
2824 
2825         // Join |al| and |finalized| into a single list.
2826         *al = finalized.insertListWithCursorAtEnd(*al);
2827 
2828         lists->arenaListsToSweep[thingKind] = nullptr;
2829     }
2830 
2831     lists->backgroundFinalizeState[thingKind] = BFS_DONE;
2832 }
2833 
2834 void
2835 ArenaLists::queueForegroundObjectsForSweep(FreeOp* fop)
2836 {
2837     gcstats::AutoPhase ap(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_OBJECT);
2838 
2839 #ifdef DEBUG
2840     for (auto i : ObjectAllocKinds())
2841         MOZ_ASSERT(savedObjectArenas[i].isEmpty());
2842     MOZ_ASSERT(savedEmptyObjectArenas == nullptr);
2843 #endif
2844 
2845     // Foreground finalized objects must be finalized at the beginning of the
2846     // sweep phase, before control can return to the mutator. Otherwise,
2847     // mutator behavior can resurrect certain objects whose references would
2848     // otherwise have been erased by the finalizer.
2849     finalizeNow(fop, AllocKind::OBJECT0, KEEP_ARENAS, &savedEmptyObjectArenas);
2850     finalizeNow(fop, AllocKind::OBJECT2, KEEP_ARENAS, &savedEmptyObjectArenas);
2851     finalizeNow(fop, AllocKind::OBJECT4, KEEP_ARENAS, &savedEmptyObjectArenas);
2852     finalizeNow(fop, AllocKind::OBJECT8, KEEP_ARENAS, &savedEmptyObjectArenas);
2853     finalizeNow(fop, AllocKind::OBJECT12, KEEP_ARENAS, &savedEmptyObjectArenas);
2854     finalizeNow(fop, AllocKind::OBJECT16, KEEP_ARENAS, &savedEmptyObjectArenas);
2855 
2856     // Prevent the arenas from having new objects allocated into them. We need
2857     // to know which objects are marked while we incrementally sweep dead
2858     // references from type information.
2859     savedObjectArenas[AllocKind::OBJECT0] = arenaLists[AllocKind::OBJECT0].copyAndClear();
2860     savedObjectArenas[AllocKind::OBJECT2] = arenaLists[AllocKind::OBJECT2].copyAndClear();
2861     savedObjectArenas[AllocKind::OBJECT4] = arenaLists[AllocKind::OBJECT4].copyAndClear();
2862     savedObjectArenas[AllocKind::OBJECT8] = arenaLists[AllocKind::OBJECT8].copyAndClear();
2863     savedObjectArenas[AllocKind::OBJECT12] = arenaLists[AllocKind::OBJECT12].copyAndClear();
2864     savedObjectArenas[AllocKind::OBJECT16] = arenaLists[AllocKind::OBJECT16].copyAndClear();
2865 }
2866 
2867 void
2868 ArenaLists::mergeForegroundSweptObjectArenas()
2869 {
2870     AutoLockGC lock(runtime_);
2871     ReleaseArenaList(runtime_, savedEmptyObjectArenas, lock);
2872     savedEmptyObjectArenas = nullptr;
2873 
2874     mergeSweptArenas(AllocKind::OBJECT0);
2875     mergeSweptArenas(AllocKind::OBJECT2);
2876     mergeSweptArenas(AllocKind::OBJECT4);
2877     mergeSweptArenas(AllocKind::OBJECT8);
2878     mergeSweptArenas(AllocKind::OBJECT12);
2879     mergeSweptArenas(AllocKind::OBJECT16);
2880 }
2881 
2882 inline void
2883 ArenaLists::mergeSweptArenas(AllocKind thingKind)
2884 {
2885     ArenaList* al = &arenaLists[thingKind];
2886     ArenaList* saved = &savedObjectArenas[thingKind];
2887 
2888     *al = saved->insertListWithCursorAtEnd(*al);
2889     saved->clear();
2890 }
2891 
2892 void
2893 ArenaLists::queueForegroundThingsForSweep(FreeOp* fop)
2894 {
2895     gcShapeArenasToUpdate = arenaListsToSweep[AllocKind::SHAPE];
2896     gcAccessorShapeArenasToUpdate = arenaListsToSweep[AllocKind::ACCESSOR_SHAPE];
2897     gcObjectGroupArenasToUpdate = arenaListsToSweep[AllocKind::OBJECT_GROUP];
2898     gcScriptArenasToUpdate = arenaListsToSweep[AllocKind::SCRIPT];
2899 }
2900 
2901 SliceBudget::SliceBudget()
2902   : timeBudget(UnlimitedTimeBudget), workBudget(UnlimitedWorkBudget)
2903 {
2904     makeUnlimited();
2905 }
2906 
2907 SliceBudget::SliceBudget(TimeBudget time)
2908   : timeBudget(time), workBudget(UnlimitedWorkBudget)
2909 {
2910     if (time.budget < 0) {
2911         makeUnlimited();
2912     } else {
2913         // Note: TimeBudget(0) is equivalent to WorkBudget(CounterReset).
2914         deadline = PRMJ_Now() + time.budget * PRMJ_USEC_PER_MSEC;
2915         counter = CounterReset;
2916     }
2917 }
2918 
2919 SliceBudget::SliceBudget(WorkBudget work)
2920   : timeBudget(UnlimitedTimeBudget), workBudget(work)
2921 {
2922     if (work.budget < 0) {
2923         makeUnlimited();
2924     } else {
2925         deadline = 0;
2926         counter = work.budget;
2927     }
2928 }
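// Illustrative usage (haveMoreWork() and doSomeWork() are placeholders, not
// real functions):
//
//   SliceBudget budget(TimeBudget(10));    // limit this slice to ~10 ms
//   while (haveMoreWork() && !budget.isOverBudget())
//       doSomeWork(budget);                // callees call budget.step()
//
// A negative time or work budget, or the default constructor, yields an
// unlimited budget via makeUnlimited().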
2929 
2930 int
2931 SliceBudget::describe(char* buffer, size_t maxlen) const
2932 {
2933     if (isUnlimited())
2934         return snprintf(buffer, maxlen, "unlimited");
2935     else if (isWorkBudget())
2936         return snprintf(buffer, maxlen, "work(%" PRId64 ")", workBudget.budget);
2937     else
2938         return snprintf(buffer, maxlen, "%" PRId64 "ms", timeBudget.budget);
2939 }
2940 
2941 bool
2942 SliceBudget::checkOverBudget()
2943 {
2944     bool over = PRMJ_Now() >= deadline;
2945     if (!over)
2946         counter = CounterReset;
2947     return over;
2948 }
2949 
2950 void
2951 js::MarkCompartmentActive(InterpreterFrame* fp)
2952 {
2953     fp->script()->compartment()->zone()->active = true;
2954 }
2955 
2956 void
2957 GCRuntime::requestMajorGC(JS::gcreason::Reason reason)
2958 {
2959     MOZ_ASSERT(!CurrentThreadIsPerformingGC());
2960 
2961     if (majorGCRequested())
2962         return;
2963 
2964     majorGCTriggerReason = reason;
2965 
2966     // There's no need to use RequestInterruptUrgent here. It's slower because
2967     // it has to interrupt (looping) Ion code, but loops in Ion code that
2968     // affect GC will have an explicit interrupt check.
2969     rt->requestInterrupt(JSRuntime::RequestInterruptCanWait);
2970 }
2971 
2972 void
2973 GCRuntime::requestMinorGC(JS::gcreason::Reason reason)
2974 {
2975     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
2976     MOZ_ASSERT(!CurrentThreadIsPerformingGC());
2977 
2978     if (minorGCRequested())
2979         return;
2980 
2981     minorGCTriggerReason = reason;
2982 
2983     // See comment in requestMajorGC.
2984     rt->requestInterrupt(JSRuntime::RequestInterruptCanWait);
2985 }
2986 
2987 bool
2988 GCRuntime::triggerGC(JS::gcreason::Reason reason)
2989 {
2990     /*
2991      * Don't trigger GCs if this is being called off the main thread from
2992      * onTooMuchMalloc().
2993      */
2994     if (!CurrentThreadCanAccessRuntime(rt))
2995         return false;
2996 
2997     /* GC is already running. */
2998     if (rt->isHeapCollecting())
2999         return false;
3000 
3001     JS::PrepareForFullGC(rt->contextFromMainThread());
3002     requestMajorGC(reason);
3003     return true;
3004 }
3005 
3006 void
3007 GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock)
3008 {
3009     size_t usedBytes = zone->usage.gcBytes();
3010     size_t thresholdBytes = zone->threshold.gcTriggerBytes();
3011     size_t igcThresholdBytes = thresholdBytes * tunables.zoneAllocThresholdFactor();
3012 
3013     if (usedBytes >= thresholdBytes) {
3014         // The threshold has been surpassed, immediately trigger a GC,
3015         // which will be done non-incrementally.
3016         triggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
3017     } else if (usedBytes >= igcThresholdBytes) {
3018         // Reduce the delay to the start of the next incremental slice.
3019         if (zone->gcDelayBytes < ArenaSize)
3020             zone->gcDelayBytes = 0;
3021         else
3022             zone->gcDelayBytes -= ArenaSize;
3023 
3024         if (!zone->gcDelayBytes) {
3025             // Start or continue an in progress incremental GC. We do this
3026             // to try to avoid performing non-incremental GCs on zones
3027             // which allocate a lot of data, even when incremental slices
3028             // can't be triggered via scheduling in the event loop.
3029             triggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
3030 
3031             // Delay the next slice until a certain amount of allocation
3032             // has been performed.
3033             zone->gcDelayBytes = tunables.zoneAllocDelayBytes();
3034         }
3035     }
3036 }
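// Example (illustrative numbers): with a zone trigger threshold of 100 MB and
// a zoneAllocThresholdFactor() of 0.9, usage from 90 MB upwards starts or
// continues an incremental zone GC (rate-limited by gcDelayBytes), while usage
// at or above 100 MB triggers a collection immediately.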
3037 
3038 bool
3039 GCRuntime::triggerZoneGC(Zone* zone, JS::gcreason::Reason reason)
3040 {
3041     /* Zones in use by a thread with an exclusive context can't be collected. */
3042     if (!CurrentThreadCanAccessRuntime(rt)) {
3043         MOZ_ASSERT(zone->usedByExclusiveThread || zone->isAtomsZone());
3044         return false;
3045     }
3046 
3047     /* GC is already running. */
3048     if (rt->isHeapCollecting())
3049         return false;
3050 
3051 #ifdef JS_GC_ZEAL
3052     if (hasZealMode(ZealMode::Alloc)) {
3053         MOZ_RELEASE_ASSERT(triggerGC(reason));
3054         return true;
3055     }
3056 #endif
3057 
3058     if (zone->isAtomsZone()) {
3059         /* We can't do a zone GC of the atoms compartment. */
3060         if (rt->keepAtoms()) {
3061             /* Skip GC and retrigger later, since atoms zone won't be collected
3062              * if keepAtoms is true. */
3063             fullGCForAtomsRequested_ = true;
3064             return false;
3065         }
3066         MOZ_RELEASE_ASSERT(triggerGC(reason));
3067         return true;
3068     }
3069 
3070     PrepareZoneForGC(zone);
3071     requestMajorGC(reason);
3072     return true;
3073 }
3074 
3075 void
3076 GCRuntime::maybeGC(Zone* zone)
3077 {
3078     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3079 
3080 #ifdef JS_GC_ZEAL
3081     if (hasZealMode(ZealMode::Alloc) || hasZealMode(ZealMode::Poke)) {
3082         JS::PrepareForFullGC(rt->contextFromMainThread());
3083         gc(GC_NORMAL, JS::gcreason::DEBUG_GC);
3084         return;
3085     }
3086 #endif
3087 
3088     if (gcIfRequested())
3089         return;
3090 
3091     if (zone->usage.gcBytes() > 1024 * 1024 &&
3092         zone->usage.gcBytes() >= zone->threshold.allocTrigger(schedulingState.inHighFrequencyGCMode()) &&
3093         !isIncrementalGCInProgress() &&
3094         !isBackgroundSweeping())
3095     {
3096         PrepareZoneForGC(zone);
3097         startGC(GC_NORMAL, JS::gcreason::EAGER_ALLOC_TRIGGER);
3098     }
3099 }
3100 
3101 // Do all possible decommit immediately from the current thread without
3102 // releasing the GC lock or allocating any memory.
3103 void
3104 GCRuntime::decommitAllWithoutUnlocking(const AutoLockGC& lock)
3105 {
3106     MOZ_ASSERT(emptyChunks(lock).count() == 0);
3107     for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done(); chunk.next())
3108         chunk->decommitAllArenasWithoutUnlocking(lock);
3109     MOZ_ASSERT(availableChunks(lock).verify());
3110 }
3111 
3112 void
3113 GCRuntime::startDecommit()
3114 {
3115     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3116     MOZ_ASSERT(!decommitTask.isRunning());
3117 
3118     // If we are allocating heavily enough to trigger "high frequency" GC, then
3119     // skip decommit so that we do not compete with the mutator.
3120     if (schedulingState.inHighFrequencyGCMode())
3121         return;
3122 
3123     BackgroundDecommitTask::ChunkVector toDecommit;
3124     {
3125         AutoLockGC lock(rt);
3126 
3127         // Verify that all entries in the empty chunks pool are already decommitted.
3128         for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next())
3129             MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
3130 
3131         // Since we release the GC lock while doing the decommit syscall below,
3132         // it is dangerous to iterate the available list directly, as the main
3133         // thread could modify it concurrently. Instead, we build and pass an
3134         // explicit Vector containing the Chunks we want to visit.
3135         MOZ_ASSERT(availableChunks(lock).verify());
3136         for (ChunkPool::Iter iter(availableChunks(lock)); !iter.done(); iter.next()) {
3137             if (!toDecommit.append(iter.get())) {
3138                 // The OOM handler does a full, immediate decommit.
3139                 return onOutOfMallocMemory(lock);
3140             }
3141         }
3142     }
3143     decommitTask.setChunksToScan(toDecommit);
3144 
3145     if (sweepOnBackgroundThread && decommitTask.start())
3146         return;
3147 
3148     decommitTask.runFromMainThread(rt);
3149 }
3150 
3151 void
3152 js::gc::BackgroundDecommitTask::setChunksToScan(ChunkVector &chunks)
3153 {
3154     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
3155     MOZ_ASSERT(!isRunning());
3156     MOZ_ASSERT(toDecommit.empty());
3157     Swap(toDecommit, chunks);
3158 }
3159 
3160 /* virtual */ void
3161 js::gc::BackgroundDecommitTask::run()
3162 {
3163     AutoLockGC lock(runtime);
3164 
3165     for (Chunk* chunk : toDecommit) {
3166 
3167         // The arena list is not doubly-linked, so we have to work in the free
3168         // list order and not in the natural order.
3169         while (chunk->info.numArenasFreeCommitted) {
3170             bool ok = chunk->decommitOneFreeArena(runtime, lock);
3171 
3172             // If we are low enough on memory that we can't update the page
3173             // tables, or if we need to return for any other reason, break out
3174             // of the loop.
3175             if (cancel_ || !ok)
3176                 break;
3177         }
3178     }
3179     toDecommit.clearAndFree();
3180 
3181     ChunkPool toFree = runtime->gc.expireEmptyChunkPool(lock);
3182     if (toFree.count()) {
3183         AutoUnlockGC unlock(lock);
3184         FreeChunkPool(runtime, toFree);
3185     }
3186 }
3187 
3188 void
3189 GCRuntime::sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks)
3190 {
3191     freeBlocks.freeAll();
3192 
3193     if (zones.isEmpty())
3194         return;
3195 
3196     // We must finalize thing kinds in the order specified by BackgroundFinalizePhases.
3197     Arena* emptyArenas = nullptr;
3198     FreeOp fop(nullptr);
3199     for (unsigned phase = 0 ; phase < ArrayLength(BackgroundFinalizePhases) ; ++phase) {
3200         for (Zone* zone = zones.front(); zone; zone = zone->nextZone()) {
3201             for (auto kind : BackgroundFinalizePhases[phase].kinds) {
3202                 Arena* arenas = zone->arenas.arenaListsToSweep[kind];
3203                 MOZ_RELEASE_ASSERT(uintptr_t(arenas) != uintptr_t(-1));
3204                 if (arenas)
3205                     ArenaLists::backgroundFinalize(&fop, arenas, &emptyArenas);
3206             }
3207         }
3208     }
3209 
3210     AutoLockGC lock(rt);
3211 
3212     // Release swept arenas, dropping and reacquiring the lock every so often to
3213     // avoid blocking the main thread from allocating chunks.
3214     static const size_t LockReleasePeriod = 32;
3215     size_t releaseCount = 0;
3216     Arena* next;
3217     for (Arena* arena = emptyArenas; arena; arena = next) {
3218         next = arena->next;
3219         rt->gc.releaseArena(arena, lock);
3220         releaseCount++;
3221         if (releaseCount % LockReleasePeriod == 0) {
3222             lock.unlock();
3223             lock.lock();
3224         }
3225     }
3226 
3227     while (!zones.isEmpty())
3228         zones.removeFront();
3229 }
3230 
3231 void
3232 GCRuntime::assertBackgroundSweepingFinished()
3233 {
3234 #ifdef DEBUG
3235     MOZ_ASSERT(backgroundSweepZones.isEmpty());
3236     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
3237         for (auto i : AllAllocKinds()) {
3238             MOZ_ASSERT(!zone->arenas.arenaListsToSweep[i]);
3239             MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(i));
3240         }
3241     }
3242     MOZ_ASSERT(blocksToFreeAfterSweeping.computedSizeOfExcludingThis() == 0);
3243 #endif
3244 }
3245 
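// Return the number of logical processors, computed once and cached in a
// static on first use.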
3246 unsigned
3247 js::GetCPUCount()
3248 {
3249     static unsigned ncpus = 0;
3250     if (ncpus == 0) {
3251 # ifdef XP_WIN
3252         SYSTEM_INFO sysinfo;
3253         GetSystemInfo(&sysinfo);
3254         ncpus = unsigned(sysinfo.dwNumberOfProcessors);
3255 # else
3256         long n = sysconf(_SC_NPROCESSORS_ONLN);
3257         ncpus = (n > 0) ? unsigned(n) : 1;
3258 # endif
3259     }
3260     return ncpus;
3261 }
3262 
3263 void
3264 GCHelperState::finish()
3265 {
3266     // Wait for any lingering background sweeping to finish.
3267     waitBackgroundSweepEnd();
3268 }
3269 
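// Accessors for the helper thread's state. The unused AutoLockGC reference
// ensures that callers hold the GC lock.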
3270 GCHelperState::State
3271 GCHelperState::state(const AutoLockGC&)
3272 {
3273     return state_;
3274 }
3275 
3276 void
3277 GCHelperState::setState(State state, const AutoLockGC&)
3278 {
3279     state_ = state;
3280 }
3281 
3282 void
3283 GCHelperState::startBackgroundThread(State newState, const AutoLockGC& lock,
3284                                      const AutoLockHelperThreadState& helperLock)
3285 {
3286     MOZ_ASSERT(!thread && state(lock) == IDLE && newState != IDLE);
3287     setState(newState, lock);
3288 
3289     {
3290         AutoEnterOOMUnsafeRegion noOOM;
3291         if (!HelperThreadState().gcHelperWorklist(helperLock).append(this))
3292             noOOM.crash("Could not add to pending GC helpers list");
3293     }
3294 
3295     HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER, helperLock);
3296 }
3297 
3298 void
3299 GCHelperState::waitForBackgroundThread(js::AutoLockGC& lock)
3300 {
3301     done.wait(lock.guard());
3302 }
3303 
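// Body of the background helper task: record the current thread, perform the
// work for the requested state (currently only SWEEPING), then return to IDLE
// and wake any threads waiting in waitForBackgroundThread().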
3304 void
3305 GCHelperState::work()
3306 {
3307     MOZ_ASSERT(CanUseExtraThreads());
3308 
3309     AutoLockGC lock(rt);
3310 
3311     MOZ_ASSERT(thread.isNothing());
3312     thread = mozilla::Some(ThisThread::GetId());
3313 
3314     TraceLoggerThread* logger = TraceLoggerForCurrentThread();
3315 
3316     switch (state(lock)) {
3317 
3318       case IDLE:
3319         MOZ_CRASH("GC helper triggered on idle state");
3320         break;
3321 
3322       case SWEEPING: {
3323         AutoTraceLog logSweeping(logger, TraceLogger_GCSweeping);
3324         doSweep(lock);
3325         MOZ_ASSERT(state(lock) == SWEEPING);
3326         break;
3327       }
3328 
3329     }
3330 
3331     setState(IDLE, lock);
3332     thread.reset();
3333 
3334     done.notify_all();
3335 }
3336 
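// Move the given zones onto the background sweep list and start the helper
// thread if it is currently idle.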
3337 void
3338 GCRuntime::queueZonesForBackgroundSweep(ZoneList& zones)
3339 {
3340     AutoLockHelperThreadState helperLock;
3341     AutoLockGC lock(rt);
3342     backgroundSweepZones.transferFrom(zones);
3343     helperState.maybeStartBackgroundSweep(lock, helperLock);
3344 }
3345 
3346 void
3347 GCRuntime::freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo)
3348 {
3349     MOZ_ASSERT(rt->isHeapBusy());
3350     AutoLockGC lock(rt);
3351     blocksToFreeAfterSweeping.transferUnusedFrom(lifo);
3352 }
3353 
3354 void
3355 GCRuntime::freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo)
3356 {
3357     MOZ_ASSERT(rt->isHeapBusy());
3358     AutoLockGC lock(rt);
3359     blocksToFreeAfterSweeping.transferFrom(lifo);
3360 }
3361 
3362 void
3363 GCRuntime::freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo)
3364 {
3365     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
3366     blocksToFreeAfterMinorGC.transferFrom(lifo);
3367 }
3368 
3369 void
3370 GCHelperState::maybeStartBackgroundSweep(const AutoLockGC& lock,
3371                                          const AutoLockHelperThreadState& helperLock)
3372 {
3373     MOZ_ASSERT(CanUseExtraThreads());
3374 
3375     if (state(lock) == IDLE)
3376         startBackgroundThread(SWEEPING, lock, helperLock);
3377 }
3378 
3379 void
3380 GCHelperState::waitBackgroundSweepEnd()
3381 {
3382     AutoLockGC lock(rt);
3383     while (state(lock) == SWEEPING)
3384         waitForBackgroundThread(lock);
3385     if (!rt->gc.isIncrementalGCInProgress())
3386         rt->gc.assertBackgroundSweepingFinished();
3387 }
3388 
3389 void
3390 GCHelperState::doSweep(AutoLockGC& lock)
3391 {
3392     // The main thread may call queueZonesForBackgroundSweep() while this is
3393     // running, so we must check that there is no more work to do before exiting.
3394 
3395     do {
3396         while (!rt->gc.backgroundSweepZones.isEmpty()) {
3397             AutoSetThreadIsSweeping threadIsSweeping;
3398 
3399             ZoneList zones;
3400             zones.transferFrom(rt->gc.backgroundSweepZones);
3401             LifoAlloc freeLifoAlloc(JSRuntime::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
3402             freeLifoAlloc.transferFrom(&rt->gc.blocksToFreeAfterSweeping);
3403 
3404             AutoUnlockGC unlock(lock);
3405             rt->gc.sweepBackgroundThings(zones, freeLifoAlloc);
3406         }
3407     } while (!rt->gc.backgroundSweepZones.isEmpty());
3408 }
3409 
3410 bool
3411 GCHelperState::onBackgroundThread()
3412 {
3413     return thread.isSome() && *thread == ThisThread::GetId();
3414 }
3415 
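// Decide whether to release observed type information this GC: always under
// GC zeal, otherwise once every JIT_SCRIPT_RELEASE_TYPES_PERIOD major GCs.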
3416 bool
3417 GCRuntime::shouldReleaseObservedTypes()
3418 {
3419     bool releaseTypes = false;
3420 
3421 #ifdef JS_GC_ZEAL
3422     if (zealModeBits != 0)
3423         releaseTypes = true;
3424 #endif
3425 
3426     /* We may miss the exact target GC due to resets. */
3427     if (majorGCNumber >= jitReleaseNumber)
3428         releaseTypes = true;
3429 
3430     if (releaseTypes)
3431         jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
3432 
3433     return releaseTypes;
3434 }
3435 
3436 struct IsAboutToBeFinalizedFunctor {
3437     template <typename T> bool operator()(Cell** t) {
3438         mozilla::DebugOnly<const Cell*> prior = *t;
3439         bool result = IsAboutToBeFinalizedUnbarriered(reinterpret_cast<T**>(t));
3440         // Sweep should not have to deal with moved pointers, since moving GC
3441         // handles updating the UID table manually.
3442         MOZ_ASSERT(*t == prior);
3443         return result;
3444     }
3445 };
3446 
3447 /* static */ bool
3448 UniqueIdGCPolicy::needsSweep(Cell** cell, uint64_t*)
3449 {
3450     return DispatchTraceKindTyped(IsAboutToBeFinalizedFunctor(), (*cell)->getTraceKind(), cell);
3451 }
3452 
3453 void
3454 JS::Zone::sweepUniqueIds(js::FreeOp* fop)
3455 {
3456     uniqueIds_.sweep();
3457 }
3458 
3459 /*
3460  * It's simpler if we preserve the invariant that every zone has at least one
3461  * compartment. If we know we're deleting the entire zone, then
3462  * SweepCompartments is allowed to delete all compartments. In this case,
3463  * |keepAtleastOne| is false. If some objects remain in the zone so that it
3464  * cannot be deleted, then we set |keepAtleastOne| to true, which prohibits
3465  * SweepCompartments from deleting every compartment. Instead, it preserves an
3466  * arbitrary compartment in the zone.
3467  */
3468 void
3469 Zone::sweepCompartments(FreeOp* fop, bool keepAtleastOne, bool destroyingRuntime)
3470 {
3471     JSRuntime* rt = runtimeFromMainThread();
3472     JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
3473 
3474     JSCompartment** read = compartments.begin();
3475     JSCompartment** end = compartments.end();
3476     JSCompartment** write = read;
3477     bool foundOne = false;
3478     while (read < end) {
3479         JSCompartment* comp = *read++;
3480         MOZ_ASSERT(!rt->isAtomsCompartment(comp));
3481 
3482         /*
3483          * Don't delete the last compartment if all the ones before it were
3484          * deleted and keepAtleastOne is true.
3485          */
3486         bool dontDelete = read == end && !foundOne && keepAtleastOne;
3487         if ((!comp->marked && !dontDelete) || destroyingRuntime) {
3488             if (callback)
3489                 callback(fop, comp);
3490             if (comp->principals())
3491                 JS_DropPrincipals(rt->contextFromMainThread(), comp->principals());
3492             js_delete(comp);
3493             rt->gc.stats.sweptCompartment();
3494         } else {
3495             *write++ = comp;
3496             foundOne = true;
3497         }
3498     }
3499     compartments.shrinkTo(write - compartments.begin());
3500     MOZ_ASSERT_IF(keepAtleastOne, !compartments.empty());
3501 }
3502 
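// Sweep the zone list after collection: zones that are completely dead (or
// the whole runtime is being destroyed) are deleted along with their
// compartments; surviving zones just have their dead compartments swept.
// The atoms zone is never deleted here.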
3503 void
3504 GCRuntime::sweepZones(FreeOp* fop, bool destroyingRuntime)
3505 {
3506     MOZ_ASSERT_IF(destroyingRuntime, numActiveZoneIters == 0);
3507     MOZ_ASSERT_IF(destroyingRuntime, arenasEmptyAtShutdown);
3508 
3509     if (rt->gc.numActiveZoneIters)
3510         return;
3511 
3512     assertBackgroundSweepingFinished();
3513 
3514     JSZoneCallback callback = rt->destroyZoneCallback;
3515 
3516     /* Skip the atomsCompartment zone. */
3517     Zone** read = zones.begin() + 1;
3518     Zone** end = zones.end();
3519     Zone** write = read;
3520     MOZ_ASSERT(zones.length() >= 1);
3521     MOZ_ASSERT(zones[0]->isAtomsZone());
3522 
3523     while (read < end) {
3524         Zone* zone = *read++;
3525 
3526         if (zone->wasGCStarted()) {
3527             MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
3528             const bool zoneIsDead = zone->arenas.arenaListsAreEmpty() &&
3529                                     !zone->hasMarkedCompartments();
3530             if (zoneIsDead || destroyingRuntime)
3531             {
3532                 // We have just finished sweeping, so we should have freed any
3533                 // empty arenas back to their Chunk for future allocation.
3534                 zone->arenas.checkEmptyFreeLists();
3535 
3536                 // We are about to delete the Zone; this will leave the Zone*
3537                 // in the arena header dangling if there are any arenas
3538                 // remaining at this point.
3539 #ifdef DEBUG
3540                 if (!zone->arenas.checkEmptyArenaLists())
3541                     arenasEmptyAtShutdown = false;
3542 #endif
3543 
3544                 if (callback)
3545                     callback(zone);
3546 
3547                 zone->sweepCompartments(fop, false, destroyingRuntime);
3548                 MOZ_ASSERT(zone->compartments.empty());
3549                 MOZ_ASSERT_IF(arenasEmptyAtShutdown, zone->typeDescrObjects.empty());
3550                 fop->delete_(zone);
3551                 stats.sweptZone();
3552                 continue;
3553             }
3554             zone->sweepCompartments(fop, true, destroyingRuntime);
3555         }
3556         *write++ = zone;
3557     }
3558     zones.shrinkTo(write - zones.begin());
3559 }
3560 
3561 #ifdef DEBUG
3562 static const char*
3563 AllocKindToAscii(AllocKind kind)
3564 {
3565     switch (kind) {
3566 #define MAKE_CASE(allocKind, traceKind, type, sizedType) \
3567       case AllocKind:: allocKind: return #allocKind;
3568 FOR_EACH_ALLOCKIND(MAKE_CASE)
3569 #undef MAKE_CASE
3570 
3571       default:
3572         MOZ_CRASH("Unknown AllocKind in AllocKindToAscii");
3573     }
3574 }
3575 #endif // DEBUG
3576 
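// In debug builds, report any cells that are still alive in the arena list
// for |kind| at shutdown, printing at most JS_GC_MAX_LIVE_CELLS of them
// individually. Returns true if no live cells were found.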
3577 bool
3578 ArenaLists::checkEmptyArenaList(AllocKind kind)
3579 {
3580     size_t num_live = 0;
3581 #ifdef DEBUG
3582     if (!arenaLists[kind].isEmpty()) {
3583         size_t max_cells = 20;
3584         char* env = getenv("JS_GC_MAX_LIVE_CELLS");
3585         if (env && *env)
3586             max_cells = atol(env);
3587         for (Arena* current = arenaLists[kind].head(); current; current = current->next) {
3588             for (ArenaCellIterUnderGC i(current); !i.done(); i.next()) {
3589                 TenuredCell* t = i.getCell();
3590                 MOZ_ASSERT(t->isMarked(), "unmarked cells should have been finalized");
3591                 if (++num_live <= max_cells) {
3592                     fprintf(stderr, "ERROR: GC found live Cell %p of kind %s at shutdown\n",
3593                             t, AllocKindToAscii(kind));
3594                 }
3595             }
3596         }
3597         fprintf(stderr, "ERROR: GC found %" PRIuSIZE " live Cells at shutdown\n", num_live);
3598     }
3599 #endif // DEBUG
3600     return num_live == 0;
3601 }
3602 
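// Discard caches and other heap data that can be regenerated on demand so
// that none of it has to be considered by the rest of the collection.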
3603 void
3604 GCRuntime::purgeRuntime(AutoLockForExclusiveAccess& lock)
3605 {
3606     for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
3607         comp->purge();
3608 
3609     freeUnusedLifoBlocksAfterSweeping(&rt->tempLifoAlloc);
3610 
3611     rt->interpreterStack().purge(rt);
3612 
3613     JSContext* cx = rt->contextFromMainThread();
3614     cx->caches.gsnCache.purge();
3615     cx->caches.envCoordinateNameCache.purge();
3616     cx->caches.newObjectCache.purge();
3617     cx->caches.nativeIterCache.purge();
3618     cx->caches.uncompressedSourceCache.purge();
3619     if (cx->caches.evalCache.initialized())
3620         cx->caches.evalCache.clear();
3621 
3622     rt->mainThread.frontendCollectionPool.purge();
3623 
3624     if (auto cache = rt->maybeThisRuntimeSharedImmutableStrings())
3625         cache->purge();
3626 
3627     rt->promiseTasksToDestroy.lock()->clear();
3628 }
3629 
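// Heuristics for keeping a compartment's JIT code alive across this GC, for
// example when the compartment has recently run animations or explicitly
// asked for its code to be preserved.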
3630 bool
3631 GCRuntime::shouldPreserveJITCode(JSCompartment* comp, int64_t currentTime,
3632                                  JS::gcreason::Reason reason, bool canAllocateMoreCode)
3633 {
3634     if (cleanUpEverything)
3635         return false;
3636     if (!canAllocateMoreCode)
3637         return false;
3638 
3639     if (alwaysPreserveCode)
3640         return true;
3641     if (comp->preserveJitCode())
3642         return true;
3643     if (comp->lastAnimationTime + PRMJ_USEC_PER_SEC >= currentTime)
3644         return true;
3645     if (reason == JS::gcreason::DEBUG_GC)
3646         return true;
3647 
3648     return false;
3649 }
3650 
3651 #ifdef DEBUG
3652 class CompartmentCheckTracer : public JS::CallbackTracer
3653 {
3654     void onChild(const JS::GCCellPtr& thing) override;
3655 
3656   public:
3657     explicit CompartmentCheckTracer(JSRuntime* rt)
3658       : JS::CallbackTracer(rt), src(nullptr), zone(nullptr), compartment(nullptr)
3659     {}
3660 
3661     Cell* src;
3662     JS::TraceKind srcKind;
3663     Zone* zone;
3664     JSCompartment* compartment;
3665 };
3666 
3667 namespace {
3668 struct IsDestComparatorFunctor {
3669     JS::GCCellPtr dst_;
3670     explicit IsDestComparatorFunctor(JS::GCCellPtr dst) : dst_(dst) {}
3671 
3672     template <typename T> bool operator()(T* t) { return (*t) == dst_.asCell(); }
3673 };
3674 } // namespace (anonymous)
3675 
3676 static bool
3677 InCrossCompartmentMap(JSObject* src, JS::GCCellPtr dst)
3678 {
3679     JSCompartment* srccomp = src->compartment();
3680 
3681     if (dst.is<JSObject>()) {
3682         Value key = ObjectValue(dst.as<JSObject>());
3683         if (WrapperMap::Ptr p = srccomp->lookupWrapper(key)) {
3684             if (*p->value().unsafeGet() == ObjectValue(*src))
3685                 return true;
3686         }
3687     }
3688 
3689     /*
3690      * If the cross-compartment edge is caused by the debugger, then we don't
3691      * know the right hashtable key, so we have to iterate.
3692      */
3693     for (JSCompartment::WrapperEnum e(srccomp); !e.empty(); e.popFront()) {
3694         if (e.front().mutableKey().applyToWrapped(IsDestComparatorFunctor(dst)) &&
3695             ToMarkable(e.front().value().unbarrieredGet()) == src)
3696         {
3697             return true;
3698         }
3699     }
3700 
3701     return false;
3702 }
3703 
3704 struct MaybeCompartmentFunctor {
3705     template <typename T> JSCompartment* operator()(T* t) { return t->maybeCompartment(); }
3706 };
3707 
3708 void
3709 CompartmentCheckTracer::onChild(const JS::GCCellPtr& thing)
3710 {
3711     JSCompartment* comp = DispatchTyped(MaybeCompartmentFunctor(), thing);
3712     if (comp && compartment) {
3713         MOZ_ASSERT(comp == compartment || runtime()->isAtomsCompartment(comp) ||
3714                    (srcKind == JS::TraceKind::Object &&
3715                     InCrossCompartmentMap(static_cast<JSObject*>(src), thing)));
3716     } else {
3717         TenuredCell* tenured = TenuredCell::fromPointer(thing.asCell());
3718         Zone* thingZone = tenured->zoneFromAnyThread();
3719         MOZ_ASSERT(thingZone == zone || thingZone->isAtomsZone());
3720     }
3721 }
3722 
3723 void
3724 GCRuntime::checkForCompartmentMismatches()
3725 {
3726     if (disableStrictProxyCheckingCount)
3727         return;
3728 
3729     CompartmentCheckTracer trc(rt);
3730     AutoAssertEmptyNursery empty(rt);
3731     for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
3732         trc.zone = zone;
3733         for (auto thingKind : AllAllocKinds()) {
3734             for (auto i = zone->cellIter<TenuredCell>(thingKind, empty); !i.done(); i.next()) {
3735                 trc.src = i.getCell();
3736                 trc.srcKind = MapAllocToTraceKind(thingKind);
3737                 trc.compartment = DispatchTraceKindTyped(MaybeCompartmentFunctor(),
3738                                                          trc.src, trc.srcKind);
3739                 js::TraceChildren(&trc, trc.src, trc.srcKind);
3740             }
3741         }
3742     }
3743 }
3744 #endif
3745 
3746 static void
3747 RelazifyFunctions(Zone* zone, AllocKind kind)
3748 {
3749     MOZ_ASSERT(kind == AllocKind::FUNCTION ||
3750                kind == AllocKind::FUNCTION_EXTENDED);
3751 
3752     JSRuntime* rt = zone->runtimeFromMainThread();
3753     AutoAssertEmptyNursery empty(rt);
3754 
3755     for (auto i = zone->cellIter<JSObject>(kind, empty); !i.done(); i.next()) {
3756         JSFunction* fun = &i->as<JSFunction>();
3757         if (fun->hasScript())
3758             fun->maybeRelazify(rt);
3759     }
3760 }
3761 
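// Prepare for and start the mark phase: choose the zones to collect, decide
// whether to discard or preserve JIT code, purge the runtime, unmark the
// collected zones, and trace all roots. Returns false if no zones are
// scheduled for collection.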
3762 bool
3763 GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock)
3764 {
3765     int64_t currentTime = PRMJ_Now();
3766 
3767 #ifdef DEBUG
3768     if (fullCompartmentChecks)
3769         checkForCompartmentMismatches();
3770 #endif
3771 
3772     isFull = true;
3773     bool any = false;
3774 
3775     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
3776         /* Assert that zone state is as we expect */
3777         MOZ_ASSERT(!zone->isCollecting());
3778         MOZ_ASSERT(!zone->compartments.empty());
3779 #ifdef DEBUG
3780         for (auto i : AllAllocKinds())
3781             MOZ_ASSERT(!zone->arenas.arenaListsToSweep[i]);
3782 #endif
3783 
3784         /* Set up which zones will be collected. */
3785         if (zone->isGCScheduled()) {
3786             if (!zone->isAtomsZone()) {
3787                 any = true;
3788                 zone->setGCState(Zone::Mark);
3789             }
3790         } else {
3791             isFull = false;
3792         }
3793 
3794         zone->setPreservingCode(false);
3795     }
3796 
3797     // Discard JIT code more aggressively if the process is approaching its
3798     // executable code limit.
3799     bool canAllocateMoreCode = jit::CanLikelyAllocateMoreExecutableMemory();
3800 
3801     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) {
3802         c->marked = false;
3803         c->scheduledForDestruction = false;
3804         c->maybeAlive = false;
3805         if (shouldPreserveJITCode(c, currentTime, reason, canAllocateMoreCode))
3806             c->zone()->setPreservingCode(true);
3807     }
3808 
3809     if (!rt->gc.cleanUpEverything && canAllocateMoreCode) {
3810         if (JSCompartment* comp = jit::TopmostIonActivationCompartment(rt))
3811             comp->zone()->setPreservingCode(true);
3812     }
3813 
3814     /*
3815      * Atoms are not in the cross-compartment map. So if there are any
3816      * zones that are not being collected, we are not allowed to collect
3817      * atoms. Otherwise, the non-collected zones could contain pointers
3818      * to atoms that we would miss.
3819      *
3820      * keepAtoms() will only change on the main thread, which we are currently
3821      * on. If the value of keepAtoms() changes between GC slices, then we'll
3822      * cancel the incremental GC. See IsIncrementalGCSafe.
3823      */
3824     if (isFull && !rt->keepAtoms()) {
3825         Zone* atomsZone = rt->atomsCompartment(lock)->zone();
3826         if (atomsZone->isGCScheduled()) {
3827             MOZ_ASSERT(!atomsZone->isCollecting());
3828             atomsZone->setGCState(Zone::Mark);
3829             any = true;
3830         }
3831     }
3832 
3833     /* Check that at least one zone is scheduled for collection. */
3834     if (!any)
3835         return false;
3836 
3837     /*
3838      * At the end of each incremental slice, we call prepareForIncrementalGC,
3839      * which marks objects in all arenas that we're currently allocating
3840      * into. This can cause leaks if unreachable objects are in these
3841      * arenas. This purge call ensures that we only mark arenas that have had
3842      * allocations after the incremental GC started.
3843      */
3844     if (isIncremental) {
3845         for (GCZonesIter zone(rt); !zone.done(); zone.next())
3846             zone->arenas.purge();
3847     }
3848 
3849     MemProfiler::MarkTenuredStart(rt);
3850     marker.start();
3851     GCMarker* gcmarker = &marker;
3852 
3853     /* For non-incremental GC the following sweep discards the jit code. */
3854     if (isIncremental) {
3855         js::CancelOffThreadIonCompile(rt, JS::Zone::Mark);
3856         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
3857             gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_DISCARD_CODE);
3858             zone->discardJitCode(rt->defaultFreeOp());
3859         }
3860     }
3861 
3862     /*
3863      * Relazify functions after discarding JIT code (we can't relazify
3864      * functions with JIT code) and before the actual mark phase, so that
3865      * the current GC can collect the JSScripts we're unlinking here.
3866      * We do this only when we're performing a shrinking GC, as too much
3867      * relazification can cause performance issues when we have to reparse
3868      * the same functions over and over.
3869      */
3870     if (invocationKind == GC_SHRINK) {
3871         {
3872             gcstats::AutoPhase ap(stats, gcstats::PHASE_RELAZIFY_FUNCTIONS);
3873             for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
3874                 if (zone->isSelfHostingZone())
3875                     continue;
3876                 RelazifyFunctions(zone, AllocKind::FUNCTION);
3877                 RelazifyFunctions(zone, AllocKind::FUNCTION_EXTENDED);
3878             }
3879         }
3880 
3881         /* Purge ShapeTables. */
3882         gcstats::AutoPhase ap(stats, gcstats::PHASE_PURGE_SHAPE_TABLES);
3883         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
3884             if (zone->keepShapeTables() || zone->isSelfHostingZone())
3885                 continue;
3886             for (auto baseShape = zone->cellIter<BaseShape>(); !baseShape.done(); baseShape.next())
3887                 baseShape->maybePurgeTable();
3888         }
3889     }
3890 
3891     startNumber = number;
3892 
3893     /*
3894      * We must purge the runtime at the beginning of an incremental GC. The
3895      * danger if we purge later is that the snapshot invariant of incremental
3896      * GC will be broken, as follows. If some object is reachable only through
3897      * some cache (say the dtoaCache) then it will not be part of the snapshot.
3898      * If we purge after root marking, then the mutator could obtain a pointer
3899      * to the object and start using it. This object might never be marked, so
3900      * a GC hazard would exist.
3901      */
3902     {
3903         gcstats::AutoPhase ap(stats, gcstats::PHASE_PURGE);
3904         purgeRuntime(lock);
3905     }
3906 
3907     /*
3908      * Mark phase.
3909      */
3910     gcstats::AutoPhase ap1(stats, gcstats::PHASE_MARK);
3911 
3912     {
3913         gcstats::AutoPhase ap(stats, gcstats::PHASE_UNMARK);
3914 
3915         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
3916             /* Unmark everything in the zones being collected. */
3917             zone->arenas.unmarkAll();
3918         }
3919 
3920         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
3921             /* Unmark all weak maps in the zones being collected. */
3922             WeakMapBase::unmarkZone(zone);
3923         }
3924     }
3925 
3926     traceRuntimeForMajorGC(gcmarker, lock);
3927 
3928     gcstats::AutoPhase ap2(stats, gcstats::PHASE_MARK_ROOTS);
3929 
3930     if (isIncremental) {
3931         gcstats::AutoPhase ap3(stats, gcstats::PHASE_BUFFER_GRAY_ROOTS);
3932         bufferGrayRoots();
3933     }
3934 
3935     markCompartments();
3936 
3937     return true;
3938 }
3939 
3940 void
3941 GCRuntime::markCompartments()
3942 {
3943     gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_COMPARTMENTS);
3944 
3945     /*
3946      * This code ensures that if a compartment is "dead", then it will be
3947      * collected in this GC. A compartment is considered dead if its maybeAlive
3948      * flag is false. The maybeAlive flag is set if:
3949      *   (1) the compartment has incoming cross-compartment edges, or
3950      *   (2) an object in the compartment was marked during root marking, either
3951      *       as a black root or a gray root.
3952      * If maybeAlive is false, then we set the scheduledForDestruction flag.
3953      * At the end of the GC, we look for compartments where
3954      * scheduledForDestruction is true. These are compartments that were somehow
3955      * "revived" during the incremental GC. If any are found, we do a special,
3956      * non-incremental GC of those compartments to try to collect them.
3957      *
3958      * Compartments can be revived for a variety of reasons. One reason is bug
3959      * 811587, where a reflector that was dead can be revived by DOM code that
3960      * still refers to the underlying DOM node.
3961      *
3962      * Read barriers and allocations can also cause revival. This might happen
3963      * during a function like JS_TransplantObject, which iterates over all
3964      * compartments, live or dead, and operates on their objects. See bug 803376
3965      * for details on this problem. To avoid the problem, we try to avoid
3966      * allocation and read barriers during JS_TransplantObject and the like.
3967      */
3968 
3969     /* Set the maybeAlive flag based on cross-compartment edges. */
3970     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
3971         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
3972             if (e.front().key().is<JSString*>())
3973                 continue;
3974             JSCompartment* dest = e.front().mutableKey().compartment();
3975             if (dest)
3976                 dest->maybeAlive = true;
3977         }
3978     }
3979 
3980     /*
3981      * For black roots, code in gc/Marking.cpp will already have set maybeAlive
3982      * during MarkRuntime.
3983      */
3984 
3985     /* Propagate maybeAlive to scheduledForDestruction. */
3986     for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
3987         if (!c->maybeAlive && !rt->isAtomsCompartment(c))
3988             c->scheduledForDestruction = true;
3989     }
3990 }
3991 
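// Mark weak references (weak maps, watchpoints, Debugger edges and the
// jitcode global table) iteratively, draining the mark stack after each pass,
// until a fixed point is reached.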
3992 template <class ZoneIterT>
3993 void
3994 GCRuntime::markWeakReferences(gcstats::Phase phase)
3995 {
3996     MOZ_ASSERT(marker.isDrained());
3997 
3998     gcstats::AutoPhase ap1(stats, phase);
3999 
4000     marker.enterWeakMarkingMode();
4001 
4002     // TODO bug 1167452: Make weak marking incremental
4003     auto unlimited = SliceBudget::unlimited();
4004     MOZ_RELEASE_ASSERT(marker.drainMarkStack(unlimited));
4005 
4006     for (;;) {
4007         bool markedAny = false;
4008         if (!marker.isWeakMarkingTracer()) {
4009             for (ZoneIterT zone(rt); !zone.done(); zone.next())
4010                 markedAny |= WeakMapBase::markZoneIteratively(zone, &marker);
4011         }
4012         for (CompartmentsIterT<ZoneIterT> c(rt); !c.done(); c.next()) {
4013             if (c->watchpointMap)
4014                 markedAny |= c->watchpointMap->markIteratively(&marker);
4015         }
4016         markedAny |= Debugger::markAllIteratively(&marker);
4017         markedAny |= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker);
4018 
4019         if (!markedAny)
4020             break;
4021 
4022         auto unlimited = SliceBudget::unlimited();
4023         MOZ_RELEASE_ASSERT(marker.drainMarkStack(unlimited));
4024     }
4025     MOZ_ASSERT(marker.isDrained());
4026 
4027     marker.leaveWeakMarkingMode();
4028 }
4029 
4030 void
4031 GCRuntime::markWeakReferencesInCurrentGroup(gcstats::Phase phase)
4032 {
4033     markWeakReferences<GCZoneGroupIter>(phase);
4034 }
4035 
4036 template <class ZoneIterT, class CompartmentIterT>
4037 void
4038 GCRuntime::markGrayReferences(gcstats::Phase phase)
4039 {
4040     gcstats::AutoPhase ap(stats, phase);
4041     if (hasBufferedGrayRoots()) {
4042         for (ZoneIterT zone(rt); !zone.done(); zone.next())
4043             markBufferedGrayRoots(zone);
4044     } else {
4045         MOZ_ASSERT(!isIncremental);
4046         if (JSTraceDataOp op = grayRootTracer.op)
4047             (*op)(&marker, grayRootTracer.data);
4048     }
4049     auto unlimited = SliceBudget::unlimited();
4050     MOZ_RELEASE_ASSERT(marker.drainMarkStack(unlimited));
4051 }
4052 
4053 void
4054 GCRuntime::markGrayReferencesInCurrentGroup(gcstats::Phase phase)
4055 {
4056     markGrayReferences<GCZoneGroupIter, GCCompartmentGroupIter>(phase);
4057 }
4058 
4059 void
4060 GCRuntime::markAllWeakReferences(gcstats::Phase phase)
4061 {
4062     markWeakReferences<GCZonesIter>(phase);
4063 }
4064 
4065 void
4066 GCRuntime::markAllGrayReferences(gcstats::Phase phase)
4067 {
4068     markGrayReferences<GCZonesIter, GCCompartmentsIter>(phase);
4069 }
4070 
4071 #ifdef JS_GC_ZEAL
4072 
4073 struct GCChunkHasher {
4074     typedef gc::Chunk* Lookup;
4075 
4076     /*
4077      * Strip zeros for better distribution after multiplying by the golden
4078      * ratio.
4079      */
4080     static HashNumber hash(gc::Chunk* chunk) {
4081         MOZ_ASSERT(!(uintptr_t(chunk) & gc::ChunkMask));
4082         return HashNumber(uintptr_t(chunk) >> gc::ChunkShift);
4083     }
4084 
4085     static bool match(gc::Chunk* k, gc::Chunk* l) {
4086         MOZ_ASSERT(!(uintptr_t(k) & gc::ChunkMask));
4087         MOZ_ASSERT(!(uintptr_t(l) & gc::ChunkMask));
4088         return k == l;
4089     }
4090 };
4091 
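// Used by the incremental marking validator zeal mode: redoes the marking
// non-incrementally, records the resulting mark bitmaps, and later compares
// them against the bitmaps produced by the incremental mark.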
4092 class js::gc::MarkingValidator
4093 {
4094   public:
4095     explicit MarkingValidator(GCRuntime* gc);
4096     ~MarkingValidator();
4097     void nonIncrementalMark(AutoLockForExclusiveAccess& lock);
4098     void validate();
4099 
4100   private:
4101     GCRuntime* gc;
4102     bool initialized;
4103 
4104     typedef HashMap<Chunk*, ChunkBitmap*, GCChunkHasher, SystemAllocPolicy> BitmapMap;
4105     BitmapMap map;
4106 };
4107 
4108 js::gc::MarkingValidator::MarkingValidator(GCRuntime* gc)
4109   : gc(gc),
4110     initialized(false)
4111 {}
4112 
4113 js::gc::MarkingValidator::~MarkingValidator()
4114 {
4115     if (!map.initialized())
4116         return;
4117 
4118     for (BitmapMap::Range r(map.all()); !r.empty(); r.popFront())
4119         js_delete(r.front().value());
4120 }
4121 
4122 void
4123 js::gc::MarkingValidator::nonIncrementalMark(AutoLockForExclusiveAccess& lock)
4124 {
4125     /*
4126      * Perform a non-incremental mark for all collecting zones and record
4127      * the results for later comparison.
4128      *
4129      * Currently this does not validate gray marking.
4130      */
4131 
4132     if (!map.init())
4133         return;
4134 
4135     JSRuntime* runtime = gc->rt;
4136     GCMarker* gcmarker = &gc->marker;
4137 
4138     gc->waitBackgroundSweepEnd();
4139 
4140     /* Save existing mark bits. */
4141     for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
4142         ChunkBitmap* bitmap = &chunk->bitmap;
4143         ChunkBitmap* entry = js_new<ChunkBitmap>();
4144         if (!entry)
4145             return;
4146 
4147         memcpy((void*)entry->bitmap, (void*)bitmap->bitmap, sizeof(bitmap->bitmap));
4148         if (!map.putNew(chunk, entry))
4149             return;
4150     }
4151 
4152     /*
4153      * Temporarily clear the weakmaps' mark flags for the compartments we are
4154      * collecting.
4155      */
4156 
4157     WeakMapSet markedWeakMaps;
4158     if (!markedWeakMaps.init())
4159         return;
4160 
4161     /*
4162      * For saving, smush all of the keys into one big table and split them back
4163      * up into per-zone tables when restoring.
4164      */
4165     gc::WeakKeyTable savedWeakKeys(SystemAllocPolicy(), runtime->randomHashCodeScrambler());
4166     if (!savedWeakKeys.init())
4167         return;
4168 
4169     for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
4170         if (!WeakMapBase::saveZoneMarkedWeakMaps(zone, markedWeakMaps))
4171             return;
4172 
4173         AutoEnterOOMUnsafeRegion oomUnsafe;
4174         for (gc::WeakKeyTable::Range r = zone->gcWeakKeys.all(); !r.empty(); r.popFront()) {
4175             if (!savedWeakKeys.put(Move(r.front().key), Move(r.front().value)))
4176                 oomUnsafe.crash("saving weak keys table for validator");
4177         }
4178 
4179         if (!zone->gcWeakKeys.clear())
4180             oomUnsafe.crash("clearing weak keys table for validator");
4181     }
4182 
4183     /*
4184      * After this point, the function should run to completion, so we shouldn't
4185      * do anything fallible.
4186      */
4187     initialized = true;
4188 
4189     /* Re-do all the marking, but non-incrementally. */
4190     js::gc::State state = gc->incrementalState;
4191     gc->incrementalState = State::MarkRoots;
4192 
4193     {
4194         gcstats::AutoPhase ap(gc->stats, gcstats::PHASE_MARK);
4195 
4196         {
4197             gcstats::AutoPhase ap(gc->stats, gcstats::PHASE_UNMARK);
4198 
4199             for (GCZonesIter zone(runtime); !zone.done(); zone.next())
4200                 WeakMapBase::unmarkZone(zone);
4201 
4202             MOZ_ASSERT(gcmarker->isDrained());
4203             gcmarker->reset();
4204 
4205             for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next())
4206                 chunk->bitmap.clear();
4207         }
4208 
4209         gc->traceRuntimeForMajorGC(gcmarker, lock);
4210 
4211         gc->incrementalState = State::Mark;
4212         auto unlimited = SliceBudget::unlimited();
4213         MOZ_RELEASE_ASSERT(gc->marker.drainMarkStack(unlimited));
4214     }
4215 
4216     gc->incrementalState = State::Sweep;
4217     {
4218         gcstats::AutoPhase ap1(gc->stats, gcstats::PHASE_SWEEP);
4219         gcstats::AutoPhase ap2(gc->stats, gcstats::PHASE_SWEEP_MARK);
4220 
4221         gc->markAllWeakReferences(gcstats::PHASE_SWEEP_MARK_WEAK);
4222 
4223         /* Update zone state for gray marking. */
4224         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
4225             MOZ_ASSERT(zone->isGCMarkingBlack());
4226             zone->setGCState(Zone::MarkGray);
4227         }
4228         gc->marker.setMarkColorGray();
4229 
4230         gc->markAllGrayReferences(gcstats::PHASE_SWEEP_MARK_GRAY);
4231         gc->markAllWeakReferences(gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
4232 
4233         /* Restore zone state. */
4234         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
4235             MOZ_ASSERT(zone->isGCMarkingGray());
4236             zone->setGCState(Zone::Mark);
4237         }
4238         MOZ_ASSERT(gc->marker.isDrained());
4239         gc->marker.setMarkColorBlack();
4240     }
4241 
4242     /* Take a copy of the non-incremental mark state and restore the original. */
4243     for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
4244         ChunkBitmap* bitmap = &chunk->bitmap;
4245         ChunkBitmap* entry = map.lookup(chunk)->value();
4246         Swap(*entry, *bitmap);
4247     }
4248 
4249     for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
4250         WeakMapBase::unmarkZone(zone);
4251         AutoEnterOOMUnsafeRegion oomUnsafe;
4252         if (!zone->gcWeakKeys.clear())
4253             oomUnsafe.crash("clearing weak keys table for validator");
4254     }
4255 
4256     WeakMapBase::restoreMarkedWeakMaps(markedWeakMaps);
4257 
4258     for (gc::WeakKeyTable::Range r = savedWeakKeys.all(); !r.empty(); r.popFront()) {
4259         AutoEnterOOMUnsafeRegion oomUnsafe;
4260         Zone* zone = gc::TenuredCell::fromPointer(r.front().key.asCell())->zone();
4261         if (!zone->gcWeakKeys.put(Move(r.front().key), Move(r.front().value)))
4262             oomUnsafe.crash("restoring weak keys table for validator");
4263     }
4264 
4265     gc->incrementalState = state;
4266 }
4267 
4268 void
4269 js::gc::MarkingValidator::validate()
4270 {
4271     /*
4272      * Validates the incremental marking by comparing the mark bits to those
4273      * previously recorded for the non-incremental mark.
4274      */
4275 
4276     if (!initialized)
4277         return;
4278 
4279     gc->waitBackgroundSweepEnd();
4280 
4281     for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next()) {
4282         BitmapMap::Ptr ptr = map.lookup(chunk);
4283         if (!ptr)
4284             continue;  /* Allocated after we did the non-incremental mark. */
4285 
4286         ChunkBitmap* bitmap = ptr->value();
4287         ChunkBitmap* incBitmap = &chunk->bitmap;
4288 
4289         for (size_t i = 0; i < ArenasPerChunk; i++) {
4290             if (chunk->decommittedArenas.get(i))
4291                 continue;
4292             Arena* arena = &chunk->arenas[i];
4293             if (!arena->allocated())
4294                 continue;
4295             if (!arena->zone->isGCSweeping())
4296                 continue;
4297             if (arena->allocatedDuringIncremental)
4298                 continue;
4299 
4300             AllocKind kind = arena->getAllocKind();
4301             uintptr_t thing = arena->thingsStart();
4302             uintptr_t end = arena->thingsEnd();
4303             while (thing < end) {
4304                 Cell* cell = (Cell*)thing;
4305 
4306                 /*
4307                  * If a non-incremental GC wouldn't have collected a cell, then
4308                  * an incremental GC won't collect it.
4309                  */
4310                 MOZ_ASSERT_IF(bitmap->isMarked(cell, BLACK), incBitmap->isMarked(cell, BLACK));
4311 
4312                 /*
4313                  * If the cycle collector isn't allowed to collect an object
4314                  * after a non-incremental GC has run, then it isn't allowed to
4315                  * collect it after an incremental GC.
4316                  */
4317                 MOZ_ASSERT_IF(!bitmap->isMarked(cell, GRAY), !incBitmap->isMarked(cell, GRAY));
4318 
4319                 thing += Arena::thingSize(kind);
4320             }
4321         }
4322     }
4323 }
4324 
4325 #endif // JS_GC_ZEAL
4326 
4327 void
4328 GCRuntime::computeNonIncrementalMarkingForValidation(AutoLockForExclusiveAccess& lock)
4329 {
4330 #ifdef JS_GC_ZEAL
4331     MOZ_ASSERT(!markingValidator);
4332     if (isIncremental && hasZealMode(ZealMode::IncrementalMarkingValidator))
4333         markingValidator = js_new<MarkingValidator>(this);
4334     if (markingValidator)
4335         markingValidator->nonIncrementalMark(lock);
4336 #endif
4337 }
4338 
4339 void
4340 GCRuntime::validateIncrementalMarking()
4341 {
4342 #ifdef JS_GC_ZEAL
4343     if (markingValidator)
4344         markingValidator->validate();
4345 #endif
4346 }
4347 
4348 void
4349 GCRuntime::finishMarkingValidation()
4350 {
4351 #ifdef JS_GC_ZEAL
4352     js_delete(markingValidator);
4353     markingValidator = nullptr;
4354 #endif
4355 }
4356 
4357 static void
4358 DropStringWrappers(JSRuntime* rt)
4359 {
4360     /*
4361      * String "wrappers" are dropped on GC because their presence would require
4362      * us to sweep the wrappers in all compartments every time we sweep a
4363      * compartment group.
4364      */
4365     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
4366         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
4367             if (e.front().key().is<JSString*>())
4368                 e.removeFront();
4369         }
4370     }
4371 }
4372 
4373 /*
4374  * Group zones that must be swept at the same time.
4375  *
4376  * If compartment A has an edge to an unmarked object in compartment B, then we
4377  * must not sweep A in a later slice than we sweep B. That's because a write
4378  * barrier in A could lead to the unmarked object in B becoming marked.
4379  * However, if we had already swept that object, we would be in trouble.
4380  *
4381  * If we consider these dependencies as a graph, then all the compartments in
4382  * any strongly-connected component of this graph must be swept in the same
4383  * slice.
4384  *
4385  * Tarjan's algorithm is used to calculate the components.
4386  */
4387 namespace {
4388 struct AddOutgoingEdgeFunctor {
4389     bool needsEdge_;
4390     ZoneComponentFinder& finder_;
4391 
4392     AddOutgoingEdgeFunctor(bool needsEdge, ZoneComponentFinder& finder)
4393       : needsEdge_(needsEdge), finder_(finder)
4394     {}
4395 
4396     template <typename T>
4397     void operator()(T tp) {
4398         TenuredCell& other = (*tp)->asTenured();
4399 
4400         /*
4401          * Add an edge to the wrapped object's compartment if the wrapped
4402          * object is not marked black, to indicate that the wrapper's
4403          * compartment must not be swept after the wrapped compartment.
4404          */
4405         if (needsEdge_) {
4406             JS::Zone* zone = other.zone();
4407             if (zone->isGCMarking())
4408                 finder_.addEdgeTo(zone);
4409         }
4410     }
4411 };
4412 } // namespace (anonymous)
4413 
4414 void
4415 JSCompartment::findOutgoingEdges(ZoneComponentFinder& finder)
4416 {
4417     for (js::WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
4418         CrossCompartmentKey& key = e.front().mutableKey();
4419         MOZ_ASSERT(!key.is<JSString*>());
4420         bool needsEdge = true;
4421         if (key.is<JSObject*>()) {
4422             TenuredCell& other = key.as<JSObject*>()->asTenured();
4423             needsEdge = !other.isMarked(BLACK) || other.isMarked(GRAY);
4424         }
4425         key.applyToWrapped(AddOutgoingEdgeFunctor(needsEdge, finder));
4426     }
4427 }
4428 
4429 void
4430 Zone::findOutgoingEdges(ZoneComponentFinder& finder)
4431 {
4432     /*
4433      * Any compartment may have a pointer to an atom in the atoms
4434      * compartment, and these aren't in the cross compartment map.
4435      */
4436     JSRuntime* rt = runtimeFromMainThread();
4437     Zone* atomsZone = rt->atomsCompartment(finder.lock)->zone();
4438     if (atomsZone->isGCMarking())
4439         finder.addEdgeTo(atomsZone);
4440 
4441     for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
4442         comp->findOutgoingEdges(finder);
4443 
4444     for (ZoneSet::Range r = gcZoneGroupEdges.all(); !r.empty(); r.popFront()) {
4445         if (r.front()->isGCMarking())
4446             finder.addEdgeTo(r.front());
4447     }
4448 
4449     Debugger::findZoneEdges(this, finder);
4450 }
4451 
4452 bool
4453 GCRuntime::findInterZoneEdges()
4454 {
4455     /*
4456      * Weakmaps which have keys with delegates in a different zone introduce the
4457      * need for zone edges from the delegate's zone to the weakmap zone.
4458      *
4459      * Since the edges point into and not away from the zone the weakmap is in,
4460      * we must find these edges in advance and store them in a set on the Zone.
4461      * If we run out of memory, we fall back to sweeping everything in one
4462      * group.
4463      */
4464 
4465     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4466         if (!WeakMapBase::findInterZoneEdges(zone))
4467             return false;
4468     }
4469 
4470     return true;
4471 }
4472 
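// Partition the collected zones into groups using the component finder; each
// strongly connected component of the zone edge graph must be swept in the
// same slice. We fall back to a single group if the collection is not
// incremental or if finding the weakmap-induced edges fails.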
4473 void
4474 GCRuntime::findZoneGroups(AutoLockForExclusiveAccess& lock)
4475 {
4476 #ifdef DEBUG
4477     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
4478         MOZ_ASSERT(zone->gcZoneGroupEdges.empty());
4479 #endif
4480 
4481     JSContext* cx = rt->contextFromMainThread();
4482     ZoneComponentFinder finder(cx->nativeStackLimit[StackForSystemCode], lock);
4483     if (!isIncremental || !findInterZoneEdges())
4484         finder.useOneComponent();
4485 
4486     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
4487         MOZ_ASSERT(zone->isGCMarking());
4488         finder.addNode(zone);
4489     }
4490     zoneGroups = finder.getResultsList();
4491     currentZoneGroup = zoneGroups;
4492     zoneGroupIndex = 0;
4493 
4494     for (GCZonesIter zone(rt); !zone.done(); zone.next())
4495         zone->gcZoneGroupEdges.clear();
4496 
4497 #ifdef DEBUG
4498     for (Zone* head = currentZoneGroup; head; head = head->nextGroup()) {
4499         for (Zone* zone = head; zone; zone = zone->nextNodeInGroup())
4500             MOZ_ASSERT(zone->isGCMarking());
4501     }
4502 
4503     MOZ_ASSERT_IF(!isIncremental, !currentZoneGroup->nextGroup());
4504     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
4505         MOZ_ASSERT(zone->gcZoneGroupEdges.empty());
4506 #endif
4507 }
4508 
4509 static void
4510 ResetGrayList(JSCompartment* comp);
4511 
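// Advance to the next zone group to be swept. If sweeping has been aborted,
// take the remaining zones out of the collection and clear their gray roots
// and incoming gray pointer lists.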
4512 void
4513 GCRuntime::getNextZoneGroup()
4514 {
4515     currentZoneGroup = currentZoneGroup->nextGroup();
4516     ++zoneGroupIndex;
4517     if (!currentZoneGroup) {
4518         abortSweepAfterCurrentGroup = false;
4519         return;
4520     }
4521 
4522     for (Zone* zone = currentZoneGroup; zone; zone = zone->nextNodeInGroup()) {
4523         MOZ_ASSERT(zone->isGCMarking());
4524         MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
4525     }
4526 
4527     if (!isIncremental)
4528         ZoneComponentFinder::mergeGroups(currentZoneGroup);
4529 
4530     if (abortSweepAfterCurrentGroup) {
4531         MOZ_ASSERT(!isIncremental);
4532         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4533             MOZ_ASSERT(!zone->gcNextGraphComponent);
4534             MOZ_ASSERT(zone->isGCMarking());
4535             zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
4536             zone->setGCState(Zone::NoGC);
4537             zone->gcGrayRoots.clearAndFree();
4538         }
4539 
4540         for (GCCompartmentGroupIter comp(rt); !comp.done(); comp.next())
4541             ResetGrayList(comp);
4542 
4543         abortSweepAfterCurrentGroup = false;
4544         currentZoneGroup = nullptr;
4545     }
4546 }
4547 
4548 /*
4549  * Gray marking:
4550  *
4551  * At the end of collection, anything reachable from a gray root that has not
4552  * otherwise been marked black must be marked gray.
4553  *
4554  * This means that when marking things gray we must not allow marking to leave
4555  * the current compartment group, as that could result in things being marked
4556  * gray when they might subsequently be marked black.  To achieve this, when we
4557  * find a cross compartment pointer we don't mark the referent but add it to a
4558  * singly-linked list of incoming gray pointers that is stored with each
4559  * compartment.
4560  *
4561  * The list head is stored in JSCompartment::gcIncomingGrayPointers and contains
4562  * cross compartment wrapper objects. The next pointer is stored in the second
4563  * extra slot of the cross compartment wrapper.
4564  *
4565  * The list is created during gray marking when one of the
4566  * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
4567  * current compartment group.  This calls DelayCrossCompartmentGrayMarking to
4568  * push the referring object onto the list.
4569  *
4570  * The list is traversed and then unlinked in
4571  * MarkIncomingCrossCompartmentPointers.
4572  */
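
/*
 * For example, if cross-compartment wrappers w1 and w2 both point into
 * compartment C, the resulting list is
 *
 *   C->gcIncomingGrayPointers -> w1 -> w2 -> nullptr
 *
 * where each link after the head is stored in the source wrapper's gray link
 * extra slot.
 */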
4573 
4574 static bool
4575 IsGrayListObject(JSObject* obj)
4576 {
4577     MOZ_ASSERT(obj);
4578     return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
4579 }
4580 
4581 /* static */ unsigned
4582 ProxyObject::grayLinkExtraSlot(JSObject* obj)
4583 {
4584     MOZ_ASSERT(IsGrayListObject(obj));
4585     return 1;
4586 }
4587 
4588 #ifdef DEBUG
4589 static void
4590 AssertNotOnGrayList(JSObject* obj)
4591 {
4592     MOZ_ASSERT_IF(IsGrayListObject(obj),
4593                   GetProxyExtra(obj, ProxyObject::grayLinkExtraSlot(obj)).isUndefined());
4594 }
4595 #endif
4596 
4597 static void
4598 AssertNoWrappersInGrayList(JSRuntime* rt)
4599 {
4600 #ifdef DEBUG
4601     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
4602         MOZ_ASSERT(!c->gcIncomingGrayPointers);
4603         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
4604             if (!e.front().key().is<JSString*>())
4605                 AssertNotOnGrayList(&e.front().value().unbarrieredGet().toObject());
4606         }
4607     }
4608 #endif
4609 }
4610 
4611 static JSObject*
4612 CrossCompartmentPointerReferent(JSObject* obj)
4613 {
4614     MOZ_ASSERT(IsGrayListObject(obj));
4615     return &obj->as<ProxyObject>().private_().toObject();
4616 }
4617 
4618 static JSObject*
4619 NextIncomingCrossCompartmentPointer(JSObject* prev, bool unlink)
4620 {
4621     unsigned slot = ProxyObject::grayLinkExtraSlot(prev);
4622     JSObject* next = GetProxyExtra(prev, slot).toObjectOrNull();
4623     MOZ_ASSERT_IF(next, IsGrayListObject(next));
4624 
4625     if (unlink)
4626         SetProxyExtra(prev, slot, UndefinedValue());
4627 
4628     return next;
4629 }
4630 
4631 void
4632 js::DelayCrossCompartmentGrayMarking(JSObject* src)
4633 {
4634     MOZ_ASSERT(IsGrayListObject(src));
4635 
4636     /* Called from MarkCrossCompartmentXXX functions. */
4637     unsigned slot = ProxyObject::grayLinkExtraSlot(src);
4638     JSObject* dest = CrossCompartmentPointerReferent(src);
4639     JSCompartment* comp = dest->compartment();
4640 
4641     if (GetProxyExtra(src, slot).isUndefined()) {
4642         SetProxyExtra(src, slot, ObjectOrNullValue(comp->gcIncomingGrayPointers));
4643         comp->gcIncomingGrayPointers = src;
4644     } else {
4645         MOZ_ASSERT(GetProxyExtra(src, slot).isObjectOrNull());
4646     }
4647 
4648 #ifdef DEBUG
4649     /*
4650      * Assert that the object is in our list, also walking the list to check its
4651      * integrity.
4652      */
4653     JSObject* obj = comp->gcIncomingGrayPointers;
4654     bool found = false;
4655     while (obj) {
4656         if (obj == src)
4657             found = true;
4658         obj = NextIncomingCrossCompartmentPointer(obj, false);
4659     }
4660     MOZ_ASSERT(found);
4661 #endif
4662 }
4663 
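// Trace the referents of the wrappers on each compartment's incoming gray
// pointer list whose source wrapper is marked with the given color, then
// drain the mark stack. The list is unlinked as it is traversed when marking
// gray.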
4664 static void
4665 MarkIncomingCrossCompartmentPointers(JSRuntime* rt, const uint32_t color)
4666 {
4667     MOZ_ASSERT(color == BLACK || color == GRAY);
4668 
4669     static const gcstats::Phase statsPhases[] = {
4670         gcstats::PHASE_SWEEP_MARK_INCOMING_BLACK,
4671         gcstats::PHASE_SWEEP_MARK_INCOMING_GRAY
4672     };
4673     gcstats::AutoPhase ap1(rt->gc.stats, statsPhases[color]);
4674 
4675     bool unlinkList = color == GRAY;
4676 
4677     for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
4678         MOZ_ASSERT_IF(color == GRAY, c->zone()->isGCMarkingGray());
4679         MOZ_ASSERT_IF(color == BLACK, c->zone()->isGCMarkingBlack());
4680         MOZ_ASSERT_IF(c->gcIncomingGrayPointers, IsGrayListObject(c->gcIncomingGrayPointers));
4681 
4682         for (JSObject* src = c->gcIncomingGrayPointers;
4683              src;
4684              src = NextIncomingCrossCompartmentPointer(src, unlinkList))
4685         {
4686             JSObject* dst = CrossCompartmentPointerReferent(src);
4687             MOZ_ASSERT(dst->compartment() == c);
4688 
4689             if (color == GRAY) {
4690                 if (IsMarkedUnbarriered(rt, &src) && src->asTenured().isMarked(GRAY))
4691                     TraceManuallyBarrieredEdge(&rt->gc.marker, &dst,
4692                                                "cross-compartment gray pointer");
4693             } else {
4694                 if (IsMarkedUnbarriered(rt, &src) && !src->asTenured().isMarked(GRAY))
4695                     TraceManuallyBarrieredEdge(&rt->gc.marker, &dst,
4696                                                "cross-compartment black pointer");
4697             }
4698         }
4699 
4700         if (unlinkList)
4701             c->gcIncomingGrayPointers = nullptr;
4702     }
4703 
4704     auto unlimited = SliceBudget::unlimited();
4705     MOZ_RELEASE_ASSERT(rt->gc.marker.drainMarkStack(unlimited));
4706 }
4707 
4708 static bool
4709 RemoveFromGrayList(JSObject* wrapper)
4710 {
4711     if (!IsGrayListObject(wrapper))
4712         return false;
4713 
4714     unsigned slot = ProxyObject::grayLinkExtraSlot(wrapper);
4715     if (GetProxyExtra(wrapper, slot).isUndefined())
4716         return false;  /* Not on our list. */
4717 
4718     JSObject* tail = GetProxyExtra(wrapper, slot).toObjectOrNull();
4719     SetProxyExtra(wrapper, slot, UndefinedValue());
4720 
4721     JSCompartment* comp = CrossCompartmentPointerReferent(wrapper)->compartment();
4722     JSObject* obj = comp->gcIncomingGrayPointers;
4723     if (obj == wrapper) {
4724         comp->gcIncomingGrayPointers = tail;
4725         return true;
4726     }
4727 
4728     while (obj) {
4729         unsigned slot = ProxyObject::grayLinkExtraSlot(obj);
4730         JSObject* next = GetProxyExtra(obj, slot).toObjectOrNull();
4731         if (next == wrapper) {
4732             SetProxyExtra(obj, slot, ObjectOrNullValue(tail));
4733             return true;
4734         }
4735         obj = next;
4736     }
4737 
4738     MOZ_CRASH("object not found in gray link list");
4739 }
4740 
4741 static void
4742 ResetGrayList(JSCompartment* comp)
4743 {
4744     JSObject* src = comp->gcIncomingGrayPointers;
4745     while (src)
4746         src = NextIncomingCrossCompartmentPointer(src, true);
4747     comp->gcIncomingGrayPointers = nullptr;
4748 }
4749 
4750 void
4751 js::NotifyGCNukeWrapper(JSObject* obj)
4752 {
4753     /*
4754      * References to the target of the wrapper are being removed; we no longer
4755      * have to remember to mark it.
4756      */
4757     RemoveFromGrayList(obj);
4758 }
4759 
4760 enum {
4761     JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
4762     JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
4763 };
4764 
4765 unsigned
4766 js::NotifyGCPreSwap(JSObject* a, JSObject* b)
4767 {
4768     /*
4769      * Two objects in the same compartment are about to have their contents
4770      * swapped.  If either of them is in our gray pointer list, then we remove
4771      * it from the list, returning a bitset indicating what happened.
4772      */
4773     return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
4774            (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
4775 }
4776 
4777 void
4778 js::NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned removedFlags)
4779 {
4780     /*
4781      * Two objects in the same compartment have had their contents swapped.  If
4782      * either of them was in our gray pointer list, we add it back.
4783      */
4784     if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED)
4785         DelayCrossCompartmentGrayMarking(b);
4786     if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED)
4787         DelayCrossCompartmentGrayMarking(a);
4788 }
4789 
4790 void
4791 GCRuntime::endMarkingZoneGroup()
4792 {
4793     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_MARK);
4794 
4795     /*
4796      * Mark any incoming black pointers from previously swept compartments
4797      * whose referents are not marked. This can occur when gray cells become
4798      * black by the action of UnmarkGray.
4799      */
4800     MarkIncomingCrossCompartmentPointers(rt, BLACK);
4801     markWeakReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_WEAK);
4802 
4803     /*
4804      * Change state of current group to MarkGray to restrict marking to this
4805      * group.  Note that there may be pointers to the atoms compartment, and
4806      * these will be marked through, as they are not marked with
4807      * MarkCrossCompartmentXXX.
4808      */
4809     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4810         MOZ_ASSERT(zone->isGCMarkingBlack());
4811         zone->setGCState(Zone::MarkGray);
4812     }
4813     marker.setMarkColorGray();
4814 
4815     /* Mark incoming gray pointers from previously swept compartments. */
4816     MarkIncomingCrossCompartmentPointers(rt, GRAY);
4817 
4818     /* Mark gray roots and mark transitively inside the current compartment group. */
4819     markGrayReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_GRAY);
4820     markWeakReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
4821 
4822     /* Restore marking state. */
4823     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4824         MOZ_ASSERT(zone->isGCMarkingGray());
4825         zone->setGCState(Zone::Mark);
4826     }
4827     MOZ_ASSERT(marker.isDrained());
4828     marker.setMarkColorBlack();
4829 }
4830 
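// Base class for the parallel sweep tasks below. Each task holds the runtime
// it operates on and supports move construction so that tasks can be stored
// in vectors (see WeakCacheTaskVector below).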
4831 class GCSweepTask : public GCParallelTask
4832 {
4833     GCSweepTask(const GCSweepTask&) = delete;
4834 
4835   protected:
4836     JSRuntime* runtime;
4837 
4838   public:
4839     explicit GCSweepTask(JSRuntime* rt) : runtime(rt) {}
4840     GCSweepTask(GCSweepTask&& other)
4841       : GCParallelTask(mozilla::Move(other)),
4842         runtime(other.runtime)
4843     {}
4844 };
4845 
4846 // Causes the given WeakCache to be swept when run.
4847 class SweepWeakCacheTask : public GCSweepTask
4848 {
4849     JS::WeakCache<void*>& cache;
4850 
4851     SweepWeakCacheTask(const SweepWeakCacheTask&) = delete;
4852 
4853   public:
4854     SweepWeakCacheTask(JSRuntime* rt, JS::WeakCache<void*>& wc) : GCSweepTask(rt), cache(wc) {}
4855     SweepWeakCacheTask(SweepWeakCacheTask&& other)
4856       : GCSweepTask(mozilla::Move(other)), cache(other.cache)
4857     {}
4858 
4859     void run() override {
4860         cache.sweep();
4861     }
4862 };
4863 
4864 #define MAKE_GC_SWEEP_TASK(name)                                              \
4865     class name : public GCSweepTask {                                         \
4866         void run() override;                                                  \
4867       public:                                                                 \
4868         explicit name (JSRuntime* rt) : GCSweepTask(rt) {}                    \
4869     }
4870 MAKE_GC_SWEEP_TASK(SweepAtomsTask);
4871 MAKE_GC_SWEEP_TASK(SweepCCWrappersTask);
4872 MAKE_GC_SWEEP_TASK(SweepBaseShapesTask);
4873 MAKE_GC_SWEEP_TASK(SweepInitialShapesTask);
4874 MAKE_GC_SWEEP_TASK(SweepObjectGroupsTask);
4875 MAKE_GC_SWEEP_TASK(SweepRegExpsTask);
4876 MAKE_GC_SWEEP_TASK(SweepMiscTask);
4877 #undef MAKE_GC_SWEEP_TASK
4878 
4879 /* virtual */ void
4880 SweepAtomsTask::run()
4881 {
4882     runtime->sweepAtoms();
4883     for (CompartmentsIter comp(runtime, SkipAtoms); !comp.done(); comp.next())
4884         comp->sweepVarNames();
4885 }
4886 
4887 /* virtual */ void
4888 SweepCCWrappersTask::run()
4889 {
4890     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
4891         c->sweepCrossCompartmentWrappers();
4892 }
4893 
4894 /* virtual */ void
4895 SweepObjectGroupsTask::run()
4896 {
4897     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
4898         c->objectGroups.sweep(runtime->defaultFreeOp());
4899 }
4900 
4901 /* virtual */ void
4902 SweepRegExpsTask::run()
4903 {
4904     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next())
4905         c->sweepRegExps();
4906 }
4907 
4908 /* virtual */ void
4909 SweepMiscTask::run()
4910 {
4911     for (GCCompartmentGroupIter c(runtime); !c.done(); c.next()) {
4912         c->sweepSavedStacks();
4913         c->sweepSelfHostingScriptSource();
4914         c->sweepNativeIterators();
4915     }
4916 }
4917 
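// Start a parallel task on a helper thread if possible. If the task cannot be
// started there, run it synchronously on the main thread under the given
// stats phase instead.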
4918 void
4919 GCRuntime::startTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked)
4920 {
4921     if (!task.startWithLockHeld(locked)) {
4922         AutoUnlockHelperThreadState unlock(locked);
4923         gcstats::AutoPhase ap(stats, phase);
4924         task.runFromMainThread(rt);
4925     }
4926 }
4927 
4928 void
4929 GCRuntime::joinTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked)
4930 {
4931     gcstats::AutoPhase ap(stats, task, phase);
4932     task.joinWithLockHeld(locked);
4933 }
4934 
4935 using WeakCacheTaskVector = mozilla::Vector<SweepWeakCacheTask, 0, SystemAllocPolicy>;
4936 
4937 static void
4938 SweepWeakCachesFromMainThread(JSRuntime* rt)
4939 {
4940     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4941         for (JS::WeakCache<void*>* cache : zone->weakCaches_) {
4942             SweepWeakCacheTask task(rt, *cache);
4943             task.runFromMainThread(rt);
4944         }
4945     }
4946 }
4947 
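// Build a vector of sweep tasks, one per weak cache in the current zone
// group. If appending fails due to OOM, fall back to sweeping every weak
// cache synchronously on the main thread and return an empty vector.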
4948 static WeakCacheTaskVector
4949 PrepareWeakCacheTasks(JSRuntime* rt)
4950 {
4951     WeakCacheTaskVector out;
4952     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4953         for (JS::WeakCache<void*>* cache : zone->weakCaches_) {
4954             if (!out.append(SweepWeakCacheTask(rt, *cache))) {
4955                 SweepWeakCachesFromMainThread(rt);
4956                 return WeakCacheTaskVector();
4957             }
4958         }
4959     }
4960     return out;
4961 }
4962 
4963 void
4964 GCRuntime::beginSweepingZoneGroup(AutoLockForExclusiveAccess& lock)
4965 {
4966     /*
     * Begin sweeping the group of zones in gcCurrentZoneGroup, performing
     * actions that must be done before yielding to the caller.
4969      */
4970 
4971     bool sweepingAtoms = false;
4972     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
4973         /* Set the GC state to sweeping. */
4974         MOZ_ASSERT(zone->isGCMarking());
4975         zone->setGCState(Zone::Sweep);
4976 
4977         /* Purge the ArenaLists before sweeping. */
4978         zone->arenas.purge();
4979 
4980         if (zone->isAtomsZone())
4981             sweepingAtoms = true;
4982 
4983         if (rt->sweepZoneCallback)
4984             rt->sweepZoneCallback(zone);
4985 
4986 #ifdef DEBUG
4987         zone->gcLastZoneGroupIndex = zoneGroupIndex;
4988 #endif
4989     }
4990 
4991     validateIncrementalMarking();
4992 
4993     FreeOp fop(rt);
4994     SweepAtomsTask sweepAtomsTask(rt);
4995     SweepCCWrappersTask sweepCCWrappersTask(rt);
4996     SweepObjectGroupsTask sweepObjectGroupsTask(rt);
4997     SweepRegExpsTask sweepRegExpsTask(rt);
4998     SweepMiscTask sweepMiscTask(rt);
4999     WeakCacheTaskVector sweepCacheTasks = PrepareWeakCacheTasks(rt);
5000 
5001     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5002         /* Clear all weakrefs that point to unmarked things. */
5003         for (auto edge : zone->gcWeakRefs) {
5004             /* Edges may be present multiple times, so may already be nulled. */
5005             if (*edge && IsAboutToBeFinalizedDuringSweep(**edge))
5006                 *edge = nullptr;
5007         }
5008         zone->gcWeakRefs.clear();
5009 
5010         /* No need to look up any more weakmap keys from this zone group. */
5011         AutoEnterOOMUnsafeRegion oomUnsafe;
5012         if (!zone->gcWeakKeys.clear())
5013             oomUnsafe.crash("clearing weak keys in beginSweepingZoneGroup()");
5014     }
5015 
5016     {
5017         gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_START);
5018         callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_START);
5019         {
5020             gcstats::AutoPhase ap2(stats, gcstats::PHASE_WEAK_ZONEGROUP_CALLBACK);
5021             callWeakPointerZoneGroupCallbacks();
5022         }
5023         {
5024             gcstats::AutoPhase ap2(stats, gcstats::PHASE_WEAK_COMPARTMENT_CALLBACK);
5025             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5026                 for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
5027                     callWeakPointerCompartmentCallbacks(comp);
5028             }
5029         }
5030     }
5031 
5032     if (sweepingAtoms) {
5033         AutoLockHelperThreadState helperLock;
5034         startTask(sweepAtomsTask, gcstats::PHASE_SWEEP_ATOMS, helperLock);
5035     }
5036 
5037     {
5038         gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
5039         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5040 
5041         {
5042             AutoLockHelperThreadState helperLock;
5043             startTask(sweepCCWrappersTask, gcstats::PHASE_SWEEP_CC_WRAPPER, helperLock);
5044             startTask(sweepObjectGroupsTask, gcstats::PHASE_SWEEP_TYPE_OBJECT, helperLock);
5045             startTask(sweepRegExpsTask, gcstats::PHASE_SWEEP_REGEXP, helperLock);
5046             startTask(sweepMiscTask, gcstats::PHASE_SWEEP_MISC, helperLock);
5047             for (auto& task : sweepCacheTasks)
5048                 startTask(task, gcstats::PHASE_SWEEP_MISC, helperLock);
5049         }
5050 
        // The remaining work runs on the main thread, in parallel with the
        // helper-thread tasks started above, until we join them below.
5053         {
5054             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_MISC);
5055 
5056             // Cancel any active or pending off thread compilations.
5057             js::CancelOffThreadIonCompile(rt, JS::Zone::Sweep);
5058 
5059             for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
5060                 c->sweepGlobalObject(&fop);
5061                 c->sweepDebugEnvironments();
5062                 c->sweepJitCompartment(&fop);
5063                 c->sweepTemplateObjects();
5064             }
5065 
5066             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5067                 zone->sweepWeakMaps();
5068 
5069             // Bug 1071218: the following two methods have not yet been
5070             // refactored to work on a single zone-group at once.
5071 
5072             // Collect watch points associated with unreachable objects.
5073             WatchpointMap::sweepAll(rt);
5074 
5075             // Detach unreachable debuggers and global objects from each other.
5076             Debugger::sweepAll(&fop);
5077 
5078             // Sweep entries containing about-to-be-finalized JitCode and
5079             // update relocated TypeSet::Types inside the JitcodeGlobalTable.
5080             jit::JitRuntime::SweepJitcodeGlobalTable(rt);
5081         }
5082 
5083         {
5084             gcstats::AutoPhase apdc(stats, gcstats::PHASE_SWEEP_DISCARD_CODE);
5085             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5086                 zone->discardJitCode(&fop);
5087         }
5088 
5089         {
5090             gcstats::AutoPhase ap1(stats, gcstats::PHASE_SWEEP_TYPES);
5091             gcstats::AutoPhase ap2(stats, gcstats::PHASE_SWEEP_TYPES_BEGIN);
5092             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5093                 zone->beginSweepTypes(&fop, releaseObservedTypes && !zone->isPreservingCode());
5094         }
5095 
5096         {
5097             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_BREAKPOINT);
5098             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5099                 zone->sweepBreakpoints(&fop);
5100         }
5101 
5102         {
5103             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_BREAKPOINT);
5104             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5105                 zone->sweepUniqueIds(&fop);
5106         }
5107     }
5108 
5109     if (sweepingAtoms) {
5110         gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_SYMBOL_REGISTRY);
5111         rt->symbolRegistry(lock).sweep();
5112     }
5113 
5114     // Rejoin our off-main-thread tasks.
5115     if (sweepingAtoms) {
5116         AutoLockHelperThreadState helperLock;
5117         joinTask(sweepAtomsTask, gcstats::PHASE_SWEEP_ATOMS, helperLock);
5118     }
5119 
5120     {
5121         gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
5122         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5123 
5124         AutoLockHelperThreadState helperLock;
5125         joinTask(sweepCCWrappersTask, gcstats::PHASE_SWEEP_CC_WRAPPER, helperLock);
5126         joinTask(sweepObjectGroupsTask, gcstats::PHASE_SWEEP_TYPE_OBJECT, helperLock);
5127         joinTask(sweepRegExpsTask, gcstats::PHASE_SWEEP_REGEXP, helperLock);
5128         joinTask(sweepMiscTask, gcstats::PHASE_SWEEP_MISC, helperLock);
5129         for (auto& task : sweepCacheTasks)
5130             joinTask(task, gcstats::PHASE_SWEEP_MISC, helperLock);
5131     }
5132 
5133     /*
     * Queue all GC things in the zones of the current group for sweeping,
     * either in the foreground or on the background thread.
5136      *
5137      * Note that order is important here for the background case.
5138      *
5139      * Objects are finalized immediately but this may change in the future.
5140      */
5141 
5142     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5143         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5144         zone->arenas.queueForegroundObjectsForSweep(&fop);
5145     }
5146     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5147         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5148         for (unsigned i = 0; i < ArrayLength(IncrementalFinalizePhases); ++i)
5149             zone->arenas.queueForForegroundSweep(&fop, IncrementalFinalizePhases[i]);
5150     }
5151     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5152         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5153         for (unsigned i = 0; i < ArrayLength(BackgroundFinalizePhases); ++i)
5154             zone->arenas.queueForBackgroundSweep(&fop, BackgroundFinalizePhases[i]);
5155     }
5156     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5157         gcstats::AutoSCC scc(stats, zoneGroupIndex);
5158         zone->arenas.queueForegroundThingsForSweep(&fop);
5159     }
5160 
5161     sweepingTypes = true;
5162 
5163     finalizePhase = 0;
5164     sweepZone = currentZoneGroup;
5165     sweepKind = AllocKind::FIRST;
5166 
5167     {
5168         gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_END);
5169         callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_END);
5170     }
5171 }
5172 
5173 void
5174 GCRuntime::endSweepingZoneGroup()
5175 {
5176     /* Update the GC state for zones we have swept. */
5177     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
5178         MOZ_ASSERT(zone->isGCSweeping());
5179         AutoLockGC lock(rt);
5180         zone->setGCState(Zone::Finished);
5181         zone->threshold.updateAfterGC(zone->usage.gcBytes(), invocationKind, tunables,
5182                                       schedulingState, lock);
5183     }
5184 
5185     /* Start background thread to sweep zones if required. */
5186     ZoneList zones;
5187     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
5188         zones.append(zone);
5189     if (sweepOnBackgroundThread)
5190         queueZonesForBackgroundSweep(zones);
5191     else
5192         sweepBackgroundThings(zones, blocksToFreeAfterSweeping);
5193 
5194     /* Reset the list of arenas marked as being allocated during sweep phase. */
5195     while (Arena* arena = arenasAllocatedDuringSweep) {
5196         arenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
5197         arena->unsetAllocDuringSweep();
5198     }
5199 }
5200 
5201 void
5202 GCRuntime::beginSweepPhase(bool destroyingRuntime, AutoLockForExclusiveAccess& lock)
5203 {
5204     /*
5205      * Sweep phase.
5206      *
5207      * Finalize as we sweep, outside of lock but with rt->isHeapBusy()
5208      * true so that any attempt to allocate a GC-thing from a finalizer will
5209      * fail, rather than nest badly and leave the unmarked newborn to be swept.
5210      */
5211 
5212     MOZ_ASSERT(!abortSweepAfterCurrentGroup);
5213 
5214     AutoSetThreadIsSweeping threadIsSweeping;
5215 
5216     releaseHeldRelocatedArenas();
5217 
5218     computeNonIncrementalMarkingForValidation(lock);
5219 
5220     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
5221 
5222     sweepOnBackgroundThread =
5223         !destroyingRuntime && !TraceEnabled() && CanUseExtraThreads();
5224 
5225     releaseObservedTypes = shouldReleaseObservedTypes();
5226 
5227     AssertNoWrappersInGrayList(rt);
5228     DropStringWrappers(rt);
5229 
5230     findZoneGroups(lock);
5231     endMarkingZoneGroup();
5232     beginSweepingZoneGroup(lock);
5233 }
5234 
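// Finalize the foreground-swept arenas of |thingKind| within the given slice
// budget. If the budget runs out mid-list, the partially swept arenas are
// saved in |incrementalSweptArenas| and false is returned so sweeping can
// resume in a later slice; returning true means this kind is done and the
// finalized arenas have been merged back into |arenaLists[thingKind]|.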
5235 bool
5236 ArenaLists::foregroundFinalize(FreeOp* fop, AllocKind thingKind, SliceBudget& sliceBudget,
5237                                SortedArenaList& sweepList)
5238 {
5239     if (!arenaListsToSweep[thingKind] && incrementalSweptArenas.isEmpty())
5240         return true;
5241 
5242     if (!FinalizeArenas(fop, &arenaListsToSweep[thingKind], sweepList,
5243                         thingKind, sliceBudget, RELEASE_ARENAS))
5244     {
5245         incrementalSweptArenaKind = thingKind;
5246         incrementalSweptArenas = sweepList.toArenaList();
5247         return false;
5248     }
5249 
5250     // Clear any previous incremental sweep state we may have saved.
5251     incrementalSweptArenas.clear();
5252 
5253     // Join |arenaLists[thingKind]| and |sweepList| into a single list.
5254     ArenaList finalized = sweepList.toArenaList();
5255     arenaLists[thingKind] =
5256         finalized.insertListWithCursorAtEnd(arenaLists[thingKind]);
5257 
5258     return true;
5259 }
5260 
5261 GCRuntime::IncrementalProgress
5262 GCRuntime::drainMarkStack(SliceBudget& sliceBudget, gcstats::Phase phase)
5263 {
5264     /* Run a marking slice and return whether the stack is now empty. */
5265     gcstats::AutoPhase ap(stats, phase);
5266     return marker.drainMarkStack(sliceBudget) ? Finished : NotFinished;
5267 }
5268 
5269 static void
5270 SweepThing(Shape* shape)
5271 {
5272     if (!shape->isMarked())
5273         shape->sweep();
5274 }
5275 
5276 static void
5277 SweepThing(JSScript* script, AutoClearTypeInferenceStateOnOOM* oom)
5278 {
5279     script->maybeSweepTypes(oom);
5280 }
5281 
5282 static void
5283 SweepThing(ObjectGroup* group, AutoClearTypeInferenceStateOnOOM* oom)
5284 {
5285     group->maybeSweep(oom);
5286 }
5287 
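// Sweep each cell in the arenas linked through |arenasToSweep|, one arena at
// a time, charging the slice budget per arena. Returns false if the budget is
// exhausted before the list is fully processed, leaving |*arenasToSweep|
// pointing at the next arena to sweep.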
5288 template <typename T, typename... Args>
5289 static bool
5290 SweepArenaList(Arena** arenasToSweep, SliceBudget& sliceBudget, Args... args)
5291 {
5292     while (Arena* arena = *arenasToSweep) {
5293         for (ArenaCellIterUnderGC i(arena); !i.done(); i.next())
5294             SweepThing(i.get<T>(), args...);
5295 
5296         *arenasToSweep = (*arenasToSweep)->next;
5297         AllocKind kind = MapTypeToFinalizeKind<T>::kind;
5298         sliceBudget.step(Arena::thingsPerArena(kind));
5299         if (sliceBudget.isOverBudget())
5300             return false;
5301     }
5302 
5303     return true;
5304 }
5305 
5306 GCRuntime::IncrementalProgress
5307 GCRuntime::sweepPhase(SliceBudget& sliceBudget, AutoLockForExclusiveAccess& lock)
5308 {
5309     AutoSetThreadIsSweeping threadIsSweeping;
5310 
5311     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
5312     FreeOp fop(rt);
5313 
5314     if (drainMarkStack(sliceBudget, gcstats::PHASE_SWEEP_MARK) == NotFinished)
5315         return NotFinished;
5316 
5317 
5318     for (;;) {
5319         // Sweep dead type information stored in scripts and object groups, but
5320         // don't finalize them yet. We have to sweep dead information from both
5321         // live and dead scripts and object groups, so that no dead references
5322         // remain in them. Type inference can end up crawling these zones
5323         // again, such as for TypeCompartment::markSetsUnknown, and if this
5324         // happens after sweeping for the zone group finishes we won't be able
5325         // to determine which things in the zone are live.
5326         if (sweepingTypes) {
5327             gcstats::AutoPhase ap1(stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
5328             gcstats::AutoPhase ap2(stats, gcstats::PHASE_SWEEP_TYPES);
5329 
5330             for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
5331                 ArenaLists& al = sweepZone->arenas;
5332 
5333                 AutoClearTypeInferenceStateOnOOM oom(sweepZone);
5334 
5335                 if (!SweepArenaList<JSScript>(&al.gcScriptArenasToUpdate, sliceBudget, &oom))
5336                     return NotFinished;
5337 
5338                 if (!SweepArenaList<ObjectGroup>(
5339                         &al.gcObjectGroupArenasToUpdate, sliceBudget, &oom))
5340                 {
5341                     return NotFinished;
5342                 }
5343 
5344                 // Finish sweeping type information in the zone.
5345                 {
5346                     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_TYPES_END);
5347                     sweepZone->types.endSweep(rt);
5348                 }
5349 
5350                 // Foreground finalized objects have already been finalized,
5351                 // and now their arenas can be reclaimed by freeing empty ones
5352                 // and making non-empty ones available for allocation.
5353                 al.mergeForegroundSweptObjectArenas();
5354             }
5355 
5356             sweepZone = currentZoneGroup;
5357             sweepingTypes = false;
5358         }
5359 
5360         /* Finalize foreground finalized things. */
        for (; finalizePhase < ArrayLength(IncrementalFinalizePhases); ++finalizePhase) {
5362             gcstats::AutoPhase ap(stats, IncrementalFinalizePhases[finalizePhase].statsPhase);
5363 
5364             for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
5365                 Zone* zone = sweepZone;
5366 
5367                 for (auto kind : SomeAllocKinds(sweepKind, AllocKind::LIMIT)) {
5368                     if (!IncrementalFinalizePhases[finalizePhase].kinds.contains(kind))
5369                         continue;
5370 
5371                     /* Set the number of things per arena for this AllocKind. */
5372                     size_t thingsPerArena = Arena::thingsPerArena(kind);
5373                     incrementalSweepList.setThingsPerArena(thingsPerArena);
5374 
5375                     if (!zone->arenas.foregroundFinalize(&fop, kind, sliceBudget,
5376                                                          incrementalSweepList))
5377                     {
5378                         sweepKind = kind;
5379                         return NotFinished;
5380                     }
5381 
5382                     /* Reset the slots of the sweep list that we used. */
5383                     incrementalSweepList.reset(thingsPerArena);
5384                 }
5385                 sweepKind = AllocKind::FIRST;
5386             }
5387             sweepZone = currentZoneGroup;
5388         }
5389 
5390         /* Remove dead shapes from the shape tree, but don't finalize them yet. */
5391         {
5392             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_SHAPE);
5393 
5394             for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
5395                 ArenaLists& al = sweepZone->arenas;
5396 
5397                 if (!SweepArenaList<Shape>(&al.gcShapeArenasToUpdate, sliceBudget))
5398                     return NotFinished;
5399 
5400                 if (!SweepArenaList<AccessorShape>(&al.gcAccessorShapeArenasToUpdate, sliceBudget))
5401                     return NotFinished;
5402             }
5403         }
5404 
5405         endSweepingZoneGroup();
5406         getNextZoneGroup();
5407         if (!currentZoneGroup)
5408             return Finished;
5409 
5410         endMarkingZoneGroup();
5411         beginSweepingZoneGroup(lock);
5412     }
5413 }
5414 
5415 void
5416 GCRuntime::endSweepPhase(bool destroyingRuntime, AutoLockForExclusiveAccess& lock)
5417 {
5418     AutoSetThreadIsSweeping threadIsSweeping;
5419 
5420     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
5421     FreeOp fop(rt);
5422 
5423     MOZ_ASSERT_IF(destroyingRuntime, !sweepOnBackgroundThread);
5424 
5425     /*
5426      * Recalculate whether GC was full or not as this may have changed due to
5427      * newly created zones.  Can only change from full to not full.
5428      */
5429     if (isFull) {
5430         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5431             if (!zone->isCollecting()) {
5432                 isFull = false;
5433                 break;
5434             }
5435         }
5436     }
5437 
5438     {
5439         gcstats::AutoPhase ap(stats, gcstats::PHASE_DESTROY);
5440 
5441         /*
5442          * Sweep script filenames after sweeping functions in the generic loop
5443          * above. In this way when a scripted function's finalizer destroys the
5444          * script and calls rt->destroyScriptHook, the hook can still access the
5445          * script's filename. See bug 323267.
5446          */
5447         SweepScriptData(rt, lock);
5448 
5449         /* Clear out any small pools that we're hanging on to. */
5450         if (jit::JitRuntime* jitRuntime = rt->jitRuntime()) {
5451             jitRuntime->execAlloc().purge();
5452             jitRuntime->backedgeExecAlloc().purge();
5453         }
5454     }
5455 
5456     {
5457         gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_END);
5458         callFinalizeCallbacks(&fop, JSFINALIZE_COLLECTION_END);
5459 
5460         /* If we finished a full GC, then the gray bits are correct. */
5461         if (isFull)
5462             rt->setGCGrayBitsValid(true);
5463     }
5464 
5465     finishMarkingValidation();
5466 
5467 #ifdef DEBUG
5468     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5469         for (auto i : AllAllocKinds()) {
5470             MOZ_ASSERT_IF(!IsBackgroundFinalized(i) ||
5471                           !sweepOnBackgroundThread,
5472                           !zone->arenas.arenaListsToSweep[i]);
5473         }
5474     }
5475 #endif
5476 
5477     AssertNoWrappersInGrayList(rt);
5478 }
5479 
5480 void
5481 GCRuntime::beginCompactPhase()
5482 {
5483     MOZ_ASSERT(!isBackgroundSweeping());
5484 
5485     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT);
5486 
5487     MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
5488     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
5489         if (CanRelocateZone(zone))
5490             zonesToMaybeCompact.append(zone);
5491     }
5492 
5493     MOZ_ASSERT(!relocatedArenasToRelease);
5494     startedCompacting = true;
5495 }
5496 
5497 GCRuntime::IncrementalProgress
5498 GCRuntime::compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget,
5499                         AutoLockForExclusiveAccess& lock)
5500 {
5501     MOZ_ASSERT(rt->gc.nursery.isEmpty());
5502     assertBackgroundSweepingFinished();
5503     MOZ_ASSERT(startedCompacting);
5504 
5505     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT);
5506 
5507     Arena* relocatedArenas = nullptr;
5508     while (!zonesToMaybeCompact.isEmpty()) {
5509         // TODO: JSScripts can move. If the sampler interrupts the GC in the
5510         // middle of relocating an arena, invalid JSScript pointers may be
5511         // accessed. Suppress all sampling until a finer-grained solution can be
5512         // found. See bug 1295775.
5513         AutoSuppressProfilerSampling suppressSampling(rt);
5514 
5515         Zone* zone = zonesToMaybeCompact.front();
5516         MOZ_ASSERT(zone->isGCFinished());
5517         zone->setGCState(Zone::Compact);
5518         if (relocateArenas(zone, reason, relocatedArenas, sliceBudget))
5519             updatePointersToRelocatedCells(zone, lock);
5520         zone->setGCState(Zone::Finished);
5521         zonesToMaybeCompact.removeFront();
5522         if (sliceBudget.isOverBudget())
5523             break;
5524     }
5525 
5526     if (ShouldProtectRelocatedArenas(reason))
5527         protectAndHoldArenas(relocatedArenas);
5528     else
5529         releaseRelocatedArenas(relocatedArenas);
5530 
5531     // Clear caches that can contain cell pointers.
5532     JSContext* cx = rt->contextFromMainThread();
5533     cx->caches.newObjectCache.purge();
5534     cx->caches.nativeIterCache.purge();
5535     if (cx->caches.evalCache.initialized())
5536         cx->caches.evalCache.clear();
5537 
5538 #ifdef DEBUG
5539     CheckHashTablesAfterMovingGC(rt);
5540 #endif
5541 
5542     return zonesToMaybeCompact.isEmpty() ? Finished : NotFinished;
5543 }
5544 
5545 void
5546 GCRuntime::endCompactPhase(JS::gcreason::Reason reason)
5547 {
5548     startedCompacting = false;
5549 }
5550 
5551 void
5552 GCRuntime::finishCollection(JS::gcreason::Reason reason)
5553 {
5554     assertBackgroundSweepingFinished();
5555     MOZ_ASSERT(marker.isDrained());
5556     marker.stop();
5557     clearBufferedGrayRoots();
5558     MemProfiler::SweepTenured(rt);
5559 
5560     uint64_t currentTime = PRMJ_Now();
5561     schedulingState.updateHighFrequencyMode(lastGCTime, currentTime, tunables);
5562 
5563     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5564         if (zone->isCollecting()) {
5565             MOZ_ASSERT(zone->isGCFinished());
5566             zone->setGCState(Zone::NoGC);
5567             zone->active = false;
5568         }
5569 
5570         MOZ_ASSERT(!zone->isCollecting());
5571         MOZ_ASSERT(!zone->wasGCStarted());
5572     }
5573 
5574     MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
5575 
5576     lastGCTime = currentTime;
5577 }
5578 
5579 static const char*
5580 HeapStateToLabel(JS::HeapState heapState)
5581 {
5582     switch (heapState) {
5583       case JS::HeapState::MinorCollecting:
5584         return "js::Nursery::collect";
5585       case JS::HeapState::MajorCollecting:
5586         return "js::GCRuntime::collect";
5587       case JS::HeapState::Tracing:
5588         return "JS_IterateCompartments";
5589       case JS::HeapState::Idle:
5590       case JS::HeapState::CycleCollecting:
5591         MOZ_CRASH("Should never have an Idle or CC heap state when pushing GC pseudo frames!");
5592     }
5593     MOZ_ASSERT_UNREACHABLE("Should have exhausted every JS::HeapState variant!");
5594     return nullptr;
5595 }
5596 
5597 /* Start a new heap session. */
5598 AutoTraceSession::AutoTraceSession(JSRuntime* rt, JS::HeapState heapState)
5599   : lock(rt),
5600     runtime(rt),
5601     prevState(rt->heapState()),
5602     pseudoFrame(rt, HeapStateToLabel(heapState), ProfileEntry::Category::GC)
5603 {
5604     MOZ_ASSERT(prevState == JS::HeapState::Idle);
5605     MOZ_ASSERT(heapState != JS::HeapState::Idle);
5606     MOZ_ASSERT_IF(heapState == JS::HeapState::MajorCollecting, rt->gc.nursery.isEmpty());
5607     rt->setHeapState(heapState);
5608 }
5609 
5610 AutoTraceSession::~AutoTraceSession()
5611 {
5612     MOZ_ASSERT(runtime->isHeapBusy());
5613     runtime->setHeapState(prevState);
5614 }
5615 
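/*
 * Abandon an in-progress incremental GC. Depending on how far the collection
 * has got, this either unwinds the marking state directly (Mark) or finishes
 * the remaining work non-incrementally by running an unlimited-budget slice
 * (Sweep, Finalize, Compact, Decommit), so that the heap is left in a
 * consistent state before the statistics are reset.
 */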
5616 void
5617 GCRuntime::resetIncrementalGC(gc::AbortReason reason, AutoLockForExclusiveAccess& lock)
5618 {
5619     MOZ_ASSERT(reason != gc::AbortReason::None);
5620 
5621     switch (incrementalState) {
5622       case State::NotActive:
5623         return;
5624 
5625       case State::MarkRoots:
5626         MOZ_CRASH("resetIncrementalGC did not expect MarkRoots state");
5627         break;
5628 
5629       case State::Mark: {
5630         /* Cancel any ongoing marking. */
5631         marker.reset();
5632         marker.stop();
5633         clearBufferedGrayRoots();
5634 
5635         for (GCCompartmentsIter c(rt); !c.done(); c.next())
5636             ResetGrayList(c);
5637 
5638         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
5639             MOZ_ASSERT(zone->isGCMarking());
5640             zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
5641             zone->setGCState(Zone::NoGC);
5642         }
5643 
5644         blocksToFreeAfterSweeping.freeAll();
5645 
5646         incrementalState = State::NotActive;
5647 
5648         MOZ_ASSERT(!marker.shouldCheckCompartments());
5649 
5650         break;
5651       }
5652 
5653       case State::Sweep: {
5654         marker.reset();
5655 
5656         for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
5657             c->scheduledForDestruction = false;
5658 
5659         /* Finish sweeping the current zone group, then abort. */
5660         abortSweepAfterCurrentGroup = true;
5661 
5662         /* Don't perform any compaction after sweeping. */
5663         bool wasCompacting = isCompacting;
5664         isCompacting = false;
5665 
5666         auto unlimited = SliceBudget::unlimited();
5667         incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
5668 
5669         isCompacting = wasCompacting;
5670 
5671         {
5672             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
5673             rt->gc.waitBackgroundSweepOrAllocEnd();
5674         }
5675         break;
5676       }
5677 
5678       case State::Finalize: {
5679         {
5680             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
5681             rt->gc.waitBackgroundSweepOrAllocEnd();
5682         }
5683 
5684         bool wasCompacting = isCompacting;
5685         isCompacting = false;
5686 
5687         auto unlimited = SliceBudget::unlimited();
5688         incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
5689 
5690         isCompacting = wasCompacting;
5691 
5692         break;
5693       }
5694 
5695       case State::Compact: {
5696         bool wasCompacting = isCompacting;
5697 
5698         isCompacting = true;
5699         startedCompacting = true;
5700         zonesToMaybeCompact.clear();
5701 
5702         auto unlimited = SliceBudget::unlimited();
5703         incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
5704 
5705         isCompacting = wasCompacting;
5706         break;
5707       }
5708 
5709       case State::Decommit: {
5710         auto unlimited = SliceBudget::unlimited();
5711         incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
5712         break;
5713       }
5714     }
5715 
5716     stats.reset(reason);
5717 
5718 #ifdef DEBUG
5719     assertBackgroundSweepingFinished();
5720     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
5721         MOZ_ASSERT(!zone->isCollecting());
5722         MOZ_ASSERT(!zone->needsIncrementalBarrier());
5723         MOZ_ASSERT(!zone->isOnList());
5724     }
5725     MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
5726     MOZ_ASSERT(incrementalState == State::NotActive);
5727 #endif
5728 }
5729 
5730 namespace {
5731 
5732 class AutoGCSlice {
5733   public:
5734     explicit AutoGCSlice(JSRuntime* rt);
5735     ~AutoGCSlice();
5736 
5737   private:
5738     JSRuntime* runtime;
5739     AutoSetThreadIsPerformingGC performingGC;
5740 };
5741 
5742 } /* anonymous namespace */
5743 
5744 AutoGCSlice::AutoGCSlice(JSRuntime* rt)
5745   : runtime(rt)
5746 {
5747     /*
5748      * During incremental GC, the compartment's active flag determines whether
5749      * there are stack frames active for any of its scripts. Normally this flag
5750      * is set at the beginning of the mark phase. During incremental GC, we also
5751      * set it at the start of every phase.
5752      */
5753     for (ActivationIterator iter(rt); !iter.done(); ++iter)
5754         iter->compartment()->zone()->active = true;
5755 
5756     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
5757         /*
5758          * Clear needsIncrementalBarrier early so we don't do any write
5759          * barriers during GC. We don't need to update the Ion barriers (which
5760          * is expensive) because Ion code doesn't run during GC. If need be,
5761          * we'll update the Ion barriers in ~AutoGCSlice.
5762          */
5763         if (zone->isGCMarking()) {
5764             MOZ_ASSERT(zone->needsIncrementalBarrier());
5765             zone->setNeedsIncrementalBarrier(false, Zone::DontUpdateJit);
5766         } else {
5767             MOZ_ASSERT(!zone->needsIncrementalBarrier());
5768         }
5769     }
5770 }
5771 
5772 AutoGCSlice::~AutoGCSlice()
5773 {
5774     /* We can't use GCZonesIter if this is the end of the last slice. */
5775     for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
5776         if (zone->isGCMarking()) {
5777             zone->setNeedsIncrementalBarrier(true, Zone::UpdateJit);
5778             zone->arenas.prepareForIncrementalGC(runtime);
5779         } else {
5780             zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
5781         }
5782     }
5783 }
5784 
5785 void
5786 GCRuntime::pushZealSelectedObjects()
5787 {
5788 #ifdef JS_GC_ZEAL
5789     /* Push selected objects onto the mark stack and clear the list. */
5790     for (JSObject** obj = selectedForMarking.begin(); obj != selectedForMarking.end(); obj++)
5791         TraceManuallyBarrieredEdge(&marker, obj, "selected obj");
5792 #endif
5793 }
5794 
5795 static bool
5796 IsShutdownGC(JS::gcreason::Reason reason)
5797 {
5798     return reason == JS::gcreason::SHUTDOWN_CC || reason == JS::gcreason::DESTROY_RUNTIME;
5799 }
5800 
5801 static bool
5802 ShouldCleanUpEverything(JS::gcreason::Reason reason, JSGCInvocationKind gckind)
5803 {
5804     // During shutdown, we must clean everything up, for the sake of leak
5805     // detection. When a runtime has no contexts, or we're doing a GC before a
5806     // shutdown CC, those are strong indications that we're shutting down.
5807     return IsShutdownGC(reason) || gckind == GC_SHRINK;
5808 }
5809 
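/*
 * Run a single slice of the collection state machine. Each case falls through
 * to the next when its work completes within the budget; a 'break' yields
 * back to the mutator, with incrementalState recording where to resume.
 */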
5810 void
5811 GCRuntime::incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
5812                                    AutoLockForExclusiveAccess& lock)
5813 {
5814     AutoGCSlice slice(rt);
5815 
5816     bool destroyingRuntime = (reason == JS::gcreason::DESTROY_RUNTIME);
5817 
5818     gc::State initialState = incrementalState;
5819 
5820     bool useZeal = false;
5821 #ifdef JS_GC_ZEAL
5822     if (reason == JS::gcreason::DEBUG_GC && !budget.isUnlimited()) {
5823         /*
5824          * Do the incremental collection type specified by zeal mode if the
5825          * collection was triggered by runDebugGC() and incremental GC has not
5826          * been cancelled by resetIncrementalGC().
5827          */
5828         useZeal = true;
5829     }
5830 #endif
5831 
5832     MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
5833     isIncremental = !budget.isUnlimited();
5834 
5835     if (useZeal && (hasZealMode(ZealMode::IncrementalRootsThenFinish) ||
5836                     hasZealMode(ZealMode::IncrementalMarkAllThenFinish)))
5837     {
5838         /*
         * Yields between slices occur at predetermined points in these modes;
5840          * the budget is not used.
5841          */
5842         budget.makeUnlimited();
5843     }
5844 
5845     switch (incrementalState) {
5846       case State::NotActive:
5847         initialReason = reason;
5848         cleanUpEverything = ShouldCleanUpEverything(reason, invocationKind);
5849         isCompacting = shouldCompact();
5850         lastMarkSlice = false;
5851 
5852         incrementalState = State::MarkRoots;
5853 
5854         MOZ_FALLTHROUGH;
5855 
5856       case State::MarkRoots:
5857         if (!beginMarkPhase(reason, lock)) {
5858             incrementalState = State::NotActive;
5859             return;
5860         }
5861 
5862         if (!destroyingRuntime)
5863             pushZealSelectedObjects();
5864 
5865         incrementalState = State::Mark;
5866 
5867         if (isIncremental && useZeal && hasZealMode(ZealMode::IncrementalRootsThenFinish))
5868             break;
5869 
5870         MOZ_FALLTHROUGH;
5871 
5872       case State::Mark:
5873         AutoGCRooter::traceAllWrappers(&marker);
5874 
5875         /* If we needed delayed marking for gray roots, then collect until done. */
5876         if (!hasBufferedGrayRoots()) {
5877             budget.makeUnlimited();
5878             isIncremental = false;
5879         }
5880 
5881         if (drainMarkStack(budget, gcstats::PHASE_MARK) == NotFinished)
5882             break;
5883 
5884         MOZ_ASSERT(marker.isDrained());
5885 
5886         /*
         * In incremental GCs where we have already performed more than one
         * slice, we yield after marking with the aim of starting the sweep in
5889          * the next slice, since the first slice of sweeping can be expensive.
5890          *
5891          * This is modified by the various zeal modes.  We don't yield in
5892          * IncrementalRootsThenFinish mode and we always yield in
5893          * IncrementalMarkAllThenFinish mode.
5894          *
5895          * We will need to mark anything new on the stack when we resume, so
5896          * we stay in Mark state.
5897          */
5898         if (!lastMarkSlice && isIncremental &&
5899             ((initialState == State::Mark &&
5900               !(useZeal && hasZealMode(ZealMode::IncrementalRootsThenFinish))) ||
5901              (useZeal && hasZealMode(ZealMode::IncrementalMarkAllThenFinish))))
5902         {
5903             lastMarkSlice = true;
5904             break;
5905         }
5906 
5907         incrementalState = State::Sweep;
5908 
5909         /*
5910          * This runs to completion, but we don't continue if the budget is
         * now exhausted.
5912          */
5913         beginSweepPhase(destroyingRuntime, lock);
5914         if (budget.isOverBudget())
5915             break;
5916 
5917         /*
5918          * Always yield here when running in incremental multi-slice zeal
         * mode, so RunDebugGC can reset the slice budget.
5920          */
5921         if (isIncremental && useZeal && hasZealMode(ZealMode::IncrementalMultipleSlices))
5922             break;
5923 
5924         MOZ_FALLTHROUGH;
5925 
5926       case State::Sweep:
5927         if (sweepPhase(budget, lock) == NotFinished)
5928             break;
5929 
5930         endSweepPhase(destroyingRuntime, lock);
5931 
5932         incrementalState = State::Finalize;
5933 
5934         MOZ_FALLTHROUGH;
5935 
5936       case State::Finalize:
5937         {
5938             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
5939 
5940             // Yield until background finalization is done.
5941             if (isIncremental) {
5942                 // Poll for end of background sweeping
5943                 AutoLockGC lock(rt);
5944                 if (isBackgroundSweeping())
5945                     break;
5946             } else {
5947                 waitBackgroundSweepEnd();
5948             }
5949         }
5950 
5951         {
            // Re-sweep the zones list, now that background finalization is
            // finished, to actually remove and free dead zones.
5954             gcstats::AutoPhase ap1(stats, gcstats::PHASE_SWEEP);
5955             gcstats::AutoPhase ap2(stats, gcstats::PHASE_DESTROY);
5956             AutoSetThreadIsSweeping threadIsSweeping;
5957             FreeOp fop(rt);
5958             sweepZones(&fop, destroyingRuntime);
5959         }
5960 
5961         MOZ_ASSERT(!startedCompacting);
5962         incrementalState = State::Compact;
5963 
5964         // Always yield before compacting since it is not incremental.
5965         if (isCompacting && isIncremental)
5966             break;
5967 
5968         MOZ_FALLTHROUGH;
5969 
5970       case State::Compact:
5971         if (isCompacting) {
5972             if (!startedCompacting)
5973                 beginCompactPhase();
5974 
5975             if (compactPhase(reason, budget, lock) == NotFinished)
5976                 break;
5977 
5978             endCompactPhase(reason);
5979         }
5980 
5981         startDecommit();
5982         incrementalState = State::Decommit;
5983 
5984         MOZ_FALLTHROUGH;
5985 
5986       case State::Decommit:
5987         {
5988             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
5989 
5990             // Yield until background decommit is done.
5991             if (isIncremental && decommitTask.isRunning())
5992                 break;
5993 
5994             decommitTask.join();
5995         }
5996 
5997         finishCollection(reason);
5998         incrementalState = State::NotActive;
5999         break;
6000     }
6001 }
6002 
6003 gc::AbortReason
6004 gc::IsIncrementalGCUnsafe(JSRuntime* rt)
6005 {
6006     MOZ_ASSERT(!rt->mainThread.suppressGC);
6007 
6008     if (rt->keepAtoms())
6009         return gc::AbortReason::KeepAtomsSet;
6010 
6011     if (!rt->gc.isIncrementalGCAllowed())
6012         return gc::AbortReason::IncrementalDisabled;
6013 
6014     return gc::AbortReason::None;
6015 }
6016 
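/*
 * Decide whether this slice may be incremental. If incremental collection is
 * unsafe or disabled, or an allocation trigger has been hit, make the budget
 * unlimited and record the non-incremental reason; if the set of scheduled
 * zones no longer matches an in-progress collection, reset it first.
 */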
6017 void
6018 GCRuntime::budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess& lock)
6019 {
6020     AbortReason unsafeReason = IsIncrementalGCUnsafe(rt);
6021     if (unsafeReason != AbortReason::None) {
6022         resetIncrementalGC(unsafeReason, lock);
6023         budget.makeUnlimited();
6024         stats.nonincremental(unsafeReason);
6025         return;
6026     }
6027 
6028     if (mode != JSGC_MODE_INCREMENTAL) {
6029         resetIncrementalGC(AbortReason::ModeChange, lock);
6030         budget.makeUnlimited();
6031         stats.nonincremental(AbortReason::ModeChange);
6032         return;
6033     }
6034 
6035     if (isTooMuchMalloc()) {
6036         budget.makeUnlimited();
6037         stats.nonincremental(AbortReason::MallocBytesTrigger);
6038     }
6039 
6040     bool reset = false;
6041     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
6042         if (zone->usage.gcBytes() >= zone->threshold.gcTriggerBytes()) {
6043             budget.makeUnlimited();
6044             stats.nonincremental(AbortReason::GCBytesTrigger);
6045         }
6046 
6047         if (isIncrementalGCInProgress() && zone->isGCScheduled() != zone->wasGCStarted())
6048             reset = true;
6049 
6050         if (zone->isTooMuchMalloc()) {
6051             budget.makeUnlimited();
6052             stats.nonincremental(AbortReason::MallocBytesTrigger);
6053         }
6054     }
6055 
6056     if (reset)
6057         resetIncrementalGC(AbortReason::ZoneChange, lock);
6058 }
6059 
6060 namespace {
6061 
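/*
 * On construction, schedule zones for collection: every zone in global GC
 * mode, zones with the incremental barrier enabled while a collection is in
 * progress (to avoid resets), and zones over their allocation trigger. On
 * destruction, unschedule all zones again.
 */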
6062 class AutoScheduleZonesForGC
6063 {
6064     JSRuntime* rt_;
6065 
6066   public:
6067     explicit AutoScheduleZonesForGC(JSRuntime* rt) : rt_(rt) {
6068         for (ZonesIter zone(rt_, WithAtoms); !zone.done(); zone.next()) {
6069             if (rt->gc.gcMode() == JSGC_MODE_GLOBAL)
6070                 zone->scheduleGC();
6071 
6072             /* This is a heuristic to avoid resets. */
6073             if (rt->gc.isIncrementalGCInProgress() && zone->needsIncrementalBarrier())
6074                 zone->scheduleGC();
6075 
6076             /* This is a heuristic to reduce the total number of collections. */
6077             if (zone->usage.gcBytes() >=
6078                 zone->threshold.allocTrigger(rt->gc.schedulingState.inHighFrequencyGCMode()))
6079             {
6080                 zone->scheduleGC();
6081             }
6082         }
6083     }
6084 
6085     ~AutoScheduleZonesForGC() {
6086         for (ZonesIter zone(rt_, WithAtoms); !zone.done(); zone.next())
6087             zone->unscheduleGC();
6088     }
6089 };
6090 
6091 /*
6092  * An invariant of our GC/CC interaction is that there must not ever be any
6093  * black to gray edges in the system. It is possible to violate this with
6094  * simple compartmental GC. For example, in GC[n], we collect in both
6095  * compartmentA and compartmentB, and mark both sides of the cross-compartment
6096  * edge gray. Later in GC[n+1], we only collect compartmentA, but this time
6097  * mark it black. Now we are violating the invariants and must fix it somehow.
6098  *
6099  * To prevent this situation, we explicitly detect the black->gray state when
6100  * marking cross-compartment edges -- see ShouldMarkCrossCompartment -- adding
 * each violating edge to foundBlackGrayEdges. After we leave the trace
6102  * session for each GC slice, we "ExposeToActiveJS" on each of these edges
6103  * (which we cannot do safely from the guts of the GC).
6104  */
6105 class AutoExposeLiveCrossZoneEdges
6106 {
6107     BlackGrayEdgeVector* edges;
6108 
6109   public:
6110     explicit AutoExposeLiveCrossZoneEdges(BlackGrayEdgeVector* edgesPtr) : edges(edgesPtr) {
6111         MOZ_ASSERT(edges->empty());
6112     }
6113     ~AutoExposeLiveCrossZoneEdges() {
6114         for (auto& target : *edges) {
6115             MOZ_ASSERT(target);
6116             MOZ_ASSERT(!target->zone()->isCollecting());
6117             UnmarkGrayCellRecursively(target, target->getTraceKind());
6118         }
6119         edges->clear();
6120     }
6121 };
6122 
6123 } /* anonymous namespace */
6124 
6125 /*
6126  * Run one GC "cycle" (either a slice of incremental GC or an entire
 * non-incremental GC). We disable inlining to ensure that the bottom of the
6128  * stack with possible GC roots recorded in MarkRuntime excludes any pointers we
6129  * use during the marking implementation.
6130  *
6131  * Returns true if we "reset" an existing incremental GC, which would force us
6132  * to run another cycle.
6133  */
6134 MOZ_NEVER_INLINE bool
6135 GCRuntime::gcCycle(bool nonincrementalByAPI, SliceBudget& budget, JS::gcreason::Reason reason)
6136 {
6137     // Note that the following is allowed to re-enter GC in the finalizer.
6138     AutoNotifyGCActivity notify(*this);
6139 
6140     gcstats::AutoGCSlice agc(stats, scanZonesBeforeGC(), invocationKind, budget, reason);
6141 
6142     AutoExposeLiveCrossZoneEdges aelcze(&foundBlackGrayEdges);
6143 
6144     evictNursery(reason);
6145 
6146     AutoTraceSession session(rt, JS::HeapState::MajorCollecting);
6147 
6148     majorGCTriggerReason = JS::gcreason::NO_REASON;
6149     interFrameGC = true;
6150 
6151     number++;
6152     if (!isIncrementalGCInProgress())
6153         incMajorGcNumber();
6154 
6155     // It's ok if threads other than the main thread have suppressGC set, as
6156     // they are operating on zones which will not be collected from here.
6157     MOZ_ASSERT(!rt->mainThread.suppressGC);
6158 
6159     // Assert if this is a GC unsafe region.
6160     verifyIsSafeToGC();
6161 
6162     {
6163         gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
6164 
        // Background finalization and decommit are finished by definition
6166         // before we can start a new GC session.
6167         if (!isIncrementalGCInProgress()) {
6168             assertBackgroundSweepingFinished();
6169             MOZ_ASSERT(!decommitTask.isRunning());
6170         }
6171 
6172         // We must also wait for background allocation to finish so we can
6173         // avoid taking the GC lock when manipulating the chunks during the GC.
6174         // The background alloc task can run between slices, so we must wait
6175         // for it at the start of every slice.
6176         allocTask.cancel(GCParallelTask::CancelAndWait);
6177     }
6178 
6179     State prevState = incrementalState;
6180 
6181     if (nonincrementalByAPI) {
6182         // Reset any in progress incremental GC if this was triggered via the
6183         // API. This isn't required for correctness, but sometimes during tests
6184         // the caller expects this GC to collect certain objects, and we need
6185         // to make sure to collect everything possible.
6186         if (reason != JS::gcreason::ALLOC_TRIGGER)
6187             resetIncrementalGC(gc::AbortReason::NonIncrementalRequested, session.lock);
6188 
6189         stats.nonincremental(gc::AbortReason::NonIncrementalRequested);
6190         budget.makeUnlimited();
6191     } else {
6192         budgetIncrementalGC(budget, session.lock);
6193     }
6194 
6195     /* The GC was reset, so we need a do-over. */
6196     if (prevState != State::NotActive && !isIncrementalGCInProgress())
6197         return true;
6198 
6199     TraceMajorGCStart();
6200 
6201     incrementalCollectSlice(budget, reason, session.lock);
6202 
6203     chunkAllocationSinceLastGC = false;
6204 
6205 #ifdef JS_GC_ZEAL
6206     /* Keeping these around after a GC is dangerous. */
6207     clearSelectedForMarking();
6208 #endif
6209 
6210     /* Clear gcMallocBytes for all zones. */
6211     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
6212         zone->resetGCMallocBytes();
6213 
6214     resetMallocBytes();
6215 
6216     TraceMajorGCEnd();
6217 
6218     return false;
6219 }
6220 
6221 #ifdef JS_GC_ZEAL
6222 static bool
6223 IsDeterministicGCReason(JS::gcreason::Reason reason)
6224 {
6225     if (reason > JS::gcreason::DEBUG_GC &&
6226         reason != JS::gcreason::CC_FORCED && reason != JS::gcreason::SHUTDOWN_CC)
6227     {
6228         return false;
6229     }
6230 
6231     if (reason == JS::gcreason::EAGER_ALLOC_TRIGGER)
6232         return false;
6233 
6234     return true;
6235 }
6236 #endif
6237 
6238 gcstats::ZoneGCStats
6239 GCRuntime::scanZonesBeforeGC()
6240 {
6241     gcstats::ZoneGCStats zoneStats;
6242     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
6243         zoneStats.zoneCount++;
6244         if (zone->isGCScheduled()) {
6245             zoneStats.collectedZoneCount++;
6246             zoneStats.collectedCompartmentCount += zone->compartments.length();
6247         }
6248     }
6249 
6250     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next())
6251         zoneStats.compartmentCount++;
6252 
6253     return zoneStats;
6254 }
6255 
6256 // The GC can only clean up scheduledForDestruction compartments that were
6257 // marked live by a barrier (e.g. by RemapWrappers from a navigation event).
6258 // It is also common to have compartments held live because they are part of a
6259 // cycle in gecko, e.g. involving the HTMLDocument wrapper. In this case, we
6260 // need to run the CycleCollector in order to remove these edges before the
6261 // compartment can be freed.
6262 void
6263 GCRuntime::maybeDoCycleCollection()
6264 {
6265     const static double ExcessiveGrayCompartments = 0.8;
6266     const static size_t LimitGrayCompartments = 200;
6267 
6268     size_t compartmentsTotal = 0;
6269     size_t compartmentsGray = 0;
6270     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
6271         ++compartmentsTotal;
6272         GlobalObject* global = c->unsafeUnbarrieredMaybeGlobal();
6273         if (global && global->asTenured().isMarked(GRAY))
6274             ++compartmentsGray;
6275     }
6276     double grayFraction = double(compartmentsGray) / double(compartmentsTotal);
6277     if (grayFraction > ExcessiveGrayCompartments || compartmentsGray > LimitGrayCompartments)
6278         callDoCycleCollectionCallback(rt->contextFromMainThread());
6279 }
6280 
6281 void
6282 GCRuntime::checkCanCallAPI()
6283 {
6284     MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
6285 
6286     /* If we attempt to invoke the GC while we are running in the GC, assert. */
6287     MOZ_RELEASE_ASSERT(!rt->isHeapBusy());
6288 
6289     MOZ_ASSERT(isAllocAllowed());
6290 }
6291 
6292 bool
6293 GCRuntime::checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason)
6294 {
6295     if (rt->mainThread.suppressGC)
6296         return false;
6297 
6298     // Only allow shutdown GCs when we're destroying the runtime. This keeps
6299     // the GC callback from triggering a nested GC and resetting global state.
6300     if (rt->isBeingDestroyed() && !IsShutdownGC(reason))
6301         return false;
6302 
6303 #ifdef JS_GC_ZEAL
6304     if (deterministicOnly && !IsDeterministicGCReason(reason))
6305         return false;
6306 #endif
6307 
6308     return true;
6309 }
6310 
6311 void
6312 GCRuntime::collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::Reason reason)
6313 {
6314     // Checks run for each request, even if we do not actually GC.
6315     checkCanCallAPI();
6316 
6317     // Check if we are allowed to GC at this time before proceeding.
6318     if (!checkIfGCAllowedInCurrentState(reason))
6319         return;
6320 
6321     AutoTraceLog logGC(TraceLoggerForMainThread(rt), TraceLogger_GC);
6322     AutoStopVerifyingBarriers av(rt, IsShutdownGC(reason));
6323     AutoEnqueuePendingParseTasksAfterGC aept(*this);
6324     AutoScheduleZonesForGC asz(rt);
6325 
6326     bool repeat = false;
6327     do {
6328         poked = false;
6329         bool wasReset = gcCycle(nonincrementalByAPI, budget, reason);
6330 
6331         /* Need to re-schedule all zones for GC. */
6332         if (poked && cleanUpEverything)
6333             JS::PrepareForFullGC(rt->contextFromMainThread());
6334 
6335         /*
6336          * This code makes an extra effort to collect compartments that we
6337          * thought were dead at the start of the GC. See the large comment in
6338          * beginMarkPhase.
6339          */
6340         bool repeatForDeadZone = false;
6341         if (!nonincrementalByAPI && !isIncrementalGCInProgress()) {
6342             for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
6343                 if (c->scheduledForDestruction) {
6344                     nonincrementalByAPI = true;
6345                     repeatForDeadZone = true;
6346                     reason = JS::gcreason::COMPARTMENT_REVIVED;
6347                     c->zone()->scheduleGC();
6348                 }
6349             }
6350         }
6351 
6352         /*
6353          * If we reset an existing GC, we need to start a new one. Also, we
6354          * repeat GCs that happen during shutdown (the cleanUpEverything
6355          * case) until we can be sure that no additional garbage is created
6356          * (which typically happens if roots are dropped during finalizers).
6357          */
6358         repeat = (poked && cleanUpEverything) || wasReset || repeatForDeadZone;
6359     } while (repeat);
6360 
6361     if (reason == JS::gcreason::COMPARTMENT_REVIVED)
6362         maybeDoCycleCollection();
6363 
6364 #ifdef JS_GC_ZEAL
6365     if (rt->hasZealMode(ZealMode::CheckHeapAfterGC)) {
6366         gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
6367         CheckHeapAfterGC(rt);
6368     }
6369 #endif
6370 }
6371 
6372 js::AutoEnqueuePendingParseTasksAfterGC::~AutoEnqueuePendingParseTasksAfterGC()
6373 {
6374     if (!OffThreadParsingMustWaitForGC(gc_.rt))
6375         EnqueuePendingParseTasksAfterGC(gc_.rt);
6376 }
6377 
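// Turn a requested slice time into a SliceBudget. A request of zero selects
// the default slice budget; in high-frequency GC mode with dynamic mark
// slicing enabled this is lengthened by IGC_MARK_SLICE_MULTIPLIER, except for
// allocation-triggered GCs.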
6378 SliceBudget
6379 GCRuntime::defaultBudget(JS::gcreason::Reason reason, int64_t millis)
6380 {
6381     if (millis == 0) {
6382         if (reason == JS::gcreason::ALLOC_TRIGGER)
6383             millis = defaultSliceBudget();
6384         else if (schedulingState.inHighFrequencyGCMode() && tunables.isDynamicMarkSliceEnabled())
6385             millis = defaultSliceBudget() * IGC_MARK_SLICE_MULTIPLIER;
6386         else
6387             millis = defaultSliceBudget();
6388     }
6389 
6390     return SliceBudget(TimeBudget(millis));
6391 }
6392 
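/* Run a non-incremental collection with an unlimited budget. */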
6393 void
6394 GCRuntime::gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason)
6395 {
6396     invocationKind = gckind;
6397     collect(true, SliceBudget::unlimited(), reason);
6398 }
6399 
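// Start a new collection, running the first slice with the default budget, or
// fall back to a non-incremental GC if incremental GC is disabled.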
6400 void
6401 GCRuntime::startGC(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis)
6402 {
6403     MOZ_ASSERT(!isIncrementalGCInProgress());
6404     if (!JS::IsIncrementalGCEnabled(rt->contextFromMainThread())) {
6405         gc(gckind, reason);
6406         return;
6407     }
6408     invocationKind = gckind;
6409     collect(false, defaultBudget(reason, millis), reason);
6410 }
6411 
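/* Run another slice of the in-progress incremental collection. */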
6412 void
6413 GCRuntime::gcSlice(JS::gcreason::Reason reason, int64_t millis)
6414 {
6415     MOZ_ASSERT(isIncrementalGCInProgress());
6416     collect(false, defaultBudget(reason, millis), reason);
6417 }
6418 
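// Finish an in-progress incremental GC by running the remaining work with an
// unlimited budget.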
6419 void
6420 GCRuntime::finishGC(JS::gcreason::Reason reason)
6421 {
6422     MOZ_ASSERT(isIncrementalGCInProgress());
6423 
6424     // When finishing an ongoing incremental GC non-incrementally, skip the
6425     // compacting phase to avoid janking the browser, unless we are collecting
6426     // because we're out of memory.
6427     if (!IsOOMReason(initialReason)) {
6428         if (incrementalState == State::Compact) {
6429             abortGC();
6430             return;
6431         }
6432 
6433         isCompacting = false;
6434     }
6435 
6436     collect(false, SliceBudget::unlimited(), reason);
6437 }
6438 
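// Abandon an in-progress incremental GC: evict the nursery and reset the
// incremental state without running the remaining marking and sweeping work.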
6439 void
6440 GCRuntime::abortGC()
6441 {
6442     checkCanCallAPI();
6443     MOZ_ASSERT(!rt->mainThread.suppressGC);
6444 
6445     AutoStopVerifyingBarriers av(rt, false);
6446     AutoEnqueuePendingParseTasksAfterGC aept(*this);
6447 
6448     gcstats::AutoGCSlice agc(stats, scanZonesBeforeGC(), invocationKind,
6449                              SliceBudget::unlimited(), JS::gcreason::ABORT_GC);
6450 
6451     evictNursery(JS::gcreason::ABORT_GC);
6452     AutoTraceSession session(rt, JS::HeapState::MajorCollecting);
6453 
6454     number++;
6455     resetIncrementalGC(gc::AbortReason::AbortRequested, session.lock);
6456 }
6457 
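// After a frame has been painted, run a REFRESH_FRAME slice of any in-progress
// incremental GC, provided no GC already ran since the last frame and
// refresh-frame slices are enabled. Zeal modes can instead verify pre-barriers
// or run a full GC every frame.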
6458 void
6459 GCRuntime::notifyDidPaint()
6460 {
6461     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
6462 
6463 #ifdef JS_GC_ZEAL
6464     if (hasZealMode(ZealMode::FrameVerifierPre))
6465         verifyPreBarriers();
6466 
6467     if (hasZealMode(ZealMode::FrameGC)) {
6468         JS::PrepareForFullGC(rt->contextFromMainThread());
6469         gc(GC_NORMAL, JS::gcreason::REFRESH_FRAME);
6470         return;
6471     }
6472 #endif
6473 
6474     if (isIncrementalGCInProgress() && !interFrameGC && tunables.areRefreshFrameSlicesEnabled()) {
6475         JS::PrepareForIncrementalGC(rt->contextFromMainThread());
6476         gcSlice(JS::gcreason::REFRESH_FRAME);
6477     }
6478 
6479     interFrameGC = false;
6480 }
6481 
6482 static bool
6483 ZonesSelected(JSRuntime* rt)
6484 {
6485     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
6486         if (zone->isGCScheduled())
6487             return true;
6488     }
6489     return false;
6490 }
6491 
6492 void
6493 GCRuntime::startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget)
6494 {
6495     MOZ_ASSERT(!isIncrementalGCInProgress());
6496     if (!ZonesSelected(rt))
6497         JS::PrepareForFullGC(rt->contextFromMainThread());
6498     invocationKind = gckind;
6499     collect(false, budget, JS::gcreason::DEBUG_GC);
6500 }
6501 
6502 void
6503 GCRuntime::debugGCSlice(SliceBudget& budget)
6504 {
6505     MOZ_ASSERT(isIncrementalGCInProgress());
6506     if (!ZonesSelected(rt))
6507         JS::PrepareForIncrementalGC(rt->contextFromMainThread());
6508     collect(false, budget, JS::gcreason::DEBUG_GC);
6509 }
6510 
6511 /* Schedule a full GC unless a zone will already be collected. */
6512 void
6513 js::PrepareForDebugGC(JSRuntime* rt)
6514 {
6515     if (!ZonesSelected(rt))
6516         JS::PrepareForFullGC(rt->contextFromMainThread());
6517 }
6518 
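// Try to free up memory in response to a failing malloc: stop allocating new
// chunks, finish pending decommit and nursery free work, then release as much
// committed memory as possible in the hope of satisfying the failing request.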
6519 void
6520 GCRuntime::onOutOfMallocMemory()
6521 {
6522     // Stop allocating new chunks.
6523     allocTask.cancel(GCParallelTask::CancelAndWait);
6524 
6525     // Make sure we release anything queued for release.
6526     decommitTask.join();
6527 
6528     // Wait for background free of nursery huge slots to finish.
6529     nursery.waitBackgroundFreeEnd();
6530 
6531     AutoLockGC lock(rt);
6532     onOutOfMallocMemory(lock);
6533 }
6534 
6535 void
6536 GCRuntime::onOutOfMallocMemory(const AutoLockGC& lock)
6537 {
6538     // Release any relocated arenas we may be holding on to, without releasing
6539     // the GC lock.
6540     releaseHeldRelocatedArenasWithoutUnlocking(lock);
6541 
6542     // Throw away any excess chunks we have lying around.
6543     freeEmptyChunks(rt, lock);
6544 
6545     // Immediately decommit as many arenas as possible in the hopes that this
6546     // might let the OS scrape together enough pages to satisfy the failing
6547     // malloc request.
6548     decommitAllWithoutUnlocking(lock);
6549 }
6550 
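// Collect the nursery (unless GC is suppressed), free any blocks queued for
// release after the minor GC, and then check whether tenured allocation has
// pushed any zone over its allocation trigger.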
6551 void
6552 GCRuntime::minorGC(JS::gcreason::Reason reason, gcstats::Phase phase)
6553 {
6554     MOZ_ASSERT(!rt->isHeapBusy());
6555 
6556     if (rt->mainThread.suppressGC)
6557         return;
6558 
6559     gcstats::AutoPhase ap(stats, phase);
6560 
6561     minorGCTriggerReason = JS::gcreason::NO_REASON;
6562     TraceLoggerThread* logger = TraceLoggerForMainThread(rt);
6563     AutoTraceLog logMinorGC(logger, TraceLogger_MinorGC);
6564     nursery.collect(rt, reason);
6565     MOZ_ASSERT(nursery.isEmpty());
6566 
6567     blocksToFreeAfterMinorGC.freeAll();
6568 
6569 #ifdef JS_GC_ZEAL
6570     if (rt->hasZealMode(ZealMode::CheckHeapAfterGC))
6571         CheckHeapAfterGC(rt);
6572 #endif
6573 
6574     {
6575         AutoLockGC lock(rt);
6576         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
6577             maybeAllocTriggerZoneGC(zone, lock);
6578     }
6579 }
6580 
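// Generational GC can be disabled in a nested fashion: the nursery is evicted
// and disabled on the first call and re-enabled only once every disable has
// been matched by an enable.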
6581 void
6582 GCRuntime::disableGenerationalGC()
6583 {
6584     if (isGenerationalGCEnabled()) {
6585         evictNursery(JS::gcreason::API);
6586         nursery.disable();
6587     }
6588     ++rt->gc.generationalDisabled;
6589 }
6590 
6591 void
6592 GCRuntime::enableGenerationalGC()
6593 {
6594     MOZ_ASSERT(generationalDisabled > 0);
6595     --generationalDisabled;
6596     if (generationalDisabled == 0)
6597         nursery.enable();
6598 }
6599 
6600 bool
6601 GCRuntime::gcIfRequested()
6602 {
6603     // This method returns whether a major GC was performed.
6604 
6605     if (minorGCRequested())
6606         minorGC(minorGCTriggerReason);
6607 
6608     if (majorGCRequested()) {
6609         if (!isIncrementalGCInProgress())
6610             startGC(GC_NORMAL, majorGCTriggerReason);
6611         else
6612             gcSlice(majorGCTriggerReason);
6613         return true;
6614     }
6615 
6616     return false;
6617 }
6618 
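// Finish any in-progress incremental GC and wait for background freeing of
// nursery buffers to complete.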
6619 void
6620 js::gc::FinishGC(JSContext* cx)
6621 {
6622     if (JS::IsIncrementalGCInProgress(cx)) {
6623         JS::PrepareForIncrementalGC(cx);
6624         JS::FinishIncrementalGC(cx, JS::gcreason::API);
6625     }
6626 
6627     cx->gc.nursery.waitBackgroundFreeEnd();
6628 }
6629 
6630 AutoPrepareForTracing::AutoPrepareForTracing(JSContext* cx, ZoneSelector selector)
6631 {
6632     js::gc::FinishGC(cx);
6633     session_.emplace(cx);
6634 }
6635 
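// Create a new compartment in the given zone, or in a freshly created zone if
// none is supplied. The compartment (and any new zone) is registered with the
// runtime under the GC lock.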
6636 JSCompartment*
6637 js::NewCompartment(JSContext* cx, Zone* zone, JSPrincipals* principals,
6638                    const JS::CompartmentOptions& options)
6639 {
6640     JSRuntime* rt = cx->runtime();
6641     JS_AbortIfWrongThread(cx);
6642 
6643     ScopedJSDeletePtr<Zone> zoneHolder;
6644     if (!zone) {
6645         zone = cx->new_<Zone>(rt);
6646         if (!zone)
6647             return nullptr;
6648 
6649         zoneHolder.reset(zone);
6650 
6651         const JSPrincipals* trusted = rt->trustedPrincipals();
6652         bool isSystem = principals && principals == trusted;
6653         if (!zone->init(isSystem)) {
6654             ReportOutOfMemory(cx);
6655             return nullptr;
6656         }
6657     }
6658 
6659     ScopedJSDeletePtr<JSCompartment> compartment(cx->new_<JSCompartment>(zone, options));
6660     if (!compartment || !compartment->init(cx))
6661         return nullptr;
6662 
6663     // Set up the principals.
6664     JS_SetCompartmentPrincipals(compartment, principals);
6665 
6666     AutoLockGC lock(rt);
6667 
6668     if (!zone->compartments.append(compartment.get())) {
6669         ReportOutOfMemory(cx);
6670         return nullptr;
6671     }
6672 
6673     if (zoneHolder && !rt->gc.zones.append(zone)) {
6674         ReportOutOfMemory(cx);
6675         return nullptr;
6676     }
6677 
6678     zoneHolder.forget();
6679     return compartment.forget();
6680 }
6681 
6682 void
6683 gc::MergeCompartments(JSCompartment* source, JSCompartment* target)
6684 {
6685     // The source compartment must be specifically flagged as mergeable.  This
6686     // also implies that the compartment is not visible to the debugger.
6687     MOZ_ASSERT(source->creationOptions_.mergeable());
6688     MOZ_ASSERT(source->creationOptions_.invisibleToDebugger());
6689 
6690     MOZ_ASSERT(source->creationOptions().addonIdOrNull() ==
6691                target->creationOptions().addonIdOrNull());
6692 
6693     JSContext* cx = source->contextFromMainThread();
6694 
6695     AutoPrepareForTracing prepare(cx, SkipAtoms);
6696 
6697     // Clean up tables and other state in the source compartment that will be
6698     // meaningless after merging into the target compartment.
6699 
6700     source->clearTables();
6701     source->zone()->clearTables();
6702     source->unsetIsDebuggee();
6703 
6704     // The delazification flag indicates the presence of LazyScripts in a
6705     // compartment for the Debugger API, so if the source compartment created
6706     // LazyScripts, the flag must be propagated to the target compartment.
6707     if (source->needsDelazificationForDebugger())
6708         target->scheduleDelazificationForDebugger();
6709 
6710     // Release any relocated arenas which we may be holding on to as they might
6711     // be in the source zone.
6712     cx->gc.releaseHeldRelocatedArenas();
6713 
6714     // Fixup compartment pointers in source to refer to target, and make sure
6715     // type information generations are in sync.
6716 
6717     for (auto script = source->zone()->cellIter<JSScript>(); !script.done(); script.next()) {
6718         MOZ_ASSERT(script->compartment() == source);
6719         script->compartment_ = target;
6720         script->setTypesGeneration(target->zone()->types.generation);
6721     }
6722 
6723     for (auto group = source->zone()->cellIter<ObjectGroup>(); !group.done(); group.next()) {
6724         group->setGeneration(target->zone()->types.generation);
6725         group->compartment_ = target;
6726 
6727         // Remove any unboxed layouts from the list in the off thread
6728         // compartment. These do not need to be reinserted in the target
6729         // compartment's list, as the list is not required to be complete.
6730         if (UnboxedLayout* layout = group->maybeUnboxedLayoutDontCheckGeneration())
6731             layout->detachFromCompartment();
6732     }
6733 
6734     // Fixup zone pointers in source's zone to refer to target's zone.
6735 
6736     for (auto thingKind : AllAllocKinds()) {
6737         for (ArenaIter aiter(source->zone(), thingKind); !aiter.done(); aiter.next()) {
6738             Arena* arena = aiter.get();
6739             arena->zone = target->zone();
6740         }
6741     }
6742 
6743     // The source should be the only compartment in its zone.
6744     for (CompartmentsInZoneIter c(source->zone()); !c.done(); c.next())
6745         MOZ_ASSERT(c.get() == source);
6746 
6747     // Merge the allocator, stats and UIDs in source's zone into target's zone.
6748     target->zone()->arenas.adoptArenas(cx, &source->zone()->arenas);
6749     target->zone()->usage.adopt(source->zone()->usage);
6750     target->zone()->adoptUniqueIds(source->zone());
6751 
6752     // Merge other info in source's zone into target's zone.
6753     target->zone()->types.typeLifoAlloc.transferFrom(&source->zone()->types.typeLifoAlloc);
6754 }
6755 
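// Run a collection appropriate to the current zeal mode: a minor GC for
// generational zeal, incremental slices (with a doubling work budget for the
// multiple-slices mode) for the incremental modes, or a full shrinking or
// normal GC otherwise.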
6756 void
6757 GCRuntime::runDebugGC()
6758 {
6759 #ifdef JS_GC_ZEAL
6760     if (rt->mainThread.suppressGC)
6761         return;
6762 
6763     if (hasZealMode(ZealMode::GenerationalGC))
6764         return minorGC(JS::gcreason::DEBUG_GC);
6765 
6766     PrepareForDebugGC(rt);
6767 
6768     auto budget = SliceBudget::unlimited();
6769     if (hasZealMode(ZealMode::IncrementalRootsThenFinish) ||
6770         hasZealMode(ZealMode::IncrementalMarkAllThenFinish) ||
6771         hasZealMode(ZealMode::IncrementalMultipleSlices))
6772     {
6773         js::gc::State initialState = incrementalState;
6774         if (hasZealMode(ZealMode::IncrementalMultipleSlices)) {
6775             /*
6776              * Start with a small slice limit and double it every slice. This
6777              * ensures that we get multiple slices and that the collection
6778              * runs to completion.
6779              */
6780             if (!isIncrementalGCInProgress())
6781                 incrementalLimit = zealFrequency / 2;
6782             else
6783                 incrementalLimit *= 2;
6784             budget = SliceBudget(WorkBudget(incrementalLimit));
6785         } else {
6786             // This triggers incremental GC but is actually ignored by IncrementalMarkSlice.
6787             budget = SliceBudget(WorkBudget(1));
6788         }
6789 
6790         if (!isIncrementalGCInProgress())
6791             invocationKind = GC_SHRINK;
6792         collect(false, budget, JS::gcreason::DEBUG_GC);
6793 
6794         /*
6795          * For multi-slice zeal, reset the slice size when we get to the sweep
6796          * or compact phases.
6797          */
6798         if (hasZealMode(ZealMode::IncrementalMultipleSlices)) {
6799             if ((initialState == State::Mark && incrementalState == State::Sweep) ||
6800                 (initialState == State::Sweep && incrementalState == State::Compact))
6801             {
6802                 incrementalLimit = zealFrequency / 2;
6803             }
6804         }
6805     } else if (hasZealMode(ZealMode::Compact)) {
6806         gc(GC_SHRINK, JS::gcreason::DEBUG_GC);
6807     } else {
6808         gc(GC_NORMAL, JS::gcreason::DEBUG_GC);
6809     }
6810 
6811 #endif
6812 }
6813 
6814 void
6815 GCRuntime::setFullCompartmentChecks(bool enabled)
6816 {
6817     MOZ_ASSERT(!rt->isHeapMajorCollecting());
6818     fullCompartmentChecks = enabled;
6819 }
6820 
6821 #ifdef JS_GC_ZEAL
6822 bool
6823 GCRuntime::selectForMarking(JSObject* object)
6824 {
6825     MOZ_ASSERT(!rt->isHeapMajorCollecting());
6826     return selectedForMarking.append(object);
6827 }
6828 
6829 void
6830 GCRuntime::clearSelectedForMarking()
6831 {
6832     selectedForMarking.clearAndFree();
6833 }
6834 
6835 void
6836 GCRuntime::setDeterministic(bool enabled)
6837 {
6838     MOZ_ASSERT(!rt->isHeapMajorCollecting());
6839     deterministicOnly = enabled;
6840 }
6841 #endif
6842 
6843 #ifdef DEBUG
6844 
6845 /* Should only be called manually under gdb */
6846 void PreventGCDuringInteractiveDebug()
6847 {
6848     TlsPerThreadData.get()->suppressGC++;
6849 }
6850 
6851 #endif
6852 
6853 void
6854 js::ReleaseAllJITCode(FreeOp* fop)
6855 {
6856     js::CancelOffThreadIonCompile(fop->runtime());
6857     for (ZonesIter zone(fop->runtime(), SkipAtoms); !zone.done(); zone.next()) {
6858         zone->setPreservingCode(false);
6859         zone->discardJitCode(fop);
6860     }
6861 }
6862 
6863 void
6864 js::PurgeJITCaches(Zone* zone)
6865 {
6866     /* Discard Ion caches. */
6867     for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next())
6868         jit::PurgeCaches(script);
6869 }
6870 
6871 void
6872 ArenaLists::normalizeBackgroundFinalizeState(AllocKind thingKind)
6873 {
6874     ArenaLists::BackgroundFinalizeState* bfs = &backgroundFinalizeState[thingKind];
6875     switch (*bfs) {
6876       case BFS_DONE:
6877         break;
6878       default:
6879         MOZ_ASSERT_UNREACHABLE("Background finalization in progress, but it should not be.");
6880         break;
6881     }
6882 }
6883 
6884 void
6885 ArenaLists::adoptArenas(JSRuntime* rt, ArenaLists* fromArenaLists)
6886 {
6887     // GC should be inactive, but still take the lock as a kind of read fence.
6888     AutoLockGC lock(rt);
6889 
6890     fromArenaLists->purge();
6891 
6892     for (auto thingKind : AllAllocKinds()) {
6893         // When we enter a parallel section, we join the background
6894         // thread, and we do not run GC while in the parallel section,
6895         // so no finalizer should be active!
6896         normalizeBackgroundFinalizeState(thingKind);
6897         fromArenaLists->normalizeBackgroundFinalizeState(thingKind);
6898 
6899         ArenaList* fromList = &fromArenaLists->arenaLists[thingKind];
6900         ArenaList* toList = &arenaLists[thingKind];
6901         fromList->check();
6902         toList->check();
6903         Arena* next;
6904         for (Arena* fromArena = fromList->head(); fromArena; fromArena = next) {
6905             // Copy fromArena->next before releasing/reinserting.
6906             next = fromArena->next;
6907 
6908             MOZ_ASSERT(!fromArena->isEmpty());
6909             toList->insertAtCursor(fromArena);
6910         }
6911         fromList->clear();
6912         toList->check();
6913     }
6914 }
6915 
6916 bool
6917 ArenaLists::containsArena(JSRuntime* rt, Arena* needle)
6918 {
6919     AutoLockGC lock(rt);
6920     ArenaList& list = arenaLists[needle->getAllocKind()];
6921     for (Arena* arena = list.head(); arena; arena = arena->next) {
6922         if (arena == needle)
6923             return true;
6924     }
6925     return false;
6926 }
6927 
6928 
6929 AutoSuppressGC::AutoSuppressGC(ExclusiveContext* cx)
6930   : suppressGC_(cx->perThreadData->suppressGC)
6931 {
6932     suppressGC_++;
6933 }
6934 
6935 AutoSuppressGC::AutoSuppressGC(JSCompartment* comp)
6936   : suppressGC_(comp->runtimeFromMainThread()->mainThread.suppressGC)
6937 {
6938     suppressGC_++;
6939 }
6940 
6941 AutoSuppressGC::AutoSuppressGC(JSContext* cx)
6942   : suppressGC_(cx->mainThread().suppressGC)
6943 {
6944     suppressGC_++;
6945 }
6946 
6947 bool
6948 js::UninlinedIsInsideNursery(const gc::Cell* cell)
6949 {
6950     return IsInsideNursery(cell);
6951 }
6952 
6953 #ifdef DEBUG
6954 AutoDisableProxyCheck::AutoDisableProxyCheck(JSRuntime* rt)
6955   : gc(rt->gc)
6956 {
6957     gc.disableStrictProxyChecking();
6958 }
6959 
6960 AutoDisableProxyCheck::~AutoDisableProxyCheck()
6961 {
6962     gc.enableStrictProxyChecking();
6963 }
6964 
6965 JS_FRIEND_API(void)
6966 JS::AssertGCThingMustBeTenured(JSObject* obj)
6967 {
6968     MOZ_ASSERT(obj->isTenured() &&
6969                (!IsNurseryAllocable(obj->asTenured().getAllocKind()) ||
6970                 obj->getClass()->hasFinalize()));
6971 }
6972 
6973 JS_FRIEND_API(void)
6974 JS::AssertGCThingIsNotAnObjectSubclass(Cell* cell)
6975 {
6976     MOZ_ASSERT(cell);
6977     MOZ_ASSERT(cell->getTraceKind() != JS::TraceKind::Object);
6978 }
6979 
6980 JS_FRIEND_API(void)
6981 js::gc::AssertGCThingHasType(js::gc::Cell* cell, JS::TraceKind kind)
6982 {
6983     if (!cell)
6984         MOZ_ASSERT(kind == JS::TraceKind::Null);
6985     else if (IsInsideNursery(cell))
6986         MOZ_ASSERT(kind == JS::TraceKind::Object);
6987     else
6988         MOZ_ASSERT(MapAllocToTraceKind(cell->asTenured().getAllocKind()) == kind);
6989 }
6990 
6991 JS_PUBLIC_API(size_t)
6992 JS::GetGCNumber()
6993 {
6994     JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
6995     if (!rt)
6996         return 0;
6997     return rt->gc.gcNumber();
6998 }
6999 #endif
7000 
7001 JS::AutoAssertNoGC::AutoAssertNoGC()
7002   : gc(nullptr), gcNumber(0)
7003 {
7004     js::PerThreadData* data = js::TlsPerThreadData.get();
7005     if (data) {
7006         /*
7007          * GCs from off-thread will always assert, so off-thread is implicitly
7008          * AutoAssertNoGC. We still need to allow AutoAssertNoGC to be used in
7009          * code that works from both threads, however. We also use this to
7010          * annotate the off-thread run loops.
7011          */
7012         JSRuntime* runtime = data->runtimeIfOnOwnerThread();
7013         if (runtime) {
7014             gc = &runtime->gc;
7015             gcNumber = gc->gcNumber();
7016             gc->enterUnsafeRegion();
7017         }
7018     }
7019 }
7020 
7021 JS::AutoAssertNoGC::AutoAssertNoGC(JSRuntime* rt)
7022   : gc(&rt->gc), gcNumber(rt->gc.gcNumber())
7023 {
7024     gc->enterUnsafeRegion();
7025 }
7026 
7027 JS::AutoAssertNoGC::AutoAssertNoGC(JSContext* cx)
7028   : gc(&cx->gc), gcNumber(cx->gc.gcNumber())
7029 {
7030     gc->enterUnsafeRegion();
7031 }
7032 
7033 JS::AutoAssertNoGC::~AutoAssertNoGC()
7034 {
7035     if (gc) {
7036         gc->leaveUnsafeRegion();
7037 
7038         /*
7039          * The following backstop assertion should never fire: if we bumped the
7040          * gcNumber, we should have asserted because inUnsafeRegion was true.
7041          */
7042         MOZ_ASSERT(gcNumber == gc->gcNumber(), "GC ran inside an AutoAssertNoGC scope.");
7043     }
7044 }
7045 
7046 JS::AutoAssertOnBarrier::AutoAssertOnBarrier(JSContext* cx)
7047   : context(cx),
7048     prev(cx->runtime()->allowGCBarriers())
7049 {
7050     context->runtime()->allowGCBarriers_ = false;
7051 }
7052 
7053 JS::AutoAssertOnBarrier::~AutoAssertOnBarrier()
7054 {
7055     MOZ_ASSERT(!context->runtime()->allowGCBarriers_);
7056     context->runtime()->allowGCBarriers_ = prev;
7057 }
7058 
7059 #ifdef DEBUG
7060 JS::AutoAssertNoAlloc::AutoAssertNoAlloc(JSContext* cx)
7061   : gc(nullptr)
7062 {
7063     disallowAlloc(cx);
7064 }
7065 
7066 void JS::AutoAssertNoAlloc::disallowAlloc(JSRuntime* rt)
7067 {
7068     MOZ_ASSERT(!gc);
7069     gc = &rt->gc;
7070     gc->disallowAlloc();
7071 }
7072 
7073 JS::AutoAssertNoAlloc::~AutoAssertNoAlloc()
7074 {
7075     if (gc)
7076         gc->allowAlloc();
7077 }
7078 
7079 AutoAssertNoNurseryAlloc::AutoAssertNoNurseryAlloc(JSRuntime* rt)
7080   : gc(rt->gc)
7081 {
7082     gc.disallowNurseryAlloc();
7083 }
7084 
7085 AutoAssertNoNurseryAlloc::~AutoAssertNoNurseryAlloc()
7086 {
7087     gc.allowNurseryAlloc();
7088 }
7089 
7090 JS::AutoEnterCycleCollection::AutoEnterCycleCollection(JSContext* cx)
7091   : runtime(cx->runtime())
7092 {
7093     MOZ_ASSERT(!runtime->isHeapBusy());
7094     runtime->setHeapState(HeapState::CycleCollecting);
7095 }
7096 
7097 JS::AutoEnterCycleCollection::~AutoEnterCycleCollection()
7098 {
7099     MOZ_ASSERT(runtime->isCycleCollecting());
7100     runtime->setHeapState(HeapState::Idle);
7101 }
7102 #endif
7103 
7104 JS::AutoAssertGCCallback::AutoAssertGCCallback(JSObject* obj)
7105   : AutoSuppressGCAnalysis()
7106 {
7107     MOZ_ASSERT(obj->runtimeFromMainThread()->isHeapCollecting());
7108 }
7109 
7110 JS_FRIEND_API(const char*)
7111 JS::GCTraceKindToAscii(JS::TraceKind kind)
7112 {
7113     switch (kind) {
7114 #define MAP_NAME(name, _0, _1) case JS::TraceKind::name: return #name;
7115 JS_FOR_EACH_TRACEKIND(MAP_NAME);
7116 #undef MAP_NAME
7117       default: return "Invalid";
7118     }
7119 }
7120 
7121 JS::GCCellPtr::GCCellPtr(const Value& v)
7122   : ptr(0)
7123 {
7124     if (v.isString())
7125         ptr = checkedCast(v.toString(), JS::TraceKind::String);
7126     else if (v.isObject())
7127         ptr = checkedCast(&v.toObject(), JS::TraceKind::Object);
7128     else if (v.isSymbol())
7129         ptr = checkedCast(v.toSymbol(), JS::TraceKind::Symbol);
7130     else if (v.isPrivateGCThing())
7131         ptr = checkedCast(v.toGCThing(), v.toGCThing()->getTraceKind());
7132     else
7133         ptr = checkedCast(nullptr, JS::TraceKind::Null);
7134 }
7135 
7136 JS::TraceKind
7137 JS::GCCellPtr::outOfLineKind() const
7138 {
7139     MOZ_ASSERT((ptr & OutOfLineTraceKindMask) == OutOfLineTraceKindMask);
7140     MOZ_ASSERT(asCell()->isTenured());
7141     return MapAllocToTraceKind(asCell()->asTenured().getAllocKind());
7142 }
7143 
7144 bool
7145 JS::GCCellPtr::mayBeOwnedByOtherRuntime() const
7146 {
7147     return (is<JSString>() && as<JSString>().isPermanentAtom()) ||
7148            (is<Symbol>() && as<Symbol>().isWellKnownSymbol());
7149 }
7150 
7151 #ifdef JSGC_HASH_TABLE_CHECKS
7152 void
7153 js::gc::CheckHashTablesAfterMovingGC(JSRuntime* rt)
7154 {
7155     /*
7156      * Check that internal hash tables no longer have any pointers to things
7157      * that have been moved.
7158      */
7159     rt->spsProfiler.checkStringsMapAfterMovingGC();
7160     for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
7161         zone->checkUniqueIdTableAfterMovingGC();
7162         zone->checkInitialShapesTableAfterMovingGC();
7163         zone->checkBaseShapeTableAfterMovingGC();
7164 
7165         JS::AutoCheckCannotGC nogc;
7166         for (auto baseShape = zone->cellIter<BaseShape>(); !baseShape.done(); baseShape.next()) {
7167             if (ShapeTable* table = baseShape->maybeTable(nogc))
7168                 table->checkAfterMovingGC();
7169         }
7170     }
7171     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
7172         c->objectGroups.checkTablesAfterMovingGC();
7173         c->dtoaCache.checkCacheAfterMovingGC();
7174         c->checkWrapperMapAfterMovingGC();
7175         c->checkScriptMapsAfterMovingGC();
7176         if (c->debugEnvs)
7177             c->debugEnvs->checkHashTablesAfterMovingGC(rt);
7178     }
7179 }
7180 #endif
7181 
7182 JS_PUBLIC_API(void)
7183 JS::PrepareZoneForGC(Zone* zone)
7184 {
7185     zone->scheduleGC();
7186 }
7187 
7188 JS_PUBLIC_API(void)
7189 JS::PrepareForFullGC(JSContext* cx)
7190 {
7191     for (ZonesIter zone(cx, WithAtoms); !zone.done(); zone.next())
7192         zone->scheduleGC();
7193 }
7194 
7195 JS_PUBLIC_API(void)
7196 JS::PrepareForIncrementalGC(JSContext* cx)
7197 {
7198     if (!JS::IsIncrementalGCInProgress(cx))
7199         return;
7200 
7201     for (ZonesIter zone(cx, WithAtoms); !zone.done(); zone.next()) {
7202         if (zone->wasGCStarted())
7203             PrepareZoneForGC(zone);
7204     }
7205 }
7206 
7207 JS_PUBLIC_API(bool)
7208 JS::IsGCScheduled(JSContext* cx)
7209 {
7210     for (ZonesIter zone(cx, WithAtoms); !zone.done(); zone.next()) {
7211         if (zone->isGCScheduled())
7212             return true;
7213     }
7214 
7215     return false;
7216 }
7217 
7218 JS_PUBLIC_API(void)
7219 JS::SkipZoneForGC(Zone* zone)
7220 {
7221     zone->unscheduleGC();
7222 }
7223 
7224 JS_PUBLIC_API(void)
7225 JS::GCForReason(JSContext* cx, JSGCInvocationKind gckind, gcreason::Reason reason)
7226 {
7227     MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
7228     cx->gc.gc(gckind, reason);
7229 }
7230 
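/*
 * Illustrative sketch (not prescriptive) of how an embedder drives an
 * incremental collection with the API below: schedule zones, start the
 * collection, then run slices until it completes.
 *
 *   JS::PrepareForFullGC(cx);
 *   JS::StartIncrementalGC(cx, GC_NORMAL, JS::gcreason::API, 10);
 *   while (JS::IsIncrementalGCInProgress(cx))
 *       JS::IncrementalGCSlice(cx, JS::gcreason::API, 10);
 */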
7231 JS_PUBLIC_API(void)
7232 JS::StartIncrementalGC(JSContext* cx, JSGCInvocationKind gckind, gcreason::Reason reason, int64_t millis)
7233 {
7234     MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
7235     cx->gc.startGC(gckind, reason, millis);
7236 }
7237 
7238 JS_PUBLIC_API(void)
7239 JS::IncrementalGCSlice(JSContext* cx, gcreason::Reason reason, int64_t millis)
7240 {
7241     cx->gc.gcSlice(reason, millis);
7242 }
7243 
7244 JS_PUBLIC_API(void)
7245 JS::FinishIncrementalGC(JSContext* cx, gcreason::Reason reason)
7246 {
7247     cx->gc.finishGC(reason);
7248 }
7249 
7250 JS_PUBLIC_API(void)
7251 JS::AbortIncrementalGC(JSContext* cx)
7252 {
7253     cx->gc.abortGC();
7254 }
7255 
7256 char16_t*
7257 JS::GCDescription::formatSliceMessage(JSContext* cx) const
7258 {
7259     UniqueChars cstr = cx->gc.stats.formatCompactSliceMessage();
7260 
7261     size_t nchars = strlen(cstr.get());
7262     UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
7263     if (!out)
7264         return nullptr;
7265     out.get()[nchars] = 0;
7266 
7267     CopyAndInflateChars(out.get(), cstr.get(), nchars);
7268     return out.release();
7269 }
7270 
7271 char16_t*
7272 JS::GCDescription::formatSummaryMessage(JSContext* cx) const
7273 {
7274     UniqueChars cstr = cx->gc.stats.formatCompactSummaryMessage();
7275 
7276     size_t nchars = strlen(cstr.get());
7277     UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
7278     if (!out)
7279         return nullptr;
7280     out.get()[nchars] = 0;
7281 
7282     CopyAndInflateChars(out.get(), cstr.get(), nchars);
7283     return out.release();
7284 }
7285 
7286 JS::dbg::GarbageCollectionEvent::Ptr
7287 JS::GCDescription::toGCEvent(JSContext* cx) const
7288 {
7289     return JS::dbg::GarbageCollectionEvent::Create(cx, cx->gc.stats, cx->gc.majorGCCount());
7290 }
7291 
7292 char16_t*
7293 JS::GCDescription::formatJSON(JSContext* cx, uint64_t timestamp) const
7294 {
7295     UniqueChars cstr = cx->gc.stats.formatJsonMessage(timestamp);
7296 
7297     size_t nchars = strlen(cstr.get());
7298     UniqueTwoByteChars out(js_pod_malloc<char16_t>(nchars + 1));
7299     if (!out)
7300         return nullptr;
7301     out.get()[nchars] = 0;
7302 
7303     CopyAndInflateChars(out.get(), cstr.get(), nchars);
7304     return out.release();
7305 }
7306 
7307 JS_PUBLIC_API(JS::GCSliceCallback)
7308 JS::SetGCSliceCallback(JSContext* cx, GCSliceCallback callback)
7309 {
7310     return cx->gc.setSliceCallback(callback);
7311 }
7312 
7313 JS_PUBLIC_API(JS::DoCycleCollectionCallback)
7314 JS::SetDoCycleCollectionCallback(JSContext* cx, JS::DoCycleCollectionCallback callback)
7315 {
7316     return cx->gc.setDoCycleCollectionCallback(callback);
7317 }
7318 
7319 JS_PUBLIC_API(JS::GCNurseryCollectionCallback)
7320 JS::SetGCNurseryCollectionCallback(JSContext* cx, GCNurseryCollectionCallback callback)
7321 {
7322     return cx->gc.setNurseryCollectionCallback(callback);
7323 }
7324 
7325 JS_PUBLIC_API(void)
7326 JS::DisableIncrementalGC(JSContext* cx)
7327 {
7328     cx->gc.disallowIncrementalGC();
7329 }
7330 
7331 JS_PUBLIC_API(bool)
7332 JS::IsIncrementalGCEnabled(JSContext* cx)
7333 {
7334     return cx->gc.isIncrementalGCEnabled();
7335 }
7336 
7337 JS_PUBLIC_API(bool)
7338 JS::IsIncrementalGCInProgress(JSContext* cx)
7339 {
7340     return cx->gc.isIncrementalGCInProgress() && !cx->gc.isVerifyPreBarriersEnabled();
7341 }
7342 
7343 JS_PUBLIC_API(bool)
7344 JS::IsIncrementalBarrierNeeded(JSContext* cx)
7345 {
7346     if (cx->isHeapBusy())
7347         return false;
7348 
7349     auto state = cx->gc.state();
7350     return state != gc::State::NotActive && state <= gc::State::Sweep;
7351 }
7352 
7353 struct IncrementalReferenceBarrierFunctor {
7354     template <typename T> void operator()(T* t) { T::writeBarrierPre(t); }
7355 };
7356 
7357 JS_PUBLIC_API(void)
7358 JS::IncrementalReferenceBarrier(GCCellPtr thing)
7359 {
7360     if (!thing)
7361         return;
7362 
7363     DispatchTyped(IncrementalReferenceBarrierFunctor(), thing);
7364 }
7365 
7366 JS_PUBLIC_API(void)
7367 JS::IncrementalValueBarrier(const Value& v)
7368 {
7369     js::GCPtrValue::writeBarrierPre(v);
7370 }
7371 
7372 JS_PUBLIC_API(void)
7373 JS::IncrementalObjectBarrier(JSObject* obj)
7374 {
7375     if (!obj)
7376         return;
7377 
7378     MOZ_ASSERT(!obj->zone()->runtimeFromMainThread()->isHeapMajorCollecting());
7379 
7380     JSObject::writeBarrierPre(obj);
7381 }
7382 
7383 JS_PUBLIC_API(bool)
7384 JS::WasIncrementalGC(JSContext* cx)
7385 {
7386     return cx->gc.isIncrementalGc();
7387 }
7388 
7389 JS::AutoDisableGenerationalGC::AutoDisableGenerationalGC(JSRuntime* rt)
7390   : gc(&rt->gc)
7391 {
7392     gc->disableGenerationalGC();
7393 }
7394 
7395 JS::AutoDisableGenerationalGC::~AutoDisableGenerationalGC()
7396 {
7397     gc->enableGenerationalGC();
7398 }
7399 
7400 JS_PUBLIC_API(bool)
7401 JS::IsGenerationalGCEnabled(JSRuntime* rt)
7402 {
7403     return rt->gc.isGenerationalGCEnabled();
7404 }
7405 
7406 uint64_t
7407 js::gc::NextCellUniqueId(JSRuntime* rt)
7408 {
7409     return rt->gc.nextCellUniqueId();
7410 }
7411 
7412 namespace js {
7413 namespace gc {
7414 namespace MemInfo {
7415 
7416 static bool
7417 GCBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7418 {
7419     CallArgs args = CallArgsFromVp(argc, vp);
7420     args.rval().setNumber(double(cx->runtime()->gc.usage.gcBytes()));
7421     return true;
7422 }
7423 
7424 static bool
7425 GCMaxBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7426 {
7427     CallArgs args = CallArgsFromVp(argc, vp);
7428     args.rval().setNumber(double(cx->runtime()->gc.tunables.gcMaxBytes()));
7429     return true;
7430 }
7431 
7432 static bool
7433 MallocBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7434 {
7435     CallArgs args = CallArgsFromVp(argc, vp);
7436     args.rval().setNumber(double(cx->runtime()->gc.getMallocBytes()));
7437     return true;
7438 }
7439 
7440 static bool
7441 MaxMallocGetter(JSContext* cx, unsigned argc, Value* vp)
7442 {
7443     CallArgs args = CallArgsFromVp(argc, vp);
7444     args.rval().setNumber(double(cx->runtime()->gc.maxMallocBytesAllocated()));
7445     return true;
7446 }
7447 
7448 static bool
7449 GCHighFreqGetter(JSContext* cx, unsigned argc, Value* vp)
7450 {
7451     CallArgs args = CallArgsFromVp(argc, vp);
7452     args.rval().setBoolean(cx->runtime()->gc.schedulingState.inHighFrequencyGCMode());
7453     return true;
7454 }
7455 
7456 static bool
7457 GCNumberGetter(JSContext* cx, unsigned argc, Value* vp)
7458 {
7459     CallArgs args = CallArgsFromVp(argc, vp);
7460     args.rval().setNumber(double(cx->runtime()->gc.gcNumber()));
7461     return true;
7462 }
7463 
7464 static bool
7465 MajorGCCountGetter(JSContext* cx, unsigned argc, Value* vp)
7466 {
7467     CallArgs args = CallArgsFromVp(argc, vp);
7468     args.rval().setNumber(double(cx->runtime()->gc.majorGCCount()));
7469     return true;
7470 }
7471 
7472 static bool
7473 MinorGCCountGetter(JSContext* cx, unsigned argc, Value* vp)
7474 {
7475     CallArgs args = CallArgsFromVp(argc, vp);
7476     args.rval().setNumber(double(cx->runtime()->gc.minorGCCount()));
7477     return true;
7478 }
7479 
7480 static bool
7481 ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7482 {
7483     CallArgs args = CallArgsFromVp(argc, vp);
7484     args.rval().setNumber(double(cx->zone()->usage.gcBytes()));
7485     return true;
7486 }
7487 
7488 static bool
7489 ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7490 {
7491     CallArgs args = CallArgsFromVp(argc, vp);
7492     args.rval().setNumber(double(cx->zone()->threshold.gcTriggerBytes()));
7493     return true;
7494 }
7495 
7496 static bool
7497 ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp)
7498 {
7499     CallArgs args = CallArgsFromVp(argc, vp);
7500     args.rval().setNumber(double(cx->zone()->threshold.allocTrigger(cx->runtime()->gc.schedulingState.inHighFrequencyGCMode())));
7501     return true;
7502 }
7503 
7504 static bool
7505 ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7506 {
7507     CallArgs args = CallArgsFromVp(argc, vp);
7508     args.rval().setNumber(double(cx->zone()->gcMallocBytes));
7509     return true;
7510 }
7511 
7512 static bool
7513 ZoneMaxMallocGetter(JSContext* cx, unsigned argc, Value* vp)
7514 {
7515     CallArgs args = CallArgsFromVp(argc, vp);
7516     args.rval().setNumber(double(cx->zone()->gcMaxMallocBytes));
7517     return true;
7518 }
7519 
7520 static bool
7521 ZoneGCDelayBytesGetter(JSContext* cx, unsigned argc, Value* vp)
7522 {
7523     CallArgs args = CallArgsFromVp(argc, vp);
7524     args.rval().setNumber(double(cx->zone()->gcDelayBytes));
7525     return true;
7526 }
7527 
7528 static bool
7529 ZoneGCHeapGrowthFactorGetter(JSContext* cx, unsigned argc, Value* vp)
7530 {
7531     CallArgs args = CallArgsFromVp(argc, vp);
7532     args.rval().setNumber(cx->zone()->threshold.gcHeapGrowthFactor());
7533     return true;
7534 }
7535 
7536 static bool
7537 ZoneGCNumberGetter(JSContext* cx, unsigned argc, Value* vp)
7538 {
7539     CallArgs args = CallArgsFromVp(argc, vp);
7540     args.rval().setNumber(double(cx->zone()->gcNumber()));
7541     return true;
7542 }
7543 
7544 #ifdef JS_MORE_DETERMINISTIC
7545 static bool
7546 DummyGetter(JSContext* cx, unsigned argc, Value* vp)
7547 {
7548     CallArgs args = CallArgsFromVp(argc, vp);
7549     args.rval().setUndefined();
7550     return true;
7551 }
7552 #endif
7553 
7554 } /* namespace MemInfo */
7555 
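// Build an object that exposes the getters above as enumerable properties
// (gcBytes, gcMaxBytes, mallocBytesRemaining, maxMalloc, gcIsHighFrequencyMode,
// gcNumber, majorGCCount, minorGCCount), plus a nested "zone" object with the
// per-zone counterparts. In JS_MORE_DETERMINISTIC builds every getter simply
// returns undefined.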
7556 JSObject*
7557 NewMemoryInfoObject(JSContext* cx)
7558 {
7559     RootedObject obj(cx, JS_NewObject(cx, nullptr));
7560     if (!obj)
7561         return nullptr;
7562 
7563     using namespace MemInfo;
7564     struct NamedGetter {
7565         const char* name;
7566         JSNative getter;
7567     } getters[] = {
7568         { "gcBytes", GCBytesGetter },
7569         { "gcMaxBytes", GCMaxBytesGetter },
7570         { "mallocBytesRemaining", MallocBytesGetter },
7571         { "maxMalloc", MaxMallocGetter },
7572         { "gcIsHighFrequencyMode", GCHighFreqGetter },
7573         { "gcNumber", GCNumberGetter },
7574         { "majorGCCount", MajorGCCountGetter },
7575         { "minorGCCount", MinorGCCountGetter }
7576     };
7577 
7578     for (auto pair : getters) {
7579 #ifdef JS_MORE_DETERMINISTIC
7580         JSNative getter = DummyGetter;
7581 #else
7582         JSNative getter = pair.getter;
7583 #endif
7584         if (!JS_DefineProperty(cx, obj, pair.name, UndefinedHandleValue,
7585                                JSPROP_ENUMERATE | JSPROP_SHARED,
7586                                getter, nullptr))
7587         {
7588             return nullptr;
7589         }
7590     }
7591 
7592     RootedObject zoneObj(cx, JS_NewObject(cx, nullptr));
7593     if (!zoneObj)
7594         return nullptr;
7595 
7596     if (!JS_DefineProperty(cx, obj, "zone", zoneObj, JSPROP_ENUMERATE))
7597         return nullptr;
7598 
7599     struct NamedZoneGetter {
7600         const char* name;
7601         JSNative getter;
7602     } zoneGetters[] = {
7603         { "gcBytes", ZoneGCBytesGetter },
7604         { "gcTriggerBytes", ZoneGCTriggerBytesGetter },
7605         { "gcAllocTrigger", ZoneGCAllocTriggerGetter },
7606         { "mallocBytesRemaining", ZoneMallocBytesGetter },
7607         { "maxMalloc", ZoneMaxMallocGetter },
7608         { "delayBytes", ZoneGCDelayBytesGetter },
7609         { "heapGrowthFactor", ZoneGCHeapGrowthFactorGetter },
7610         { "gcNumber", ZoneGCNumberGetter }
7611     };
7612 
7613     for (auto pair : zoneGetters) {
7614 #ifdef JS_MORE_DETERMINISTIC
7615         JSNative getter = DummyGetter;
7616 #else
7617         JSNative getter = pair.getter;
7618 #endif
7619         if (!JS_DefineProperty(cx, zoneObj, pair.name, UndefinedHandleValue,
7620                                JSPROP_ENUMERATE | JSPROP_SHARED,
7621                                getter, nullptr))
7622         {
7623             return nullptr;
7624         }
7625     }
7626 
7627     return obj;
7628 }
7629 
7630 const char*
7631 StateName(State state)
7632 {
7633     switch (state) {
7634 #define MAKE_CASE(name) case State::name: return #name;
7635       GCSTATES(MAKE_CASE)
7636 #undef MAKE_CASE
7637     }
7638     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("invalid gc::State enum value");
7639 }
7640 
7641 void
7642 AutoAssertHeapBusy::checkCondition(JSRuntime *rt)
7643 {
7644     this->rt = rt;
7645     MOZ_ASSERT(rt->isHeapBusy());
7646 }
7647 
7648 void
7649 AutoAssertEmptyNursery::checkCondition(JSRuntime *rt) {
7650     if (!noAlloc)
7651         noAlloc.emplace(rt);
7652     this->rt = rt;
7653     MOZ_ASSERT(rt->gc.nursery.isEmpty());
7654 }
7655 
7656 AutoEmptyNursery::AutoEmptyNursery(JSRuntime *rt)
7657   : AutoAssertEmptyNursery()
7658 {
7659     MOZ_ASSERT(!rt->mainThread.suppressGC);
7660     rt->gc.stats.suspendPhases();
7661     rt->gc.evictNursery();
7662     rt->gc.stats.resumePhases();
7663     checkCondition(rt);
7664 }
7665 
7666 } /* namespace gc */
7667 } /* namespace js */
7668 
7669 #ifdef DEBUG
7670 void
7671 js::gc::Cell::dump(FILE* fp) const
7672 {
7673     switch (getTraceKind()) {
7674       case JS::TraceKind::Object:
7675         reinterpret_cast<const JSObject*>(this)->dump(fp);
7676         break;
7677 
7678       case JS::TraceKind::String:
7679         js::DumpString(reinterpret_cast<JSString*>(const_cast<Cell*>(this)), fp);
7680         break;
7681 
7682       case JS::TraceKind::Shape:
7683         reinterpret_cast<const Shape*>(this)->dump(fp);
7684         break;
7685 
7686       default:
7687         fprintf(fp, "%s(%p)\n", JS::GCTraceKindToAscii(getTraceKind()), (void*) this);
7688     }
7689 }
7690 
7691 // For use in a debugger.
7692 void
7693 js::gc::Cell::dump() const
7694 {
7695     dump(stderr);
7696 }
7697 #endif
7698 
7699 JS_PUBLIC_API(bool)
7700 js::gc::detail::CellIsMarkedGrayIfKnown(const Cell* cell)
7701 {
7702     MOZ_ASSERT(cell);
7703     if (!cell->isTenured())
7704         return false;
7705 
7706     // We ignore the gray marking state of cells and return false in two cases:
7707     //
7708     // 1) When OOM has caused us to clear the gcGrayBitsValid_ flag.
7709     //
7710     // 2) When we are in an incremental GC and examine a cell that is in a zone
7711     // that is not being collected. Gray targets of CCWs that are marked black
7712     // by a barrier will eventually be marked black in the next GC slice.
7713     auto tc = &cell->asTenured();
7714     auto rt = tc->runtimeFromMainThread();
7715     if (!rt->areGCGrayBitsValid() ||
7716         (rt->gc.isIncrementalGCInProgress() && !tc->zone()->wasGCStarted()))
7717     {
7718         return false;
7719     }
7720 
7721     return detail::CellIsMarkedGray(tc);
7722 }
7723