/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Heap_h
#define gc_Heap_h

#include "mozilla/Atomics.h"
#include "mozilla/DebugOnly.h"

#include "ds/BitArray.h"
#include "gc/AllocKind.h"
#include "gc/GCEnum.h"
#include "js/TypeDecls.h"
#include "util/Poison.h"

namespace js {

class AutoLockGC;
class AutoLockGCBgAlloc;
class NurseryDecommitTask;

namespace gc {

class Arena;
class ArenaCellSet;
class ArenaList;
class GCRuntime;
class SortedArenaList;
class StoreBuffer;
class TenuredCell;
struct Chunk;

// Cells are aligned to CellAlignBytes (1 << CellAlignShift), so the largest
// tagged null pointer is:
const uintptr_t LargestTaggedNullCellPointer = (1 << CellAlignShift) - 1;

/*
 * The minimum cell size ends up as twice the cell alignment because the mark
 * bitmap contains one bit per CellBytesPerMarkBit bytes (which is equal to
 * CellAlignBytes) and we need two mark bits per cell.
 */
const size_t MarkBitsPerCell = 2;
const size_t MinCellSize = CellBytesPerMarkBit * MarkBitsPerCell;
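
// Illustrative numbers, not definitions: assuming CellAlignShift == 3, so that
// CellAlignBytes == CellBytesPerMarkBit == 8, each cell needs two mark bits
// and hence MinCellSize == 16 bytes, and LargestTaggedNullCellPointer == 7.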

constexpr size_t DivideAndRoundUp(size_t numerator, size_t divisor) {
  return (numerator + divisor - 1) / divisor;
}
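
// For example, DivideAndRoundUp(513, 64) == 9: 513 items need nine groups of
// 64.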

static_assert(ArenaSize % CellAlignBytes == 0,
              "Arena size must be a multiple of cell alignment");

/*
 * The mark bitmap has one bit for each possible cell start position. This
 * wastes some space for larger GC things but allows us to avoid division by
 * the cell's size when accessing the bitmap.
 */
const size_t ArenaBitmapBits = ArenaSize / CellBytesPerMarkBit;
const size_t ArenaBitmapBytes = DivideAndRoundUp(ArenaBitmapBits, 8);
const size_t ArenaBitmapWords =
    DivideAndRoundUp(ArenaBitmapBits, JS_BITS_PER_WORD);
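
// Illustrative values, assuming ArenaSize == 4096 and CellBytesPerMarkBit ==
// 8: ArenaBitmapBits == 512, ArenaBitmapBytes == 64, and ArenaBitmapWords ==
// 8 with 64-bit words (16 with 32-bit words).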

/*
 * A FreeSpan represents a contiguous sequence of free cells in an Arena. It
 * can take two forms.
 *
 * - In an empty span, |first| and |last| are both zero.
 *
 * - In a non-empty span, |first| is the offset of the first free thing in the
 *   span and |last| is the offset of the last free thing in the span, both
 *   relative to the start of the arena. Furthermore, the memory pointed to by
 *   |last| holds a FreeSpan structure that points to the next span (which may
 *   be empty); this works because sizeof(FreeSpan) is less than the smallest
 *   thingSize.
 */
class FreeSpan {
  friend class Arena;
  friend class ArenaCellIter;
  friend class ArenaFreeCellIter;

  uint16_t first;
  uint16_t last;

 public:
  // This inits just |first| and |last|; if the span is non-empty it doesn't
  // do anything with the next span stored at |last|.
  void initBounds(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
    checkRange(firstArg, lastArg, arena);
    first = firstArg;
    last = lastArg;
  }

  void initAsEmpty() {
    first = 0;
    last = 0;
  }

  // This sets |first| and |last|, and also sets the next span stored at
  // |last| as empty. (As a result, |firstArg| and |lastArg| cannot represent
  // an empty span.)
  void initFinal(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
    initBounds(firstArg, lastArg, arena);
    FreeSpan* last = nextSpanUnchecked(arena);
    last->initAsEmpty();
    checkSpan(arena);
  }

  bool isEmpty() const { return !first; }

  Arena* getArenaUnchecked() { return reinterpret_cast<Arena*>(this); }
  inline Arena* getArena();

  static size_t offsetOfFirst() { return offsetof(FreeSpan, first); }

  static size_t offsetOfLast() { return offsetof(FreeSpan, last); }

  // Like nextSpan(), but no checking of the following span is done.
  FreeSpan* nextSpanUnchecked(const Arena* arena) const {
    MOZ_ASSERT(arena && !isEmpty());
    return reinterpret_cast<FreeSpan*>(uintptr_t(arena) + last);
  }

  const FreeSpan* nextSpan(const Arena* arena) const {
    checkSpan(arena);
    return nextSpanUnchecked(arena);
  }

  MOZ_ALWAYS_INLINE TenuredCell* allocate(size_t thingSize) {
    // Eschew the usual checks, because this might be the placeholder span.
    // If this is somehow an invalid, non-empty span, checkSpan() will catch it.
    Arena* arena = getArenaUnchecked();
    checkSpan(arena);
    uintptr_t thing = uintptr_t(arena) + first;
    if (first < last) {
      // We have space for at least two more things, so do a simple
      // bump-allocate.
      first += thingSize;
    } else if (MOZ_LIKELY(first)) {
      // The last space points to the next free span (which may be empty).
      const FreeSpan* next = nextSpan(arena);
      first = next->first;
      last = next->last;
    } else {
      return nullptr;  // The span is empty.
    }
    checkSpan(arena);
    DebugOnlyPoison(reinterpret_cast<void*>(thing),
                    JS_ALLOCATED_TENURED_PATTERN, thingSize,
                    MemCheckKind::MakeUndefined);
    return reinterpret_cast<TenuredCell*>(thing);
  }

  inline void checkSpan(const Arena* arena) const;
  inline void checkRange(uintptr_t first, uintptr_t last,
                         const Arena* arena) const;
};
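
/*
 * Rough usage sketch (illustrative only, not part of this header's API
 * surface): a caller holding an arena's first free span can keep calling
 * allocate() until it returns nullptr, at which point every free cell in the
 * arena has been handed out:
 *
 *   FreeSpan* span = arena->getFirstFreeSpan();
 *   size_t thingSize = arena->getThingSize();
 *   while (TenuredCell* cell = span->allocate(thingSize)) {
 *     // ... initialize |cell| ...
 *   }
 */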

/*
 * Arenas are the allocation units of the tenured heap in the GC. An arena
 * is 4kiB in size and 4kiB-aligned. It starts with several header fields
 * followed by some bytes of padding. The remainder of the arena is filled
 * with GC things of a particular AllocKind. The padding ensures that the
 * GC thing array ends exactly at the end of the arena:
 *
 * <----------------------------------------------> = ArenaSize bytes
 * +---------------+---------+----+----+-----+----+
 * | header fields | padding | T0 | T1 | ... | Tn |
 * +---------------+---------+----+----+-----+----+
 * <-------------------------> = first thing offset
 */
class Arena {
  static JS_FRIEND_DATA const uint8_t ThingSizes[];
  static JS_FRIEND_DATA const uint8_t FirstThingOffsets[];
  static JS_FRIEND_DATA const uint8_t ThingsPerArena[];
  /*
   * The first span of free things in the arena. Most of these spans are
   * stored as offsets in free regions of the data array, and most operations
   * on FreeSpans take an Arena pointer for safety. However, the FreeSpans
   * used for allocation are stored here, at the start of an Arena, and use
   * their own address to grab the next span within the same Arena.
   */
  FreeSpan firstFreeSpan;

 public:
  /*
   * The zone that this Arena is contained within, when allocated. The offset
   * of this field must match the ArenaZoneOffset stored in js/HeapAPI.h,
   * as is statically asserted below.
   */
  JS::Zone* zone;

  /*
   * Arena::next has two purposes: when unallocated, it points to the next
   * available Arena. When allocated, it points to the next Arena in the same
   * zone and with the same alloc kind.
   */
  Arena* next;

 private:
  /*
   * One of the AllocKind constants or AllocKind::LIMIT when the arena does
   * not contain any GC things and is on the list of empty arenas in the GC
   * chunk.
   *
   * We use 8 bits for the alloc kind so the compiler can use byte-level
   * memory instructions to access it.
   */
  size_t allocKind : 8;

 private:
  /*
   * When recursive marking uses too much stack we delay marking of
   * arenas and link them into a list for later processing. This
   * uses the following fields.
   */
  static const size_t DELAYED_MARKING_FLAG_BITS = 3;
  static const size_t DELAYED_MARKING_ARENA_BITS =
      JS_BITS_PER_WORD - 8 - DELAYED_MARKING_FLAG_BITS;
  size_t onDelayedMarkingList_ : 1;
  size_t hasDelayedBlackMarking_ : 1;
  size_t hasDelayedGrayMarking_ : 1;
  size_t nextDelayedMarkingArena_ : DELAYED_MARKING_ARENA_BITS;
  static_assert(
      DELAYED_MARKING_ARENA_BITS >= JS_BITS_PER_WORD - ArenaShift,
      "Arena::nextDelayedMarkingArena_ packing assumes that ArenaShift has "
      "enough bits to cover allocKind and delayed marking state.");
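
  // Illustrative packing, assuming 64-bit words and ArenaShift == 12 (4 KiB
  // arenas): allocKind plus the three flag bits occupy 11 bits, leaving
  // DELAYED_MARKING_ARENA_BITS == 53 for nextDelayedMarkingArena_, which is
  // enough for any arena address shifted right by ArenaShift (at most
  // 64 - 12 == 52 significant bits), as the static_assert above verifies.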

  union {
    /*
     * For arenas in zones other than the atoms zone, if non-null, points
     * to an ArenaCellSet that represents the set of cells in this arena
     * that are in the nursery's store buffer.
     */
    ArenaCellSet* bufferedCells_;

    /*
     * For arenas in the atoms zone, the starting index into zone atom
     * marking bitmaps (see AtomMarking.h) of the things in this zone.
     * Atoms never refer to nursery things, so no store buffer index is
     * needed.
     */
    size_t atomBitmapStart_;
  };

 public:
  /*
   * The size of data should be |ArenaSize - offsetof(data)|, but the offset
   * is not yet known to the compiler, so we do it by hand. |firstFreeSpan|
   * takes up 8 bytes on 64-bit due to alignment requirements; the rest are
   * obvious. This constant is stored in js/HeapAPI.h.
   */
  uint8_t data[ArenaSize - ArenaHeaderSize];

  void init(JS::Zone* zoneArg, AllocKind kind, const AutoLockGC& lock);

  // Sets |firstFreeSpan| to the Arena's entire valid range, and
  // also sets the next span stored at |firstFreeSpan.last| as empty.
  void setAsFullyUnused() {
    AllocKind kind = getAllocKind();
    firstFreeSpan.first = firstThingOffset(kind);
    firstFreeSpan.last = lastThingOffset(kind);
    FreeSpan* last = firstFreeSpan.nextSpanUnchecked(this);
    last->initAsEmpty();
  }

  // Initialize an arena to its unallocated state. For arenas that were
  // previously allocated for some zone, use release() instead.
  void setAsNotAllocated() {
    firstFreeSpan.initAsEmpty();

    // Poison zone pointer to highlight UAF on released arenas in crash data.
    AlwaysPoison(&zone, JS_FREED_ARENA_PATTERN, sizeof(zone),
                 MemCheckKind::MakeNoAccess);

    allocKind = size_t(AllocKind::LIMIT);
    onDelayedMarkingList_ = 0;
    hasDelayedBlackMarking_ = 0;
    hasDelayedGrayMarking_ = 0;
    nextDelayedMarkingArena_ = 0;
    bufferedCells_ = nullptr;
  }

  // Return an allocated arena to its unallocated state.
  inline void release(const AutoLockGC& lock);

  uintptr_t address() const {
    checkAddress();
    return uintptr_t(this);
  }

  inline void checkAddress() const;

  inline Chunk* chunk() const;

  bool allocated() const {
    MOZ_ASSERT(IsAllocKind(AllocKind(allocKind)));
    return IsValidAllocKind(AllocKind(allocKind));
  }

  AllocKind getAllocKind() const {
    MOZ_ASSERT(allocated());
    return AllocKind(allocKind);
  }

  FreeSpan* getFirstFreeSpan() { return &firstFreeSpan; }

  static size_t thingSize(AllocKind kind) { return ThingSizes[size_t(kind)]; }
  static size_t thingsPerArena(AllocKind kind) {
    return ThingsPerArena[size_t(kind)];
  }
  static size_t thingsSpan(AllocKind kind) {
    return thingsPerArena(kind) * thingSize(kind);
  }

  static size_t firstThingOffset(AllocKind kind) {
    return FirstThingOffsets[size_t(kind)];
  }
  static size_t lastThingOffset(AllocKind kind) {
    return ArenaSize - thingSize(kind);
  }
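
  // Worked example with hypothetical numbers (the real values come from the
  // lookup tables above): for 32-byte things in a 4096-byte arena whose
  // header and padding occupy 64 bytes, thingsPerArena == (4096 - 64) / 32 ==
  // 126, firstThingOffset == 4096 - 126 * 32 == 64, and lastThingOffset ==
  // 4096 - 32 == 4064, so the thing array ends exactly at the arena's end.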

  size_t getThingSize() const { return thingSize(getAllocKind()); }
  size_t getThingsPerArena() const { return thingsPerArena(getAllocKind()); }
  size_t getThingsSpan() const { return getThingsPerArena() * getThingSize(); }
  size_t getFirstThingOffset() const {
    return firstThingOffset(getAllocKind());
  }

  uintptr_t thingsStart() const { return address() + getFirstThingOffset(); }
  uintptr_t thingsEnd() const { return address() + ArenaSize; }

  bool isEmpty() const {
    // Arena is empty if its first span covers the whole arena.
    firstFreeSpan.checkSpan(this);
    AllocKind kind = getAllocKind();
    return firstFreeSpan.first == firstThingOffset(kind) &&
           firstFreeSpan.last == lastThingOffset(kind);
  }

  bool hasFreeThings() const { return !firstFreeSpan.isEmpty(); }

  size_t numFreeThings(size_t thingSize) const {
    firstFreeSpan.checkSpan(this);
    size_t numFree = 0;
    const FreeSpan* span = &firstFreeSpan;
    for (; !span->isEmpty(); span = span->nextSpan(this)) {
      numFree += (span->last - span->first) / thingSize + 1;
    }
    return numFree;
  }

  size_t countFreeCells() { return numFreeThings(getThingSize()); }
  size_t countUsedCells() { return getThingsPerArena() - countFreeCells(); }

  bool inFreeList(uintptr_t thing) {
    uintptr_t base = address();
    const FreeSpan* span = &firstFreeSpan;
    for (; !span->isEmpty(); span = span->nextSpan(this)) {
      /* If the thing comes before the current span, it's not free. */
      if (thing < base + span->first) {
        return false;
      }

      /* If we find it before the end of the span, it's free. */
      if (thing <= base + span->last) {
        return true;
      }
    }
    return false;
  }

  static bool isAligned(uintptr_t thing, size_t thingSize) {
    /* Things end at the arena end. */
    uintptr_t tailOffset = ArenaSize - (thing & ArenaMask);
    return tailOffset % thingSize == 0;
  }

  bool onDelayedMarkingList() const { return onDelayedMarkingList_; }

  Arena* getNextDelayedMarking() const {
    MOZ_ASSERT(onDelayedMarkingList_);
    return reinterpret_cast<Arena*>(nextDelayedMarkingArena_ << ArenaShift);
  }

  void setNextDelayedMarkingArena(Arena* arena) {
    MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
    MOZ_ASSERT(!onDelayedMarkingList_);
    MOZ_ASSERT(!hasDelayedBlackMarking_);
    MOZ_ASSERT(!hasDelayedGrayMarking_);
    MOZ_ASSERT(!nextDelayedMarkingArena_);
    onDelayedMarkingList_ = 1;
    if (arena) {
      nextDelayedMarkingArena_ = arena->address() >> ArenaShift;
    }
  }

  void updateNextDelayedMarkingArena(Arena* arena) {
    MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
    MOZ_ASSERT(onDelayedMarkingList_);
    nextDelayedMarkingArena_ = arena ? arena->address() >> ArenaShift : 0;
  }

  bool hasDelayedMarking(MarkColor color) const {
    MOZ_ASSERT(onDelayedMarkingList_);
    return color == MarkColor::Black ? hasDelayedBlackMarking_
                                     : hasDelayedGrayMarking_;
  }

  bool hasAnyDelayedMarking() const {
    MOZ_ASSERT(onDelayedMarkingList_);
    return hasDelayedBlackMarking_ || hasDelayedGrayMarking_;
  }

  void setHasDelayedMarking(MarkColor color, bool value) {
    MOZ_ASSERT(onDelayedMarkingList_);
    if (color == MarkColor::Black) {
      hasDelayedBlackMarking_ = value;
    } else {
      hasDelayedGrayMarking_ = value;
    }
  }

  void clearDelayedMarkingState() {
    MOZ_ASSERT(onDelayedMarkingList_);
    onDelayedMarkingList_ = 0;
    hasDelayedBlackMarking_ = 0;
    hasDelayedGrayMarking_ = 0;
    nextDelayedMarkingArena_ = 0;
  }

  inline ArenaCellSet*& bufferedCells();
  inline size_t& atomBitmapStart();

  template <typename T>
  size_t finalize(JSFreeOp* fop, AllocKind thingKind, size_t thingSize);

  static void staticAsserts();
  static void checkLookupTables();

  void unmarkAll();
  void unmarkPreMarkedFreeCells();

  void arenaAllocatedDuringGC();

#ifdef DEBUG
  void checkNoMarkedFreeCells();
#endif
};

static_assert(ArenaZoneOffset == offsetof(Arena, zone),
              "The hardcoded API zone offset must match the actual offset.");

static_assert(sizeof(Arena) == ArenaSize,
              "ArenaSize must match the actual size of the Arena structure.");

static_assert(
    offsetof(Arena, data) == ArenaHeaderSize,
    "ArenaHeaderSize must match the actual size of the header fields.");

inline Arena* FreeSpan::getArena() {
  Arena* arena = getArenaUnchecked();
  arena->checkAddress();
  return arena;
}

inline void FreeSpan::checkSpan(const Arena* arena) const {
#ifdef DEBUG
  if (!first) {
    MOZ_ASSERT(!first && !last);
    return;
  }

  arena->checkAddress();
  checkRange(first, last, arena);

  // If there's a following span, it must have a higher address,
  // and the gap must be at least 2 * thingSize.
  const FreeSpan* next = nextSpanUnchecked(arena);
  if (next->first) {
    checkRange(next->first, next->last, arena);
    size_t thingSize = arena->getThingSize();
    MOZ_ASSERT(last + 2 * thingSize <= next->first);
  }
#endif
}

inline void FreeSpan::checkRange(uintptr_t first, uintptr_t last,
                                 const Arena* arena) const {
#ifdef DEBUG
  MOZ_ASSERT(arena);
  MOZ_ASSERT(first <= last);
  AllocKind thingKind = arena->getAllocKind();
  MOZ_ASSERT(first >= Arena::firstThingOffset(thingKind));
  MOZ_ASSERT(last <= Arena::lastThingOffset(thingKind));
  MOZ_ASSERT((last - first) % Arena::thingSize(thingKind) == 0);
#endif
}

/*
 * The tail of the chunk info is shared between all chunks in the system, both
 * nursery and tenured. This structure is locatable from any GC pointer by
 * aligning to 1MiB.
 */
struct ChunkTrailer {
  // Construct a Nursery ChunkTrailer.
  ChunkTrailer(JSRuntime* rt, StoreBuffer* sb)
      : location(ChunkLocation::Nursery), storeBuffer(sb), runtime(rt) {}

  // Construct a Tenured heap ChunkTrailer.
  explicit ChunkTrailer(JSRuntime* rt)
      : location(ChunkLocation::TenuredHeap),
        storeBuffer(nullptr),
        runtime(rt) {}

 public:
  // The index of the chunk in the nursery, or LocationTenuredHeap.
  ChunkLocation location;
  uint32_t : 32;  // padding

  // The store buffer for pointers from tenured things to things in this
  // chunk. Will be non-null only for nursery chunks.
  StoreBuffer* storeBuffer;

  // Provide quick access to the runtime from absolutely anywhere.
  JSRuntime* runtime;
};

static_assert(sizeof(ChunkTrailer) == ChunkTrailerSize,
              "ChunkTrailer size must match the API defined size.");

// The chunk header (located at the end of the chunk to preserve arena
// alignment).
struct ChunkInfo {
  void init() { next = prev = nullptr; }

 private:
  friend class ChunkPool;
  friend class js::NurseryDecommitTask;
  Chunk* next;
  Chunk* prev;

 public:
  /* Free arenas are linked together with arena.next. */
  Arena* freeArenasHead;

#if JS_BITS_PER_WORD == 32
  /*
   * Calculating sizes and offsets is simpler if sizeof(ChunkInfo) is
   * architecture-independent.
   */
  char padding[24];
#endif

  /*
   * Decommitted arenas are tracked by a bitmap in the chunk header. We use
   * this offset to start our search iteration close to a decommitted arena
   * that we can allocate.
   */
  uint32_t lastDecommittedArenaOffset;

  /* Number of free arenas, either committed or decommitted. */
  uint32_t numArenasFree;

  /* Number of free, committed arenas. */
  uint32_t numArenasFreeCommitted;
};

/*
 * Calculating ArenasPerChunk:
 *
 * In order to figure out how many Arenas will fit in a chunk, we need to know
 * how much extra space is available after we allocate the header data. This
 * is a problem because the header size depends on the number of arenas in the
 * chunk. The two dependent fields are bitmap and decommittedArenas.
 *
 * For the mark bitmap, we know that each arena will use a fixed number of full
 * bytes: ArenaBitmapBytes. The full size of the header data is this number
 * multiplied by the eventual number of arenas we have in the header. We,
 * conceptually, distribute this header data among the individual arenas and do
 * not include it in the header. This way we do not have to worry about its
 * variable size: it gets attached to the variable number we are computing.
 *
 * For the decommitted arena bitmap, we only have 1 bit per arena, so this
 * technique will not work. Instead, we observe that we do not have enough
 * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
 * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
 * This will not become 63 bytes unless we double the data required in the
 * header. Therefore, we just compute the number of bytes required to track
 * every possible arena and do not worry about slop bits, since there are too
 * few to usefully allocate.
 *
 * To actually compute the number of arenas we can allocate in a chunk, we
 * divide the amount of available space less the header info (not including
 * the mark bitmap which is distributed into the arena size) by the size of
 * the arena (with the mark bitmap bytes it uses).
 */
const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / CHAR_BIT;
const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkTrailer) -
                                   sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;

#ifdef JS_GC_SMALL_CHUNK_SIZE
static_assert(ArenasPerChunk == 62,
              "Do not accidentally change our heap's density.");
#else
static_assert(ArenasPerChunk == 252,
              "Do not accidentally change our heap's density.");
#endif
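
// Worked example for the default (non-JS_GC_SMALL_CHUNK_SIZE) configuration,
// assuming 1 MiB chunks, 4 KiB arenas and 64-byte per-arena mark bitmaps:
// BytesPerArenaWithHeader == 4096 + 64 == 4160, the fixed header data
// (ChunkTrailer, ChunkInfo and the decommit bitmap) comes to roughly a
// hundred bytes, so ChunkBytesAvailable is a little under 1048576 and
// ArenasPerChunk == ChunkBytesAvailable / 4160 == 252, matching the
// static_assert above.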

/* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
struct ChunkBitmap {
  volatile uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];

 public:
  ChunkBitmap() = default;

  MOZ_ALWAYS_INLINE void getMarkWordAndMask(const TenuredCell* cell,
                                            ColorBit colorBit,
                                            uintptr_t** wordp,
                                            uintptr_t* maskp) {
    MOZ_ASSERT(size_t(colorBit) < MarkBitsPerCell);
    detail::GetGCThingMarkWordAndMask(uintptr_t(cell), colorBit, wordp, maskp);
  }

  MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool markBit(const TenuredCell* cell,
                                                    ColorBit colorBit) {
    uintptr_t *word, mask;
    getMarkWordAndMask(cell, colorBit, &word, &mask);
    return *word & mask;
  }

  MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarkedAny(
      const TenuredCell* cell) {
    return markBit(cell, ColorBit::BlackBit) ||
           markBit(cell, ColorBit::GrayOrBlackBit);
  }

  MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarkedBlack(
      const TenuredCell* cell) {
    return markBit(cell, ColorBit::BlackBit);
  }

  MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarkedGray(
      const TenuredCell* cell) {
    return !markBit(cell, ColorBit::BlackBit) &&
           markBit(cell, ColorBit::GrayOrBlackBit);
  }
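
  // Mark-state encoding implied by the predicates above:
  //   unmarked: BlackBit clear, GrayOrBlackBit clear
  //   gray:     BlackBit clear, GrayOrBlackBit set
  //   black:    BlackBit set (GrayOrBlackBit is then ignored)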

  // The return value indicates if the cell went from unmarked to marked.
  MOZ_ALWAYS_INLINE bool markIfUnmarked(const TenuredCell* cell,
                                        MarkColor color) {
    uintptr_t *word, mask;
    getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
    if (*word & mask) {
      return false;
    }
    if (color == MarkColor::Black) {
      *word |= mask;
    } else {
      /*
       * We use getMarkWordAndMask to recalculate both mask and word as
       * doing just mask << color may overflow the mask.
       */
      getMarkWordAndMask(cell, ColorBit::GrayOrBlackBit, &word, &mask);
      if (*word & mask) {
        return false;
      }
      *word |= mask;
    }
    return true;
  }
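
  // Note, derived from the logic above: marking an already-gray cell black
  // sets the black bit and returns true; the gray bit is left set, which is
  // why isMarkedGray() also requires the black bit to be clear.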

  MOZ_ALWAYS_INLINE void markBlack(const TenuredCell* cell) {
    uintptr_t *word, mask;
    getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
    *word |= mask;
  }

  MOZ_ALWAYS_INLINE void copyMarkBit(TenuredCell* dst, const TenuredCell* src,
                                     ColorBit colorBit) {
    uintptr_t *srcWord, srcMask;
    getMarkWordAndMask(src, colorBit, &srcWord, &srcMask);

    uintptr_t *dstWord, dstMask;
    getMarkWordAndMask(dst, colorBit, &dstWord, &dstMask);

    *dstWord &= ~dstMask;
    if (*srcWord & srcMask) {
      *dstWord |= dstMask;
    }
  }

  MOZ_ALWAYS_INLINE void unmark(const TenuredCell* cell) {
    uintptr_t *word, mask;
    getMarkWordAndMask(cell, ColorBit::BlackBit, &word, &mask);
    *word &= ~mask;
    getMarkWordAndMask(cell, ColorBit::GrayOrBlackBit, &word, &mask);
    *word &= ~mask;
  }

  void clear() { memset((void*)bitmap, 0, sizeof(bitmap)); }

  uintptr_t* arenaBits(Arena* arena) {
    static_assert(
        ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
        "We assume that the part of the bitmap corresponding to the arena "
        "has the exact number of words so we do not need to deal with a word "
        "that covers bits from two arenas.");

    uintptr_t *word, unused;
    getMarkWordAndMask(reinterpret_cast<TenuredCell*>(arena->address()),
                       ColorBit::BlackBit, &word, &unused);
    return word;
  }
};

static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap),
              "Ensure our ChunkBitmap actually covers all arenas.");
static_assert(js::gc::ChunkMarkBitmapBits == ArenaBitmapBits * ArenasPerChunk,
              "Ensure that the mark bitmap has the right number of bits.");

using PerArenaBitmap = BitArray<ArenasPerChunk>;

const size_t ChunkPadSize = ChunkSize - (sizeof(Arena) * ArenasPerChunk) -
                            sizeof(ChunkBitmap) - sizeof(PerArenaBitmap) -
                            sizeof(ChunkInfo) - sizeof(ChunkTrailer);
static_assert(ChunkPadSize < BytesPerArenaWithHeader,
              "If the chunk padding is larger than an arena, we should have "
              "one more arena.");

/*
 * Chunks contain arenas and associated data structures (mark bitmap, delayed
 * marking state).
 */
struct Chunk {
  Arena arenas[ArenasPerChunk];

  /* Pad to full size to ensure cache alignment of ChunkInfo. */
  uint8_t padding[ChunkPadSize];

  ChunkBitmap bitmap;
  PerArenaBitmap decommittedArenas;
  ChunkInfo info;
  ChunkTrailer trailer;

  static Chunk* fromAddress(uintptr_t addr) {
    addr &= ~ChunkMask;
    return reinterpret_cast<Chunk*>(addr);
  }

  static bool withinValidRange(uintptr_t addr) {
    uintptr_t offset = addr & ChunkMask;
    return Chunk::fromAddress(addr)->isNurseryChunk()
               ? offset < ChunkSize - sizeof(ChunkTrailer)
               : offset < ArenasPerChunk * ArenaSize;
  }

  static size_t arenaIndex(uintptr_t addr) {
    MOZ_ASSERT(!Chunk::fromAddress(addr)->isNurseryChunk());
    MOZ_ASSERT(withinValidRange(addr));
    return (addr & ChunkMask) >> ArenaShift;
  }

  uintptr_t address() const {
    uintptr_t addr = reinterpret_cast<uintptr_t>(this);
    MOZ_ASSERT(!(addr & ChunkMask));
    return addr;
  }

  bool unused() const { return info.numArenasFree == ArenasPerChunk; }

  bool hasAvailableArenas() const { return info.numArenasFree != 0; }

  bool isNurseryChunk() const { return trailer.storeBuffer; }

  Arena* allocateArena(GCRuntime* gc, JS::Zone* zone, AllocKind kind,
                       const AutoLockGC& lock);

  void releaseArena(GCRuntime* gc, Arena* arena, const AutoLockGC& lock);
  void recycleArena(Arena* arena, SortedArenaList& dest, size_t thingsPerArena);

  MOZ_MUST_USE bool decommitOneFreeArena(GCRuntime* gc, AutoLockGC& lock);
  void decommitAllArenas();

  // This will decommit each unused arena that is not already decommitted. It
  // performs a system call for each arena but is only used during OOM.
  void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);

  static Chunk* allocate(GCRuntime* gc);
  void init(GCRuntime* gc);

 private:
  /* Search for a decommitted arena to allocate. */
  unsigned findDecommittedArenaOffset();
  Arena* fetchNextDecommittedArena();

  void addArenaToFreeList(GCRuntime* gc, Arena* arena);
  void addArenaToDecommittedList(const Arena* arena);

  void updateChunkListAfterAlloc(GCRuntime* gc, const AutoLockGC& lock);
  void updateChunkListAfterFree(GCRuntime* gc, const AutoLockGC& lock);

 public:
  /* Unlink and return the freeArenasHead. */
  Arena* fetchNextFreeArena(GCRuntime* gc);
};

static_assert(
    sizeof(Chunk) == ChunkSize,
    "Ensure the hardcoded chunk size definition actually matches the struct.");
static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
              "The hardcoded API bitmap offset must match the actual offset.");
static_assert(js::gc::ChunkRuntimeOffset ==
                  offsetof(Chunk, trailer) + offsetof(ChunkTrailer, runtime),
              "The hardcoded API runtime offset must match the actual offset.");
static_assert(
    js::gc::ChunkLocationOffset ==
        offsetof(Chunk, trailer) + offsetof(ChunkTrailer, location),
    "The hardcoded API location offset must match the actual offset.");
static_assert(
    js::gc::ChunkStoreBufferOffset ==
        offsetof(Chunk, trailer) + offsetof(ChunkTrailer, storeBuffer),
    "The hardcoded API storeBuffer offset must match the actual offset.");

inline void Arena::checkAddress() const {
  mozilla::DebugOnly<uintptr_t> addr = uintptr_t(this);
  MOZ_ASSERT(addr);
  MOZ_ASSERT(!(addr & ArenaMask));
  MOZ_ASSERT(Chunk::withinValidRange(addr));
}

inline Chunk* Arena::chunk() const { return Chunk::fromAddress(address()); }

inline bool InFreeList(Arena* arena, void* thing) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(thing);
  MOZ_ASSERT(Arena::isAligned(addr, arena->getThingSize()));
  return arena->inFreeList(addr);
}

static const int32_t ChunkLocationOffsetFromLastByte =
    int32_t(gc::ChunkLocationOffset) - int32_t(gc::ChunkMask);
static const int32_t ChunkStoreBufferOffsetFromLastByte =
    int32_t(gc::ChunkStoreBufferOffset) - int32_t(gc::ChunkMask);
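
// For illustration: for any pointer |p| into a chunk, (p | ChunkMask) is the
// address of the chunk's last byte, so adding these (negative) offsets to it
// yields the addresses of the trailer's |location| and |storeBuffer| fields
// without first masking |p| down to the chunk base.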

// Cell header stored before all nursery cells.
struct alignas(gc::CellAlignBytes) NurseryCellHeader {
  // Store zone pointer with the trace kind in the lowest three bits.
  const uintptr_t zoneAndTraceKind;

  // We only need to store a subset of trace kinds so this doesn't cover the
  // full range.
  static const uintptr_t TraceKindMask = 3;

  static uintptr_t MakeValue(JS::Zone* const zone, JS::TraceKind kind) {
    MOZ_ASSERT(uintptr_t(kind) < TraceKindMask);
    MOZ_ASSERT((uintptr_t(zone) & TraceKindMask) == 0);
    return uintptr_t(zone) | uintptr_t(kind);
  }

  NurseryCellHeader(JS::Zone* const zone, JS::TraceKind kind)
      : zoneAndTraceKind(MakeValue(zone, kind)) {}

  JS::Zone* zone() const {
    return reinterpret_cast<JS::Zone*>(zoneAndTraceKind & ~TraceKindMask);
  }

  JS::TraceKind traceKind() const {
    return JS::TraceKind(zoneAndTraceKind & TraceKindMask);
  }

  static const NurseryCellHeader* from(const Cell* cell) {
    MOZ_ASSERT(IsInsideNursery(cell));
    return reinterpret_cast<const NurseryCellHeader*>(
        uintptr_t(cell) - sizeof(NurseryCellHeader));
  }
};
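
// For illustration: MakeValue() relies on the low bits of the |zone| pointer
// being zero (which it asserts), so zoneAndTraceKind can hold the zone pointer
// and the small trace kind value side by side; zone() and traceKind() above
// separate them again using ~TraceKindMask and TraceKindMask.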

static_assert(uintptr_t(JS::TraceKind::Object) <=
              NurseryCellHeader::TraceKindMask);
static_assert(uintptr_t(JS::TraceKind::String) <=
              NurseryCellHeader::TraceKindMask);
static_assert(uintptr_t(JS::TraceKind::BigInt) <=
              NurseryCellHeader::TraceKindMask);

} /* namespace gc */

namespace debug {

// Utility functions meant to be called from an interactive debugger.
enum class MarkInfo : int {
  BLACK = 0,
  GRAY = 1,
  UNMARKED = -1,
  NURSERY = -2,
};

// Get the mark color for a cell, in a way easily usable from a debugger.
MOZ_NEVER_INLINE MarkInfo GetMarkInfo(js::gc::Cell* cell);

// Sample usage from gdb:
//
//   (gdb) p $word = js::debug::GetMarkWordAddress(obj)
//   $1 = (uintptr_t *) 0x7fa56d5fe360
//   (gdb) p/x $mask = js::debug::GetMarkMask(obj, js::gc::GRAY)
//   $2 = 0x200000000
//   (gdb) watch *$word
//   Hardware watchpoint 7: *$word
//   (gdb) cond 7 *$word & $mask
//   (gdb) cont
//
// Note that this is *not* a watchpoint on a single bit. It is a watchpoint on
// the whole word, which will trigger whenever the word changes and the
// selected bit is set after the change.
//
// So if the bit changing is the desired one, this is exactly what you want.
// But if a different bit changes (either set or cleared), you may still stop
// execution if the $mask bit happened to already be set. gdb does not expose
// enough information to restrict the watchpoint to just a single bit.

// Return the address of the word containing the mark bits for the given cell,
// or nullptr if the cell is in the nursery.
MOZ_NEVER_INLINE uintptr_t* GetMarkWordAddress(js::gc::Cell* cell);

// Return the mask for the given cell and color bit, or 0 if the cell is in the
// nursery.
MOZ_NEVER_INLINE uintptr_t GetMarkMask(js::gc::Cell* cell, uint32_t colorBit);

} /* namespace debug */
} /* namespace js */

#endif /* gc_Heap_h */