/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_StoreBuffer_h
#define gc_StoreBuffer_h

#include "mozilla/Attributes.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/ReentrancyGuard.h"

#include <algorithm>

#include "ds/BitArray.h"
#include "ds/LifoAlloc.h"
#include "gc/Nursery.h"
#include "js/AllocPolicy.h"
#include "js/MemoryMetrics.h"
#include "js/UniquePtr.h"
#include "threading/Mutex.h"

namespace js {

#ifdef DEBUG
extern bool CurrentThreadIsGCMarking();
#endif

namespace gc {

// Map from all trace kinds to the base GC type.
template <JS::TraceKind kind>
struct MapTraceKindToType {};

#define DEFINE_TRACE_KIND_MAP(name, type, _, _1)   \
  template <>                                      \
  struct MapTraceKindToType<JS::TraceKind::name> { \
    using Type = type;                             \
  };
JS_FOR_EACH_TRACEKIND(DEFINE_TRACE_KIND_MAP);
#undef DEFINE_TRACE_KIND_MAP

// Map from a possibly-derived type to the base GC type.
template <typename T>
struct BaseGCType {
  using type =
      typename MapTraceKindToType<JS::MapTypeToTraceKind<T>::kind>::Type;
  static_assert(std::is_base_of_v<type, T>, "Failed to find base type");
};
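
// For example (illustrative): a derived type such as js::NativeObject has
// trace kind JS::TraceKind::Object, so BaseGCType<NativeObject>::type is
// JSObject.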

class Arena;
class ArenaCellSet;

#ifdef DEBUG
extern bool CurrentThreadHasLockedGC();
#endif

/*
 * BufferableRef represents an abstract reference for use in the generational
 * GC's remembered set. Entries in the store buffer that cannot be represented
 * with the simple pointer-to-a-pointer scheme must derive from this class and
 * use the generic store buffer interface.
 *
 * A single BufferableRef entry in the generic buffer can represent many entries
 * in the remembered set.  For example js::OrderedHashTableRef represents all
 * the incoming edges corresponding to keys in an ordered hash table.
 */
class BufferableRef {
 public:
  virtual void trace(JSTracer* trc) = 0;
  bool maybeInRememberedSet(const Nursery&) const { return true; }
};
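
// For illustration only, a minimal sketch (with hypothetical names) of how a
// container holding many nursery-directed edges could use the generic buffer:
// one buffered entry re-traces the whole container during a minor GC.
//
//   class MyTableRef : public BufferableRef {
//     MyTable* table_;  // hypothetical container holding nursery pointers
//
//    public:
//     explicit MyTableRef(MyTable* table) : table_(table) {}
//     void trace(JSTracer* trc) override { table_->traceNurseryEdges(trc); }
//   };
//
//   // Recorded via the generic interface, e.g.:
//   //   storeBuffer->putGeneric(MyTableRef(table));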

typedef HashSet<void*, PointerHasher<void*>, SystemAllocPolicy> EdgeSet;

/* The size of a single block of store buffer storage space. */
static const size_t LifoAllocBlockSize = 8 * 1024;

/*
 * The StoreBuffer observes all writes that occur in the system and performs
 * efficient filtering of them to derive a remembered set for nursery GC.
 */
class StoreBuffer {
  friend class mozilla::ReentrancyGuard;

  /* The size at which a block is about to overflow for the generic buffer. */
  static const size_t GenericBufferLowAvailableThreshold =
      LifoAllocBlockSize / 2;

  /* The size at which other store buffers are about to overflow. */
  static const size_t BufferOverflowThresholdBytes = 128 * 1024;

  /*
   * This buffer holds only a single type of edge. Using this buffer is more
   * efficient than the generic buffer when many writes will be to the same
   * type of edge: e.g. Value or Cell*.
   */
  template <typename T>
  struct MonoTypeBuffer {
    /* The canonical set of stores. */
    typedef HashSet<T, typename T::Hasher, SystemAllocPolicy> StoreSet;
    StoreSet stores_;

    /*
     * A one element cache in front of the canonical set to speed up
     * temporary instances of HeapPtr.
     */
    T last_;

    StoreBuffer* owner_;

    JS::GCReason gcReason_;

    /* Maximum number of entries before we request a minor GC. */
    const static size_t MaxEntries = BufferOverflowThresholdBytes / sizeof(T);

    explicit MonoTypeBuffer(StoreBuffer* owner, JS::GCReason reason)
        : last_(T()), owner_(owner), gcReason_(reason) {}

    void clear() {
      last_ = T();
      stores_.clear();
    }

    /* Add one item to the buffer. */
    void put(const T& t) {
      sinkStore();
      last_ = t;
    }

    /* Remove an item from the store buffer. */
    void unput(const T& v) {
      // Fast, hashless remove of last put.
      if (last_ == v) {
        last_ = T();
        return;
      }
      stores_.remove(v);
    }

    /* Move any buffered stores to the canonical store set. */
    void sinkStore() {
      if (last_) {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!stores_.put(last_)) {
          oomUnsafe.crash("Failed to allocate for MonoTypeBuffer::put.");
        }
      }
      last_ = T();

      if (MOZ_UNLIKELY(stores_.count() > MaxEntries)) {
        owner_->setAboutToOverflow(gcReason_);
      }
    }

    /* Trace the source of all edges in the store buffer. */
    void trace(TenuringTracer& mover);

    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
      return stores_.shallowSizeOfExcludingThis(mallocSizeOf);
    }

    bool isEmpty() const { return last_ == T() && stores_.empty(); }

   private:
    MonoTypeBuffer(const MonoTypeBuffer& other) = delete;
    MonoTypeBuffer& operator=(const MonoTypeBuffer& other) = delete;
  };

  struct WholeCellBuffer {
    UniquePtr<LifoAlloc> storage_;
    ArenaCellSet* stringHead_;
    ArenaCellSet* nonStringHead_;
    StoreBuffer* owner_;

    explicit WholeCellBuffer(StoreBuffer* owner)
        : storage_(nullptr),
          stringHead_(nullptr),
          nonStringHead_(nullptr),
          owner_(owner) {}

    [[nodiscard]] bool init();

    void clear();

    bool isAboutToOverflow() const {
      return !storage_->isEmpty() &&
             storage_->used() > BufferOverflowThresholdBytes;
    }

    void trace(TenuringTracer& mover);

    inline void put(const Cell* cell);

    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
      return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
    }

    bool isEmpty() const {
      MOZ_ASSERT_IF(!stringHead_ && !nonStringHead_,
                    !storage_ || storage_->isEmpty());
      return !stringHead_ && !nonStringHead_;
    }

   private:
    ArenaCellSet* allocateCellSet(Arena* arena);

    WholeCellBuffer(const WholeCellBuffer& other) = delete;
    WholeCellBuffer& operator=(const WholeCellBuffer& other) = delete;
  };

  struct GenericBuffer {
    UniquePtr<LifoAlloc> storage_;
    StoreBuffer* owner_;

    explicit GenericBuffer(StoreBuffer* owner)
        : storage_(nullptr), owner_(owner) {}

    [[nodiscard]] bool init();

    void clear() {
      if (storage_) {
        storage_->used() ? storage_->releaseAll() : storage_->freeAll();
      }
    }

    bool isAboutToOverflow() const {
      return !storage_->isEmpty() && storage_->availableInCurrentChunk() <
                                         GenericBufferLowAvailableThreshold;
    }

    /* Trace all generic edges. */
    void trace(JSTracer* trc);

    template <typename T>
    void put(const T& t) {
      MOZ_ASSERT(storage_);

      /* Ensure T is derived from BufferableRef. */
      (void)static_cast<const BufferableRef*>(&t);

      AutoEnterOOMUnsafeRegion oomUnsafe;
      unsigned size = sizeof(T);
      unsigned* sizep = storage_->pod_malloc<unsigned>();
      if (!sizep) {
        oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
      }
      *sizep = size;

      T* tp = storage_->new_<T>(t);
      if (!tp) {
        oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
      }

      if (isAboutToOverflow()) {
        owner_->setAboutToOverflow(JS::GCReason::FULL_GENERIC_BUFFER);
      }
    }

    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
      return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
    }

    bool isEmpty() const { return !storage_ || storage_->isEmpty(); }

   private:
    GenericBuffer(const GenericBuffer& other) = delete;
    GenericBuffer& operator=(const GenericBuffer& other) = delete;
  };

  template <typename Edge>
  struct PointerEdgeHasher {
    using Lookup = Edge;
    static HashNumber hash(const Lookup& l) {
      return mozilla::HashGeneric(l.edge);
    }
    static bool match(const Edge& k, const Lookup& l) { return k == l; }
  };

  template <typename T>
  struct CellPtrEdge {
    T** edge = nullptr;

    CellPtrEdge() = default;
    explicit CellPtrEdge(T** v) : edge(v) {}
    bool operator==(const CellPtrEdge& other) const {
      return edge == other.edge;
    }
    bool operator!=(const CellPtrEdge& other) const {
      return edge != other.edge;
    }

    bool maybeInRememberedSet(const Nursery& nursery) const {
      MOZ_ASSERT(IsInsideNursery(*edge));
      return !nursery.isInside(edge);
    }

    void trace(TenuringTracer& mover) const;

    explicit operator bool() const { return edge != nullptr; }

    using Hasher = PointerEdgeHasher<CellPtrEdge<T>>;
  };

  using ObjectPtrEdge = CellPtrEdge<JSObject>;
  using StringPtrEdge = CellPtrEdge<JSString>;
  using BigIntPtrEdge = CellPtrEdge<JS::BigInt>;

  struct ValueEdge {
    JS::Value* edge;

    ValueEdge() : edge(nullptr) {}
    explicit ValueEdge(JS::Value* v) : edge(v) {}
    bool operator==(const ValueEdge& other) const { return edge == other.edge; }
    bool operator!=(const ValueEdge& other) const { return edge != other.edge; }

    Cell* deref() const {
      return edge->isGCThing() ? static_cast<Cell*>(edge->toGCThing())
                               : nullptr;
    }

    bool maybeInRememberedSet(const Nursery& nursery) const {
      MOZ_ASSERT(IsInsideNursery(deref()));
      return !nursery.isInside(edge);
    }

    void trace(TenuringTracer& mover) const;

    explicit operator bool() const { return edge != nullptr; }

    using Hasher = PointerEdgeHasher<ValueEdge>;
  };

  struct SlotsEdge {
    // These definitions must match those in HeapSlot::Kind.
    const static int SlotKind = 0;
    const static int ElementKind = 1;

    uintptr_t objectAndKind_;  // NativeObject* | Kind
    uint32_t start_;
    uint32_t count_;

    SlotsEdge() : objectAndKind_(0), start_(0), count_(0) {}
    SlotsEdge(NativeObject* object, int kind, uint32_t start, uint32_t count)
        : objectAndKind_(uintptr_t(object) | kind),
          start_(start),
          count_(count) {
      MOZ_ASSERT((uintptr_t(object) & 1) == 0);
      MOZ_ASSERT(kind <= 1);
      MOZ_ASSERT(count > 0);
      MOZ_ASSERT(start + count > start);
    }

    NativeObject* object() const {
      return reinterpret_cast<NativeObject*>(objectAndKind_ & ~1);
    }
    int kind() const { return (int)(objectAndKind_ & 1); }

    bool operator==(const SlotsEdge& other) const {
      return objectAndKind_ == other.objectAndKind_ && start_ == other.start_ &&
             count_ == other.count_;
    }

    bool operator!=(const SlotsEdge& other) const { return !(*this == other); }

    // True if this SlotsEdge range overlaps with the other SlotsEdge range,
    // false if they do not overlap.
    bool overlaps(const SlotsEdge& other) const {
      if (objectAndKind_ != other.objectAndKind_) {
        return false;
      }

      // Widen our range by one on each side so that we consider
      // adjacent-but-not-actually-overlapping ranges as overlapping. This
      // is particularly useful for coalescing a series of increasing or
      // decreasing single index writes 0, 1, 2, ..., N into a SlotsEdge
      // range of elements [0, N].
      uint32_t end = start_ + count_ + 1;
      uint32_t start = start_ > 0 ? start_ - 1 : 0;
      MOZ_ASSERT(start < end);

      uint32_t otherEnd = other.start_ + other.count_;
      MOZ_ASSERT(other.start_ <= otherEnd);
      return (start <= other.start_ && other.start_ <= end) ||
             (start <= otherEnd && otherEnd <= end);
    }

    // Destructively make this SlotsEdge range the union of the other
    // SlotsEdge range and this one. A precondition is that the ranges must
    // overlap.
    void merge(const SlotsEdge& other) {
      MOZ_ASSERT(overlaps(other));
      uint32_t end = std::max(start_ + count_, other.start_ + other.count_);
      start_ = std::min(start_, other.start_);
      count_ = end - start_;
    }

    bool maybeInRememberedSet(const Nursery& n) const {
      return !IsInsideNursery(reinterpret_cast<Cell*>(object()));
    }

    void trace(TenuringTracer& mover) const;

    explicit operator bool() const { return objectAndKind_ != 0; }

    struct Hasher {
      using Lookup = SlotsEdge;
      static HashNumber hash(const Lookup& l) {
        return mozilla::HashGeneric(l.objectAndKind_, l.start_, l.count_);
      }
      static bool match(const SlotsEdge& k, const Lookup& l) { return k == l; }
    };
  };

  // The GC runs tasks that may access the store buffer in parallel and so must
  // take a lock. The mutator may only access the store buffer from the main
  // thread.
  inline void CheckAccess() const {
#ifdef DEBUG
    if (JS::RuntimeHeapIsBusy()) {
      MOZ_ASSERT(!CurrentThreadIsGCMarking());
      lock_.assertOwnedByCurrentThread();
    } else {
      MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
    }
#endif
  }

  template <typename Buffer, typename Edge>
  void unput(Buffer& buffer, const Edge& edge) {
    CheckAccess();
    if (!isEnabled()) {
      return;
    }
    mozilla::ReentrancyGuard g(*this);
    buffer.unput(edge);
  }

  template <typename Buffer, typename Edge>
  void put(Buffer& buffer, const Edge& edge) {
    CheckAccess();
    if (!isEnabled()) {
      return;
    }
    mozilla::ReentrancyGuard g(*this);
    if (edge.maybeInRememberedSet(nursery_)) {
      buffer.put(edge);
    }
  }

  Mutex lock_;

  MonoTypeBuffer<ValueEdge> bufferVal;
  MonoTypeBuffer<StringPtrEdge> bufStrCell;
  MonoTypeBuffer<BigIntPtrEdge> bufBigIntCell;
  MonoTypeBuffer<ObjectPtrEdge> bufObjCell;
  MonoTypeBuffer<SlotsEdge> bufferSlot;
  WholeCellBuffer bufferWholeCell;
  GenericBuffer bufferGeneric;

  JSRuntime* runtime_;
  const Nursery& nursery_;

  bool aboutToOverflow_;
  bool enabled_;
  bool mayHavePointersToDeadCells_;
#ifdef DEBUG
  bool mEntered; /* For ReentrancyGuard. */
#endif

 public:
#ifdef DEBUG
  bool markingNondeduplicatable;
#endif

  explicit StoreBuffer(JSRuntime* rt, const Nursery& nursery);
  [[nodiscard]] bool enable();

  void disable();
  bool isEnabled() const { return enabled_; }

  bool isEmpty() const;
  void clear();

  const Nursery& nursery() const { return nursery_; }

  /* Get the overflowed status. */
  bool isAboutToOverflow() const { return aboutToOverflow_; }

  /*
   * Brain transplants may add whole cell buffer entries for dead cells. We must
   * evict the nursery prior to sweeping arenas if any such entries are present.
   */
  bool mayHavePointersToDeadCells() const {
    return mayHavePointersToDeadCells_;
  }

  /* Insert a single edge into the buffer/remembered set. */
  void putValue(JS::Value* vp) { put(bufferVal, ValueEdge(vp)); }
  void unputValue(JS::Value* vp) { unput(bufferVal, ValueEdge(vp)); }

  void putCell(JSString** strp) { put(bufStrCell, StringPtrEdge(strp)); }
  void unputCell(JSString** strp) { unput(bufStrCell, StringPtrEdge(strp)); }

  void putCell(JS::BigInt** bip) { put(bufBigIntCell, BigIntPtrEdge(bip)); }
  void unputCell(JS::BigInt** bip) { unput(bufBigIntCell, BigIntPtrEdge(bip)); }

  void putCell(JSObject** strp) { put(bufObjCell, ObjectPtrEdge(strp)); }
  void unputCell(JSObject** strp) { unput(bufObjCell, ObjectPtrEdge(strp)); }

  void putSlot(NativeObject* obj, int kind, uint32_t start, uint32_t count) {
    SlotsEdge edge(obj, kind, start, count);
    if (bufferSlot.last_.overlaps(edge)) {
      bufferSlot.last_.merge(edge);
    } else {
      put(bufferSlot, edge);
    }
  }
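
  // For example (illustrative, for a tenured obj): putSlot(obj, ElementKind,
  // 0, 1) followed by putSlot(obj, ElementKind, 1, 1) takes the merge path
  // above, so bufferSlot.last_ ends up as a single edge covering elements
  // [0, 2) rather than two separate entries.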

  inline void putWholeCell(Cell* cell);

  /* Insert an entry into the generic buffer. */
  template <typename T>
  void putGeneric(const T& t) {
    put(bufferGeneric, t);
  }

  void setMayHavePointersToDeadCells() { mayHavePointersToDeadCells_ = true; }

  /* Methods to trace the source of all edges in the store buffer. */
  void traceValues(TenuringTracer& mover) { bufferVal.trace(mover); }
  void traceCells(TenuringTracer& mover) {
    bufStrCell.trace(mover);
    bufBigIntCell.trace(mover);
    bufObjCell.trace(mover);
  }
  void traceSlots(TenuringTracer& mover) { bufferSlot.trace(mover); }
  void traceWholeCells(TenuringTracer& mover) { bufferWholeCell.trace(mover); }
  void traceGenericEntries(JSTracer* trc) { bufferGeneric.trace(trc); }

  /* For use by our owned buffers and for testing. */
  void setAboutToOverflow(JS::GCReason);

  void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
                              JS::GCSizes* sizes);

  void checkEmpty() const;

  // For use by the GC only.
  void lock() { lock_.lock(); }
  void unlock() { lock_.unlock(); }
};

// A set of cells in an arena used to implement the whole cell store buffer.
class ArenaCellSet {
  friend class StoreBuffer;

  using ArenaCellBits = BitArray<MaxArenaCellIndex>;

  // The arena this relates to.
  Arena* arena;

  // Pointer to next set forming a linked list.
  ArenaCellSet* next;

  // Bit vector for each possible cell start position.
  ArenaCellBits bits;

#ifdef DEBUG
  // The minor GC number when this was created. This object should not survive
  // past the next minor collection.
  const uint64_t minorGCNumberAtCreation;
#endif

  // Construct the empty sentinel object.
  constexpr ArenaCellSet()
      : arena(nullptr),
        next(nullptr)
#ifdef DEBUG
        ,
        minorGCNumberAtCreation(0)
#endif
  {
  }

 public:
  using WordT = ArenaCellBits::WordT;
  const size_t BitsPerWord = ArenaCellBits::bitsPerElement;
  const size_t NumWords = ArenaCellBits::numSlots;

  ArenaCellSet(Arena* arena, ArenaCellSet* next);

  bool hasCell(const TenuredCell* cell) const {
    return hasCell(getCellIndex(cell));
  }

  void putCell(const TenuredCell* cell) { putCell(getCellIndex(cell)); }

  bool isEmpty() const { return this == &Empty; }

  bool hasCell(size_t cellIndex) const;

  void putCell(size_t cellIndex);

  void check() const;

  WordT getWord(size_t wordIndex) const { return bits.getWord(wordIndex); }

  void trace(TenuringTracer& mover);

  // Sentinel object used for all empty sets.
  //
  // We use a sentinel because it simplifies the JIT code slightly as we can
  // assume all arenas have a cell set.
  static ArenaCellSet Empty;

  static size_t getCellIndex(const TenuredCell* cell);
  static void getWordIndexAndMask(size_t cellIndex, size_t* wordp,
                                  uint32_t* maskp);

  // Attempt to trigger a minor GC if free space in the nursery (where these
  // objects are allocated) falls below this threshold.
  static const size_t NurseryFreeThresholdBytes = 64 * 1024;

  static size_t offsetOfArena() { return offsetof(ArenaCellSet, arena); }
  static size_t offsetOfBits() { return offsetof(ArenaCellSet, bits); }
};
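
// An illustrative summary (a sketch based on the declarations above, not new
// API): StoreBuffer::putWholeCell records a tenured cell by giving the cell's
// Arena an ArenaCellSet, linked into the whole cell buffer via |next|, and
// setting the bit for the cell's start position with putCell(). During a minor
// GC, traceWholeCells() walks the linked sets and calls trace() on each.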

// Post-write barrier implementation for GC cells.

// Implement the post-write barrier for nursery allocatable cell type |T|. Call
// this from |T::postWriteBarrier|.
template <typename T>
MOZ_ALWAYS_INLINE void PostWriteBarrierImpl(void* cellp, T* prev, T* next) {
  MOZ_ASSERT(cellp);

  // If the target needs an entry, add it.
  StoreBuffer* buffer;
  if (next && (buffer = next->storeBuffer())) {
    // If we know that the prev has already inserted an entry, we can skip
    // doing the lookup to add the new entry. Note that we cannot safely
    // assert the presence of the entry because it may have been added
    // via a different store buffer.
    if (prev && prev->storeBuffer()) {
      return;
    }
    buffer->putCell(static_cast<T**>(cellp));
    return;
  }

  // Remove the prev entry if the new value does not need it. There will only
  // be a prev entry if the prev value was in the nursery.
  if (prev && (buffer = prev->storeBuffer())) {
    buffer->unputCell(static_cast<T**>(cellp));
  }
}

template <typename T>
MOZ_ALWAYS_INLINE void PostWriteBarrier(T** vp, T* prev, T* next) {
  static_assert(std::is_base_of_v<Cell, T>);
  static_assert(!std::is_same_v<Cell, T> && !std::is_same_v<TenuredCell, T>);

  if constexpr (!std::is_base_of_v<TenuredCell, T>) {
    using BaseT = typename BaseGCType<T>::type;
    PostWriteBarrierImpl<BaseT>(vp, prev, next);
    return;
  }

  MOZ_ASSERT(!IsInsideNursery(next));
}
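
// For illustration only, a minimal sketch (hypothetical field and method) of a
// caller: a setter that rewrites a JSObject* field passes the edge's address,
// the old value and the new value, so the store buffer entry is added or
// removed as needed.
//
//   void setTarget(JSObject* next) {
//     JSObject* prev = target_;
//     target_ = next;
//     PostWriteBarrier(&target_, prev, next);
//   }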

// Used when we don't have a specific edge to put in the store buffer.
void PostWriteBarrierCell(Cell* cell, Cell* prev, Cell* next);

} /* namespace gc */
} /* namespace js */

#endif /* gc_StoreBuffer_h */