/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Zone_h
#define gc_Zone_h

#include "mozilla/Atomics.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MemoryReporting.h"

#include "jscntxt.h"

#include "ds/SplayTree.h"
#include "gc/FindSCCs.h"
#include "gc/GCRuntime.h"
#include "js/GCHashTable.h"
#include "js/TracingAPI.h"
#include "vm/MallocProvider.h"
#include "vm/TypeInference.h"

namespace js {

namespace jit {
class JitZone;
} // namespace jit

namespace gc {

// This class encapsulates the data that determines when we need to do a zone GC.
class ZoneHeapThreshold
{
    // The "growth factor" for computing our next thresholds after a GC.
    double gcHeapGrowthFactor_;

    // GC trigger threshold for allocations on the GC heap.
    size_t gcTriggerBytes_;

  public:
    ZoneHeapThreshold()
      : gcHeapGrowthFactor_(3.0),
        gcTriggerBytes_(0)
    {}

    double gcHeapGrowthFactor() const { return gcHeapGrowthFactor_; }
    size_t gcTriggerBytes() const { return gcTriggerBytes_; }
    double allocTrigger(bool highFrequencyGC) const;

    void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
                       const GCSchedulingTunables& tunables, const GCSchedulingState& state,
                       const AutoLockGC& lock);
    void updateForRemovedArena(const GCSchedulingTunables& tunables);

  private:
    static double computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
                                                         const GCSchedulingTunables& tunables,
                                                         const GCSchedulingState& state);
    static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
                                          JSGCInvocationKind gckind,
                                          const GCSchedulingTunables& tunables,
                                          const AutoLockGC& lock);
};
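
// Descriptive note: the GC's scheduling code compares a zone's heap usage
// against this threshold. A rough sketch (illustrative only; the real check
// lives in the GC's allocation paths, not here):
//
//     if (zone->usage.gcBytes() >= zone->threshold.gcTriggerBytes())
//         ; // trigger a zone GC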

struct UniqueIdGCPolicy {
    static bool needsSweep(Cell** cell, uint64_t* value);
};

// Maps a Cell* to a unique, 64-bit id.
using UniqueIdMap = GCHashMap<Cell*,
                              uint64_t,
                              PointerHasher<Cell*, 3>,
                              SystemAllocPolicy,
                              UniqueIdGCPolicy>;

extern uint64_t NextCellUniqueId(JSRuntime* rt);

} // namespace gc
} // namespace js

namespace JS {

// A zone is a collection of compartments. Every compartment belongs to exactly
// one zone. In Firefox, there is roughly one zone per tab along with a system
// zone for everything else. Zones mainly serve as boundaries for garbage
// collection. Unlike compartments, they have no special security properties.
//
// Every GC thing belongs to exactly one zone. GC things from the same zone but
// different compartments can share an arena (4k page). GC things from different
// zones cannot be stored in the same arena. The garbage collector is capable of
// collecting one zone at a time; it cannot collect at the granularity of
// compartments.
//
// GC things are tied to zones and compartments as follows:
//
// - JSObjects belong to a compartment and cannot be shared between
//   compartments. If an object needs to point to a JSObject in a different
//   compartment, regardless of zone, it must go through a cross-compartment
//   wrapper. Each compartment keeps track of its outgoing wrappers in a table.
//
// - JSStrings do not belong to any particular compartment, but they do belong
//   to a zone. Thus, two different compartments in the same zone can point to a
//   JSString. When a string needs to be wrapped, we copy it if it's in a
//   different zone and do nothing if it's in the same zone. Thus, transferring
//   strings within a zone is very efficient.
//
// - Shapes and base shapes belong to a compartment and cannot be shared between
//   compartments. A base shape holds a pointer to its compartment. Shapes find
//   their compartment via their base shape. JSObjects find their compartment
//   via their shape.
//
// - Scripts are also compartment-local and cannot be shared. A script points to
//   its compartment.
//
// - Type objects and JitCode objects belong to a compartment and cannot be
//   shared. However, there is no mechanism to obtain their compartments.
//
// A zone remains alive as long as any GC things in the zone are alive. A
// compartment remains alive as long as any JSObjects, scripts, shapes, or base
// shapes within it are alive.
//
// We always guarantee that a zone has at least one live compartment by refusing
// to delete the last compartment in a live zone.
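//
// For example, the public JS_WrapValue API follows these rules. The sketch
// below is illustrative embedder code, not part of this header;
// |strFromOtherZone| is a hypothetical string allocated in another zone:
//
//     JS::RootedValue v(cx, JS::StringValue(strFromOtherZone));
//     if (!JS_WrapValue(cx, &v))  // same zone: no-op; other zone: copies the
//         return false;           // string. Objects get wrappers instead.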
struct Zone : public JS::shadow::Zone,
              public js::gc::GraphNodeBase<JS::Zone>,
              public js::MallocProvider<JS::Zone>
{
    explicit Zone(JSRuntime* rt);
    ~Zone();
    bool init(bool isSystem);

    void findOutgoingEdges(js::gc::ComponentFinder<JS::Zone>& finder);

    void discardJitCode(js::FreeOp* fop);

    void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                size_t* typePool,
                                size_t* baselineStubsOptimized,
                                size_t* uniqueIdMap);

    void resetGCMallocBytes();
    void setGCMaxMallocBytes(size_t value);
    void updateMallocCounter(size_t nbytes) {
        // Note: this code may be run from worker threads. We tolerate any
        // thread races when updating gcMallocBytes.
        gcMallocBytes -= ptrdiff_t(nbytes);
        if (MOZ_UNLIKELY(isTooMuchMalloc()))
            onTooMuchMalloc();
    }

    bool isTooMuchMalloc() const { return gcMallocBytes <= 0; }
    void onTooMuchMalloc();

    void* onOutOfMemory(js::AllocFunction allocFunc, size_t nbytes, void* reallocPtr = nullptr) {
        if (!js::CurrentThreadCanAccessRuntime(runtime_))
            return nullptr;
        return runtimeFromMainThread()->onOutOfMemory(allocFunc, nbytes, reallocPtr);
    }
    void reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }

    void beginSweepTypes(js::FreeOp* fop, bool releaseTypes);

    bool hasMarkedCompartments();

    void scheduleGC() { MOZ_ASSERT(!runtimeFromMainThread()->isHeapBusy()); gcScheduled_ = true; }
    void unscheduleGC() { gcScheduled_ = false; }
    bool isGCScheduled() { return gcScheduled_ && canCollect(); }

    void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
    bool isPreservingCode() const { return gcPreserveCode_; }

    bool canCollect();

    void notifyObservingDebuggers();

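    // Descriptive note (not a machine-checked invariant): during a collection
    // a zone's state advances roughly in declaration order, NoGC -> Mark ->
    // MarkGray -> Sweep -> Finished, with Compact entered only when the GC
    // also relocates arenas.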
    enum GCState {
        NoGC,
        Mark,
        MarkGray,
        Sweep,
        Finished,
        Compact
    };
    void setGCState(GCState state) {
        MOZ_ASSERT(runtimeFromMainThread()->isHeapBusy());
        MOZ_ASSERT_IF(state != NoGC, canCollect());
        gcState_ = state;
        if (state == Finished)
            notifyObservingDebuggers();
    }

    bool isCollecting() const {
        if (runtimeFromMainThread()->isHeapCollecting())
            return gcState_ != NoGC;
        else
            return needsIncrementalBarrier();
    }

    bool isCollectingFromAnyThread() const {
        if (runtimeFromAnyThread()->isHeapCollecting())
            return gcState_ != NoGC;
        else
            return needsIncrementalBarrier();
    }

    // If this returns true, all object tracing must be done with a GC marking
    // tracer.
    bool requireGCTracer() const {
        JSRuntime* rt = runtimeFromAnyThread();
        return rt->isHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
    }

    bool isGCMarking() {
        if (runtimeFromMainThread()->isHeapCollecting())
            return gcState_ == Mark || gcState_ == MarkGray;
        else
            return needsIncrementalBarrier();
    }

    bool wasGCStarted() const { return gcState_ != NoGC; }
    bool isGCMarkingBlack() { return gcState_ == Mark; }
    bool isGCMarkingGray() { return gcState_ == MarkGray; }
    bool isGCSweeping() { return gcState_ == Sweep; }
    bool isGCFinished() { return gcState_ == Finished; }
    bool isGCCompacting() { return gcState_ == Compact; }
    bool isGCSweepingOrCompacting() { return gcState_ == Sweep || gcState_ == Compact; }

    // Get a number that is incremented whenever this zone is collected, and
    // possibly at other times too.
    uint64_t gcNumber();

    bool compileBarriers() const { return compileBarriers(needsIncrementalBarrier()); }
    bool compileBarriers(bool needsIncrementalBarrier) const {
        return needsIncrementalBarrier ||
               runtimeFromMainThread()->gcZeal() == js::gc::ZealVerifierPreValue;
    }

    enum ShouldUpdateJit { DontUpdateJit, UpdateJit };
    void setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit);
    const bool* addressOfNeedsIncrementalBarrier() const { return &needsIncrementalBarrier_; }

    js::jit::JitZone* getJitZone(JSContext* cx) { return jitZone_ ? jitZone_ : createJitZone(cx); }
    js::jit::JitZone* jitZone() { return jitZone_; }

    bool isAtomsZone() const { return runtimeFromAnyThread()->isAtomsZone(this); }
    bool isSelfHostingZone() const { return runtimeFromAnyThread()->isSelfHostingZone(this); }

    void prepareForCompacting();

#ifdef DEBUG
    // For testing purposes, return the index of the zone group in which this
    // zone was swept during the last GC.
    unsigned lastZoneGroupIndex() { return gcLastZoneGroupIndex; }
#endif

    using DebuggerVector = js::Vector<js::Debugger*, 0, js::SystemAllocPolicy>;

  private:
    DebuggerVector* debuggers;

    using LogTenurePromotionQueue = js::Vector<JSObject*, 0, js::SystemAllocPolicy>;
    LogTenurePromotionQueue awaitingTenureLogging;

    void sweepBreakpoints(js::FreeOp* fop);
    void sweepUniqueIds(js::FreeOp* fop);
    void sweepWeakMaps();
    void sweepCompartments(js::FreeOp* fop, bool keepAtleastOne, bool lastGC);

    js::jit::JitZone* createJitZone(JSContext* cx);

    bool isQueuedForBackgroundSweep() {
        return isOnList();
    }

    // Side map for storing unique ids for cells, independent of address.
    js::gc::UniqueIdMap uniqueIds_;

  public:
    bool hasDebuggers() const { return debuggers && debuggers->length(); }
    DebuggerVector* getDebuggers() const { return debuggers; }
    DebuggerVector* getOrCreateDebuggers(JSContext* cx);

    void enqueueForPromotionToTenuredLogging(JSObject& obj) {
        MOZ_ASSERT(hasDebuggers());
        MOZ_ASSERT(!IsInsideNursery(&obj));
        js::AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!awaitingTenureLogging.append(&obj))
            oomUnsafe.crash("Zone::enqueueForPromotionToTenuredLogging");
    }
    void logPromotionsToTenured();

    js::gc::ArenaLists arenas;

    js::TypeZone types;

    /* Live weakmaps in this zone. */
    mozilla::LinkedList<js::WeakMapBase> gcWeakMapList;

    // The set of compartments in this zone.
    typedef js::Vector<JSCompartment*, 1, js::SystemAllocPolicy> CompartmentVector;
    CompartmentVector compartments;

    // This zone's gray roots.
    typedef js::Vector<js::gc::Cell*, 0, js::SystemAllocPolicy> GrayRootVector;
    GrayRootVector gcGrayRoots;

    // This zone's weak edges found via graph traversal during marking,
    // preserved for re-scanning during sweeping.
    using WeakEdges = js::Vector<js::gc::TenuredCell**, 0, js::SystemAllocPolicy>;
    WeakEdges gcWeakRefs;

    /*
     * Mapping from not yet marked keys to a vector of all values that the key
     * maps to in any live weak map.
     */
    js::gc::WeakKeyTable gcWeakKeys;

    // A set of edges from this zone to other zones.
    //
    // This is used during GC while calculating zone groups to record edges that
    // can't be determined by examining this zone by itself.
    ZoneSet gcZoneGroupEdges;

    // Zones with dead proxies require an extra scan through the wrapper map,
    // so track whether any dead proxies are known to exist.
    bool hasDeadProxies;

    // Malloc counter to measure memory pressure for GC scheduling. It runs from
    // gcMaxMallocBytes down to zero. This counter should be used only when it's
    // not possible to know the size of a free.
    mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;

    // GC trigger threshold for allocations on the C heap.
    size_t gcMaxMallocBytes;

    // Whether a GC has been triggered as a result of gcMallocBytes falling
    // below zero.
    //
    // This should be a bool, but Atomic only supports 32-bit and pointer-sized
    // types.
    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;

    // Track heap usage under this Zone.
    js::gc::HeapUsage usage;

    // Thresholds used to trigger GC.
    js::gc::ZoneHeapThreshold threshold;

    // Amount of data to allocate before triggering a new incremental slice for
    // the current GC.
    size_t gcDelayBytes;

    // Per-zone data for use by an embedder.
    void* data;

    bool isSystem;

    bool usedByExclusiveThread;

    // True when there are active frames.
    bool active;

    mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;

    // Creates a HashNumber based on getUniqueId. Returns false on OOM.
    bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
        uint64_t uid;
        if (!getUniqueId(cell, &uid))
            return false;
        *hashp = js::HashNumber(uid >> 32) ^ js::HashNumber(uid & 0xFFFFFFFF);
        return true;
    }

    // Puts an existing UID in |uidp|, or creates a new UID for this Cell and
    // puts that into |uidp|. Returns false on OOM.
    bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
        MOZ_ASSERT(uidp);
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));

        // Get an existing uid, if one has been set.
        auto p = uniqueIds_.lookupForAdd(cell);
        if (p) {
            *uidp = p->value();
            return true;
        }

        // Set a new uid on the cell.
        *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
        if (!uniqueIds_.add(p, cell, *uidp))
            return false;

        // If the cell was in the nursery, hopefully unlikely, then we need to
        // tell the nursery about it so that it can sweep the uid if the thing
        // does not get tenured.
        js::AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell))
            oomUnsafe.crash("failed to allocate tracking data for a nursery uid");
        return true;
    }
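
    // A minimal usage sketch (illustrative only; |zone| and |cell| are
    // hypothetical names assumed valid here):
    //
    //     uint64_t uid;
    //     if (!zone->getUniqueId(cell, &uid))
    //         return false;  // OOM; propagate to the caller
    //     // |uid| now identifies |cell| stably, even across a moving GC.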

    // Return true if this cell has a UID associated with it.
    bool hasUniqueId(js::gc::Cell* cell) {
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
        return uniqueIds_.has(cell);
    }

    // Transfer an id from another cell. This must only be called on behalf of a
    // moving GC. This method is infallible.
    void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
        MOZ_ASSERT(src != tgt);
        MOZ_ASSERT(!IsInsideNursery(tgt));
        MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
        uniqueIds_.rekeyIfMoved(src, tgt);
    }

    // Remove any unique id associated with this Cell.
    void removeUniqueId(js::gc::Cell* cell) {
        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
        uniqueIds_.remove(cell);
    }

    // Off-thread parsing should not result in any UIDs being created.
    void assertNoUniqueIdsInZone() const {
        MOZ_ASSERT(uniqueIds_.count() == 0);
    }

#ifdef JSGC_HASH_TABLE_CHECKS
    // Assert that the UniqueId table has been redirected successfully.
    void checkUniqueIdTableAfterMovingGC();
#endif

  private:
    js::jit::JitZone* jitZone_;

    GCState gcState_;
    bool gcScheduled_;
    bool gcPreserveCode_;
    bool jitUsingBarriers_;

    // Allow zones to be linked into a list
    friend class js::gc::ZoneList;
    static Zone * const NotOnList;
    Zone* listNext_;
    bool isOnList() const;
    Zone* nextZone() const;

    friend bool js::CurrentThreadCanAccessZone(Zone* zone);
    friend class js::gc::GCRuntime;
};

} // namespace JS

namespace js {

// Using the atoms zone without holding the exclusive access lock is dangerous
// because worker threads may be using it simultaneously. Therefore, it's
// better to skip the atoms zone when iterating over zones. If you need to
// iterate over the atoms zone, consider taking the exclusive access lock first.
enum ZoneSelector {
    WithAtoms,
    SkipAtoms
};

class ZonesIter
{
    gc::AutoEnterIteration iterMarker;
    JS::Zone** it;
    JS::Zone** end;

  public:
    ZonesIter(JSRuntime* rt, ZoneSelector selector) : iterMarker(&rt->gc) {
        it = rt->gc.zones.begin();
        end = rt->gc.zones.end();

        if (selector == SkipAtoms) {
            MOZ_ASSERT(atAtomsZone(rt));
            it++;
        }
    }

    bool atAtomsZone(JSRuntime* rt);

    bool done() const { return it == end; }

    void next() {
        MOZ_ASSERT(!done());
        do {
            it++;
        } while (!done() && (*it)->usedByExclusiveThread);
    }

    JS::Zone* get() const {
        MOZ_ASSERT(!done());
        return *it;
    }

    operator JS::Zone*() const { return get(); }
    JS::Zone* operator->() const { return get(); }
};
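
// A typical traversal (an illustrative sketch; |rt| is assumed to be a valid
// JSRuntime*):
//
//     for (js::ZonesIter zone(rt, js::SkipAtoms); !zone.done(); zone.next())
//         MOZ_ASSERT(!zone->isAtomsZone());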

struct CompartmentsInZoneIter
{
    explicit CompartmentsInZoneIter(JS::Zone* zone) : zone(zone) {
        it = zone->compartments.begin();
    }

    bool done() const {
        MOZ_ASSERT(it);
        return it < zone->compartments.begin() ||
               it >= zone->compartments.end();
    }
    void next() {
        MOZ_ASSERT(!done());
        it++;
    }

    JSCompartment* get() const {
        MOZ_ASSERT(it);
        return *it;
    }

    operator JSCompartment*() const { return get(); }
    JSCompartment* operator->() const { return get(); }

  private:
    JS::Zone* zone;
    JSCompartment** it;

    CompartmentsInZoneIter()
      : zone(nullptr), it(nullptr)
    {}

    // This is for the benefit of CompartmentsIterT::comp.
    friend class mozilla::Maybe<CompartmentsInZoneIter>;
};

// This iterator iterates over all the compartments in a given set of zones. The
// set of zones is determined by iterating ZonesIterT.
template<class ZonesIterT>
class CompartmentsIterT
{
    gc::AutoEnterIteration iterMarker;
    ZonesIterT zone;
    mozilla::Maybe<CompartmentsInZoneIter> comp;

  public:
    explicit CompartmentsIterT(JSRuntime* rt)
      : iterMarker(&rt->gc), zone(rt)
    {
        if (zone.done())
            comp.emplace();
        else
            comp.emplace(zone);
    }

    CompartmentsIterT(JSRuntime* rt, ZoneSelector selector)
      : iterMarker(&rt->gc), zone(rt, selector)
    {
        if (zone.done())
            comp.emplace();
        else
            comp.emplace(zone);
    }

    bool done() const { return zone.done(); }

    void next() {
        MOZ_ASSERT(!done());
        MOZ_ASSERT(!comp.ref().done());
        comp->next();
        if (comp->done()) {
            comp.reset();
            zone.next();
            if (!zone.done())
                comp.emplace(zone);
        }
    }

    JSCompartment* get() const {
        MOZ_ASSERT(!done());
        return *comp;
    }

    operator JSCompartment*() const { return get(); }
    JSCompartment* operator->() const { return get(); }
};

typedef CompartmentsIterT<ZonesIter> CompartmentsIter;
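
// As above, compartment iteration is a simple loop (an illustrative sketch;
// |rt| is assumed to be a valid JSRuntime*):
//
//     for (js::CompartmentsIter c(rt, js::SkipAtoms); !c.done(); c.next())
//         MOZ_ASSERT(c->zone());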

/*
 * Allocation policy that uses Zone::pod_malloc and friends, so that memory
 * pressure is accounted for on the zone. This is suitable for memory associated
 * with GC things allocated in the zone.
 *
 * Since it doesn't hold a JSContext (those may not live long enough), it can't
 * report out-of-memory conditions itself; the caller must check for OOM and
 * take the appropriate action.
 *
 * FIXME bug 647103 - replace these *AllocPolicy names.
 */
class ZoneAllocPolicy
{
    Zone* const zone;

  public:
    MOZ_IMPLICIT ZoneAllocPolicy(Zone* zone) : zone(zone) {}

    template <typename T>
    T* maybe_pod_malloc(size_t numElems) {
        return zone->maybe_pod_malloc<T>(numElems);
    }

    template <typename T>
    T* maybe_pod_calloc(size_t numElems) {
        return zone->maybe_pod_calloc<T>(numElems);
    }

    template <typename T>
    T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
        return zone->maybe_pod_realloc<T>(p, oldSize, newSize);
    }

    template <typename T>
    T* pod_malloc(size_t numElems) {
        return zone->pod_malloc<T>(numElems);
    }

    template <typename T>
    T* pod_calloc(size_t numElems) {
        return zone->pod_calloc<T>(numElems);
    }

    template <typename T>
    T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
        return zone->pod_realloc<T>(p, oldSize, newSize);
    }

    void free_(void* p) { js_free(p); }
    void reportAllocOverflow() const {}

    bool checkSimulatedOOM() const {
        return !js::oom::ShouldFailWithOOM();
    }
};
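
// A typical use is as the allocation policy of a container whose memory
// should be accounted to a zone (an illustrative sketch; |zone| is assumed
// to be a valid Zone*):
//
//     js::Vector<uint32_t, 8, js::ZoneAllocPolicy> v(zone);
//     if (!v.append(42))
//         return false;  // OOM must be handled by the caller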

} // namespace js

#endif // gc_Zone_h