/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "gc/Allocator.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/TimeStamp.h"

#include <type_traits>

#include "gc/GCInternals.h"
#include "gc/GCLock.h"
#include "gc/GCProbes.h"
#include "gc/Nursery.h"
#include "threading/CpuCount.h"
#include "util/Poison.h"
#include "vm/GetterSetter.h"
#include "vm/JSContext.h"
#include "vm/PropMap.h"
#include "vm/Runtime.h"
#include "vm/StringType.h"
#include "vm/TraceLogging.h"

#include "gc/ArenaList-inl.h"
#include "gc/Heap-inl.h"
#include "gc/PrivateIterators-inl.h"
#include "vm/JSObject-inl.h"

using mozilla::TimeDuration;
using mozilla::TimeStamp;

using namespace js;
using namespace gc;

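// Illustrative usage sketch (not part of this file; `cx` and `clasp` are
// assumed to come from the caller): fallible callers typically try a NoGC
// allocation first and fall back to CanGC, which may collect.
//
//   JSObject* obj = js::AllocateObject<NoGC>(cx, AllocKind::OBJECT4, 0,
//                                            gc::DefaultHeap, clasp);
//   if (!obj) {
//     obj = js::AllocateObject<CanGC>(cx, AllocKind::OBJECT4, 0,
//                                     gc::DefaultHeap, clasp);
//   }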
template <AllowGC allowGC /* = CanGC */>
JSObject* js::AllocateObject(JSContext* cx, AllocKind kind,
                             size_t nDynamicSlots, gc::InitialHeap heap,
                             const JSClass* clasp,
                             AllocSite* site /* = nullptr */) {
  MOZ_ASSERT(IsObjectAllocKind(kind));
  size_t thingSize = Arena::thingSize(kind);

  MOZ_ASSERT(thingSize >= sizeof(JSObject_Slots0));
  static_assert(
      sizeof(JSObject_Slots0) >= MinCellSize,
      "All allocations must be at least the allocator-imposed minimum size.");

  MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNativeObject());

  MOZ_ASSERT_IF(site && site->initialHeap() == TenuredHeap,
                heap == TenuredHeap);

  // We cannot trigger GC or make runtime assertions when nursery allocation
  // is suppressed, either explicitly or because we are off-thread.
  if (cx->isNurseryAllocSuppressed()) {
    JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize,
                                                         nDynamicSlots);
    if (MOZ_UNLIKELY(allowGC && !obj)) {
      ReportOutOfMemory(cx);
    }
    return obj;
  }

  JSRuntime* rt = cx->runtime();
  if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
    return nullptr;
  }

  if (cx->nursery().isEnabled() && heap != TenuredHeap) {
    if (!site) {
      site = cx->zone()->unknownAllocSite();
    }

    JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(
        cx, thingSize, nDynamicSlots, clasp, site);
    if (obj) {
      return obj;
    }

    // Our most common non-jit allocation path is NoGC; thus, if we fail the
    // alloc and cannot GC, we *must* return nullptr here so that the caller
    // will do a CanGC allocation to clear the nursery. Failing to do so will
    // cause all allocations on this path to land in Tenured, and we will not
    // get the benefit of the nursery.
    if (!allowGC) {
      return nullptr;
    }
  }

  return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize,
                                                 nDynamicSlots);
}
template JSObject* js::AllocateObject<NoGC>(JSContext* cx, gc::AllocKind kind,
                                            size_t nDynamicSlots,
                                            gc::InitialHeap heap,
                                            const JSClass* clasp,
                                            gc::AllocSite* site);
template JSObject* js::AllocateObject<CanGC>(JSContext* cx, gc::AllocKind kind,
                                             size_t nDynamicSlots,
                                             gc::InitialHeap heap,
                                             const JSClass* clasp,
                                             gc::AllocSite* site);

// Attempt to allocate a new JSObject out of the nursery. If there is not
// enough room in the nursery or there is an OOM, this method will return
// nullptr.
template <AllowGC allowGC>
JSObject* GCRuntime::tryNewNurseryObject(JSContext* cx, size_t thingSize,
                                         size_t nDynamicSlots,
                                         const JSClass* clasp,
                                         AllocSite* site) {
  MOZ_RELEASE_ASSERT(!cx->isHelperThreadContext());

  MOZ_ASSERT(cx->isNurseryAllocAllowed());
  MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
  MOZ_ASSERT(!cx->zone()->isAtomsZone());

  JSObject* obj =
      cx->nursery().allocateObject(site, thingSize, nDynamicSlots, clasp);
  if (obj) {
    return obj;
  }

  if (allowGC && !cx->suppressGC) {
    cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery.
    if (cx->nursery().isEnabled()) {
      return cx->nursery().allocateObject(site, thingSize, nDynamicSlots,
                                          clasp);
    }
  }
  return nullptr;
}

template <AllowGC allowGC>
JSObject* GCRuntime::tryNewTenuredObject(JSContext* cx, AllocKind kind,
                                         size_t thingSize,
                                         size_t nDynamicSlots) {
  ObjectSlots* slotsHeader = nullptr;
  if (nDynamicSlots) {
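    // The allocation covers the ObjectSlots header as well as the slots
    // themselves: allocCount() folds the header's size, in HeapSlot-sized
    // units, into the requested count.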
    HeapSlot* allocation =
        cx->maybe_pod_malloc<HeapSlot>(ObjectSlots::allocCount(nDynamicSlots));
    if (MOZ_UNLIKELY(!allocation)) {
      if (allowGC) {
        ReportOutOfMemory(cx);
      }
      return nullptr;
    }

    slotsHeader = new (allocation) ObjectSlots(nDynamicSlots, 0);
    Debug_SetSlotRangeToCrashOnTouch(slotsHeader->slots(), nDynamicSlots);
  }

  JSObject* obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);

  if (obj) {
    if (nDynamicSlots) {
      static_cast<NativeObject*>(obj)->initSlots(slotsHeader->slots());
      AddCellMemory(obj, ObjectSlots::allocSize(nDynamicSlots),
                    MemoryUse::ObjectSlots);
    }
  } else {
    js_free(slotsHeader);
  }

  return obj;
}

// Attempt to allocate a new string out of the nursery. If there is not enough
// room in the nursery or there is an OOM, this method will return nullptr.
template <AllowGC allowGC>
JSString* GCRuntime::tryNewNurseryString(JSContext* cx, size_t thingSize,
                                         AllocKind kind) {
  MOZ_ASSERT(IsNurseryAllocable(kind));
  MOZ_ASSERT(cx->isNurseryAllocAllowed());
  MOZ_ASSERT(!cx->isHelperThreadContext());
  MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
  MOZ_ASSERT(!cx->zone()->isAtomsZone());

  AllocSite* site = cx->zone()->unknownAllocSite();
  Cell* cell = cx->nursery().allocateString(site, thingSize);
  if (cell) {
    return static_cast<JSString*>(cell);
  }

  if (allowGC && !cx->suppressGC) {
    cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery, and
    // other heuristics can disable nursery strings for this zone.
    if (cx->nursery().isEnabled() && cx->zone()->allocNurseryStrings) {
      return static_cast<JSString*>(
          cx->nursery().allocateString(site, thingSize));
    }
  }
  return nullptr;
}

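// Illustrative sketch: StringAllocT is one of the string types instantiated
// via FOR_EACH_NURSERY_STRING_ALLOCKIND below, so a caller might write e.g.
//
//   auto* s = js::AllocateStringImpl<JSFatInlineString, CanGC>(
//       cx, gc::DefaultHeap);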
template <typename StringAllocT, AllowGC allowGC /* = CanGC */>
StringAllocT* js::AllocateStringImpl(JSContext* cx, InitialHeap heap) {
  static_assert(std::is_convertible_v<StringAllocT*, JSString*>,
                "must be JSString derived");

  AllocKind kind = MapTypeToFinalizeKind<StringAllocT>::kind;
  size_t size = sizeof(StringAllocT);
  MOZ_ASSERT(size == Arena::thingSize(kind));
  MOZ_ASSERT(size == sizeof(JSString) || size == sizeof(JSFatInlineString));

  // Off-thread alloc cannot trigger GC or make runtime assertions.
  if (cx->isNurseryAllocSuppressed()) {
    StringAllocT* str =
        GCRuntime::tryNewTenuredThing<StringAllocT, NoGC>(cx, kind, size);
    if (MOZ_UNLIKELY(allowGC && !str)) {
      ReportOutOfMemory(cx);
    }
    return str;
  }

  JSRuntime* rt = cx->runtime();
  if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
    return nullptr;
  }

  if (cx->nursery().isEnabled() && heap != TenuredHeap &&
      cx->nursery().canAllocateStrings() && cx->zone()->allocNurseryStrings) {
    auto* str = static_cast<StringAllocT*>(
        rt->gc.tryNewNurseryString<allowGC>(cx, size, kind));
    if (str) {
      return str;
    }

    // Our most common non-jit allocation path is NoGC; thus, if we fail the
    // alloc and cannot GC, we *must* return nullptr here so that the caller
    // will do a CanGC allocation to clear the nursery. Failing to do so will
    // cause all allocations on this path to land in Tenured, and we will not
    // get the benefit of the nursery.
    if (!allowGC) {
      return nullptr;
    }
  }

  return GCRuntime::tryNewTenuredThing<StringAllocT, allowGC>(cx, kind, size);
}

// Attempt to allocate a new BigInt out of the nursery. If there is not enough
// room in the nursery or there is an OOM, this method will return nullptr.
template <AllowGC allowGC>
JS::BigInt* GCRuntime::tryNewNurseryBigInt(JSContext* cx, size_t thingSize,
                                           AllocKind kind) {
  MOZ_ASSERT(IsNurseryAllocable(kind));
  MOZ_ASSERT(cx->isNurseryAllocAllowed());
  MOZ_ASSERT(!cx->isHelperThreadContext());
  MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
  MOZ_ASSERT(!cx->zone()->isAtomsZone());

  AllocSite* site = cx->zone()->unknownAllocSite();
  Cell* cell = cx->nursery().allocateBigInt(site, thingSize);
  if (cell) {
    return static_cast<JS::BigInt*>(cell);
  }

  if (allowGC && !cx->suppressGC) {
    cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery, and
    // other heuristics can disable nursery BigInts for this zone.
    if (cx->nursery().isEnabled() && cx->zone()->allocNurseryBigInts) {
      return static_cast<JS::BigInt*>(
          cx->nursery().allocateBigInt(site, thingSize));
    }
  }
  return nullptr;
}

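// Illustrative sketch: BigInt allocation takes only a context and an initial
// heap, e.g.
//
//   JS::BigInt* bi = js::AllocateBigInt<CanGC>(cx, gc::DefaultHeap);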
template <AllowGC allowGC /* = CanGC */>
JS::BigInt* js::AllocateBigInt(JSContext* cx, InitialHeap heap) {
  AllocKind kind = MapTypeToFinalizeKind<JS::BigInt>::kind;
  size_t size = sizeof(JS::BigInt);
  MOZ_ASSERT(size == Arena::thingSize(kind));

  // Off-thread alloc cannot trigger GC or make runtime assertions.
  if (cx->isNurseryAllocSuppressed()) {
    JS::BigInt* bi =
        GCRuntime::tryNewTenuredThing<JS::BigInt, NoGC>(cx, kind, size);
    if (MOZ_UNLIKELY(allowGC && !bi)) {
      ReportOutOfMemory(cx);
    }
    return bi;
  }

  JSRuntime* rt = cx->runtime();
  if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
    return nullptr;
  }

  if (cx->nursery().isEnabled() && heap != TenuredHeap &&
      cx->nursery().canAllocateBigInts() && cx->zone()->allocNurseryBigInts) {
    auto* bi = static_cast<JS::BigInt*>(
        rt->gc.tryNewNurseryBigInt<allowGC>(cx, size, kind));
    if (bi) {
      return bi;
    }

    // Our most common non-jit allocation path is NoGC; thus, if we fail the
    // alloc and cannot GC, we *must* return nullptr here so that the caller
    // will do a CanGC allocation to clear the nursery. Failing to do so will
    // cause all allocations on this path to land in Tenured, and we will not
    // get the benefit of the nursery.
    if (!allowGC) {
      return nullptr;
    }
  }

  return GCRuntime::tryNewTenuredThing<JS::BigInt, allowGC>(cx, kind, size);
}
template JS::BigInt* js::AllocateBigInt<NoGC>(JSContext* cx,
                                              gc::InitialHeap heap);
template JS::BigInt* js::AllocateBigInt<CanGC>(JSContext* cx,
                                               gc::InitialHeap heap);

#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType, \
                                 bgfinal, nursery, compact)              \
  template type* js::AllocateStringImpl<type, NoGC>(JSContext * cx,     \
                                                    InitialHeap heap);  \
  template type* js::AllocateStringImpl<type, CanGC>(JSContext * cx,    \
                                                     InitialHeap heap);
FOR_EACH_NURSERY_STRING_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
#undef DECL_ALLOCATOR_INSTANCES

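// Illustrative sketch (assuming js::Shape is among the kinds instantiated
// below):
//
//   Shape* shape = js::Allocate<Shape, CanGC>(cx);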
template <typename T, AllowGC allowGC /* = CanGC */>
T* js::Allocate(JSContext* cx) {
  static_assert(!std::is_convertible_v<T*, JSObject*>,
                "must not be JSObject derived");
  static_assert(
      sizeof(T) >= MinCellSize,
      "All allocations must be at least the allocator-imposed minimum size.");

  AllocKind kind = MapTypeToFinalizeKind<T>::kind;
  size_t thingSize = sizeof(T);
  MOZ_ASSERT(thingSize == Arena::thingSize(kind));

  if (!cx->isHelperThreadContext()) {
    if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind)) {
      return nullptr;
    }
  }

  return GCRuntime::tryNewTenuredThing<T, allowGC>(cx, kind, thingSize);
}

#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType, \
                                 bgFinal, nursery, compact)             \
  template type* js::Allocate<type, NoGC>(JSContext * cx);              \
  template type* js::Allocate<type, CanGC>(JSContext * cx);
FOR_EACH_NONOBJECT_NONNURSERY_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
#undef DECL_ALLOCATOR_INSTANCES

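// Tenured allocation proceeds in up to three tiers: (1) bump-allocate from
// the context's per-kind free list; (2) on a miss, refill that free list,
// which may take a new arena or chunk; (3) under CanGC only, run a
// last-ditch GC and retry once without GC.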
template <typename T, AllowGC allowGC>
/* static */
T* GCRuntime::tryNewTenuredThing(JSContext* cx, AllocKind kind,
                                 size_t thingSize) {
  // Bump allocate in the arena's current free-list span.
  auto* t = reinterpret_cast<T*>(cx->freeLists().allocate(kind));
  if (MOZ_UNLIKELY(!t)) {
    // Get the next available free list and allocate out of it. This may
    // acquire a new arena, which will lock the chunk list. If there are no
    // chunks available it may also allocate new memory directly.
    t = reinterpret_cast<T*>(refillFreeListFromAnyThread(cx, kind));

    if (MOZ_UNLIKELY(!t)) {
      if (allowGC) {
        cx->runtime()->gc.attemptLastDitchGC(cx);
        t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
      }
      if (!t) {
        if (allowGC) {
          ReportOutOfMemory(cx);
        }
        return nullptr;
      }
    }
  }

  checkIncrementalZoneState(cx, t);
  gcprobes::TenuredAlloc(t, kind);
  // We count this regardless of the profiler's state, assuming that it costs
  // just as much to count it, as to check the profiler's state and decide not
  // to count it.
  cx->noteTenuredAlloc();
  return t;
}

void GCRuntime::attemptLastDitchGC(JSContext* cx) {
  // Either there was no memory available for a new chunk or the heap hit its
  // size limit. Try to perform an all-compartments, non-incremental, shrinking
  // GC and wait for it to finish.

  if (cx->isHelperThreadContext()) {
    return;
  }

  if (!lastLastDitchTime.IsNull() &&
      TimeStamp::Now() - lastLastDitchTime <= tunables.minLastDitchGCPeriod()) {
    return;
  }

  JS::PrepareForFullGC(cx);
  gc(JS::GCOptions::Shrink, JS::GCReason::LAST_DITCH);
  waitBackgroundAllocEnd();
  waitBackgroundFreeEnd();

  lastLastDitchTime = mozilla::TimeStamp::Now();
}

template <AllowGC allowGC>
bool GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind) {
  if (allowGC) {
    if (!gcIfNeededAtAllocation(cx)) {
      return false;
    }
  }

#if defined(JS_GC_ZEAL) || defined(DEBUG)
  MOZ_ASSERT_IF(cx->zone()->isAtomsZone(),
                kind == AllocKind::ATOM || kind == AllocKind::FAT_INLINE_ATOM ||
                    kind == AllocKind::SYMBOL || kind == AllocKind::JITCODE ||
                    kind == AllocKind::SCOPE);
  MOZ_ASSERT_IF(!cx->zone()->isAtomsZone(),
                kind != AllocKind::ATOM && kind != AllocKind::FAT_INLINE_ATOM);
  MOZ_ASSERT_IF(cx->zone()->isSelfHostingZone(),
                !rt->parentRuntime && !selfHostingZoneFrozen);
  MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
#endif

  // Crash if we perform a GC action when it is not safe.
  if (allowGC && !cx->suppressGC) {
    cx->verifyIsSafeToGC();
  }

  // For testing out-of-memory conditions.
  if (js::oom::ShouldFailWithOOM()) {
    // If we are doing a fallible allocation, percolate up the OOM
    // instead of reporting it.
    if (allowGC) {
      ReportOutOfMemory(cx);
    }
    return false;
  }

  return true;
}

inline bool GCRuntime::gcIfNeededAtAllocation(JSContext* cx) {
#ifdef JS_GC_ZEAL
  if (needZealousGC()) {
    runDebugGC();
  }
#endif

  // Invoking the interrupt callback can fail and we can't usefully
  // handle that here. Just check in case we need to collect instead.
  if (cx->hasAnyPendingInterrupt()) {
    gcIfRequested();
  }

  return true;
}

template <typename T>
/* static */
void GCRuntime::checkIncrementalZoneState(JSContext* cx, T* t) {
#ifdef DEBUG
  if (cx->isHelperThreadContext() || !t) {
    return;
  }

  TenuredCell* cell = &t->asTenured();
  Zone* zone = cell->zone();
  if (zone->isGCMarkingOrSweeping()) {
    MOZ_ASSERT(cell->isMarkedBlack());
  } else {
    MOZ_ASSERT(!cell->isMarkedAny());
  }
#endif
}

TenuredCell* js::gc::AllocateCellInGC(Zone* zone, AllocKind thingKind) {
  TenuredCell* cell = zone->arenas.allocateFromFreeList(thingKind);
  if (!cell) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    cell = GCRuntime::refillFreeListInGC(zone, thingKind);
    if (!cell) {
      oomUnsafe.crash(ChunkSize, "Failed to allocate new chunk during GC");
    }
  }
  return cell;
}

// /////////// Arena -> Thing Allocator //////////////////////////////////////

void GCRuntime::startBackgroundAllocTaskIfIdle() {
  AutoLockHelperThreadState lock;
  if (!allocTask.wasStarted(lock)) {
    // Join the previous invocation of the task. This will return immediately
    // if the thread has never been started.
    allocTask.joinWithLockHeld(lock);
    allocTask.startWithLockHeld(lock);
  }
}

/* static */
TenuredCell* GCRuntime::refillFreeListFromAnyThread(JSContext* cx,
                                                    AllocKind thingKind) {
  MOZ_ASSERT(cx->freeLists().isEmpty(thingKind));

  if (!cx->isHelperThreadContext()) {
    return refillFreeListFromMainThread(cx, thingKind);
  }

  return refillFreeListFromHelperThread(cx, thingKind);
}

/* static */
TenuredCell* GCRuntime::refillFreeListFromMainThread(JSContext* cx,
                                                     AllocKind thingKind) {
  // It should not be possible to allocate on the main thread while we are
  // inside a GC.
  MOZ_ASSERT(!JS::RuntimeHeapIsBusy(), "allocating while under GC");

  return cx->zone()->arenas.refillFreeListAndAllocate(
      cx->freeLists(), thingKind, ShouldCheckThresholds::CheckThresholds);
}

/* static */
TenuredCell* GCRuntime::refillFreeListFromHelperThread(JSContext* cx,
                                                       AllocKind thingKind) {
  // A GC may be happening on the main thread, but zones used by off-thread
  // tasks are never collected.
  Zone* zone = cx->zone();
  MOZ_ASSERT(!zone->wasGCStarted());

  return zone->arenas.refillFreeListAndAllocate(
      cx->freeLists(), thingKind, ShouldCheckThresholds::CheckThresholds);
}

/* static */
TenuredCell* GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind) {
  // Called by compacting GC to refill a free list while we are in a GC.
  MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
  MOZ_ASSERT_IF(!JS::RuntimeHeapIsMinorCollecting(),
                !zone->runtimeFromMainThread()->gc.isBackgroundSweeping());

  return zone->arenas.refillFreeListAndAllocate(
      zone->arenas.freeLists(), thingKind,
      ShouldCheckThresholds::DontCheckThresholds);
}

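// Refill strategy: prefer an arena already on the zone's list for this alloc
// kind; failing that, take the GC lock, pick (or map) a chunk, and carve a
// fresh arena out of it.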
TenuredCell* ArenaLists::refillFreeListAndAllocate(
    FreeLists& freeLists, AllocKind thingKind,
    ShouldCheckThresholds checkThresholds) {
  MOZ_ASSERT(freeLists.isEmpty(thingKind));

  JSRuntime* rt = runtimeFromAnyThread();

  mozilla::Maybe<AutoLockGCBgAlloc> maybeLock;

  // See if we can proceed without taking the GC lock.
  if (concurrentUse(thingKind) != ConcurrentUse::None) {
    maybeLock.emplace(rt);
  }

  Arena* arena = arenaList(thingKind).takeNextArena();
  if (arena) {
    // Empty arenas should be immediately freed.
    MOZ_ASSERT(!arena->isEmpty());

    return freeLists.setArenaAndAllocate(arena, thingKind);
  }

  // Parallel threads have their own ArenaLists, but chunks are shared;
  // if we haven't already, take the GC lock now to avoid racing.
  if (maybeLock.isNothing()) {
    maybeLock.emplace(rt);
  }

  TenuredChunk* chunk = rt->gc.pickChunk(maybeLock.ref());
  if (!chunk) {
    return nullptr;
  }

  // Although our chunk should definitely have enough space for another arena,
  // there are other valid reasons why TenuredChunk::allocateArena() may fail.
  arena = rt->gc.allocateArena(chunk, zone_, thingKind, checkThresholds,
                               maybeLock.ref());
  if (!arena) {
    return nullptr;
  }

  addNewArena(arena, thingKind);

  return freeLists.setArenaAndAllocate(arena, thingKind);
}

inline void ArenaLists::addNewArena(Arena* arena, AllocKind thingKind) {
  ArenaList& al = zone_->isGCMarking() ? newArenasInMarkPhase(thingKind)
                                       : arenaList(thingKind);

  MOZ_ASSERT(al.isCursorAtEnd());
  al.insertBeforeCursor(arena);
}

inline TenuredCell* FreeLists::setArenaAndAllocate(Arena* arena,
                                                   AllocKind kind) {
#ifdef DEBUG
  auto old = freeLists_[kind];
  if (!old->isEmpty()) {
    old->getArena()->checkNoMarkedFreeCells();
  }
#endif

  FreeSpan* span = arena->getFirstFreeSpan();
  freeLists_[kind] = span;

  Zone* zone = arena->zone;
  if (MOZ_UNLIKELY(zone->isGCMarkingOrSweeping())) {
    arena->arenaAllocatedDuringGC();
  }

  TenuredCell* thing = span->allocate(Arena::thingSize(kind));
  MOZ_ASSERT(thing);  // This allocation is infallible.

  return thing;
}

void Arena::arenaAllocatedDuringGC() {
  // Ensure that anything allocated during the mark or sweep phases of an
  // incremental GC will be marked black by pre-marking all free cells in the
  // arena we are about to allocate from.

  MOZ_ASSERT(zone->isGCMarkingOrSweeping());
  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(!cell->isMarkedAny());
    cell->markBlack();
  }
}

void GCRuntime::setParallelAtomsAllocEnabled(bool enabled) {
  // This can only be changed on the main thread, otherwise we could race.
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  MOZ_ASSERT(enabled == rt->hasHelperThreadZones());

  atomsZone->arenas.setParallelAllocEnabled(enabled);
}

void ArenaLists::setParallelAllocEnabled(bool enabled) {
  MOZ_ASSERT(zone_->isAtomsZone());

  static const ConcurrentUse states[2] = {ConcurrentUse::None,
                                          ConcurrentUse::ParallelAlloc};

  for (auto kind : AllAllocKinds()) {
    MOZ_ASSERT(concurrentUse(kind) == states[!enabled]);
    concurrentUse(kind) = states[enabled];
  }
}

void GCRuntime::setParallelUnmarkEnabled(bool enabled) {
  // This can only be changed on the main thread, otherwise we could race.
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  MOZ_ASSERT(JS::RuntimeHeapIsMajorCollecting());
  for (GCZonesIter zone(this); !zone.done(); zone.next()) {
    zone->arenas.setParallelUnmarkEnabled(enabled);
  }
}

void ArenaLists::setParallelUnmarkEnabled(bool enabled) {
  static const ConcurrentUse states[2] = {ConcurrentUse::None,
                                          ConcurrentUse::ParallelUnmark};

  for (auto kind : AllAllocKinds()) {
    MOZ_ASSERT(concurrentUse(kind) == states[!enabled]);
    concurrentUse(kind) = states[enabled];
  }
}

// /////////// TenuredChunk -> Arena Allocator ///////////////////////////////

bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
  // To minimize memory waste, we do not want to run the background chunk
  // allocation if we already have some empty chunks or when the runtime has
  // a small heap size (and therefore likely has a small growth rate).
  return allocTask.enabled() &&
         emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
         (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
}

Arena* GCRuntime::allocateArena(TenuredChunk* chunk, Zone* zone,
                                AllocKind thingKind,
                                ShouldCheckThresholds checkThresholds,
                                const AutoLockGC& lock) {
  MOZ_ASSERT(chunk->hasAvailableArenas());

  // Fail the allocation if we are over our heap size limits.
  if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
      (heapSize.bytes() >= tunables.gcMaxBytes())) {
    return nullptr;
  }

  Arena* arena = chunk->allocateArena(this, zone, thingKind, lock);
  zone->gcHeapSize.addGCArena();

  // Trigger an incremental slice if needed.
  if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
    maybeTriggerGCAfterAlloc(zone);
  }

  return arena;
}

Arena* TenuredChunk::allocateArena(GCRuntime* gc, Zone* zone,
                                   AllocKind thingKind,
                                   const AutoLockGC& lock) {
  if (info.numArenasFreeCommitted == 0) {
    commitOnePage(gc);
    MOZ_ASSERT(info.numArenasFreeCommitted == ArenasPerPage);
  }
  MOZ_ASSERT(info.numArenasFreeCommitted > 0);
  Arena* arena = fetchNextFreeArena(gc);

  arena->init(zone, thingKind, lock);
  updateChunkListAfterAlloc(gc, lock);
  return arena;
}

void TenuredChunk::commitOnePage(GCRuntime* gc) {
  MOZ_ASSERT(info.numArenasFreeCommitted == 0);
  MOZ_ASSERT(!info.freeArenasHead);
  MOZ_ASSERT(info.numArenasFree > 0);

  unsigned offset = findDecommittedPageOffset();
  info.lastDecommittedPageOffset = offset + 1;

  if (DecommitEnabled()) {
    MarkPagesInUseSoft(pageAddress(offset), PageSize);
  }

  size_t arenaIndex = offset * ArenasPerPage;
  decommittedPages[offset] = false;
  for (size_t i = 0; i < ArenasPerPage; i++) {
    arenas[arenaIndex + i].setAsNotAllocated();
  }

  // numArenasFreeCommitted is updated by addArenasInPageToFreeList. There is
  // no need to update numArenasFree: these arenas are still free, they merely
  // change from decommitted to committed. fetchNextFreeArena will decrement
  // numArenasFree later, when an arena is actually handed out.
  addArenasInPageToFreeList(gc, offset);
}

inline void GCRuntime::updateOnFreeArenaAlloc(const TenuredChunkInfo& info) {
  MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
  --numArenasFreeCommitted;
}

Arena* TenuredChunk::fetchNextFreeArena(GCRuntime* gc) {
  MOZ_ASSERT(info.numArenasFreeCommitted > 0);
  MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
  MOZ_ASSERT(info.freeArenasHead);

  Arena* arena = info.freeArenasHead;
  info.freeArenasHead = arena->next;
  --info.numArenasFreeCommitted;
  --info.numArenasFree;
  gc->updateOnFreeArenaAlloc(info);

  return arena;
}

/*
 * Search for and return the next decommitted page. Our goal is to keep
 * lastDecommittedPageOffset "close" to a free page. We do this by setting
 * it to the most recently freed page when we free, and forcing it to
 * the last alloc + 1 when we allocate.
 */
uint32_t TenuredChunk::findDecommittedPageOffset() {
  /* Note: lastDecommittedPageOffset can be past the end of the bitmap. */
  for (unsigned i = info.lastDecommittedPageOffset; i < PagesPerChunk; i++) {
    if (decommittedPages[i]) {
      return i;
    }
  }
  for (unsigned i = 0; i < info.lastDecommittedPageOffset; i++) {
    if (decommittedPages[i]) {
      return i;
    }
  }
  MOZ_CRASH("No decommitted pages found.");
}

// /////////// System -> TenuredChunk Allocator //////////////////////////////

TenuredChunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
  TenuredChunk* chunk = emptyChunks(lock).pop();
  if (!chunk) {
    chunk = TenuredChunk::allocate(this);
    if (!chunk) {
      return nullptr;
    }
    MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
  }

  if (wantBackgroundAllocation(lock)) {
    lock.tryToStartBackgroundAllocation();
  }

  return chunk;
}

void GCRuntime::recycleChunk(TenuredChunk* chunk, const AutoLockGC& lock) {
  AlwaysPoison(chunk, JS_FREED_CHUNK_PATTERN, sizeof(ChunkBase),
               MemCheckKind::MakeNoAccess);
  emptyChunks(lock).push(chunk);
}

TenuredChunk* GCRuntime::pickChunk(AutoLockGCBgAlloc& lock) {
  if (availableChunks(lock).count()) {
    return availableChunks(lock).head();
  }

  TenuredChunk* chunk = getOrAllocChunk(lock);
  if (!chunk) {
    return nullptr;
  }

  chunk->init(this);
  MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
  MOZ_ASSERT(chunk->unused());
  MOZ_ASSERT(!fullChunks(lock).contains(chunk));
  MOZ_ASSERT(!availableChunks(lock).contains(chunk));

  availableChunks(lock).push(chunk);

  return chunk;
}

BackgroundAllocTask::BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool)
    : GCParallelTask(gc),
      chunkPool_(pool),
      enabled_(CanUseExtraThreads() && GetCPUCount() >= 2) {}

void BackgroundAllocTask::run(AutoLockHelperThreadState& lock) {
  AutoUnlockHelperThreadState unlock(lock);

  TraceLoggerThread* logger = TraceLoggerForCurrentThread();
  AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);

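  // Map each chunk with the GC lock dropped, since mapping pages can block,
  // then push it to the pool with the lock re-taken.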
  AutoLockGC gcLock(gc);
  while (!isCancelled() && gc->wantBackgroundAllocation(gcLock)) {
    TenuredChunk* chunk;
    {
      AutoUnlockGC unlock(gcLock);
      chunk = TenuredChunk::allocate(gc);
      if (!chunk) {
        break;
      }
      chunk->init(gc);
    }
    chunkPool_.ref().push(chunk);
  }
}

/* static */
TenuredChunk* TenuredChunk::allocate(GCRuntime* gc) {
  void* chunk = MapAlignedPages(ChunkSize, ChunkSize);
  if (!chunk) {
    return nullptr;
  }

  gc->stats().count(gcstats::COUNT_NEW_CHUNK);
  return static_cast<TenuredChunk*>(chunk);
}

void TenuredChunk::init(GCRuntime* gc) {
  /* The chunk may still have some regions marked as no-access. */
  MOZ_MAKE_MEM_UNDEFINED(this, ChunkSize);

  /*
   * Poison the chunk. Note that decommitAllArenas() below will mark the
   * arenas as inaccessible (for memory sanitizers).
   */
  Poison(this, JS_FRESH_TENURED_PATTERN, ChunkSize,
         MemCheckKind::MakeUndefined);

  new (this) TenuredChunk(gc->rt);

  /*
   * Decommit the arenas. We do this after poisoning so that if the OS does
   * not have to recycle the pages, we still get the benefit of poisoning.
   */
  decommitAllArenas();

#ifdef DEBUG
  verify();
#endif

  /* The rest of the info fields are initialized in pickChunk. */
}

void TenuredChunk::decommitAllArenas() {
  decommittedPages.SetAll();
  if (DecommitEnabled()) {
    MarkPagesUnusedSoft(&arenas[0], ArenasPerChunk * ArenaSize);
  }

  info.freeArenasHead = nullptr;
  info.lastDecommittedPageOffset = 0;
  info.numArenasFree = ArenasPerChunk;
  info.numArenasFreeCommitted = 0;
}