1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=78:
3 *
4 * This Source Code Form is subject to the terms of the Mozilla Public
5 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
6 * You can obtain one at http://mozilla.org/MPL/2.0/. */
7
8 #include "gc/Nursery-inl.h"
9
10 #include "mozilla/DebugOnly.h"
11 #include "mozilla/IntegerPrintfMacros.h"
12 #include "mozilla/Move.h"
13 #include "mozilla/Unused.h"
14
15 #include "jscompartment.h"
16 #include "jsfriendapi.h"
17 #include "jsgc.h"
18 #include "jsutil.h"
19
20 #include "gc/GCInternals.h"
21 #include "gc/Memory.h"
22 #include "jit/JitFrames.h"
23 #include "vm/ArrayObject.h"
24 #include "vm/Debugger.h"
25 #if defined(DEBUG)
26 #include "vm/EnvironmentObject.h"
27 #endif
28 #include "vm/Time.h"
29 #include "vm/TypedArrayObject.h"
30 #include "vm/TypeInference.h"
31
32 #include "jsobjinlines.h"
33
34 #include "vm/NativeObject-inl.h"
35
36 using namespace js;
37 using namespace gc;
38
39 using mozilla::ArrayLength;
40 using mozilla::DebugOnly;
41 using mozilla::PodCopy;
42 using mozilla::PodZero;
43
44 static const uintptr_t CanaryMagicValue = 0xDEADB15D;
45
46 struct js::Nursery::FreeMallocedBuffersTask : public GCParallelTask
47 {
FreeMallocedBuffersTaskjs::Nursery::FreeMallocedBuffersTask48 explicit FreeMallocedBuffersTask(FreeOp* fop) : fop_(fop) {}
initjs::Nursery::FreeMallocedBuffersTask49 bool init() { return buffers_.init(); }
50 void transferBuffersToFree(MallocedBuffersSet& buffersToFree,
51 const AutoLockHelperThreadState& lock);
~FreeMallocedBuffersTaskjs::Nursery::FreeMallocedBuffersTask52 ~FreeMallocedBuffersTask() override { join(); }
53
54 private:
55 FreeOp* fop_;
56 MallocedBuffersSet buffers_;
57
58 virtual void run() override;
59 };
60
61 struct js::Nursery::SweepAction
62 {
SweepActionjs::Nursery::SweepAction63 SweepAction(SweepThunk thunk, void* data, SweepAction* next)
64 : thunk(thunk), data(data), next(next)
65 {}
66
67 SweepThunk thunk;
68 void* data;
69 SweepAction* next;
70
71 #if JS_BITS_PER_WORD == 32
72 protected:
73 uint32_t padding;
74 #endif
75 };
76
77 #ifdef JS_GC_ZEAL
78 struct js::Nursery::Canary
79 {
80 uintptr_t magicValue;
81 Canary* next;
82 };
83 #endif
84
85 inline void
poisonAndInit(JSRuntime * rt,uint8_t poison)86 js::Nursery::NurseryChunk::poisonAndInit(JSRuntime* rt, uint8_t poison)
87 {
88 JS_POISON(this, poison, ChunkSize);
89 init(rt);
90 }
91
92 inline void
init(JSRuntime * rt)93 js::Nursery::NurseryChunk::init(JSRuntime* rt)
94 {
95 new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer);
96 }
97
98 /* static */ inline js::Nursery::NurseryChunk*
fromChunk(Chunk * chunk)99 js::Nursery::NurseryChunk::fromChunk(Chunk* chunk)
100 {
101 return reinterpret_cast<NurseryChunk*>(chunk);
102 }
103
104 inline Chunk*
toChunk(JSRuntime * rt)105 js::Nursery::NurseryChunk::toChunk(JSRuntime* rt)
106 {
107 auto chunk = reinterpret_cast<Chunk*>(this);
108 chunk->init(rt);
109 return chunk;
110 }
111
Nursery(JSRuntime * rt)112 js::Nursery::Nursery(JSRuntime* rt)
113 : runtime_(rt)
114 , position_(0)
115 , currentStartChunk_(0)
116 , currentStartPosition_(0)
117 , currentEnd_(0)
118 , currentChunk_(0)
119 , maxNurseryChunks_(0)
120 , previousPromotionRate_(0)
121 , profileThreshold_(0)
122 , enableProfiling_(false)
123 , reportTenurings_(0)
124 , minorGcCount_(0)
125 , freeMallocedBuffersTask(nullptr)
126 , sweepActions_(nullptr)
127 #ifdef JS_GC_ZEAL
128 , lastCanary_(nullptr)
129 #endif
130 {}
131
132 bool
init(uint32_t maxNurseryBytes,AutoLockGC & lock)133 js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
134 {
135 /* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */
136 maxNurseryChunks_ = maxNurseryBytes >> ChunkShift;
137
138 /* If no chunks are specified then the nursery is permenantly disabled. */
139 if (maxNurseryChunks_ == 0)
140 return true;
141
142 if (!mallocedBuffers.init())
143 return false;
144
145 if (!cellsWithUid_.init())
146 return false;
147
148 freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
149 if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
150 return false;
151
152 AutoMaybeStartBackgroundAllocation maybeBgAlloc;
153 updateNumChunksLocked(1, maybeBgAlloc, lock);
154 if (numChunks() == 0)
155 return false;
156
157 setCurrentChunk(0);
158 setStartPosition();
159
160 char* env = getenv("JS_GC_PROFILE_NURSERY");
161 if (env) {
162 if (0 == strcmp(env, "help")) {
163 fprintf(stderr, "JS_GC_PROFILE_NURSERY=N\n"
164 "\tReport minor GC's taking at least N microseconds.\n");
165 exit(0);
166 }
167 enableProfiling_ = true;
168 profileThreshold_ = atoi(env);
169 }
170
171 env = getenv("JS_GC_REPORT_TENURING");
172 if (env) {
173 if (0 == strcmp(env, "help")) {
174 fprintf(stderr, "JS_GC_REPORT_TENURING=N\n"
175 "\tAfter a minor GC, report any ObjectGroups with at least N instances tenured.\n");
176 exit(0);
177 }
178 reportTenurings_ = atoi(env);
179 }
180
181 PodZero(&startTimes_);
182 PodZero(&profileTimes_);
183 PodZero(&totalTimes_);
184
185 if (!runtime()->gc.storeBuffer.enable())
186 return false;
187
188 MOZ_ASSERT(isEnabled());
189 return true;
190 }
191
~Nursery()192 js::Nursery::~Nursery()
193 {
194 disable();
195 js_delete(freeMallocedBuffersTask);
196 }
197
198 void
enable()199 js::Nursery::enable()
200 {
201 MOZ_ASSERT(isEmpty());
202 MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled());
203 if (isEnabled())
204 return;
205
206 updateNumChunks(1);
207 if (numChunks() == 0)
208 return;
209
210 setCurrentChunk(0);
211 setStartPosition();
212 #ifdef JS_GC_ZEAL
213 if (runtime()->hasZealMode(ZealMode::GenerationalGC))
214 enterZealMode();
215 #endif
216
217 MOZ_ALWAYS_TRUE(runtime()->gc.storeBuffer.enable());
218 return;
219 }
220
221 void
disable()222 js::Nursery::disable()
223 {
224 MOZ_ASSERT(isEmpty());
225 if (!isEnabled())
226 return;
227 updateNumChunks(0);
228 currentEnd_ = 0;
229 runtime()->gc.storeBuffer.disable();
230 }
231
232 bool
isEmpty() const233 js::Nursery::isEmpty() const
234 {
235 MOZ_ASSERT(runtime_);
236 if (!isEnabled())
237 return true;
238
239 if (!runtime_->hasZealMode(ZealMode::GenerationalGC)) {
240 MOZ_ASSERT(currentStartChunk_ == 0);
241 MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
242 }
243 return position() == currentStartPosition_;
244 }
245
#ifdef JS_GC_ZEAL
/* Zeal: grow immediately to the maximum size so every GC stresses the heap. */
void
js::Nursery::enterZealMode() {
    if (isEnabled())
        updateNumChunks(maxNurseryChunks_);
}

/* Zeal off: reset the allocation cursor to chunk 0 (nursery must be empty). */
void
js::Nursery::leaveZealMode() {
    if (isEnabled()) {
        MOZ_ASSERT(isEmpty());
        setCurrentChunk(0);
        setStartPosition();
    }
}
#endif // JS_GC_ZEAL
262
263 JSObject*
allocateObject(JSContext * cx,size_t size,size_t numDynamic,const js::Class * clasp)264 js::Nursery::allocateObject(JSContext* cx, size_t size, size_t numDynamic, const js::Class* clasp)
265 {
266 /* Ensure there's enough space to replace the contents with a RelocationOverlay. */
267 MOZ_ASSERT(size >= sizeof(RelocationOverlay));
268
269 /* Sanity check the finalizer. */
270 MOZ_ASSERT_IF(clasp->hasFinalize(), CanNurseryAllocateFinalizedClass(clasp) ||
271 clasp->isProxy());
272
273 /* Make the object allocation. */
274 JSObject* obj = static_cast<JSObject*>(allocate(size));
275 if (!obj)
276 return nullptr;
277
278 /* If we want external slots, add them. */
279 HeapSlot* slots = nullptr;
280 if (numDynamic) {
281 MOZ_ASSERT(clasp->isNative() || clasp->isProxy());
282 slots = static_cast<HeapSlot*>(allocateBuffer(cx->zone(), numDynamic * sizeof(HeapSlot)));
283 if (!slots) {
284 /*
285 * It is safe to leave the allocated object uninitialized, since we
286 * do not visit unallocated things in the nursery.
287 */
288 return nullptr;
289 }
290 }
291
292 /* Always initialize the slots field to match the JIT behavior. */
293 obj->setInitialSlotsMaybeNonNative(slots);
294
295 TraceNurseryAlloc(obj, size);
296 return obj;
297 }
298
299 void*
allocate(size_t size)300 js::Nursery::allocate(size_t size)
301 {
302 MOZ_ASSERT(isEnabled());
303 MOZ_ASSERT(!runtime()->isHeapBusy());
304 MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
305 MOZ_ASSERT(position() % gc::CellSize == 0);
306 MOZ_ASSERT(size % gc::CellSize == 0);
307
308 #ifdef JS_GC_ZEAL
309 static const size_t CanarySize = (sizeof(Nursery::Canary) + CellSize - 1) & ~CellMask;
310 if (runtime()->gc.hasZealMode(ZealMode::CheckNursery))
311 size += CanarySize;
312 #endif
313
314 if (currentEnd() < position() + size) {
315 if (currentChunk_ + 1 == numChunks())
316 return nullptr;
317 setCurrentChunk(currentChunk_ + 1);
318 }
319
320 void* thing = (void*)position();
321 position_ = position() + size;
322
323 JS_EXTRA_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
324
325 #ifdef JS_GC_ZEAL
326 if (runtime()->gc.hasZealMode(ZealMode::CheckNursery)) {
327 auto canary = reinterpret_cast<Canary*>(position() - CanarySize);
328 canary->magicValue = CanaryMagicValue;
329 canary->next = nullptr;
330 if (lastCanary_) {
331 MOZ_ASSERT(!lastCanary_->next);
332 lastCanary_->next = canary;
333 }
334 lastCanary_ = canary;
335 }
336 #endif
337
338 MemProfiler::SampleNursery(reinterpret_cast<void*>(thing), size);
339 return thing;
340 }
341
342 void*
allocateBuffer(Zone * zone,size_t nbytes)343 js::Nursery::allocateBuffer(Zone* zone, size_t nbytes)
344 {
345 MOZ_ASSERT(nbytes > 0);
346
347 if (nbytes <= MaxNurseryBufferSize) {
348 void* buffer = allocate(nbytes);
349 if (buffer)
350 return buffer;
351 }
352
353 void* buffer = zone->pod_malloc<uint8_t>(nbytes);
354 if (buffer && !mallocedBuffers.putNew(buffer)) {
355 js_free(buffer);
356 return nullptr;
357 }
358 return buffer;
359 }
360
361 void*
allocateBuffer(JSObject * obj,size_t nbytes)362 js::Nursery::allocateBuffer(JSObject* obj, size_t nbytes)
363 {
364 MOZ_ASSERT(obj);
365 MOZ_ASSERT(nbytes > 0);
366
367 if (!IsInsideNursery(obj))
368 return obj->zone()->pod_malloc<uint8_t>(nbytes);
369 return allocateBuffer(obj->zone(), nbytes);
370 }
371
372 void*
reallocateBuffer(JSObject * obj,void * oldBuffer,size_t oldBytes,size_t newBytes)373 js::Nursery::reallocateBuffer(JSObject* obj, void* oldBuffer,
374 size_t oldBytes, size_t newBytes)
375 {
376 if (!IsInsideNursery(obj))
377 return obj->zone()->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
378
379 if (!isInside(oldBuffer)) {
380 void* newBuffer = obj->zone()->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
381 if (newBuffer && oldBuffer != newBuffer)
382 MOZ_ALWAYS_TRUE(mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
383 return newBuffer;
384 }
385
386 /* The nursery cannot make use of the returned slots data. */
387 if (newBytes < oldBytes)
388 return oldBuffer;
389
390 void* newBuffer = allocateBuffer(obj->zone(), newBytes);
391 if (newBuffer)
392 PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
393 return newBuffer;
394 }
395
396 void
freeBuffer(void * buffer)397 js::Nursery::freeBuffer(void* buffer)
398 {
399 if (!isInside(buffer)) {
400 removeMallocedBuffer(buffer);
401 js_free(buffer);
402 }
403 }
404
405 void
setForwardingPointer(void * oldData,void * newData,bool direct)406 Nursery::setForwardingPointer(void* oldData, void* newData, bool direct)
407 {
408 MOZ_ASSERT(isInside(oldData));
409
410 // Bug 1196210: If a zero-capacity header lands in the last 2 words of a
411 // jemalloc chunk abutting the start of a nursery chunk, the (invalid)
412 // newData pointer will appear to be "inside" the nursery.
413 MOZ_ASSERT(!isInside(newData) || (uintptr_t(newData) & ChunkMask) == 0);
414
415 if (direct) {
416 *reinterpret_cast<void**>(oldData) = newData;
417 } else {
418 AutoEnterOOMUnsafeRegion oomUnsafe;
419 if (!forwardedBuffers.initialized() && !forwardedBuffers.init())
420 oomUnsafe.crash("Nursery::setForwardingPointer");
421 #ifdef DEBUG
422 if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData))
423 MOZ_ASSERT(p->value() == newData);
424 #endif
425 if (!forwardedBuffers.put(oldData, newData))
426 oomUnsafe.crash("Nursery::setForwardingPointer");
427 }
428 }
429
430 void
setSlotsForwardingPointer(HeapSlot * oldSlots,HeapSlot * newSlots,uint32_t nslots)431 Nursery::setSlotsForwardingPointer(HeapSlot* oldSlots, HeapSlot* newSlots, uint32_t nslots)
432 {
433 // Slot arrays always have enough space for a forwarding pointer, since the
434 // number of slots is never zero.
435 MOZ_ASSERT(nslots > 0);
436 setForwardingPointer(oldSlots, newSlots, /* direct = */ true);
437 }
438
439 void
setElementsForwardingPointer(ObjectElements * oldHeader,ObjectElements * newHeader,uint32_t nelems)440 Nursery::setElementsForwardingPointer(ObjectElements* oldHeader, ObjectElements* newHeader,
441 uint32_t nelems)
442 {
443 // Only use a direct forwarding pointer if there is enough space for one.
444 setForwardingPointer(oldHeader->elements(), newHeader->elements(),
445 nelems > ObjectElements::VALUES_PER_HEADER);
446 }
447
448 #ifdef DEBUG
/*
 * Debug helper: store the word at |ptr| back to itself, crashing immediately
 * if the address is not writeable. Always returns true so it can sit inside
 * a MOZ_ASSERT.
 */
static bool IsWriteableAddress(void* ptr)
{
    auto vPtr = reinterpret_cast<volatile uint64_t*>(ptr);
    *vPtr = *vPtr;
    return true;
}
455 #endif
456
457 void
forwardBufferPointer(HeapSlot ** pSlotsElems)458 js::Nursery::forwardBufferPointer(HeapSlot** pSlotsElems)
459 {
460 HeapSlot* old = *pSlotsElems;
461
462 if (!isInside(old))
463 return;
464
465 // The new location for this buffer is either stored inline with it or in
466 // the forwardedBuffers table.
467 do {
468 if (forwardedBuffers.initialized()) {
469 if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(old)) {
470 *pSlotsElems = reinterpret_cast<HeapSlot*>(p->value());
471 break;
472 }
473 }
474
475 *pSlotsElems = *reinterpret_cast<HeapSlot**>(old);
476 } while (false);
477
478 MOZ_ASSERT(!isInside(*pSlotsElems));
479 MOZ_ASSERT(IsWriteableAddress(*pSlotsElems));
480 }
481
TenuringTracer(JSRuntime * rt,Nursery * nursery)482 js::TenuringTracer::TenuringTracer(JSRuntime* rt, Nursery* nursery)
483 : JSTracer(rt, JSTracer::TracerKindTag::Tenuring, TraceWeakMapKeysValues)
484 , nursery_(*nursery)
485 , tenuredSize(0)
486 , head(nullptr)
487 , tail(&head)
488 {
489 }
490
491 /* static */ void
printProfileHeader()492 js::Nursery::printProfileHeader()
493 {
494 #define PRINT_HEADER(name, text) \
495 fprintf(stderr, " %6s", text);
496 FOR_EACH_NURSERY_PROFILE_TIME(PRINT_HEADER)
497 #undef PRINT_HEADER
498 fprintf(stderr, "\n");
499 }
500
501 /* static */ void
printProfileTimes(const ProfileTimes & times)502 js::Nursery::printProfileTimes(const ProfileTimes& times)
503 {
504 for (auto time : times)
505 fprintf(stderr, " %6" PRIi64, time);
506 fprintf(stderr, "\n");
507 }
508
509 void
printTotalProfileTimes()510 js::Nursery::printTotalProfileTimes()
511 {
512 if (enableProfiling_) {
513 fprintf(stderr, "MinorGC TOTALS: %7" PRIu64 " collections: ", minorGcCount_);
514 printProfileTimes(totalTimes_);
515 }
516 }
517
518 inline void
startProfile(ProfileKey key)519 js::Nursery::startProfile(ProfileKey key)
520 {
521 startTimes_[key] = PRMJ_Now();
522 }
523
524 inline void
endProfile(ProfileKey key)525 js::Nursery::endProfile(ProfileKey key)
526 {
527 profileTimes_[key] = PRMJ_Now() - startTimes_[key];
528 totalTimes_[key] += profileTimes_[key];
529 }
530
531 inline void
maybeStartProfile(ProfileKey key)532 js::Nursery::maybeStartProfile(ProfileKey key)
533 {
534 if (enableProfiling_)
535 startProfile(key);
536 }
537
538 inline void
maybeEndProfile(ProfileKey key)539 js::Nursery::maybeEndProfile(ProfileKey key)
540 {
541 if (enableProfiling_)
542 endProfile(key);
543 }
544
545 void
collect(JSRuntime * rt,JS::gcreason::Reason reason)546 js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
547 {
548 MOZ_ASSERT(!rt->mainThread.suppressGC);
549 MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
550
551 if (!isEnabled() || isEmpty()) {
552 // Our barriers are not always exact, and there may be entries in the
553 // storebuffer even when the nursery is disabled or empty. It's not safe
554 // to keep these entries as they may refer to tenured cells which may be
555 // freed after this point.
556 rt->gc.storeBuffer.clear();
557 }
558
559 if (!isEnabled())
560 return;
561
562 rt->gc.incMinorGcNumber();
563
564 #ifdef JS_GC_ZEAL
565 if (rt->gc.hasZealMode(ZealMode::CheckNursery)) {
566 for (auto canary = lastCanary_; canary; canary = canary->next)
567 MOZ_ASSERT(canary->magicValue == CanaryMagicValue);
568 }
569 lastCanary_ = nullptr;
570 #endif
571
572 rt->gc.stats.beginNurseryCollection(reason);
573 TraceMinorGCStart();
574
575 startProfile(ProfileKey::Total);
576
577 // The hazard analysis thinks doCollection can invalidate pointers in
578 // tenureCounts below.
579 JS::AutoSuppressGCAnalysis nogc;
580
581 TenureCountCache tenureCounts;
582 double promotionRate = 0;
583 if (!isEmpty())
584 promotionRate = doCollection(rt, reason, tenureCounts);
585
586 // Resize the nursery.
587 maybeStartProfile(ProfileKey::Resize);
588 maybeResizeNursery(reason, promotionRate);
589 maybeEndProfile(ProfileKey::Resize);
590
591 // If we are promoting the nursery, or exhausted the store buffer with
592 // pointers to nursery things, which will force a collection well before
593 // the nursery is full, look for object groups that are getting promoted
594 // excessively and try to pretenure them.
595 maybeStartProfile(ProfileKey::Pretenure);
596 uint32_t pretenureCount = 0;
597 if (promotionRate > 0.8 || reason == JS::gcreason::FULL_STORE_BUFFER) {
598 JSContext* cx = rt->contextFromMainThread();
599 for (auto& entry : tenureCounts.entries) {
600 if (entry.count >= 3000) {
601 ObjectGroup* group = entry.group;
602 if (group->canPreTenure()) {
603 AutoCompartment ac(cx, group->compartment());
604 group->setShouldPreTenure(cx);
605 pretenureCount++;
606 }
607 }
608 }
609 }
610 maybeEndProfile(ProfileKey::Pretenure);
611
612 // We ignore gcMaxBytes when allocating for minor collection. However, if we
613 // overflowed, we disable the nursery. The next time we allocate, we'll fail
614 // because gcBytes >= gcMaxBytes.
615 if (rt->gc.usage.gcBytes() >= rt->gc.tunables.gcMaxBytes())
616 disable();
617
618 endProfile(ProfileKey::Total);
619 minorGcCount_++;
620
621 int64_t totalTime = profileTimes_[ProfileKey::Total];
622 rt->addTelemetry(JS_TELEMETRY_GC_MINOR_US, totalTime);
623 rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON, reason);
624 if (totalTime > 1000)
625 rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON_LONG, reason);
626 rt->addTelemetry(JS_TELEMETRY_GC_NURSERY_BYTES, sizeOfHeapCommitted());
627 rt->addTelemetry(JS_TELEMETRY_GC_PRETENURE_COUNT, pretenureCount);
628
629 rt->gc.stats.endNurseryCollection(reason);
630 TraceMinorGCEnd();
631
632 if (enableProfiling_ && totalTime >= profileThreshold_) {
633 static int printedHeader = 0;
634 if ((printedHeader++ % 200) == 0) {
635 fprintf(stderr, "MinorGC: Reason PRate Size ");
636 printProfileHeader();
637 }
638
639 fprintf(stderr, "MinorGC: %20s %5.1f%% %4u ",
640 JS::gcreason::ExplainReason(reason),
641 promotionRate * 100,
642 numChunks());
643 printProfileTimes(profileTimes_);
644
645 if (reportTenurings_) {
646 for (auto& entry : tenureCounts.entries) {
647 if (entry.count >= reportTenurings_) {
648 fprintf(stderr, "%d x ", entry.count);
649 entry.group->print();
650 }
651 }
652 }
653 }
654 }
655
656 double
doCollection(JSRuntime * rt,JS::gcreason::Reason reason,TenureCountCache & tenureCounts)657 js::Nursery::doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
658 TenureCountCache& tenureCounts)
659 {
660 AutoTraceSession session(rt, JS::HeapState::MinorCollecting);
661 AutoSetThreadIsPerformingGC performingGC;
662 AutoStopVerifyingBarriers av(rt, false);
663 AutoDisableProxyCheck disableStrictProxyChecking(rt);
664 mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
665
666 size_t initialNurserySize = spaceToEnd();
667
668 // Move objects pointed to by roots from the nursery to the major heap.
669 TenuringTracer mover(rt, this);
670
671 // Mark the store buffer. This must happen first.
672 StoreBuffer& sb = rt->gc.storeBuffer;
673
674 // The MIR graph only contains nursery pointers if cancelIonCompilations()
675 // is set on the store buffer, in which case we cancel all compilations.
676 maybeStartProfile(ProfileKey::CancelIonCompilations);
677 if (sb.cancelIonCompilations())
678 js::CancelOffThreadIonCompile(rt);
679 maybeEndProfile(ProfileKey::CancelIonCompilations);
680
681 maybeStartProfile(ProfileKey::TraceValues);
682 sb.traceValues(mover);
683 maybeEndProfile(ProfileKey::TraceValues);
684
685 maybeStartProfile(ProfileKey::TraceCells);
686 sb.traceCells(mover);
687 maybeEndProfile(ProfileKey::TraceCells);
688
689 maybeStartProfile(ProfileKey::TraceSlots);
690 sb.traceSlots(mover);
691 maybeEndProfile(ProfileKey::TraceSlots);
692
693 maybeStartProfile(ProfileKey::TraceWholeCells);
694 sb.traceWholeCells(mover);
695 maybeEndProfile(ProfileKey::TraceWholeCells);
696
697 maybeStartProfile(ProfileKey::TraceGenericEntries);
698 sb.traceGenericEntries(&mover);
699 maybeEndProfile(ProfileKey::TraceGenericEntries);
700
701 maybeStartProfile(ProfileKey::MarkRuntime);
702 rt->gc.traceRuntimeForMinorGC(&mover, session.lock);
703 maybeEndProfile(ProfileKey::MarkRuntime);
704
705 maybeStartProfile(ProfileKey::MarkDebugger);
706 {
707 gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_MARK_ROOTS);
708 Debugger::markAll(&mover);
709 }
710 maybeEndProfile(ProfileKey::MarkDebugger);
711
712 maybeStartProfile(ProfileKey::ClearNewObjectCache);
713 rt->contextFromMainThread()->caches.newObjectCache.clearNurseryObjects(rt);
714 maybeEndProfile(ProfileKey::ClearNewObjectCache);
715
716 // Most of the work is done here. This loop iterates over objects that have
717 // been moved to the major heap. If these objects have any outgoing pointers
718 // to the nursery, then those nursery objects get moved as well, until no
719 // objects are left to move. That is, we iterate to a fixed point.
720 maybeStartProfile(ProfileKey::CollectToFP);
721 collectToFixedPoint(mover, tenureCounts);
722 maybeEndProfile(ProfileKey::CollectToFP);
723
724 // Sweep compartments to update the array buffer object's view lists.
725 maybeStartProfile(ProfileKey::SweepArrayBufferViewList);
726 for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
727 c->sweepAfterMinorGC(&mover);
728 maybeEndProfile(ProfileKey::SweepArrayBufferViewList);
729
730 // Update any slot or element pointers whose destination has been tenured.
731 maybeStartProfile(ProfileKey::UpdateJitActivations);
732 js::jit::UpdateJitActivationsForMinorGC(rt, &mover);
733 forwardedBuffers.finish();
734 maybeEndProfile(ProfileKey::UpdateJitActivations);
735
736 maybeStartProfile(ProfileKey::ObjectsTenuredCallback);
737 rt->gc.callObjectsTenuredCallback();
738 maybeEndProfile(ProfileKey::ObjectsTenuredCallback);
739
740 // Sweep.
741 maybeStartProfile(ProfileKey::FreeMallocedBuffers);
742 freeMallocedBuffers();
743 maybeEndProfile(ProfileKey::FreeMallocedBuffers);
744
745 maybeStartProfile(ProfileKey::Sweep);
746 sweep();
747 maybeEndProfile(ProfileKey::Sweep);
748
749 maybeStartProfile(ProfileKey::ClearStoreBuffer);
750 rt->gc.storeBuffer.clear();
751 maybeEndProfile(ProfileKey::ClearStoreBuffer);
752
753 // Make sure hashtables have been updated after the collection.
754 maybeStartProfile(ProfileKey::CheckHashTables);
755 #ifdef JS_GC_ZEAL
756 if (rt->hasZealMode(ZealMode::CheckHashTablesOnMinorGC))
757 CheckHashTablesAfterMovingGC(rt);
758 #endif
759 maybeEndProfile(ProfileKey::CheckHashTables);
760
761 // Calculate and return the promotion rate.
762 return mover.tenuredSize / double(initialNurserySize);
763 }
764
765 void
transferBuffersToFree(MallocedBuffersSet & buffersToFree,const AutoLockHelperThreadState & lock)766 js::Nursery::FreeMallocedBuffersTask::transferBuffersToFree(MallocedBuffersSet& buffersToFree,
767 const AutoLockHelperThreadState& lock)
768 {
769 // Transfer the contents of the source set to the task's buffers_ member by
770 // swapping the sets, which also clears the source.
771 MOZ_ASSERT(!isRunningWithLockHeld(lock));
772 MOZ_ASSERT(buffers_.empty());
773 mozilla::Swap(buffers_, buffersToFree);
774 }
775
776 void
run()777 js::Nursery::FreeMallocedBuffersTask::run()
778 {
779 for (MallocedBuffersSet::Range r = buffers_.all(); !r.empty(); r.popFront())
780 fop_->free_(r.front());
781 buffers_.clear();
782 }
783
784 void
freeMallocedBuffers()785 js::Nursery::freeMallocedBuffers()
786 {
787 if (mallocedBuffers.empty())
788 return;
789
790 bool started;
791 {
792 AutoLockHelperThreadState lock;
793 freeMallocedBuffersTask->joinWithLockHeld(lock);
794 freeMallocedBuffersTask->transferBuffersToFree(mallocedBuffers, lock);
795 started = freeMallocedBuffersTask->startWithLockHeld(lock);
796 }
797
798 if (!started)
799 freeMallocedBuffersTask->runFromMainThread(runtime());
800
801 MOZ_ASSERT(mallocedBuffers.empty());
802 }
803
804 void
waitBackgroundFreeEnd()805 js::Nursery::waitBackgroundFreeEnd()
806 {
807 // We may finishRoots before nursery init if runtime init fails.
808 if (!isEnabled())
809 return;
810
811 MOZ_ASSERT(freeMallocedBuffersTask);
812 freeMallocedBuffersTask->join();
813 }
814
815 void
sweep()816 js::Nursery::sweep()
817 {
818 /* Sweep unique id's in all in-use chunks. */
819 for (CellsWithUniqueIdSet::Enum e(cellsWithUid_); !e.empty(); e.popFront()) {
820 JSObject* obj = static_cast<JSObject*>(e.front());
821 if (!IsForwarded(obj))
822 obj->zone()->removeUniqueId(obj);
823 else
824 MOZ_ASSERT(Forwarded(obj)->zone()->hasUniqueId(Forwarded(obj)));
825 }
826 cellsWithUid_.clear();
827
828 runSweepActions();
829 sweepDictionaryModeObjects();
830
831 #ifdef JS_GC_ZEAL
832 /* Poison the nursery contents so touching a freed object will crash. */
833 for (unsigned i = 0; i < numChunks(); i++)
834 chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
835
836 if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
837 /* Only reset the alloc point when we are close to the end. */
838 if (currentChunk_ + 1 == numChunks())
839 setCurrentChunk(0);
840 } else
841 #endif
842 {
843 #ifdef JS_CRASH_DIAGNOSTICS
844 for (unsigned i = 0; i < numChunks(); ++i)
845 chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
846 #endif
847 setCurrentChunk(0);
848 }
849
850 /* Set current start position for isEmpty checks. */
851 setStartPosition();
852 MemProfiler::SweepNursery(runtime());
853 }
854
855 size_t
spaceToEnd() const856 js::Nursery::spaceToEnd() const
857 {
858 unsigned lastChunk = numChunks() - 1;
859
860 MOZ_ASSERT(lastChunk >= currentStartChunk_);
861 MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <= NurseryChunkUsableSize);
862
863 size_t bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
864 ((lastChunk - currentStartChunk_) * NurseryChunkUsableSize);
865
866 MOZ_ASSERT(bytes <= numChunks() * NurseryChunkUsableSize);
867
868 return bytes;
869 }
870
871 MOZ_ALWAYS_INLINE void
setCurrentChunk(unsigned chunkno)872 js::Nursery::setCurrentChunk(unsigned chunkno)
873 {
874 MOZ_ASSERT(chunkno < maxChunks());
875 MOZ_ASSERT(chunkno < numChunks());
876 currentChunk_ = chunkno;
877 position_ = chunk(chunkno).start();
878 currentEnd_ = chunk(chunkno).end();
879 chunk(chunkno).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
880 }
881
882 MOZ_ALWAYS_INLINE void
setStartPosition()883 js::Nursery::setStartPosition()
884 {
885 currentStartChunk_ = currentChunk_;
886 currentStartPosition_ = position();
887 }
888
889 void
maybeResizeNursery(JS::gcreason::Reason reason,double promotionRate)890 js::Nursery::maybeResizeNursery(JS::gcreason::Reason reason, double promotionRate)
891 {
892 static const double GrowThreshold = 0.05;
893 static const double ShrinkThreshold = 0.01;
894
895 // Shrink the nursery to its minimum size of we ran out of memory or
896 // received a memory pressure event.
897 if (gc::IsOOMReason(reason)) {
898 minimizeAllocableSpace();
899 return;
900 }
901
902 if (promotionRate > GrowThreshold)
903 growAllocableSpace();
904 else if (promotionRate < ShrinkThreshold && previousPromotionRate_ < ShrinkThreshold)
905 shrinkAllocableSpace();
906
907 previousPromotionRate_ = promotionRate;
908 }
909
910 void
growAllocableSpace()911 js::Nursery::growAllocableSpace()
912 {
913 updateNumChunks(Min(numChunks() * 2, maxNurseryChunks_));
914 }
915
916 void
shrinkAllocableSpace()917 js::Nursery::shrinkAllocableSpace()
918 {
919 #ifdef JS_GC_ZEAL
920 if (runtime()->hasZealMode(ZealMode::GenerationalGC))
921 return;
922 #endif
923 updateNumChunks(Max(numChunks() - 1, 1u));
924 }
925
926 void
minimizeAllocableSpace()927 js::Nursery::minimizeAllocableSpace()
928 {
929 #ifdef JS_GC_ZEAL
930 if (runtime()->hasZealMode(ZealMode::GenerationalGC))
931 return;
932 #endif
933 updateNumChunks(1);
934 }
935
936 void
updateNumChunks(unsigned newCount)937 js::Nursery::updateNumChunks(unsigned newCount)
938 {
939 if (numChunks() != newCount) {
940 AutoMaybeStartBackgroundAllocation maybeBgAlloc;
941 AutoLockGC lock(runtime());
942 updateNumChunksLocked(newCount, maybeBgAlloc, lock);
943 }
944 }
945
946 void
updateNumChunksLocked(unsigned newCount,AutoMaybeStartBackgroundAllocation & maybeBgAlloc,AutoLockGC & lock)947 js::Nursery::updateNumChunksLocked(unsigned newCount,
948 AutoMaybeStartBackgroundAllocation& maybeBgAlloc,
949 AutoLockGC& lock)
950 {
951 // The GC nursery is an optimization and so if we fail to allocate nursery
952 // chunks we do not report an error.
953
954 unsigned priorCount = numChunks();
955 MOZ_ASSERT(priorCount != newCount);
956
957 if (newCount < priorCount) {
958 // Shrink the nursery and free unused chunks.
959 for (unsigned i = newCount; i < priorCount; i++)
960 runtime()->gc.recycleChunk(chunk(i).toChunk(runtime()), lock);
961 chunks_.shrinkTo(newCount);
962 return;
963 }
964
965 // Grow the nursery and allocate new chunks.
966 if (!chunks_.resize(newCount))
967 return;
968
969 for (unsigned i = priorCount; i < newCount; i++) {
970 auto newChunk = runtime()->gc.getOrAllocChunk(lock, maybeBgAlloc);
971 if (!newChunk) {
972 chunks_.shrinkTo(i);
973 return;
974 }
975
976 chunks_[i] = NurseryChunk::fromChunk(newChunk);
977 chunk(i).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
978 }
979 }
980
981 void
queueSweepAction(SweepThunk thunk,void * data)982 js::Nursery::queueSweepAction(SweepThunk thunk, void* data)
983 {
984 static_assert(sizeof(SweepAction) % CellSize == 0,
985 "SweepAction size must be a multiple of cell size");
986
987 MOZ_ASSERT(isEnabled());
988
989 AutoEnterOOMUnsafeRegion oomUnsafe;
990 auto action = reinterpret_cast<SweepAction*>(allocate(sizeof(SweepAction)));
991 if (!action)
992 oomUnsafe.crash("Nursery::queueSweepAction");
993
994 new (action) SweepAction(thunk, data, sweepActions_);
995 sweepActions_ = action;
996 }
997
998 void
runSweepActions()999 js::Nursery::runSweepActions()
1000 {
1001 // The hazard analysis doesn't know whether the thunks can GC.
1002 JS::AutoSuppressGCAnalysis nogc;
1003
1004 AutoSetThreadIsSweeping threadIsSweeping;
1005 for (auto action = sweepActions_; action; action = action->next)
1006 action->thunk(action->data);
1007 sweepActions_ = nullptr;
1008 }
1009
1010 bool
queueDictionaryModeObjectToSweep(NativeObject * obj)1011 js::Nursery::queueDictionaryModeObjectToSweep(NativeObject* obj)
1012 {
1013 MOZ_ASSERT(IsInsideNursery(obj));
1014 return dictionaryModeObjects_.append(obj);
1015 }
1016
1017 void
sweepDictionaryModeObjects()1018 js::Nursery::sweepDictionaryModeObjects()
1019 {
1020 for (auto obj : dictionaryModeObjects_) {
1021 if (!IsForwarded(obj))
1022 obj->sweepDictionaryListPointer();
1023 }
1024 dictionaryModeObjects_.clear();
1025 }
1026