1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2  * vim: set ts=8 sts=2 et sw=2 tw=80:
3  * This Source Code Form is subject to the terms of the Mozilla Public
4  * License, v. 2.0. If a copy of the MPL was not distributed with this
5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 /*
8  * [SMDOC] GC Scheduling
9  *
10  * GC Scheduling Overview
11  * ======================
12  *
13  * See also GC scheduling from Firefox's perspective here:
14  * https://searchfox.org/mozilla-central/source/dom/base/CCGCScheduler.cpp
15  *
16  * Scheduling GC's in SpiderMonkey/Firefox is tremendously complicated because
17  * of the large number of subtle, cross-cutting, and widely dispersed factors
18  * that must be taken into account. A summary of some of the more important
19  * factors follows.
20  *
21  * Cost factors:
22  *
23  *   * GC too soon and we'll revisit an object graph almost identical to the
24  *     one we just visited; since we are unlikely to find new garbage, the
25  *     traversal will be largely overhead. We rely heavily on external factors
26  *     to signal us that we are likely to find lots of garbage: e.g. "a tab
27  *     just got closed".
28  *
29  *   * GC too late and we'll run out of memory to allocate (e.g. Out-Of-Memory,
30  *     hereafter simply abbreviated to OOM). If this happens inside
31  *     SpiderMonkey we may be able to recover, but most embedder allocations
32  *     will simply crash on OOM, even if the GC has plenty of free memory it
33  *     could surrender.
34  *
35  *   * Memory fragmentation: if we fill the process with GC allocations, a
36  *     request for a large block of contiguous memory may fail because no
37  *     contiguous block is free, despite having enough memory available to
38  *     service the request.
39  *
40  *   * Management overhead: if our GC heap becomes large, we create extra
41  *     overhead when managing the GC's structures, even if the allocations are
42  *     mostly unused.
43  *
44  * Heap Management Factors:
45  *
46  *   * GC memory: The GC has its own allocator that it uses to make fixed size
47  *     allocations for GC managed things. In cases where the GC thing requires
48  *     larger or variable sized memory to implement itself, it is responsible
49  *     for using the system heap.
50  *
51  *   * C Heap Memory: Rather than allowing for large or variable allocations,
52  *     the SpiderMonkey GC allows GC things to hold pointers to C heap memory.
53  *     It is the responsibility of the thing to free this memory with a custom
54  *     finalizer (with the sole exception of NativeObject, which knows about
55  *     slots and elements for performance reasons). C heap memory has different
56  *     performance and overhead tradeoffs than GC internal memory, which need
57  *     to be considered with scheduling a GC.
58  *
59  * Application Factors:
60  *
61  *   * Most applications allocate heavily at startup, then enter a processing
62  *     stage where memory utilization remains roughly fixed with a slower
63  *     allocation rate. This is not always the case, however, so while we may
64  *     optimize for this pattern, we must be able to handle arbitrary
65  *     allocation patterns.
66  *
67  * Other factors:
68  *
69  *   * Other memory: This is memory allocated outside the purview of the GC.
70  *     Data mapped by the system for code libraries, data allocated by those
71  *     libraries, data in the JSRuntime that is used to manage the engine,
72  *     memory used by the embedding that is not attached to a GC thing, memory
73  *     used by unrelated processes running on the hardware that use space we
74  *     could otherwise use for allocation, etc. While we don't have to manage
75  *     it, we do have to take it into account when scheduling since it affects
76  *     when we will OOM.
77  *
78  *   * Physical Reality: All real machines have limits on the number of bits
79  *     that they are physically able to store. While modern operating systems
80  *     can generally make additional space available with swapping, at some
81  *     point there are simply no more bits to allocate. There is also the
82  *     factor of address space limitations, particularly on 32bit machines.
83  *
84  *   * Platform Factors: Each OS makes use of wildly different memory
85  *     management techniques. These differences result in different performance
86  *     tradeoffs, different fragmentation patterns, and different hard limits
87  *     on the amount of physical and/or virtual memory that we can use before
88  *     OOMing.
89  *
90  *
91  * Reasons for scheduling GC
92  * -------------------------
93  *
94  *  While code generally takes the above factors into account in only an ad-hoc
95  *  fashion, the API forces the user to pick a "reason" for the GC. We have a
96  *  bunch of JS::GCReason reasons in GCAPI.h. These fall into a few categories
97  *  that generally coincide with one or more of the above factors.
98  *
99  *  Embedding reasons:
100  *
101  *   1) Do a GC now because the embedding knows something useful about the
102  *      zone's memory retention state. These are GCReasons like LOAD_END,
103  *      PAGE_HIDE, SET_NEW_DOCUMENT, DOM_UTILS. Mostly, Gecko uses these to
104  *      indicate that a significant fraction of the scheduled zone's memory is
105  *      probably reclaimable.
106  *
107  *   2) Do some known amount of GC work now because the embedding knows now is
108  *      a good time to do a long, unblockable operation of a known duration.
109  *      These are INTER_SLICE_GC and REFRESH_FRAME.
110  *
111  *  Correctness reasons:
112  *
113  *   3) Do a GC now because correctness depends on some GC property. For
114  *      example, CC_FORCED is where the embedding requires the mark bits to be
115  *      set correctly. Also, EVICT_NURSERY where we need to work on the tenured
116  *      heap.
117  *
118  *   4) Do a GC because we are shutting down: e.g. SHUTDOWN_CC or DESTROY_*.
119  *
120  *   5) Do a GC because a compartment was accessed between GC slices when we
121  *      would have otherwise discarded it. We have to do a second GC to clean
122  *      it up: e.g. COMPARTMENT_REVIVED.
123  *
124  *  Emergency Reasons:
125  *
126  *   6) Do an all-zones, non-incremental GC now because the embedding knows it
127  *      cannot wait: e.g. MEM_PRESSURE.
128  *
129  *   7) OOM when fetching a new Chunk results in a LAST_DITCH GC.
130  *
131  *  Heap Size Limitation Reasons:
132  *
133  *   8) Do an incremental, zonal GC with reason MAYBEGC when we discover that
134  *      the gc's allocated size is approaching the current trigger. This is
135  *      called MAYBEGC because we make this check in the MaybeGC function.
136  *      MaybeGC gets called at the top of the main event loop. Normally, it is
137  *      expected that this callback will keep the heap size limited. It is
138  *      relatively inexpensive, because it is invoked with no JS running and
139  *      thus few stack roots to scan. For this reason, the GC's "trigger" bytes
140  *      is less than the GC's "max" bytes as used by the trigger below.
141  *
142  *   9) Do an incremental, zonal GC with reason MAYBEGC when we go to allocate
143  *      a new GC thing and find that the GC heap size has grown beyond the
144  *      configured maximum (JSGC_MAX_BYTES). We trigger this GC by returning
145  *      nullptr and then calling maybeGC at the top level of the allocator.
146  *      This is then guaranteed to fail the "size greater than trigger" check
147  *      above, since trigger is always less than max. After performing the GC,
 *      the allocator unconditionally returns nullptr to force an OOM
 *      exception to be raised by the script.
150  *
151  *      Note that this differs from a LAST_DITCH GC where we actually run out
152  *      of memory (i.e., a call to a system allocator fails) when trying to
153  *      allocate. Unlike above, LAST_DITCH GC only happens when we are really
154  *      out of memory, not just when we cross an arbitrary trigger; despite
155  *      this, it may still return an allocation at the end and allow the script
156  *      to continue, if the LAST_DITCH GC was able to free up enough memory.
157  *
158  *  10) Do a GC under reason ALLOC_TRIGGER when we are over the GC heap trigger
159  *      limit, but in the allocator rather than in a random call to maybeGC.
160  *      This occurs if we allocate too much before returning to the event loop
161  *      and calling maybeGC; this is extremely common in benchmarks and
162  *      long-running Worker computations. Note that this uses a wildly
163  *      different mechanism from the above in that it sets the interrupt flag
164  *      and does the GC at the next loop head, before the next alloc, or
165  *      maybeGC. The reason for this is that this check is made after the
166  *      allocation and we cannot GC with an uninitialized thing in the heap.
167  *
 *  11) Do an incremental, zonal GC with reason TOO_MUCH_MALLOC when the
 *      total amount of malloced memory is greater than the malloc trigger
 *      limit for the zone.
171  *
172  *
173  * Size Limitation Triggers Explanation
174  * ------------------------------------
175  *
176  *  The GC internally is entirely unaware of the context of the execution of
177  *  the mutator. It sees only:
178  *
179  *   A) Allocated size: this is the amount of memory currently requested by the
180  *      mutator. This quantity is monotonically increasing: i.e. the allocation
181  *      rate is always >= 0. It is also easy for the system to track.
182  *
183  *   B) Retained size: this is the amount of memory that the mutator can
184  *      currently reach. Said another way, it is the size of the heap
185  *      immediately after a GC (modulo background sweeping). This size is very
186  *      costly to know exactly and also extremely hard to estimate with any
187  *      fidelity.
188  *
189  *   For reference, a common allocated vs. retained graph might look like:
190  *
191  *       |                                  **         **
192  *       |                       **       ** *       **
193  *       |                     ** *     **   *     **
194  *       |           *       **   *   **     *   **
195  *       |          **     **     * **       * **
196  *      s|         * *   **       ** +  +    **
197  *      i|        *  *  *      +  +       +  +     +
198  *      z|       *   * * +  +                   +     +  +
199  *      e|      *    **+
200  *       |     *     +
201  *       |    *    +
202  *       |   *   +
203  *       |  *  +
204  *       | * +
205  *       |*+
206  *       +--------------------------------------------------
207  *                               time
208  *                                           *** = allocated
209  *                                           +++ = retained
210  *
211  *           Note that this is a bit of a simplification
212  *           because in reality we track malloc and GC heap
213  *           sizes separately and have a different level of
214  *           granularity and accuracy on each heap.
215  *
216  *   This presents some obvious implications for Mark-and-Sweep collectors.
217  *   Namely:
218  *       -> t[marking] ~= size[retained]
219  *       -> t[sweeping] ~= size[allocated] - size[retained]
220  *
221  *   In a non-incremental collector, maintaining low latency and high
222  *   responsiveness requires that total GC times be as low as possible. Thus,
223  *   in order to stay responsive when we did not have a fully incremental
224  *   collector, our GC triggers were focused on minimizing collection time.
225  *   Furthermore, since size[retained] is not under control of the GC, all the
226  *   GC could do to control collection times was reduce sweep times by
227  *   minimizing size[allocated], per the equation above.
228  *
229  *   The result of the above is GC triggers that focus on size[allocated] to
230  *   the exclusion of other important factors and default heuristics that are
231  *   not optimal for a fully incremental collector. On the other hand, this is
232  *   not all bad: minimizing size[allocated] also minimizes the chance of OOM
233  *   and sweeping remains one of the hardest areas to further incrementalize.
234  *
235  *      EAGER_ALLOC_TRIGGER
236  *      -------------------
237  *      Occurs when we return to the event loop and find our heap is getting
238  *      largish, but before t[marking] OR t[sweeping] is too large for a
239  *      responsive non-incremental GC. This is intended to be the common case
240  *      in normal web applications: e.g. we just finished an event handler and
241  *      the few objects we allocated when computing the new whatzitz have
242  *      pushed us slightly over the limit. After this GC we rescale the new
243  *      EAGER_ALLOC_TRIGGER trigger to 150% of size[retained] so that our
244  *      non-incremental GC times will always be proportional to this size
245  *      rather than being dominated by sweeping.
246  *
247  *      As a concession to mutators that allocate heavily during their startup
248  *      phase, we have a highFrequencyGCMode that ups the growth rate to 300%
249  *      of the current size[retained] so that we'll do fewer longer GCs at the
250  *      end of the mutator startup rather than more, smaller GCs.
251  *
252  *          Assumptions:
253  *            -> Responsiveness is proportional to t[marking] + t[sweeping].
254  *            -> size[retained] is proportional only to GC allocations.
255  *
256  *      ALLOC_TRIGGER (non-incremental)
257  *      -------------------------------
258  *      If we do not return to the event loop before getting all the way to our
259  *      gc trigger bytes then MAYBEGC will never fire. To avoid OOMing, we
260  *      succeed the current allocation and set the script interrupt so that we
261  *      will (hopefully) do a GC before we overflow our max and have to raise
262  *      an OOM exception for the script.
263  *
264  *          Assumptions:
265  *            -> Common web scripts will return to the event loop before using
266  *               10% of the current triggerBytes worth of GC memory.
267  *
268  *      ALLOC_TRIGGER (incremental)
269  *      ---------------------------
270  *      In practice the above trigger is rough: if a website is just on the
271  *      cusp, sometimes it will trigger a non-incremental GC moments before
272  *      returning to the event loop, where it could have done an incremental
273  *      GC. Thus, we recently added an incremental version of the above with a
274  *      substantially lower threshold, so that we have a soft limit here. If
275  *      IGC can collect faster than the allocator generates garbage, even if
276  *      the allocator does not return to the event loop frequently, we should
277  *      not have to fall back to a non-incremental GC.
278  *
279  *      INCREMENTAL_TOO_SLOW
280  *      --------------------
281  *      Do a full, non-incremental GC if we overflow ALLOC_TRIGGER during an
282  *      incremental GC. When in the middle of an incremental GC, we suppress
283  *      our other triggers, so we need a way to backstop the IGC if the
284  *      mutator allocates faster than the IGC can clean things up.
285  *
286  *      TOO_MUCH_MALLOC
287  *      ---------------
288  *      Performs a GC before size[allocated] - size[retained] gets too large
289  *      for non-incremental sweeping to be fast in the case that we have
290  *      significantly more malloc allocation than GC allocation. This is meant
291  *      to complement MAYBEGC triggers. We track this by counting malloced
292  *      bytes; the counter gets reset at every GC since we do not always have a
293  *      size at the time we call free. Because of this, the malloc heuristic
294  *      is, unfortunately, not usefully able to augment our other GC heap
295  *      triggers and is limited to this singular heuristic.
296  *
297  *          Assumptions:
298  *            -> EITHER size[allocated_by_malloc] ~= size[allocated_by_GC]
299  *                 OR   time[sweeping] ~= size[allocated_by_malloc]
300  *            -> size[retained] @ t0 ~= size[retained] @ t1
301  *               i.e. That the mutator is in steady-state operation.
302  *
303  *      LAST_DITCH_GC
304  *      -------------
305  *      Does a GC because we are out of memory.
306  *
307  *          Assumptions:
308  *            -> size[retained] < size[available_memory]
309  */
310 
311 #ifndef gc_Scheduling_h
312 #define gc_Scheduling_h
313 
314 #include "mozilla/Atomics.h"
315 #include "mozilla/DebugOnly.h"
316 
317 #include "gc/GCEnum.h"
318 #include "js/AllocPolicy.h"
319 #include "js/GCAPI.h"
320 #include "js/HashTable.h"
321 #include "js/HeapAPI.h"
322 #include "js/SliceBudget.h"
323 #include "threading/ProtectedData.h"
324 #include "util/DifferentialTesting.h"
325 
326 namespace js {
327 
328 class AutoLockGC;
329 class ZoneAllocator;
330 class ZoneAllocPolicy;
331 
332 namespace gc {
333 
334 struct Cell;
335 
/*
 * Default settings for tuning the GC.  Some of these can be set at runtime.
 * This list is not complete; some tuning parameters are not listed here.
 *
 * If you change the values here, please also consider changing them in
 * modules/libpref/init/all.js where they are duplicated for the Firefox
 * preferences.
 */
namespace TuningDefaults {

/* JSGC_MAX_BYTES */
static const size_t GCMaxBytes = 0xffffffff;

/* JSGC_ALLOCATION_THRESHOLD (parameter is set in KB, stored in bytes) */
static const size_t GCZoneAllocThresholdBase = 27 * 1024 * 1024;

/*
 * JSGC_MIN_NURSERY_BYTES
 *
 * With some testing (Bug 1532838) we increased this to 256K from 192K
 * which improves performance.  We should try to reduce this for background
 * tabs.
 */
static const size_t GCMinNurseryBytes = 256 * 1024;

/*
 * JSGC_SMALL_HEAP_INCREMENTAL_LIMIT
 *
 * This must be greater than 1.3 to maintain performance on splay-latency.
 */
static const double SmallHeapIncrementalLimit = 1.40;

/* JSGC_LARGE_HEAP_INCREMENTAL_LIMIT */
static const double LargeHeapIncrementalLimit = 1.10;

/* JSGC_ZONE_ALLOC_DELAY_KB (parameter is set in KB, stored in bytes) */
static const size_t ZoneAllocDelayBytes = 1024 * 1024;

/* JSGC_HIGH_FREQUENCY_TIME_LIMIT */
static const auto HighFrequencyThreshold = 1;  // in seconds

/* JSGC_SMALL_HEAP_SIZE_MAX (parameter is set in MB, stored in bytes) */
static const size_t SmallHeapSizeMaxBytes = 100 * 1024 * 1024;

/* JSGC_LARGE_HEAP_SIZE_MIN (parameter is set in MB, stored in bytes) */
static const size_t LargeHeapSizeMinBytes = 500 * 1024 * 1024;

/* JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH */
static const double HighFrequencySmallHeapGrowth = 3.0;

/* JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH */
static const double HighFrequencyLargeHeapGrowth = 1.5;

/* JSGC_LOW_FREQUENCY_HEAP_GROWTH */
static const double LowFrequencyHeapGrowth = 1.5;

/* JSGC_MIN_EMPTY_CHUNK_COUNT */
static const uint32_t MinEmptyChunkCount = 1;

/* JSGC_MAX_EMPTY_CHUNK_COUNT */
static const uint32_t MaxEmptyChunkCount = 30;

/* JSGC_SLICE_TIME_BUDGET_MS */
static const int64_t DefaultTimeBudgetMS = 0;  // Unlimited by default.

/* JSGC_INCREMENTAL_GC_ENABLED */
static const bool IncrementalGCEnabled = false;

/* JSGC_PER_ZONE_GC_ENABLED */
static const bool PerZoneGCEnabled = false;

/* JSGC_COMPACTING_ENABLED */
static const bool CompactingEnabled = true;

/* JSGC_INCREMENTAL_WEAKMAP_ENABLED */
static const bool IncrementalWeakMapMarkingEnabled = true;

/* JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION */
static const uint32_t NurseryFreeThresholdForIdleCollection = ChunkSize / 4;

/* JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT */
static const double NurseryFreeThresholdForIdleCollectionFraction = 0.25;

/* JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS */
static const uint32_t NurseryTimeoutForIdleCollectionMS = 5000;

/* JSGC_PRETENURE_THRESHOLD */
static const double PretenureThreshold = 0.6;

/* JSGC_PRETENURE_GROUP_THRESHOLD */
static const double PretenureGroupThreshold = 3000;

/* JSGC_PRETENURE_STRING_THRESHOLD */
static const double PretenureStringThreshold = 0.55;

/* JSGC_STOP_PRETENURE_STRING_THRESHOLD */
static const double StopPretenureStringThreshold = 0.9;

/* JSGC_MIN_LAST_DITCH_GC_PERIOD */
static const auto MinLastDitchGCPeriod = 60;  // in seconds

/* JSGC_MALLOC_THRESHOLD_BASE */
static const size_t MallocThresholdBase = 38 * 1024 * 1024;

/* JSGC_HELPER_THREAD_RATIO */
static const double HelperThreadRatio = 0.5;

/* JSGC_MAX_HELPER_THREADS */
static const size_t MaxHelperThreads = 8;

/* JSGC_URGENT_THRESHOLD_MB (parameter is set in MB, stored in bytes) */
static const size_t UrgentThresholdBytes = 16 * 1024 * 1024;

}  // namespace TuningDefaults
450 
/*
 * Encapsulates all of the GC tunables. These are effectively constant and
 * should only be modified by setParameter.
 *
 * Defaults for most of these live in the TuningDefaults namespace above;
 * the comment on each member names the JSGCParamKey that controls it.
 */
class GCSchedulingTunables {
  /*
   * JSGC_MAX_BYTES
   *
   * Maximum nominal heap before last ditch GC.
   */
  UnprotectedData<size_t> gcMaxBytes_;

  /*
   * JSGC_MIN_NURSERY_BYTES
   * JSGC_MAX_NURSERY_BYTES
   *
   * Minimum and maximum nursery size for each runtime.
   */
  MainThreadData<size_t> gcMinNurseryBytes_;
  MainThreadData<size_t> gcMaxNurseryBytes_;

  /*
   * JSGC_ALLOCATION_THRESHOLD
   *
   * The base value used to compute zone->threshold.bytes(). When
   * gcHeapSize.bytes() exceeds threshold.bytes() for a zone, the zone may be
   * scheduled for a GC, depending on the exact circumstances.
   */
  MainThreadOrGCTaskData<size_t> gcZoneAllocThresholdBase_;

  /*
   * JSGC_SMALL_HEAP_INCREMENTAL_LIMIT
   *
   * Multiple of threshold.bytes() which triggers a non-incremental GC.
   */
  UnprotectedData<double> smallHeapIncrementalLimit_;

  /*
   * JSGC_LARGE_HEAP_INCREMENTAL_LIMIT
   *
   * Multiple of threshold.bytes() which triggers a non-incremental GC.
   */
  UnprotectedData<double> largeHeapIncrementalLimit_;

  /*
   * Number of bytes to allocate between incremental slices in GCs triggered
   * by the zone allocation threshold.
   *
   * This value does not have a JSGCParamKey parameter yet.
   */
  UnprotectedData<size_t> zoneAllocDelayBytes_;

  /*
   * JSGC_HIGH_FREQUENCY_TIME_LIMIT
   *
   * We enter high-frequency mode if we GC twice within this amount of time.
   */
  MainThreadOrGCTaskData<mozilla::TimeDuration> highFrequencyThreshold_;

  /*
   * JSGC_SMALL_HEAP_SIZE_MAX
   * JSGC_LARGE_HEAP_SIZE_MIN
   * JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH
   * JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH
   *
   * When in the |highFrequencyGC| mode, these parameterize the per-zone
   * "HeapGrowthFactor" computation.
   */
  MainThreadOrGCTaskData<size_t> smallHeapSizeMaxBytes_;
  MainThreadOrGCTaskData<size_t> largeHeapSizeMinBytes_;
  MainThreadOrGCTaskData<double> highFrequencySmallHeapGrowth_;
  MainThreadOrGCTaskData<double> highFrequencyLargeHeapGrowth_;

  /*
   * JSGC_LOW_FREQUENCY_HEAP_GROWTH
   *
   * When not in |highFrequencyGC| mode, this is the global (stored per-zone)
   * "HeapGrowthFactor".
   */
  MainThreadOrGCTaskData<double> lowFrequencyHeapGrowth_;

  /*
   * JSGC_MIN_EMPTY_CHUNK_COUNT
   * JSGC_MAX_EMPTY_CHUNK_COUNT
   *
   * Controls the number of empty chunks reserved for future allocation.
   */
  UnprotectedData<uint32_t> minEmptyChunkCount_;
  UnprotectedData<uint32_t> maxEmptyChunkCount_;

  /*
   * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION
   * JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_FRACTION
   *
   * Attempt to run a minor GC in the idle time if the free space falls
   * below this threshold. The absolute threshold is used when the nursery is
   * large and the percentage when it is small.  See Nursery::shouldCollect()
   */
  UnprotectedData<uint32_t> nurseryFreeThresholdForIdleCollection_;
  UnprotectedData<double> nurseryFreeThresholdForIdleCollectionFraction_;

  /* See JSGC_NURSERY_TIMEOUT_FOR_IDLE_COLLECTION_MS. */
  MainThreadData<mozilla::TimeDuration> nurseryTimeoutForIdleCollection_;

  /*
   * JSGC_PRETENURE_THRESHOLD
   *
   * Fraction of objects tenured to trigger pretenuring (between 0 and 1). If
   * this fraction is met, the GC proceeds to calculate which objects will be
   * tenured. If this is 1.0f (actually if it is not < 1.0f) then pretenuring
   * is disabled.
   */
  UnprotectedData<double> pretenureThreshold_;

  /*
   * JSGC_PRETENURE_GROUP_THRESHOLD
   *
   * During a single nursery collection, if this many objects from the same
   * object group are tenured, then that group will be pretenured.
   */
  UnprotectedData<uint32_t> pretenureGroupThreshold_;

  /*
   * JSGC_PRETENURE_STRING_THRESHOLD
   *
   * If the percentage of the tenured strings exceeds this threshold, string
   * will be allocated in tenured heap instead. (Default is allocated in
   * nursery.)
   */
  MainThreadData<double> pretenureStringThreshold_;

  /*
   * JSGC_STOP_PRETENURE_STRING_THRESHOLD
   *
   * If the finalization rate of the tenured strings exceeds this threshold,
   * string will be allocated in nursery.
   */
  MainThreadData<double> stopPretenureStringThreshold_;

  /*
   * JSGC_MIN_LAST_DITCH_GC_PERIOD
   *
   * Last ditch GC is skipped if allocation failure occurs less than this many
   * seconds from the previous one.
   */
  MainThreadData<mozilla::TimeDuration> minLastDitchGCPeriod_;

  /*
   * JSGC_MALLOC_THRESHOLD_BASE
   *
   * The base value used to compute the GC trigger for malloc allocated memory.
   */
  MainThreadData<size_t> mallocThresholdBase_;

  /*
   * JSGC_URGENT_THRESHOLD_MB
   *
   * NOTE(review): the previous comment here was duplicated from
   * mallocThresholdBase_ above. Going by the name, this is presumably the
   * byte threshold at which GC work becomes urgent; confirm against the
   * callers of urgentThresholdBytes() before relying on this description.
   */
  MainThreadData<size_t> urgentThresholdBytes_;

 public:
  /* Initializes every tunable to its TuningDefaults value. */
  GCSchedulingTunables();

  /* Read accessors; see the matching member comments above for semantics. */
  size_t gcMaxBytes() const { return gcMaxBytes_; }
  size_t gcMinNurseryBytes() const { return gcMinNurseryBytes_; }
  size_t gcMaxNurseryBytes() const { return gcMaxNurseryBytes_; }
  size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
  double smallHeapIncrementalLimit() const {
    return smallHeapIncrementalLimit_;
  }
  double largeHeapIncrementalLimit() const {
    return largeHeapIncrementalLimit_;
  }
  size_t zoneAllocDelayBytes() const { return zoneAllocDelayBytes_; }
  const mozilla::TimeDuration& highFrequencyThreshold() const {
    return highFrequencyThreshold_;
  }
  size_t smallHeapSizeMaxBytes() const { return smallHeapSizeMaxBytes_; }
  size_t largeHeapSizeMinBytes() const { return largeHeapSizeMinBytes_; }
  double highFrequencySmallHeapGrowth() const {
    return highFrequencySmallHeapGrowth_;
  }
  double highFrequencyLargeHeapGrowth() const {
    return highFrequencyLargeHeapGrowth_;
  }
  double lowFrequencyHeapGrowth() const { return lowFrequencyHeapGrowth_; }
  /* The lock parameter documents that callers must hold the GC lock. */
  unsigned minEmptyChunkCount(const AutoLockGC&) const {
    return minEmptyChunkCount_;
  }
  unsigned maxEmptyChunkCount() const { return maxEmptyChunkCount_; }
  uint32_t nurseryFreeThresholdForIdleCollection() const {
    return nurseryFreeThresholdForIdleCollection_;
  }
  double nurseryFreeThresholdForIdleCollectionFraction() const {
    return nurseryFreeThresholdForIdleCollectionFraction_;
  }
  mozilla::TimeDuration nurseryTimeoutForIdleCollection() const {
    return nurseryTimeoutForIdleCollection_;
  }

  /* Pretenuring is enabled only while the threshold is strictly below 1. */
  bool attemptPretenuring() const { return pretenureThreshold_ < 1.0; }
  double pretenureThreshold() const { return pretenureThreshold_; }
  uint32_t pretenureGroupThreshold() const { return pretenureGroupThreshold_; }
  double pretenureStringThreshold() const { return pretenureStringThreshold_; }
  double stopPretenureStringThreshold() const {
    return stopPretenureStringThreshold_;
  }

  mozilla::TimeDuration minLastDitchGCPeriod() const {
    return minLastDitchGCPeriod_;
  }

  size_t mallocThresholdBase() const { return mallocThresholdBase_; }

  size_t urgentThresholdBytes() const { return urgentThresholdBytes_; }

  /*
   * Sets the tunable identified by |key|, returning false on invalid input;
   * resetParameter restores the TuningDefaults value. Both require the GC
   * lock.
   */
  [[nodiscard]] bool setParameter(JSGCParamKey key, uint32_t value,
                                  const AutoLockGC& lock);
  void resetParameter(JSGCParamKey key, const AutoLockGC& lock);

 private:
  /* Internal setters used by setParameter/resetParameter. */
  void setSmallHeapSizeMaxBytes(size_t value);
  void setLargeHeapSizeMinBytes(size_t value);
  void setHighFrequencySmallHeapGrowth(double value);
  void setHighFrequencyLargeHeapGrowth(double value);
  void setLowFrequencyHeapGrowth(double value);
  void setMinEmptyChunkCount(uint32_t value);
  void setMaxEmptyChunkCount(uint32_t value);

  /* Unit conversions for *_MB / *_KB parameters; return false on overflow. */
  static bool megabytesToBytes(uint32_t value, size_t* bytesOut);
  static bool kilobytesToBytes(uint32_t value, size_t* bytesOut);
};
685 
/* Mutable scheduling state, as opposed to the effectively-constant tunables
 * above. */
class GCSchedulingState {
  /*
   * Influences how we schedule and run GC's in several subtle ways. The most
   * important factor is in how it controls the "HeapGrowthFactor". The
   * growth factor is a measure of how large (as a percentage of the last GC)
   * the heap is allowed to grow before we try to schedule another GC.
   *
   * Atomic because it is read outside the main thread.
   */
  mozilla::Atomic<bool, mozilla::ReleaseAcquire> inHighFrequencyGCMode_;

 public:
  /*
   * Influences the GC thresholds for the atoms zone to discourage collection of
   * this zone during page load.
   */
  MainThreadOrGCTaskData<bool> inPageLoad;

  GCSchedulingState() : inHighFrequencyGCMode_(false) {}

  bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }

  /*
   * Recomputes the high-frequency flag from the time elapsed since the last
   * GC and the tunables' highFrequencyThreshold().
   */
  void updateHighFrequencyMode(const mozilla::TimeStamp& lastGCTime,
                               const mozilla::TimeStamp& currentTime,
                               const GCSchedulingTunables& tunables);
  /* Updates the high-frequency flag based on the GC's trigger reason. */
  void updateHighFrequencyModeForReason(JS::GCReason reason);
};
711 
/*
 * Result of a GC-trigger check: whether a GC should be triggered, plus the
 * used and threshold byte counts that were compared to decide.
 */
struct TriggerResult {
  bool shouldTrigger;
  size_t usedBytes;
  size_t thresholdBytes;
};

/*
 * Byte counter that may be updated from multiple threads (e.g. the active
 * thread and GC helper threads), hence the atomic with release/acquire
 * ordering.
 */
using AtomicByteCount = mozilla::Atomic<size_t, mozilla::ReleaseAcquire>;
719 
720 /*
721  * Tracks the size of allocated data. This is used for both GC and malloc data.
722  * It automatically maintains the memory usage relationship between parent and
723  * child instances, i.e. between those in a GCRuntime and its Zones.
724  */
725 class HeapSize {
726   /*
727    * An instance that contains our parent's heap usage, or null if this is the
728    * top-level usage container.
729    */
730   HeapSize* const parent_;
731 
732   /*
733    * The number of bytes in use. For GC heaps this is approximate to the nearest
734    * ArenaSize. It is atomic because it is updated by both the active and GC
735    * helper threads.
736    */
737   AtomicByteCount bytes_;
738 
739   /*
740    * The number of bytes retained after the last collection. This is updated
741    * dynamically during incremental GC. It does not include allocations that
742    * happen during a GC.
743    */
744   AtomicByteCount retainedBytes_;
745 
746  public:
HeapSize(HeapSize * parent)747   explicit HeapSize(HeapSize* parent) : parent_(parent), bytes_(0) {}
748 
bytes()749   size_t bytes() const { return bytes_; }
retainedBytes()750   size_t retainedBytes() const { return retainedBytes_; }
751 
updateOnGCStart()752   void updateOnGCStart() { retainedBytes_ = size_t(bytes_); }
753 
addGCArena()754   void addGCArena() { addBytes(ArenaSize); }
removeGCArena()755   void removeGCArena() {
756     MOZ_ASSERT(retainedBytes_ >= ArenaSize);
757     removeBytes(ArenaSize, true /* only sweeping removes arenas */);
758   }
759 
addBytes(size_t nbytes)760   void addBytes(size_t nbytes) {
761     mozilla::DebugOnly<size_t> initialBytes(bytes_);
762     MOZ_ASSERT(initialBytes + nbytes > initialBytes);
763     bytes_ += nbytes;
764     if (parent_) {
765       parent_->addBytes(nbytes);
766     }
767   }
removeBytes(size_t nbytes,bool wasSwept)768   void removeBytes(size_t nbytes, bool wasSwept) {
769     if (wasSwept) {
770       // TODO: We would like to assert that retainedBytes_ >= nbytes is here but
771       // we can't do that yet, so clamp the result to zero.
772       retainedBytes_ = nbytes <= retainedBytes_ ? retainedBytes_ - nbytes : 0;
773     }
774     MOZ_ASSERT(bytes_ >= nbytes);
775     bytes_ -= nbytes;
776     if (parent_) {
777       parent_->removeBytes(nbytes, wasSwept);
778     }
779   }
780 };
781 
// Heap size thresholds used to trigger GC. This is an abstract base class for
// GC heap and malloc thresholds defined below.
class HeapThreshold {
 protected:
  // All thresholds start out at SIZE_MAX, the sentinel for "not set" (see
  // hasSliceThreshold below), until a derived class computes real values.
  HeapThreshold()
      : startBytes_(SIZE_MAX),
        incrementalLimitBytes_(SIZE_MAX),
        sliceBytes_(SIZE_MAX) {}

  // The threshold at which to start a new incremental collection.
  MainThreadOrGCTaskData<size_t> startBytes_;

  // The threshold at which to start a new non-incremental collection or finish
  // an ongoing collection non-incrementally.
  MainThreadData<size_t> incrementalLimitBytes_;

  // The threshold at which to trigger a slice during an ongoing incremental
  // collection.
  MainThreadData<size_t> sliceBytes_;

 public:
  size_t startBytes() const { return startBytes_; }
  size_t sliceBytes() const { return sliceBytes_; }
  size_t incrementalLimitBytes() const { return incrementalLimitBytes_; }
  double eagerAllocTrigger(bool highFrequencyGC) const;
  size_t incrementalBytesRemaining(const HeapSize& heapSize) const;

  void setSliceThreshold(ZoneAllocator* zone, const HeapSize& heapSize,
                         const GCSchedulingTunables& tunables,
                         bool waitingOnBGTask);
  // SIZE_MAX means "no slice threshold set".
  void clearSliceThreshold() { sliceBytes_ = SIZE_MAX; }
  bool hasSliceThreshold() const { return sliceBytes_ != SIZE_MAX; }

 protected:
  static double computeZoneHeapGrowthFactorForHeapSize(
      size_t lastBytes, const GCSchedulingTunables& tunables,
      const GCSchedulingState& state);

  void setIncrementalLimitFromStartBytes(size_t retainedBytes,
                                         const GCSchedulingTunables& tunables);
};
823 
// A heap threshold that is based on a multiple of the retained size after the
// last collection adjusted based on collection frequency and retained
// size. This is used to determine when to do a zone GC based on GC heap size.
class GCHeapThreshold : public HeapThreshold {
 public:
  // Recompute startBytes_ from the retained size |lastBytes|; the atoms zone
  // gets special treatment (see isAtomsZone).
  void updateStartThreshold(size_t lastBytes,
                            const GCSchedulingTunables& tunables,
                            const GCSchedulingState& state, bool isAtomsZone,
                            const AutoLockGC& lock);

 private:
  static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
                                        const GCSchedulingTunables& tunables,
                                        const AutoLockGC& lock);
};
839 
// A heap threshold that is calculated as a constant multiple of the retained
// size after the last collection. This is used to determine when to do a zone
// GC based on malloc data.
class MallocHeapThreshold : public HeapThreshold {
 public:
  // Recompute startBytes_ from the retained malloc size |lastBytes|.
  void updateStartThreshold(size_t lastBytes,
                            const GCSchedulingTunables& tunables,
                            const GCSchedulingState& state,
                            const AutoLockGC& lock);

 private:
  static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
                                        size_t baseBytes,
                                        const AutoLockGC& lock);
};
855 
// A fixed threshold that's used to determine when we need to do a zone GC based
// on allocated JIT code.
class JitHeapThreshold : public HeapThreshold {
 public:
  // The JIT threshold is constant: startBytes_ is set once at construction
  // and never recomputed.
  explicit JitHeapThreshold(size_t bytes) { startBytes_ = bytes; }
};
862 
// Accounting record for one shared allocation tracked by SharedMemoryMap
// below: how many associations reference it and how many bytes it accounts
// for. In debug builds the MemoryUse is recorded too, for checking.
struct SharedMemoryUse {
  explicit SharedMemoryUse(MemoryUse use) : count(0), nbytes(0) {
#ifdef DEBUG
    this->use = use;
#endif
  }

  // Number of associations currently referencing the allocation.
  size_t count;
  // Bytes accounted for this allocation.
  size_t nbytes;
#ifdef DEBUG
  MemoryUse use;
#endif
};
876 
// A map which tracks shared memory uses (shared in the sense that an
// allocation can be referenced by more than one GC thing in a zone). This
// allows us to only account for the memory once.
using SharedMemoryMap =
    HashMap<void*, SharedMemoryUse, DefaultHasher<void*>, SystemAllocPolicy>;
882 
883 #ifdef DEBUG
884 
// Counts memory associated with GC things in a zone.
//
// This records details of the cell (or non-cell pointer) the memory allocation
// is associated with to check the correctness of the information provided. This
// is not present in opt builds.
class MemoryTracker {
 public:
  MemoryTracker();
  // Update stored cell pointers after a moving GC.
  void fixupAfterMovingGC();
  // Check that all tracked memory has been untracked before destruction.
  void checkEmptyOnDestroy();

  // Take over all entries from |other| (e.g. when merging zones).
  void adopt(MemoryTracker& other);

  // Track memory by associated GC thing pointer.
  void trackGCMemory(Cell* cell, size_t nbytes, MemoryUse use);
  void untrackGCMemory(Cell* cell, size_t nbytes, MemoryUse use);
  void swapGCMemory(Cell* a, Cell* b, MemoryUse use);

  // Track memory by associated non-GC thing pointer.
  void registerNonGCMemory(void* ptr, MemoryUse use);
  void unregisterNonGCMemory(void* ptr, MemoryUse use);
  void moveNonGCMemory(void* dst, void* src, MemoryUse use);
  void incNonGCMemory(void* ptr, size_t nbytes, MemoryUse use);
  void decNonGCMemory(void* ptr, size_t nbytes, MemoryUse use);

 private:
  // Map key combining a (cell or raw) pointer with its MemoryUse.
  template <typename Ptr>
  struct Key {
    Key(Ptr* ptr, MemoryUse use);
    Ptr* ptr() const;
    MemoryUse use() const;

   private:
#  ifdef JS_64BIT
    // Pack this into a single word on 64 bit platforms.
    uintptr_t ptr_ : 56;
    uintptr_t use_ : 8;
#  else
    uintptr_t ptr_ : 32;
    uintptr_t use_ : 8;
#  endif
  };

  template <typename Ptr>
  struct Hasher {
    using KeyT = Key<Ptr>;
    using Lookup = KeyT;
    static HashNumber hash(const Lookup& l);
    static bool match(const KeyT& key, const Lookup& l);
    static void rekey(KeyT& k, const KeyT& newKey);
  };

  template <typename Ptr>
  using Map = HashMap<Key<Ptr>, size_t, Hasher<Ptr>, SystemAllocPolicy>;
  using GCMap = Map<Cell>;
  using NonGCMap = Map<void>;

  static bool isGCMemoryUse(MemoryUse use);
  static bool isNonGCMemoryUse(MemoryUse use);
  static bool allowMultipleAssociations(MemoryUse use);

  size_t getAndRemoveEntry(const Key<Cell>& key, LockGuard<Mutex>& lock);

  // Guards both maps; tracking can happen off the main thread.
  Mutex mutex;

  // Map containing the allocated size associated with (cell, use) pairs.
  GCMap gcMap;

  // Map containing the allocated size associated with (non-cell pointer, use)
  // pairs.
  NonGCMap nonGCMap;
};
956 
957 #endif  // DEBUG
958 
LinearInterpolate(double x,double x0,double y0,double x1,double y1)959 static inline double LinearInterpolate(double x, double x0, double y0,
960                                        double x1, double y1) {
961   MOZ_ASSERT(x0 < x1);
962 
963   if (x < x0) {
964     return y0;
965   }
966 
967   if (x < x1) {
968     return y0 + (y1 - y0) * ((x - x0) / (x1 - x0));
969   }
970 
971   return y1;
972 }
973 
974 }  // namespace gc
975 }  // namespace js
976 
977 #endif  // gc_Scheduling_h
978