1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4 // optimize for speed
5 
6 
7 #ifndef _DEBUG
8 #ifdef _MSC_VER
9 #pragma optimize( "t", on )
10 #endif
11 #endif
12 #define inline __forceinline
13 
14 #include "gc.h"
15 
16 //#define DT_LOG
17 
18 #include "gcrecord.h"
19 
20 #ifdef _MSC_VER
21 #pragma warning(disable:4293)
22 #pragma warning(disable:4477)
23 #endif //_MSC_VER
24 
25 inline void FATAL_GC_ERROR()
26 {
27 #ifndef DACCESS_COMPILE
28     GCToOSInterface::DebugBreak();
29 #endif // DACCESS_COMPILE
30     _ASSERTE(!"Fatal Error in GC.");
31     EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
32 }
33 
34 #ifdef _MSC_VER
35 #pragma inline_depth(20)
36 #endif
37 
38 /* the following section defines the optional features */
39 
40 // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
41 // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
42 // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much
43 // work to make FEATURE_STRUCTALIGN not apply to LOH so they can be both
44 // turned on.
45 #define FEATURE_LOH_COMPACTION
46 
47 #ifdef FEATURE_64BIT_ALIGNMENT
48 // We need the following feature as part of keeping 64-bit types aligned in the GC heap.
49 #define RESPECT_LARGE_ALIGNMENT //used to keep "double" objects aligned during
50                                 //relocation
51 #endif //FEATURE_64BIT_ALIGNMENT
52 
53 #define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items
54 
55 #ifdef SHORT_PLUGS
56 #define DESIRED_PLUG_LENGTH (1000)
57 #endif //SHORT_PLUGS
58 
59 #define FEATURE_PREMORTEM_FINALIZATION
60 #define GC_HISTORY
61 
62 #ifndef FEATURE_REDHAWK
63 #define HEAP_ANALYZE
64 #define COLLECTIBLE_CLASS
65 #endif // !FEATURE_REDHAWK
66 
67 #ifdef HEAP_ANALYZE
68 #define initial_internal_roots        (1024*16)
69 #endif // HEAP_ANALYZE
70 
71 #define MARK_LIST         //used sorted list to speed up plan phase
72 
73 #define BACKGROUND_GC   //concurrent background GC (requires WRITE_WATCH)
74 
75 #ifdef SERVER_GC
76 #define MH_SC_MARK //scalable marking
77 //#define SNOOP_STATS //diagnostic
78 #define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel
79 #endif //SERVER_GC
80 
81 //This is used to mark a type as volatile only when scalable marking is used.
82 #if defined (SERVER_GC) && defined (MH_SC_MARK)
83 #define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x)
84 #else //SERVER_GC&&MH_SC_MARK
85 #define SERVER_SC_MARK_VOLATILE(x) x
86 #endif //SERVER_GC&&MH_SC_MARK
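
// Illustrative usage (hypothetical field name): a declaration such as
//     SERVER_SC_MARK_VOLATILE(uint8_t*) next_entry;
// expands to VOLATILE(uint8_t*) next_entry; when SERVER_GC and MH_SC_MARK are
// both defined, and to a plain uint8_t* next_entry; otherwise.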
87 
88 //#define MULTIPLE_HEAPS         //Allow multiple heaps for servers
89 
90 #define INTERIOR_POINTERS   //Allow interior pointers in the code manager
91 
92 #define CARD_BUNDLE         //enable card bundle feature.(requires WRITE_WATCH)
93 
94 // If this is defined we use a map for segments in order to find the heap for
95 // a segment fast. But it does use more memory as we have to cover the whole
96 // heap range and for each entry we allocate a struct of 5 ptr-size words
97 // (3 for WKS as there's only one heap).
98 #define SEG_MAPPING_TABLE
99 
100 // If allocating the heap mapping table for the available VA consumes too
101 // much memory, you can enable this to allocate only the portion that
102 // corresponds to rw segments and grow it when needed in grow_brick_card_table.
103 // However in heap_of you will need to always compare the address with
104 // g_lowest/highest before you can look at the heap mapping table.
105 #define GROWABLE_SEG_MAPPING_TABLE
106 
107 #ifdef BACKGROUND_GC
108 #define MARK_ARRAY      //Mark bit in an array
109 #endif //BACKGROUND_GC
110 
111 #if defined(BACKGROUND_GC) || defined (CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
112 #define WRITE_WATCH     //Write Watch feature
113 #endif //BACKGROUND_GC || CARD_BUNDLE || FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
114 
115 #ifdef WRITE_WATCH
116 #define array_size 100
117 #endif //WRITE_WATCH
118 
119 //#define SHORT_PLUGS           //keep plug short
120 
121 #define FFIND_OBJECT        //faster find_object, slower allocation
122 #define FFIND_DECAY  7      //Number of GCs for which fast find will be active
123 
124 //#define NO_WRITE_BARRIER  //no write barrier, use Write Watch feature
125 
126 //#define DEBUG_WRITE_WATCH //Additional debug for write watch
127 
128 //#define STRESS_PINNING    //Stress pinning by pinning randomly
129 
130 //#define TRACE_GC          //debug trace gc operation
131 //#define SIMPLE_DPRINTF
132 
133 //#define TIME_GC           //time allocation and garbage collection
134 //#define TIME_WRITE_WATCH  //time GetWriteWatch and ResetWriteWatch calls
135 //#define COUNT_CYCLES  //Use cycle counter for timing
136 //#define JOIN_STATS         //amount of time spent in the join
137 //also, see TIME_SUSPEND in switches.h.
138 
139 //#define SYNCHRONIZATION_STATS
140 //#define SEG_REUSE_STATS
141 
142 #if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
143 #define BEGIN_TIMING(x) \
144     int64_t x##_start; \
145     x##_start = GCToOSInterface::QueryPerformanceCounter()
146 
147 #define END_TIMING(x) \
148     int64_t x##_end; \
149     x##_end = GCToOSInterface::QueryPerformanceCounter(); \
150     x += x##_end - x##_start
151 
152 #else
153 #define BEGIN_TIMING(x)
154 #define END_TIMING(x)
155 #define BEGIN_TIMING_CYCLES(x)
156 #define END_TIMING_CYCLES(x)
157 #endif //SYNCHRONIZATION_STATS || STAGE_STATS
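
// Illustrative usage (assuming a pre-declared int64_t accumulator, here called
// suspend_total, since END_TIMING adds the elapsed ticks into its argument):
//     BEGIN_TIMING(suspend_total);
//     ... code being timed ...
//     END_TIMING(suspend_total);
// Both macros compile to nothing when neither SYNCHRONIZATION_STATS nor
// STAGE_STATS is defined.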
158 
159 /* End of optional features */
160 
161 #ifdef GC_CONFIG_DRIVEN
162 void GCLogConfig (const char *fmt, ... );
163 #define cprintf(x) {GCLogConfig x;}
164 #endif //GC_CONFIG_DRIVEN
165 
166 #ifdef _DEBUG
167 #define TRACE_GC
168 #endif
169 
170 #define NUMBERGENERATIONS   4               //Max number of generations
171 
172 // For the bestfit algorithm when we relocate ephemeral generations into an
173 // existing gen2 segment.
174 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
175 #define MIN_INDEX_POWER2 6
176 
177 #ifdef SERVER_GC
178 
179 #ifdef BIT64
180 #define MAX_INDEX_POWER2 30
181 #else
182 #define MAX_INDEX_POWER2 26
183 #endif  // BIT64
184 
185 #else //SERVER_GC
186 
187 #ifdef BIT64
188 #define MAX_INDEX_POWER2 28
189 #else
190 #define MAX_INDEX_POWER2 24
191 #endif  // BIT64
192 
193 #endif //SERVER_GC
194 
195 #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1)
196 
197 #define MAX_NUM_FREE_SPACES 200
198 #define MIN_NUM_FREE_SPACES 5
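
// For example, under the definitions above the server 64-bit build gets
// MAX_NUM_BUCKETS = 30 - 6 + 1 = 25 buckets (matching the "25 sizes total"
// note), while the workstation 32-bit build gets 24 - 6 + 1 = 19.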
199 
200 //Please leave these definitions intact.
201 
202 #define CLREvent CLREventStatic
203 
204 // hosted api
205 #ifdef memcpy
206 #undef memcpy
207 #endif //memcpy
208 
209 #ifdef FEATURE_STRUCTALIGN
210 #define REQD_ALIGN_DCL ,int requiredAlignment
211 #define REQD_ALIGN_ARG ,requiredAlignment
212 #define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset
213 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0
214 #define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset
215 #else // FEATURE_STRUCTALIGN
216 #define REQD_ALIGN_DCL
217 #define REQD_ALIGN_ARG
218 #define REQD_ALIGN_AND_OFFSET_DCL
219 #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL
220 #define REQD_ALIGN_AND_OFFSET_ARG
221 #endif // FEATURE_STRUCTALIGN
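
// Illustrative usage (hypothetical helper name): a declaration written as
//     uint8_t* allocate_in_gen (size_t size REQD_ALIGN_AND_OFFSET_DCL);
// picks up ", int requiredAlignment, size_t alignmentOffset" only when
// FEATURE_STRUCTALIGN is defined; call sites forward the extra arguments with
// REQD_ALIGN_AND_OFFSET_ARG and otherwise compile to the plain one-argument form.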
222 
223 #ifdef MULTIPLE_HEAPS
224 #define THREAD_NUMBER_DCL ,int thread
225 #define THREAD_NUMBER_ARG ,thread
226 #define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number;
227 #define THREAD_FROM_HEAP  int thread = heap_number;
228 #define HEAP_FROM_THREAD  gc_heap* hpt = gc_heap::g_heaps[thread];
229 #else
230 #define THREAD_NUMBER_DCL
231 #define THREAD_NUMBER_ARG
232 #define THREAD_NUMBER_FROM_CONTEXT
233 #define THREAD_FROM_HEAP
234 #define HEAP_FROM_THREAD  gc_heap* hpt = 0;
235 #endif //MULTIPLE_HEAPS
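
// Illustrative usage (hypothetical routine name): a helper declared as
//     void relocate_in_gen (int gen_num THREAD_NUMBER_DCL);
// gains a trailing ", int thread" parameter only under MULTIPLE_HEAPS, and its
// body can then use HEAP_FROM_THREAD to get "gc_heap* hpt" for that thread
// (hpt is simply 0 in the workstation build).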
236 
237 //These constants are ordered
238 const int policy_sweep = 0;
239 const int policy_compact = 1;
240 const int policy_expand  = 2;
241 
242 #ifdef TRACE_GC
243 
244 
245 extern int     print_level;
246 extern BOOL    trace_gc;
247 extern int    gc_trace_fac;
248 
249 
250 class hlet
251 {
252     static hlet* bindings;
253     int prev_val;
254     int* pval;
255     hlet* prev_let;
256 public:
257     hlet (int& place, int value)
258     {
259         prev_val = place;
260         pval = &place;
261         place = value;
262         prev_let = bindings;
263         bindings = this;
264     }
265     ~hlet ()
266     {
267         *pval = prev_val;
268         bindings = prev_let;
269     }
270 };
271 
272 
273 #define let(p,v) hlet __x = hlet (p, v);
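
// Illustrative usage: inside a scope, "let (print_level, 9);" saves the current
// print_level, sets it to 9, and the hlet destructor restores the saved value
// when the scope exits.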
274 
275 #else //TRACE_GC
276 
277 #define gc_count    -1
278 #define let(s,v)
279 
280 #endif //TRACE_GC
281 
282 #ifdef TRACE_GC
283 #define SEG_REUSE_LOG_0 7
284 #define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
285 #define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
286 #define BGC_LOG (DT_LOG_0 + 1)
287 #define GTC_LOG (DT_LOG_0 + 2)
288 #define GC_TABLE_LOG (DT_LOG_0 + 3)
289 #define JOIN_LOG (DT_LOG_0 + 4)
290 #define SPINLOCK_LOG (DT_LOG_0 + 5)
291 #define SNOOP_LOG (DT_LOG_0 + 6)
292 
293 #ifndef DACCESS_COMPILE
294 
295 #ifdef SIMPLE_DPRINTF
296 
297 //#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
298 void GCLog (const char *fmt, ... );
299 //#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
300 //#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
301 //#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
302 //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
303 //#define dprintf(l,x) {if ((l == 1) || (l == 2222)) {GCLog x;}}
304 #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
305 //#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
306 //#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
307 //#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
308 #else //SIMPLE_DPRINTF
309 
310 // The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the
311 // reg key GCTraceFacility is set.  The stress log can only take a format string and 4 numbers or
312 // string literals.
313 #define dprintf(l,x) {if (trace_gc && (l<=print_level)) { \
314       if ( !gc_trace_fac) {printf ("\n");printf x ; fflush(stdout);} \
315       else if ( gc_trace_fac == 2) {LogSpewAlways x;LogSpewAlways ("\n");} \
316       else if ( gc_trace_fac == 1) {STRESS_LOG_VA(x);}}}
317 
318 #endif //SIMPLE_DPRINTF
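
// Illustrative usage (hypothetical arguments): dprintf takes a level (or one of
// the log categories above, e.g. GTC_LOG) and a parenthesized printf-style
// argument list:
//     dprintf (GTC_LOG, ("h%d: gc started", heap_number));
// The extra parentheses are required because the macro pastes "x" verbatim as
// the whole argument list of GCLog/printf.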
319 
320 #else //DACCESS_COMPILE
321 #define dprintf(l,x)
322 #endif //DACCESS_COMPILE
323 #else //TRACE_GC
324 #define dprintf(l,x)
325 #endif //TRACE_GC
326 
327 #ifndef FEATURE_REDHAWK
328 #undef  assert
329 #define assert _ASSERTE
330 #undef  ASSERT
331 #define ASSERT _ASSERTE
332 #endif // FEATURE_REDHAWK
333 
334 #ifdef _DEBUG
335 
336 struct GCDebugSpinLock {
337     VOLATILE(int32_t) lock;                   // -1 if free, 0 if held
338     VOLATILE(Thread *) holding_thread;     // -1 if no thread holds the lock.
339     VOLATILE(BOOL) released_by_gc_p;       // a GC thread released the lock.
340 
341     GCDebugSpinLock()
342         : lock(-1), holding_thread((Thread*) -1)
343     {
344     }
345 };
346 typedef GCDebugSpinLock GCSpinLock;
347 
348 #elif defined (SYNCHRONIZATION_STATS)
349 
350 struct GCSpinLockInstru {
351     VOLATILE(int32_t) lock;
352     // number of times we went into SwitchToThread in enter_spin_lock.
353     unsigned int num_switch_thread;
354     // number of times we went into WaitLonger.
355     unsigned int num_wait_longer;
356     // number of times we went to calling SwitchToThread in WaitLonger.
357     unsigned int num_switch_thread_w;
358     // number of times we went to calling DisablePreemptiveGC in WaitLonger.
359     unsigned int num_disable_preemptive_w;
360 
361     GCSpinLockInstru()
362         : lock(-1), num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0)
363     {
364     }
365 
366     void init()
367     {
368         num_switch_thread = 0;
369         num_wait_longer = 0;
370         num_switch_thread_w = 0;
371         num_disable_preemptive_w = 0;
372     }
373 };
374 
375 typedef GCSpinLockInstru GCSpinLock;
376 
377 #else
378 
379 struct GCDebugSpinLock {
380     VOLATILE(int32_t) lock;                   // -1 if free, 0 if held
381 
382     GCDebugSpinLock()
383         : lock(-1)
384     {
385     }
386 };
387 typedef GCDebugSpinLock GCSpinLock;
388 
389 #endif
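
// A minimal sketch (an assumption, not the enter_spin_lock logic used by
// gc.cpp) of acquiring a lock that uses the -1 (free) / 0 (held) convention
// above:
//     while (Interlocked::CompareExchange (&lock_to_take->lock, 0, -1) >= 0)
//         { /* spin, eventually yielding (SwitchToThread) */ }
// i.e. the lock is taken by swinging the field from -1 (free) to 0 (held).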
390 
391 class mark;
392 class heap_segment;
393 class CObjectHeader;
394 class l_heap;
395 class sorted_table;
396 class c_synchronize;
397 class seg_free_spaces;
398 class gc_heap;
399 
400 #ifdef BACKGROUND_GC
401 class exclusive_sync;
402 class recursive_gc_sync;
403 #endif //BACKGROUND_GC
404 
405 // The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs
406 // make sure you change that one if you change this one!
407 enum gc_pause_mode
408 {
409     pause_batch = 0, //We are not concerned about pause length
410     pause_interactive = 1,     //We are running an interactive app
411     pause_low_latency = 2,     //short pauses are essential
412     //avoid long pauses from blocking full GCs unless running out of memory
413     pause_sustained_low_latency = 3,
414     pause_no_gc = 4
415 };
416 
417 enum gc_loh_compaction_mode
418 {
419     loh_compaction_default = 1, // the default mode, don't compact LOH.
420     loh_compaction_once = 2, // only compact once the next time a blocking full GC happens.
421     loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented.
422 };
423 
424 enum set_pause_mode_status
425 {
426     set_pause_mode_success = 0,
427     set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode.
428 };
429 
430 enum gc_tuning_point
431 {
432     tuning_deciding_condemned_gen,
433     tuning_deciding_full_gc,
434     tuning_deciding_compaction,
435     tuning_deciding_expansion,
436     tuning_deciding_promote_ephemeral
437 };
438 
439 #if defined(TRACE_GC) && defined(BACKGROUND_GC)
440 static const char * const str_bgc_state[] =
441 {
442     "not_in_process",
443     "mark_handles",
444     "mark_stack",
445     "revisit_soh",
446     "revisit_loh",
447     "overflow_soh",
448     "overflow_loh",
449     "final_marking",
450     "sweep_soh",
451     "sweep_loh",
452     "plan_phase"
453 };
454 #endif // defined(TRACE_GC) && defined(BACKGROUND_GC)
455 
456 enum allocation_state
457 {
458     a_state_start = 0,
459     a_state_can_allocate,
460     a_state_cant_allocate,
461     a_state_try_fit,
462     a_state_try_fit_new_seg,
463     a_state_try_fit_new_seg_after_cg,
464     a_state_try_fit_no_seg,
465     a_state_try_fit_after_cg,
466     a_state_try_fit_after_bgc,
467     a_state_try_free_full_seg_in_bgc,
468     a_state_try_free_after_bgc,
469     a_state_try_seg_end,
470     a_state_acquire_seg,
471     a_state_acquire_seg_after_cg,
472     a_state_acquire_seg_after_bgc,
473     a_state_check_and_wait_for_bgc,
474     a_state_trigger_full_compact_gc,
475     a_state_trigger_ephemeral_gc,
476     a_state_trigger_2nd_ephemeral_gc,
477     a_state_check_retry_seg,
478     a_state_max
479 };
480 
481 enum gc_type
482 {
483     gc_type_compacting = 0,
484     gc_type_blocking = 1,
485 #ifdef BACKGROUND_GC
486     gc_type_background = 2,
487 #endif //BACKGROUND_GC
488     gc_type_max = 3
489 };
490 
491 #define v_high_memory_load_th 97
492 
493 //encapsulates the mechanism for the current gc
494 class gc_mechanisms
495 {
496 public:
497     VOLATILE(size_t) gc_index; // starts from 1 for the first GC, like dd_collection_count
498     int condemned_generation;
499     BOOL promotion;
500     BOOL compaction;
501     BOOL loh_compaction;
502     BOOL heap_expansion;
503     uint32_t concurrent;
504     BOOL demotion;
505     BOOL card_bundles;
506     int  gen0_reduction_count;
507     BOOL should_lock_elevation;
508     int elevation_locked_count;
509     BOOL elevation_reduced;
510     BOOL minimal_gc;
511     gc_reason reason;
512     gc_pause_mode pause_mode;
513     BOOL found_finalizers;
514 
515 #ifdef BACKGROUND_GC
516     BOOL background_p;
517     bgc_state b_state;
518     BOOL allocations_allowed;
519 #endif //BACKGROUND_GC
520 
521 #ifdef STRESS_HEAP
522     BOOL stress_induced;
523 #endif // STRESS_HEAP
524 
525     uint32_t entry_memory_load;
526 
527     void init_mechanisms(); //for each GC
528     void first_init(); // for the life of the EE
529 
530     void record (gc_history_global* history);
531 };
532 
533 // This is a compact version of gc_mechanisms that we use to save in the history.
534 class gc_mechanisms_store
535 {
536 public:
537     size_t gc_index;
538     bool promotion;
539     bool compaction;
540     bool loh_compaction;
541     bool heap_expansion;
542     bool concurrent;
543     bool demotion;
544     bool card_bundles;
545     bool should_lock_elevation;
546     int condemned_generation   : 8;
547     int gen0_reduction_count   : 8;
548     int elevation_locked_count : 8;
549     gc_reason reason           : 8;
550     gc_pause_mode pause_mode   : 8;
551 #ifdef BACKGROUND_GC
552     bgc_state b_state          : 8;
553 #endif //BACKGROUND_GC
554     bool found_finalizers;
555 
556 #ifdef BACKGROUND_GC
557     bool background_p;
558 #endif //BACKGROUND_GC
559 
560 #ifdef STRESS_HEAP
561     bool stress_induced;
562 #endif // STRESS_HEAP
563 
564 #ifdef BIT64
565     uint32_t entry_memory_load;
566 #endif // BIT64
567 
568     void store (gc_mechanisms* gm)
569     {
570         gc_index                = gm->gc_index;
571         condemned_generation    = gm->condemned_generation;
572         promotion               = (gm->promotion != 0);
573         compaction              = (gm->compaction != 0);
574         loh_compaction          = (gm->loh_compaction != 0);
575         heap_expansion          = (gm->heap_expansion != 0);
576         concurrent              = (gm->concurrent != 0);
577         demotion                = (gm->demotion != 0);
578         card_bundles            = (gm->card_bundles != 0);
579         gen0_reduction_count    = gm->gen0_reduction_count;
580         should_lock_elevation   = (gm->should_lock_elevation != 0);
581         elevation_locked_count  = gm->elevation_locked_count;
582         reason                  = gm->reason;
583         pause_mode              = gm->pause_mode;
584         found_finalizers        = (gm->found_finalizers != 0);
585 
586 #ifdef BACKGROUND_GC
587         background_p            = (gm->background_p != 0);
588         b_state                 = gm->b_state;
589 #endif //BACKGROUND_GC
590 
591 #ifdef STRESS_HEAP
592         stress_induced          = (gm->stress_induced != 0);
593 #endif // STRESS_HEAP
594 
595 #ifdef BIT64
596         entry_memory_load       = gm->entry_memory_load;
597 #endif // BIT64
598     }
599 };
600 
601 #ifdef GC_STATS
602 
603 // GC specific statistics, tracking counts and timings for GCs occurring in the system.
604 // This writes the statistics to a file every 60 seconds, if a file is specified in
605 // COMPlus_GcMixLog
606 
607 struct GCStatistics
608     : public StatisticsBase
609 {
610     // initialized to the contents of COMPlus_GcMixLog, or NULL, if not present
611     static TCHAR* logFileName;
612     static FILE*  logFile;
613 
614     // number of times we executed a background GC, a foreground GC, or a
615     // non-concurrent GC
616     int cntBGC, cntFGC, cntNGC;
617 
618     // min, max, and total time spent performing BGCs, FGCs, NGCs
619     // (BGC time includes everything from the moment the BGC starts until
620     // it completes, i.e. the times of all FGCs occurring concurrently)
621     MinMaxTot bgc, fgc, ngc;
622 
623     // number of times we executed a compacting GC (sweeping counts can be derived)
624     int cntCompactNGC, cntCompactFGC;
625 
626     // count of reasons
627     int cntReasons[reason_max];
628 
629     // count of condemned generation, by NGC and FGC:
630     int cntNGCGen[max_generation+1];
631     int cntFGCGen[max_generation];
632 
633     ///////////////////////////////////////////////////////////////////////////////////////////////
634     // Internal mechanism:
635 
636     virtual void Initialize();
637     virtual void DisplayAndUpdate();
638 
639     // Public API
640 
641     static BOOL Enabled()
642     { return logFileName != NULL; }
643 
644     void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec);
645 };
646 
647 extern GCStatistics g_GCStatistics;
648 extern GCStatistics g_LastGCStatistics;
649 
650 #endif // GC_STATS
651 
652 
653 typedef DPTR(class heap_segment)               PTR_heap_segment;
654 typedef DPTR(class gc_heap)                    PTR_gc_heap;
655 typedef DPTR(PTR_gc_heap)                      PTR_PTR_gc_heap;
656 #ifdef FEATURE_PREMORTEM_FINALIZATION
657 typedef DPTR(class CFinalize)                  PTR_CFinalize;
658 #endif // FEATURE_PREMORTEM_FINALIZATION
659 
660 //-------------------------------------
661 //generation free list. It is an array of free lists bucketed by size, starting at sizes lower than first_bucket_size
662 //and doubling each time. The last bucket (index == num_buckets) is for largest sizes with no limit
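// For example (illustrative numbers): with first_bucket_size == 256 and three
// buckets, bucket 0 threads free items smaller than 256 bytes, bucket 1 items
// in [256, 512), and the last bucket everything 512 bytes and larger.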
663 
664 #define MAX_BUCKET_COUNT (13)//Max number of buckets for the small generations.
665 class alloc_list
666 {
667     uint8_t* head;
668     uint8_t* tail;
669 
670     size_t damage_count;
671 public:
672 #ifdef FL_VERIFICATION
673     size_t item_count;
674 #endif //FL_VERIFICATION
675 
676     uint8_t*& alloc_list_head () { return head;}
677     uint8_t*& alloc_list_tail () { return tail;}
678     size_t& alloc_list_damage_count(){ return damage_count; }
679     alloc_list()
680     {
681         head = 0;
682         tail = 0;
683         damage_count = 0;
684     }
685 };
686 
687 
688 class allocator
689 {
690     size_t num_buckets;
691     size_t frst_bucket_size;
692     alloc_list first_bucket;
693     alloc_list* buckets;
694     alloc_list& alloc_list_of (unsigned int bn);
695     size_t& alloc_list_damage_count_of (unsigned int bn);
696 
697 public:
698     allocator (unsigned int num_b, size_t fbs, alloc_list* b);
699     allocator()
700     {
701         num_buckets = 1;
702         frst_bucket_size = SIZE_T_MAX;
703     }
704     unsigned int number_of_buckets() {return (unsigned int)num_buckets;}
705 
706     size_t first_bucket_size() {return frst_bucket_size;}
707     uint8_t*& alloc_list_head_of (unsigned int bn)
708     {
709         return alloc_list_of (bn).alloc_list_head();
710     }
711     uint8_t*& alloc_list_tail_of (unsigned int bn)
712     {
713         return alloc_list_of (bn).alloc_list_tail();
714     }
715     void clear();
716     BOOL discard_if_no_fit_p()
717     {
718         return (num_buckets == 1);
719     }
720 
721     // This is when we know there's nothing to repair because this free
722     // list has never gone through plan phase. Right now it's only used
723     // by the background ephemeral sweep when we copy the local free list
724     // to gen0's free list.
725     //
726     // We copy head and tail manually (vs together like copy_to_alloc_list)
727     // since we need to copy tail first because when we get the free items off
728     // of each bucket we check head first. We also need to copy the
729     // smaller buckets first so when gen0 allocation needs to thread
730     // smaller items back that bucket is guaranteed to have been full
731     // copied.
732     void copy_with_no_repair (allocator* allocator_to_copy)
733     {
734         assert (num_buckets == allocator_to_copy->number_of_buckets());
735         for (unsigned int i = 0; i < num_buckets; i++)
736         {
737             alloc_list* al = &(allocator_to_copy->alloc_list_of (i));
738             alloc_list_tail_of(i) = al->alloc_list_tail();
739             alloc_list_head_of(i) = al->alloc_list_head();
740         }
741     }
742 
743     void unlink_item (unsigned int bucket_number, uint8_t* item, uint8_t* previous_item, BOOL use_undo_p);
744     void thread_item (uint8_t* item, size_t size);
745     void thread_item_front (uint8_t* item, size_t size);
746     void thread_free_item (uint8_t* free_item, uint8_t*& head, uint8_t*& tail);
747     void copy_to_alloc_list (alloc_list* toalist);
748     void copy_from_alloc_list (alloc_list* fromalist);
749     void commit_alloc_list_changes();
750 };
751 
752 #define NUM_GEN_POWER2 (20)
753 #define BASE_GEN_SIZE (1*512)
754 
755 // group the frequently used ones together (need instrumentation on accessors)
756 class generation
757 {
758 public:
759     // Don't move these first two fields without adjusting the references
760     // from the __asm in jitinterface.cpp.
761     alloc_context   allocation_context;
762     heap_segment*   allocation_segment;
763     PTR_heap_segment start_segment;
764     uint8_t*        allocation_context_start_region;
765     uint8_t*        allocation_start;
766     allocator       free_list_allocator;
767     size_t          free_list_allocated;
768     size_t          end_seg_allocated;
769     BOOL            allocate_end_seg_p;
770     size_t          condemned_allocated;
771     size_t          free_list_space;
772     size_t          free_obj_space;
773     size_t          allocation_size;
774     uint8_t*        plan_allocation_start;
775     size_t          plan_allocation_start_size;
776 
777     // this is the pinned plugs that got allocated into this gen.
778     size_t          pinned_allocated;
779     size_t          pinned_allocation_compact_size;
780     size_t          pinned_allocation_sweep_size;
781     int             gen_num;
782 
783 #ifdef FREE_USAGE_STATS
784     size_t          gen_free_spaces[NUM_GEN_POWER2];
785     // these are non pinned plugs only
786     size_t          gen_plugs[NUM_GEN_POWER2];
787     size_t          gen_current_pinned_free_spaces[NUM_GEN_POWER2];
788     size_t          pinned_free_obj_space;
789     // this is what got allocated into the pinned free spaces.
790     size_t          allocated_in_pinned_free;
791     size_t          allocated_since_last_pin;
792 #endif //FREE_USAGE_STATS
793 };
794 
795 // The dynamic data fields are grouped into 3 categories:
796 //
797 // calculated logical data (like desired_allocation)
798 // physical data (like fragmentation)
799 // const data (like min_gc_size), initialized at the beginning
800 class dynamic_data
801 {
802 public:
803     ptrdiff_t new_allocation;
804     ptrdiff_t gc_new_allocation; // new allocation at beginning of gc
805     float     surv;
806     size_t    desired_allocation;
807 
808     // # of bytes taken by objects (ie, not free space) at the beginning
809     // of the GC.
810     size_t    begin_data_size;
811     // # of bytes taken by survived objects after mark.
812     size_t    survived_size;
813     // # of bytes taken by survived pinned plugs after mark.
814     size_t    pinned_survived_size;
815     size_t    artificial_pinned_survived_size;
816     size_t    added_pinned_size;
817 
818 #ifdef SHORT_PLUGS
819     size_t    padding_size;
820 #endif //SHORT_PLUGS
821 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
822     // # of plugs that are not pinned plugs.
823     size_t    num_npinned_plugs;
824 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
825     //total object size after a GC, ie, doesn't include fragmentation
826     size_t    current_size;
827     size_t    collection_count;
828     size_t    promoted_size;
829     size_t    freach_previous_promotion;
830     size_t    fragmentation;    //fragmentation when we don't compact
831     size_t    gc_clock;         //gc# when last GC happened
832     size_t    time_clock;       //time when last gc started
833     size_t    gc_elapsed_time;  // Time it took for the gc to complete
834     float     gc_speed;         //  speed in bytes/msec for the gc to complete
835 
836     // min_size is always the same as min_gc_size.
837     size_t    min_gc_size;
838     size_t    max_size;
839     size_t    min_size;
840     size_t    default_new_allocation;
841     size_t    fragmentation_limit;
842     float     fragmentation_burden_limit;
843     float     limit;
844     float     max_limit;
845 };
846 
847 #define ro_in_entry 0x1
848 
849 #ifdef SEG_MAPPING_TABLE
850 // Note that I am storing both h0 and seg0, even though in Server GC you can get to
851 // the heap* from the segment info. This is because heap_of needs to be really fast
852 // and we would not want yet another indirection.
853 struct seg_mapping
854 {
855     // if an address is > boundary it belongs to h1; else h0.
856     // since we init h0 and h1 to 0, if we get 0 it means that
857     // address doesn't exist on managed segments. And heap_of
858     // would just return heap0 which is what it does now.
859     uint8_t* boundary;
860 #ifdef MULTIPLE_HEAPS
861     gc_heap* h0;
862     gc_heap* h1;
863 #endif //MULTIPLE_HEAPS
864     // You could have an address that's in between 2 segments and
865     // this would return a seg; the caller then will use
866     // in_range_for_segment to determine if it's on that seg.
867     heap_segment* seg0; // this is what the seg for h0 is.
868     heap_segment* seg1; // this is what the seg for h1 is.
869     // Note that when frozen objects are used we mask seg1
870     // with 0x1 to indicate that there is a ro segment for
871     // this entry.
872 };
873 #endif //SEG_MAPPING_TABLE
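
// Illustrative lookup (a sketch; the real heap_of is defined elsewhere): for an
// address addr whose seg_mapping entry is "entry",
//     gc_heap* hp = (addr > entry->boundary) ? entry->h1 : entry->h0;
// and because h0/h1 start out 0, a zero result means addr is not on a managed
// segment.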
874 
875 // alignment helpers
876 //Alignment constant for allocation
877 #define ALIGNCONST (DATA_ALIGNMENT-1)
878 
879 inline
880 size_t Align (size_t nbytes, int alignment=ALIGNCONST)
881 {
882     return (nbytes + alignment) & ~alignment;
883 }
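
// For example, with DATA_ALIGNMENT == 8 (so ALIGNCONST == 7), Align (13)
// returns (13 + 7) & ~7 == 16, and sizes that are already a multiple of the
// alignment come back unchanged.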
884 
885 //return alignment constant for small object heap vs large object heap
886 inline
887 int get_alignment_constant (BOOL small_object_p)
888 {
889 #ifdef FEATURE_STRUCTALIGN
890     // If any objects on the large object heap require 8-byte alignment,
891     // the compiler will tell us so.  Let's not guess an alignment here.
892     return ALIGNCONST;
893 #else // FEATURE_STRUCTALIGN
894     return small_object_p ? ALIGNCONST : 7;
895 #endif // FEATURE_STRUCTALIGN
896 }
897 
898 struct etw_opt_info
899 {
900     size_t desired_allocation;
901     size_t new_allocation;
902     int    gen_number;
903 };
904 
905 enum alloc_wait_reason
906 {
907     // When we don't care about firing an event for
908     // this.
909     awr_ignored = -1,
910 
911     // when we detect we are in low memory
912     awr_low_memory = 0,
913 
914     // when we detect the ephemeral segment is too full
915     awr_low_ephemeral = 1,
916 
917     // we've given out too much budget for gen0.
918     awr_gen0_alloc = 2,
919 
920     // we've given out too much budget for loh.
921     awr_loh_alloc = 3,
922 
923     // this event is really obsolete - it's for pre-XP
924     // OSs where low mem notification is not supported.
925     awr_alloc_loh_low_mem = 4,
926 
927     // we ran out of VM space to reserve on loh.
928     awr_loh_oos = 5,
929 
930     // ran out of space when allocating a small object
931     awr_gen0_oos_bgc = 6,
932 
933     // ran out of space when allocating a large object
934     awr_loh_oos_bgc = 7,
935 
936     // waiting for BGC to let FGC happen
937     awr_fgc_wait_for_bgc = 8,
938 
939     // wait for bgc to finish to get loh seg.
940     awr_get_loh_seg = 9,
941 
942     // we don't allow loh allocation during bgc planning.
943     awr_loh_alloc_during_plan = 10,
944 
945     // we don't allow too much loh allocation during bgc.
946     awr_loh_alloc_during_bgc = 11
947 };
948 
949 struct alloc_thread_wait_data
950 {
951     int awr;
952 };
953 
954 enum msl_take_state
955 {
956     mt_get_large_seg,
957     mt_wait_bgc_plan,
958     mt_wait_bgc,
959     mt_block_gc,
960     mt_clr_mem,
961     mt_clr_large_mem,
962     mt_t_eph_gc,
963     mt_t_full_gc,
964     mt_alloc_small,
965     mt_alloc_large,
966     mt_alloc_small_cant,
967     mt_alloc_large_cant,
968     mt_try_alloc,
969     mt_try_budget
970 };
971 
972 enum msl_enter_state
973 {
974     me_acquire,
975     me_release
976 };
977 
978 struct spinlock_info
979 {
980     msl_enter_state enter_state;
981     msl_take_state take_state;
982     EEThreadId thread_id;
983 };
984 
985 const unsigned HS_CACHE_LINE_SIZE = 128;
986 
987 #ifdef SNOOP_STATS
988 struct snoop_stats_data
989 {
990     int heap_index;
991 
992     // total number of objects that we called
993     // gc_mark on.
994     size_t objects_checked_count;
995     // total number of time we called gc_mark
996     // on a 0 reference.
997     size_t zero_ref_count;
998     // total objects actually marked.
999     size_t objects_marked_count;
1000     // number of objects written to the mark stack because
1001     // of mark_stolen.
1002     size_t stolen_stack_count;
1003     // number of objects pushed onto the mark stack because
1004     // of the partial mark code path.
1005     size_t partial_stack_count;
1006     // number of objects pushed onto the mark stack because
1007     // of the non partial mark code path.
1008     size_t normal_stack_count;
1009     // number of references marked without mark stack.
1010     size_t non_stack_count;
1011 
1012     // number of times we detect next heap's mark stack
1013     // is not busy.
1014     size_t stack_idle_count;
1015 
1016     // number of times we do switch to thread.
1017     size_t switch_to_thread_count;
1018 
1019     // number of times we are checking if the next heap's
1020     // mark stack is busy.
1021     size_t check_level_count;
1022     // number of times next stack is busy and level is
1023     // at the bottom.
1024     size_t busy_count;
1025     // how many interlocked exchange operations we did
1026     size_t interlocked_count;
1027     // number of times parent objects stolen
1028     size_t partial_mark_parent_count;
1029     // number of times we look at a normal stolen entry,
1030     // or the beginning/ending PM pair.
1031     size_t stolen_or_pm_count;
1032     // number of times we see 2 for the entry.
1033     size_t stolen_entry_count;
1034     // number of times we see a PM entry that's not ready.
1035     size_t pm_not_ready_count;
1036     // number of stolen normal marked objects and partial mark children.
1037     size_t normal_count;
1038     // number of times the bottom of mark stack was cleared.
1039     size_t stack_bottom_clear_count;
1040 };
1041 #endif //SNOOP_STATS
1042 
1043 struct no_gc_region_info
1044 {
1045     size_t soh_allocation_size;
1046     size_t loh_allocation_size;
1047     size_t started;
1048     size_t num_gcs;
1049     size_t num_gcs_induced;
1050     start_no_gc_region_status start_status;
1051     gc_pause_mode saved_pause_mode;
1052     size_t saved_gen0_min_size;
1053     size_t saved_gen3_min_size;
1054     BOOL minimal_gc_p;
1055 };
1056 
1057 // if you change these, make sure you update them for sos (strike.cpp) as well.
1058 //
1059 // !!!NOTE!!!
1060 // Right now I am only recording data from blocking GCs. When recording from BGC,
1061 // it should have its own copy just like gc_data_per_heap.
1062 // for BGCs we will have a very different set of datapoints to record.
1063 enum interesting_data_point
1064 {
1065     idp_pre_short = 0,
1066     idp_post_short = 1,
1067     idp_merged_pin = 2,
1068     idp_converted_pin = 3,
1069     idp_pre_pin = 4,
1070     idp_post_pin = 5,
1071     idp_pre_and_post_pin = 6,
1072     idp_pre_short_padded = 7,
1073     idp_post_short_padded = 8,
1074     max_idp_count
1075 };
1076 
1077 //class definition of the internal class
1078 class gc_heap
1079 {
1080     friend struct ::_DacGlobals;
1081 #ifdef DACCESS_COMPILE
1082     friend class ::ClrDataAccess;
1083     friend class ::DacHeapWalker;
1084 #endif //DACCESS_COMPILE
1085 
1086     friend class GCHeap;
1087 #ifdef FEATURE_PREMORTEM_FINALIZATION
1088     friend class CFinalize;
1089 #endif // FEATURE_PREMORTEM_FINALIZATION
1090     friend struct ::alloc_context;
1091     friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags);
1092     friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
1093     friend class t_join;
1094     friend class gc_mechanisms;
1095     friend class seg_free_spaces;
1096 
1097 #ifdef BACKGROUND_GC
1098     friend class exclusive_sync;
1099     friend class recursive_gc_sync;
1100 #endif //BACKGROUND_GC
1101 
1102 #if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1103     friend void checkGCWriteBarrier();
1104     friend void initGCShadow();
1105 #endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
1106 
1107 #ifdef MULTIPLE_HEAPS
1108     typedef void (gc_heap::* card_fn) (uint8_t**, int);
1109 #define call_fn(fn) (this->*fn)
1110 #define __this this
1111 #else
1112     typedef void (* card_fn) (uint8_t**);
1113 #define call_fn(fn) (*fn)
1114 #define __this (gc_heap*)0
1115 #endif
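
// Illustrative call site (hypothetical argument name): with MULTIPLE_HEAPS a
// card_fn is a pointer-to-member, so
//     call_fn(fn) (ppObject THREAD_NUMBER_ARG);
// expands to (this->*fn) (ppObject, thread); in the workstation build it
// expands to (*fn) (ppObject) since the trailing ", int" parameter goes away.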
1116 
1117 public:
1118 
1119 #ifdef TRACE_GC
1120     PER_HEAP
1121     void print_free_list (int gen, heap_segment* seg);
1122 #endif // TRACE_GC
1123 
1124 #ifdef SYNCHRONIZATION_STATS
1125 
1126     PER_HEAP_ISOLATED
1127     void init_sync_stats()
1128     {
1129 #ifdef MULTIPLE_HEAPS
1130         for (int i = 0; i < gc_heap::n_heaps; i++)
1131         {
1132             gc_heap::g_heaps[i]->init_heap_sync_stats();
1133         }
1134 #else  //MULTIPLE_HEAPS
1135         init_heap_sync_stats();
1136 #endif  //MULTIPLE_HEAPS
1137     }
1138 
1139     PER_HEAP_ISOLATED
1140     void print_sync_stats(unsigned int gc_count_during_log)
1141     {
1142         // bad/good gl acquire is cumulative over the log interval (because the numbers are too small)
1143         // min/max msl_acquire is the min/max during the log interval, not each GC.
1144         // Threads is the number of allocation threads for the last GC.
1145         // num of msl acquired, avg_msl, high and low are all for each GC.
1146         printf("%2s%2s%10s%10s%12s%6s%4s%8s(  st,  wl, stw, dpw)\n",
1147             "H", "T", "good_sus", "bad_sus", "avg_msl", "high", "low", "num_msl");
1148 
1149 #ifdef MULTIPLE_HEAPS
1150         for (int i = 0; i < gc_heap::n_heaps; i++)
1151         {
1152             gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log);
1153         }
1154 #else  //MULTIPLE_HEAPS
1155         print_heap_sync_stats(0, gc_count_during_log);
1156 #endif  //MULTIPLE_HEAPS
1157     }
1158 
1159 #endif //SYNCHRONIZATION_STATS
1160 
1161     PER_HEAP
1162     void verify_soh_segment_list();
1163     PER_HEAP
1164     void verify_mark_array_cleared (heap_segment* seg);
1165     PER_HEAP
1166     void verify_mark_array_cleared();
1167     PER_HEAP
1168     void verify_seg_end_mark_array_cleared();
1169     PER_HEAP
1170     void verify_partial();
1171 
1172 #ifdef VERIFY_HEAP
1173     PER_HEAP
1174     void verify_free_lists();
1175     PER_HEAP
1176     void verify_heap (BOOL begin_gc_p);
1177 #endif //VERIFY_HEAP
1178 
1179     PER_HEAP_ISOLATED
1180     void fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num);
1181 
1182     PER_HEAP_ISOLATED
1183     void fire_pevents();
1184 
1185 #ifdef FEATURE_BASICFREEZE
1186     static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef);
1187 #endif
1188 
1189     static
1190     heap_segment* make_heap_segment (uint8_t* new_pages,
1191                                      size_t size,
1192                                      int h_number);
1193     static
1194     l_heap* make_large_heap (uint8_t* new_pages, size_t size, BOOL managed);
1195 
1196     static
1197     gc_heap* make_gc_heap(
1198 #if defined (MULTIPLE_HEAPS)
1199         GCHeap* vm_heap,
1200         int heap_number
1201 #endif //MULTIPLE_HEAPS
1202         );
1203 
1204     static
1205     void destroy_gc_heap(gc_heap* heap);
1206 
1207     static
1208     HRESULT initialize_gc  (size_t segment_size,
1209                             size_t heap_size
1210 #ifdef MULTIPLE_HEAPS
1211                             , unsigned number_of_heaps
1212 #endif //MULTIPLE_HEAPS
1213         );
1214 
1215     static
1216     void shutdown_gc();
1217 
1218     PER_HEAP
1219     CObjectHeader* allocate (size_t jsize,
1220                              alloc_context* acontext);
1221 
1222 #ifdef MULTIPLE_HEAPS
1223     static void balance_heaps (alloc_context* acontext);
1224     static
1225     gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
1226     static
1227     void gc_thread_stub (void* arg);
1228 #endif //MULTIPLE_HEAPS
1229 
1230     CObjectHeader* try_fast_alloc (size_t jsize);
1231 
1232     // For LOH allocations we only update the alloc_bytes_loh in allocation
1233     // context - we don't actually use the ptr/limit from it so I am
1234     // making this explicit by not passing in the alloc_context.
1235     PER_HEAP
1236     CObjectHeader* allocate_large_object (size_t size, int64_t& alloc_bytes);
1237 
1238 #ifdef FEATURE_STRUCTALIGN
1239     PER_HEAP
1240     uint8_t* pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size);
1241 #endif // FEATURE_STRUCTALIGN
1242 
1243     PER_HEAP_ISOLATED
1244     void do_pre_gc();
1245 
1246     PER_HEAP_ISOLATED
1247     void do_post_gc();
1248 
1249     PER_HEAP
1250     BOOL expand_soh_with_minimal_gc();
1251 
1252     // EE is always suspended when this method is called.
1253     // returning FALSE means we actually didn't do a GC. This happens
1254     // when we figured that we needed to do a BGC.
1255     PER_HEAP
1256     int garbage_collect (int n);
1257 
1258     PER_HEAP
1259     void init_records();
1260 
1261     static
1262     uint32_t* make_card_table (uint8_t* start, uint8_t* end);
1263 
1264     static
1265     void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
1266 
1267     static
1268     int grow_brick_card_tables (uint8_t* start,
1269                                 uint8_t* end,
1270                                 size_t size,
1271                                 heap_segment* new_seg,
1272                                 gc_heap* hp,
1273                                 BOOL loh_p);
1274 
1275     PER_HEAP
1276     BOOL is_mark_set (uint8_t* o);
1277 
1278 #ifdef FEATURE_BASICFREEZE
1279     PER_HEAP_ISOLATED
1280     bool frozen_object_p(Object* obj);
1281 #endif // FEATURE_BASICFREEZE
1282 
1283 protected:
1284 
1285     PER_HEAP_ISOLATED
1286     void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1287 
1288     PER_HEAP
1289     void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
1290 
1291     struct walk_relocate_args
1292     {
1293         uint8_t* last_plug;
1294         BOOL is_shortened;
1295         mark* pinned_plug_entry;
1296         size_t profiling_context;
1297         record_surv_fn fn;
1298     };
1299 
1300     PER_HEAP
1301     void walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type);
1302 
1303     PER_HEAP
1304     void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
1305                     walk_relocate_args* args);
1306 
1307     PER_HEAP
1308     void walk_relocation (size_t profiling_context, record_surv_fn fn);
1309 
1310     PER_HEAP
1311     void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
1312 
1313     PER_HEAP
1314     void walk_finalize_queue (fq_walk_fn fn);
1315 
1316 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1317     PER_HEAP
1318     void walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn);
1319 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
1320 
1321     // used in blocking GCs after plan phase so this walks the plugs.
1322     PER_HEAP
1323     void walk_survivors_relocation (size_t profiling_context, record_surv_fn fn);
1324     PER_HEAP
1325     void walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn);
1326 
1327     PER_HEAP
1328     int generation_to_condemn (int n,
1329                                BOOL* blocking_collection_p,
1330                                BOOL* elevation_requested_p,
1331                                BOOL check_only_p);
1332 
1333     PER_HEAP_ISOLATED
1334     int joined_generation_to_condemn (BOOL should_evaluate_elevation, int n_initial, BOOL* blocking_collection
1335                                         STRESS_HEAP_ARG(int n_original));
1336 
1337     PER_HEAP
1338     size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);
1339 
1340     PER_HEAP_ISOLATED
1341     uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);
1342 
1343     PER_HEAP
1344     void concurrent_print_time_delta (const char* msg);
1345     PER_HEAP
1346     void free_list_info (int gen_num, const char* msg);
1347 
1348     // in svr GC on entry and exit of this method, the GC threads are not
1349     // synchronized
1350     PER_HEAP
1351     void gc1();
1352 
1353     PER_HEAP_ISOLATED
1354     void save_data_for_no_gc();
1355 
1356     PER_HEAP_ISOLATED
1357     void restore_data_for_no_gc();
1358 
1359     PER_HEAP_ISOLATED
1360     void update_collection_counts_for_no_gc();
1361 
1362     PER_HEAP_ISOLATED
1363     BOOL should_proceed_with_gc();
1364 
1365     PER_HEAP_ISOLATED
1366     void record_gcs_during_no_gc();
1367 
1368     PER_HEAP
1369     BOOL find_loh_free_for_no_gc();
1370 
1371     PER_HEAP
1372     BOOL find_loh_space_for_no_gc();
1373 
1374     PER_HEAP
1375     BOOL commit_loh_for_no_gc (heap_segment* seg);
1376 
1377     PER_HEAP_ISOLATED
1378     start_no_gc_region_status prepare_for_no_gc_region (uint64_t total_size,
1379                                                         BOOL loh_size_known,
1380                                                         uint64_t loh_size,
1381                                                         BOOL disallow_full_blocking);
1382 
1383     PER_HEAP
1384     BOOL loh_allocated_for_no_gc();
1385 
1386     PER_HEAP_ISOLATED
1387     void release_no_gc_loh_segments();
1388 
1389     PER_HEAP_ISOLATED
1390     void thread_no_gc_loh_segments();
1391 
1392     PER_HEAP
1393     void check_and_set_no_gc_oom();
1394 
1395     PER_HEAP
1396     void allocate_for_no_gc_after_gc();
1397 
1398     PER_HEAP
1399     void set_loh_allocations_for_no_gc();
1400 
1401     PER_HEAP
1402     void set_soh_allocations_for_no_gc();
1403 
1404     PER_HEAP
1405     void prepare_for_no_gc_after_gc();
1406 
1407     PER_HEAP_ISOLATED
1408     void set_allocations_for_no_gc();
1409 
1410     PER_HEAP_ISOLATED
1411     BOOL should_proceed_for_no_gc();
1412 
1413     PER_HEAP_ISOLATED
1414     start_no_gc_region_status get_start_no_gc_region_status();
1415 
1416     PER_HEAP_ISOLATED
1417     end_no_gc_region_status end_no_gc_region();
1418 
1419     PER_HEAP_ISOLATED
1420     void handle_failure_for_no_gc();
1421 
1422     PER_HEAP
1423     void fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address);
1424 
1425     PER_HEAP
1426     void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
1427 
1428     PER_HEAP
1429     size_t limit_from_size (size_t size, size_t room, int gen_number,
1430                             int align_const);
1431     PER_HEAP
1432     int try_allocate_more_space (alloc_context* acontext, size_t jsize,
1433                                  int alloc_generation_number);
1434     PER_HEAP
1435     BOOL allocate_more_space (alloc_context* acontext, size_t jsize,
1436                               int alloc_generation_number);
1437 
1438     PER_HEAP
1439     size_t get_full_compact_gc_count();
1440 
1441     PER_HEAP
1442     BOOL short_on_end_of_seg (int gen_number,
1443                               heap_segment* seg,
1444                               int align_const);
1445 
1446     PER_HEAP
1447     BOOL a_fit_free_list_p (int gen_number,
1448                             size_t size,
1449                             alloc_context* acontext,
1450                             int align_const);
1451 
1452 #ifdef BACKGROUND_GC
1453     PER_HEAP
1454     void wait_for_background (alloc_wait_reason awr);
1455 
1456     PER_HEAP
1457     void wait_for_bgc_high_memory (alloc_wait_reason awr);
1458 
1459     PER_HEAP
1460     void bgc_loh_alloc_clr (uint8_t* alloc_start,
1461                             size_t size,
1462                             alloc_context* acontext,
1463                             int align_const,
1464                             int lock_index,
1465                             BOOL check_used_p,
1466                             heap_segment* seg);
1467 #endif //BACKGROUND_GC
1468 
1469 #ifdef BACKGROUND_GC
1470     PER_HEAP
1471     void wait_for_background_planning (alloc_wait_reason awr);
1472 
1473     PER_HEAP
1474     BOOL bgc_loh_should_allocate();
1475 #endif //BACKGROUND_GC
1476 
1477 #define max_saved_spinlock_info 48
1478 
1479 #ifdef SPINLOCK_HISTORY
1480     PER_HEAP
1481     int spinlock_info_index;
1482 
1483     PER_HEAP
1484     spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
1485 #endif //SPINLOCK_HISTORY
1486 
1487     PER_HEAP
1488     void add_saved_spinlock_info (
1489             msl_enter_state enter_state,
1490             msl_take_state take_state);
1491 
1492     PER_HEAP
1493     BOOL a_fit_free_list_large_p (size_t size,
1494                                   alloc_context* acontext,
1495                                   int align_const);
1496 
1497     PER_HEAP
1498     BOOL a_fit_segment_end_p (int gen_number,
1499                               heap_segment* seg,
1500                               size_t size,
1501                               alloc_context* acontext,
1502                               int align_const,
1503                               BOOL* commit_failed_p);
1504     PER_HEAP
1505     BOOL loh_a_fit_segment_end_p (int gen_number,
1506                                   size_t size,
1507                                   alloc_context* acontext,
1508                                   int align_const,
1509                                   BOOL* commit_failed_p,
1510                                   oom_reason* oom_r);
1511     PER_HEAP
1512     BOOL loh_get_new_seg (generation* gen,
1513                           size_t size,
1514                           int align_const,
1515                           BOOL* commit_failed_p,
1516                           oom_reason* oom_r);
1517 
1518     PER_HEAP_ISOLATED
1519     size_t get_large_seg_size (size_t size);
1520 
1521     PER_HEAP
1522     BOOL retry_full_compact_gc (size_t size);
1523 
1524     PER_HEAP
1525     BOOL check_and_wait_for_bgc (alloc_wait_reason awr,
1526                                  BOOL* did_full_compact_gc);
1527 
1528     PER_HEAP
1529     BOOL trigger_full_compact_gc (gc_reason gr,
1530                                   oom_reason* oom_r);
1531 
1532     PER_HEAP
1533     BOOL trigger_ephemeral_gc (gc_reason gr);
1534 
1535     PER_HEAP
1536     BOOL soh_try_fit (int gen_number,
1537                       size_t size,
1538                       alloc_context* acontext,
1539                       int align_const,
1540                       BOOL* commit_failed_p,
1541                       BOOL* short_seg_end_p);
1542     PER_HEAP
1543     BOOL loh_try_fit (int gen_number,
1544                       size_t size,
1545                       alloc_context* acontext,
1546                       int align_const,
1547                       BOOL* commit_failed_p,
1548                       oom_reason* oom_r);
1549 
1550     PER_HEAP
1551     BOOL allocate_small (int gen_number,
1552                          size_t size,
1553                          alloc_context* acontext,
1554                          int align_const);
1555 
1556     enum c_gc_state
1557     {
1558         c_gc_state_marking,
1559         c_gc_state_planning,
1560         c_gc_state_free
1561     };
1562 
1563 #ifdef RECORD_LOH_STATE
1564     #define max_saved_loh_states 12
1565     PER_HEAP
1566     int loh_state_index;
1567 
1568     struct loh_state_info
1569     {
1570         allocation_state alloc_state;
1571         EEThreadId thread_id;
1572     };
1573 
1574     PER_HEAP
1575     loh_state_info last_loh_states[max_saved_loh_states];
1576     PER_HEAP
1577     void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
1578 #endif //RECORD_LOH_STATE
1579     PER_HEAP
1580     BOOL allocate_large (int gen_number,
1581                          size_t size,
1582                          alloc_context* acontext,
1583                          int align_const);
1584 
1585     PER_HEAP_ISOLATED
1586     int init_semi_shared();
1587     PER_HEAP
1588     int init_gc_heap (int heap_number);
1589     PER_HEAP
1590     void self_destroy();
1591     PER_HEAP_ISOLATED
1592     void destroy_semi_shared();
1593     PER_HEAP
1594     void repair_allocation_contexts (BOOL repair_p);
1595     PER_HEAP
1596     void fix_allocation_contexts (BOOL for_gc_p);
1597     PER_HEAP
1598     void fix_youngest_allocation_area (BOOL for_gc_p);
1599     PER_HEAP
1600     void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
1601                                  int align_const);
1602     PER_HEAP
1603     void fix_large_allocation_area (BOOL for_gc_p);
1604     PER_HEAP
1605     void fix_older_allocation_area (generation* older_gen);
1606     PER_HEAP
1607     void set_allocation_heap_segment (generation* gen);
1608     PER_HEAP
1609     void reset_allocation_pointers (generation* gen, uint8_t* start);
1610     PER_HEAP
1611     int object_gennum (uint8_t* o);
1612     PER_HEAP
1613     int object_gennum_plan (uint8_t* o);
1614     PER_HEAP_ISOLATED
1615     void init_heap_segment (heap_segment* seg);
1616     PER_HEAP
1617     void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE);
1618 #ifdef FEATURE_BASICFREEZE
1619     PER_HEAP
1620     BOOL insert_ro_segment (heap_segment* seg);
1621     PER_HEAP
1622     void remove_ro_segment (heap_segment* seg);
1623 #endif //FEATURE_BASICFREEZE
1624     PER_HEAP
1625     BOOL set_ro_segment_in_range (heap_segment* seg);
1626     PER_HEAP
1627     BOOL unprotect_segment (heap_segment* seg);
1628     PER_HEAP
1629     heap_segment* soh_get_segment_to_expand();
1630     PER_HEAP
1631     heap_segment* get_segment (size_t size, BOOL loh_p);
1632     PER_HEAP_ISOLATED
1633     void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp);
1634     PER_HEAP_ISOLATED
1635     void seg_mapping_table_remove_segment (heap_segment* seg);
1636     PER_HEAP
1637     heap_segment* get_large_segment (size_t size, BOOL* did_full_compact_gc);
1638     PER_HEAP
1639     void thread_loh_segment (heap_segment* new_seg);
1640     PER_HEAP_ISOLATED
1641     heap_segment* get_segment_for_loh (size_t size
1642 #ifdef MULTIPLE_HEAPS
1643                                       , gc_heap* hp
1644 #endif //MULTIPLE_HEAPS
1645                                       );
1646     PER_HEAP
1647     void reset_heap_segment_pages (heap_segment* seg);
1648     PER_HEAP
1649     void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
1650     PER_HEAP
1651     void decommit_heap_segment (heap_segment* seg);
1652     PER_HEAP
1653     void clear_gen0_bricks();
1654 #ifdef BACKGROUND_GC
1655     PER_HEAP
1656     void rearrange_small_heap_segments();
1657 #endif //BACKGROUND_GC
1658     PER_HEAP
1659     void rearrange_large_heap_segments();
1660     PER_HEAP
1661     void rearrange_heap_segments(BOOL compacting);
1662 
1663     PER_HEAP_ISOLATED
1664     void reset_write_watch_for_gc_heap(void* base_address, size_t region_size);
1665     PER_HEAP_ISOLATED
1666     void get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended);
1667 
1668     PER_HEAP
1669     void switch_one_quantum();
1670     PER_HEAP
1671     void reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size);
1672     PER_HEAP
1673     void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size);
1674     PER_HEAP
1675     void reset_write_watch (BOOL concurrent_p);
1676     PER_HEAP
1677     void adjust_ephemeral_limits ();
1678     PER_HEAP
1679     void make_generation (generation& gen, heap_segment* seg,
1680                           uint8_t* start, uint8_t* pointer);
1681 
1682 
1683 #define USE_PADDING_FRONT 1
1684 #define USE_PADDING_TAIL  2
1685 
1686     PER_HEAP
1687     BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1688                      uint8_t* old_loc=0, int use_padding=USE_PADDING_TAIL);
1689     PER_HEAP
1690     BOOL a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
1691                        int align_const);
1692 
1693     PER_HEAP
1694     void handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
1695                      uint8_t* allocated, uint8_t* reserved);
1696 
1697     PER_HEAP
1698     size_t card_of ( uint8_t* object);
1699     PER_HEAP
1700     uint8_t* brick_address (size_t brick);
1701     PER_HEAP
1702     size_t brick_of (uint8_t* add);
1703     PER_HEAP
1704     uint8_t* card_address (size_t card);
1705     PER_HEAP
1706     size_t card_to_brick (size_t card);
1707     PER_HEAP
1708     void clear_card (size_t card);
1709     PER_HEAP
1710     void set_card (size_t card);
1711     PER_HEAP
1712     BOOL  card_set_p (size_t card);
1713     PER_HEAP
1714     void card_table_set_bit (uint8_t* location);
1715 
1716 #ifdef CARD_BUNDLE
1717     PER_HEAP
1718     void update_card_table_bundle();
1719     PER_HEAP
1720     void reset_card_table_write_watch();
1721     PER_HEAP
1722     void card_bundle_clear(size_t cardb);
1723     PER_HEAP
1724     void card_bundles_set (size_t start_cardb, size_t end_cardb);
1725     PER_HEAP
1726     BOOL card_bundle_set_p (size_t cardb);
1727     PER_HEAP
1728     BOOL find_card_dword (size_t& cardw, size_t cardw_end);
1729     PER_HEAP
1730     void enable_card_bundles();
1731     PER_HEAP_ISOLATED
1732     BOOL card_bundles_enabled();
1733 
1734 #endif //CARD_BUNDLE
1735 
1736     PER_HEAP
1737     BOOL find_card (uint32_t* card_table, size_t& card,
1738                     size_t card_word_end, size_t& end_card);
1739     PER_HEAP
1740     BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address);
1741     PER_HEAP
1742     int grow_heap_segment (heap_segment* seg, uint8_t* high_address, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL);
1743     PER_HEAP
1744     void copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
1745                                 short* old_brick_table,
1746                                 heap_segment* seg,
1747                                 uint8_t* start, uint8_t* end);
1748     PER_HEAP
1749     void init_brick_card_range (heap_segment* seg);
1750     PER_HEAP
1751     void copy_brick_card_table_l_heap ();
1752     PER_HEAP
1753     void copy_brick_card_table();
1754     PER_HEAP
1755     void clear_brick_table (uint8_t* from, uint8_t* end);
1756     PER_HEAP
1757     void set_brick (size_t index, ptrdiff_t val);
1758     PER_HEAP
1759     int brick_entry (size_t index);
1760 #ifdef MARK_ARRAY
1761     PER_HEAP
1762     unsigned int mark_array_marked (uint8_t* add);
1763     PER_HEAP
1764     void mark_array_set_marked (uint8_t* add);
1765     PER_HEAP
1766     BOOL is_mark_bit_set (uint8_t* add);
1767     PER_HEAP
1768     void gmark_array_set_marked (uint8_t* add);
1769     PER_HEAP
1770     void set_mark_array_bit (size_t mark_bit);
1771     PER_HEAP
1772     BOOL mark_array_bit_set (size_t mark_bit);
1773     PER_HEAP
1774     void mark_array_clear_marked (uint8_t* add);
1775     PER_HEAP
1776     void clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only=TRUE
1777 #ifdef FEATURE_BASICFREEZE
1778         , BOOL read_only=FALSE
1779 #endif // FEATURE_BASICFREEZE
1780         );
1781 #ifdef BACKGROUND_GC
1782     PER_HEAP
1783     void seg_clear_mark_array_bits_soh (heap_segment* seg);
1784     PER_HEAP
1785     void clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1786     PER_HEAP
1787     void bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1788     PER_HEAP
1789     void clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p);
1790 #ifdef VERIFY_HEAP
1791     PER_HEAP
1792     void set_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1793     PER_HEAP
1794     void check_batch_mark_array_bits (uint8_t* start, uint8_t* end);
1795 #endif //VERIFY_HEAP
1796 #endif //BACKGROUND_GC
1797 #endif //MARK_ARRAY
1798 
1799     PER_HEAP
1800     BOOL large_object_marked (uint8_t* o, BOOL clearp);
1801 
1802 #ifdef BACKGROUND_GC
1803     PER_HEAP
1804     BOOL background_allowed_p();
1805 #endif //BACKGROUND_GC
1806 
1807     PER_HEAP_ISOLATED
1808     void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p);
1809 
1810     PER_HEAP
1811     void check_for_full_gc (int gen_num, size_t size);
1812 
1813     PER_HEAP
1814     void adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
1815                        int gen_number);
1816     PER_HEAP
1817     void adjust_limit_clr (uint8_t* start, size_t limit_size,
1818                            alloc_context* acontext, heap_segment* seg,
1819                            int align_const, int gen_number);
1820     PER_HEAP
1821     void  leave_allocation_segment (generation* gen);
1822 
1823     PER_HEAP
1824     void init_free_and_plug();
1825 
1826     PER_HEAP
1827     void print_free_and_plug (const char* msg);
1828 
1829     PER_HEAP
1830     void add_gen_plug (int gen_number, size_t plug_size);
1831 
1832     PER_HEAP
1833     void add_gen_free (int gen_number, size_t free_size);
1834 
1835     PER_HEAP
1836     void add_item_to_current_pinned_free (int gen_number, size_t free_size);
1837 
1838     PER_HEAP
1839     void remove_gen_free (int gen_number, size_t free_size);
1840 
1841     PER_HEAP
1842     uint8_t* allocate_in_older_generation (generation* gen, size_t size,
1843                                         int from_gen_number,
1844                                         uint8_t* old_loc=0
1845                                         REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1846     PER_HEAP
1847     generation*  ensure_ephemeral_heap_segment (generation* consing_gen);
1848     PER_HEAP
1849     uint8_t* allocate_in_condemned_generations (generation* gen,
1850                                              size_t size,
1851                                              int from_gen_number,
1852 #ifdef SHORT_PLUGS
1853                                              BOOL* convert_to_pinned_p=NULL,
1854                                              uint8_t* next_pinned_plug=0,
1855                                              heap_segment* current_seg=0,
1856 #endif //SHORT_PLUGS
1857                                              uint8_t* old_loc=0
1858                                              REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
1859 #ifdef INTERIOR_POINTERS
1860     // Verifies that interior is actually in the range of seg; otherwise
1861     // returns 0.
1862     PER_HEAP_ISOLATED
1863     heap_segment* find_segment (uint8_t* interior, BOOL small_segment_only_p);
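    // Illustrative sketch, not part of the original header: how an interior
    // pointer might be resolved to its containing segment and then to the
    // start of its object. "interior" is a hypothetical local here.
#if 0
    heap_segment* seg = gc_heap::find_segment (interior, FALSE /*small_segment_only_p*/);
    if (seg != 0)
    {
        uint8_t* obj = find_object_for_relocation (interior,
                                                   heap_segment_mem (seg),
                                                   heap_segment_allocated (seg));
    }
#endif // illustrative sketch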
1864 
1865     PER_HEAP
1866     heap_segment* find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p);
1867 
1868     PER_HEAP
1869     uint8_t* find_object_for_relocation (uint8_t* o, uint8_t* low, uint8_t* high);
1870 #endif //INTERIOR_POINTERS
1871 
1872     PER_HEAP_ISOLATED
1873     gc_heap* heap_of (uint8_t* object);
1874 
1875     PER_HEAP_ISOLATED
1876     gc_heap* heap_of_gc (uint8_t* object);
1877 
1878     PER_HEAP_ISOLATED
1879     size_t&  promoted_bytes (int);
1880 
1881     PER_HEAP
1882     uint8_t* find_object (uint8_t* o, uint8_t* low);
1883 
1884     PER_HEAP
1885     dynamic_data* dynamic_data_of (int gen_number);
1886     PER_HEAP
1887     ptrdiff_t  get_desired_allocation (int gen_number);
1888     PER_HEAP
1889     ptrdiff_t  get_new_allocation (int gen_number);
1890     PER_HEAP
1891     ptrdiff_t  get_allocation (int gen_number);
1892     PER_HEAP
1893     bool new_allocation_allowed (int gen_number);
1894 #ifdef BACKGROUND_GC
1895     PER_HEAP_ISOLATED
1896     void allow_new_allocation (int gen_number);
1897     PER_HEAP_ISOLATED
1898     void disallow_new_allocation (int gen_number);
1899 #endif //BACKGROUND_GC
1900     PER_HEAP
1901     void reset_pinned_queue();
1902     PER_HEAP
1903     void reset_pinned_queue_bos();
1904     PER_HEAP
1905     void set_allocator_next_pin (generation* gen);
1906     PER_HEAP
1907     void set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit);
1908     PER_HEAP
1909     void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len);
1910     PER_HEAP
1911     void enque_pinned_plug (uint8_t* plug,
1912                             BOOL save_pre_plug_info_p,
1913                             uint8_t* last_object_in_last_plug);
1914     PER_HEAP
1915     void merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size);
1916     PER_HEAP
1917     void set_pinned_info (uint8_t* last_pinned_plug,
1918                           size_t plug_len,
1919                           uint8_t* alloc_pointer,
1920                           uint8_t*& alloc_limit);
1921     PER_HEAP
1922     void set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen);
1923     PER_HEAP
1924     void save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug);
1925     PER_HEAP
1926     size_t deque_pinned_plug ();
1927     PER_HEAP
1928     mark* pinned_plug_of (size_t bos);
1929     PER_HEAP
1930     mark* oldest_pin ();
1931     PER_HEAP
1932     mark* before_oldest_pin();
1933     PER_HEAP
1934     BOOL pinned_plug_que_empty_p ();
1935     PER_HEAP
1936     void make_mark_stack (mark* arr);
1937 #ifdef MH_SC_MARK
1938     PER_HEAP
1939     int& mark_stack_busy();
1940     PER_HEAP
1941     VOLATILE(uint8_t*)& ref_mark_stack (gc_heap* hp, int index);
1942 #endif
1943 #ifdef BACKGROUND_GC
1944     PER_HEAP_ISOLATED
1945     size_t&  bpromoted_bytes (int);
1946     PER_HEAP
1947     void make_background_mark_stack (uint8_t** arr);
1948     PER_HEAP
1949     void make_c_mark_list (uint8_t** arr);
1950 #endif //BACKGROUND_GC
1951     PER_HEAP
1952     generation* generation_of (int  n);
1953     PER_HEAP
1954     BOOL gc_mark1 (uint8_t* o);
1955     PER_HEAP
1956     BOOL gc_mark (uint8_t* o, uint8_t* low, uint8_t* high);
1957     PER_HEAP
1958     uint8_t* mark_object(uint8_t* o THREAD_NUMBER_DCL);
1959 #ifdef HEAP_ANALYZE
1960     PER_HEAP
1961     void ha_mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
1962 #endif //HEAP_ANALYZE
1963     PER_HEAP
1964     void mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
1965     PER_HEAP
1966     void mark_object_simple1 (uint8_t* o, uint8_t* start THREAD_NUMBER_DCL);
1967 
1968 #ifdef MH_SC_MARK
1969     PER_HEAP
1970     void mark_steal ();
1971 #endif //MH_SC_MARK
1972 
1973 #ifdef BACKGROUND_GC
1974 
1975     PER_HEAP
1976     BOOL background_marked (uint8_t* o);
1977     PER_HEAP
1978     BOOL background_mark1 (uint8_t* o);
1979     PER_HEAP
1980     BOOL background_mark (uint8_t* o, uint8_t* low, uint8_t* high);
1981     PER_HEAP
1982     uint8_t* background_mark_object (uint8_t* o THREAD_NUMBER_DCL);
1983     PER_HEAP
1984     void background_mark_simple (uint8_t* o THREAD_NUMBER_DCL);
1985     PER_HEAP
1986     void background_mark_simple1 (uint8_t* o THREAD_NUMBER_DCL);
1987     PER_HEAP_ISOLATED
1988     void background_promote (Object**, ScanContext* , uint32_t);
1989     PER_HEAP
1990     BOOL background_object_marked (uint8_t* o, BOOL clearp);
1991     PER_HEAP
1992     void init_background_gc();
1993     PER_HEAP
1994     uint8_t* background_next_end (heap_segment*, BOOL);
1995     PER_HEAP
1996     void generation_delete_heap_segment (generation*,
1997                                          heap_segment*, heap_segment*, heap_segment*);
1998     PER_HEAP
1999     void set_mem_verify (uint8_t*, uint8_t*, uint8_t);
2000     PER_HEAP
2001     void process_background_segment_end (heap_segment*, generation*, uint8_t*,
2002                                      heap_segment*, BOOL*);
2003     PER_HEAP
2004     void process_n_background_segments (heap_segment*, heap_segment*, generation* gen);
2005     PER_HEAP
2006     BOOL fgc_should_consider_object (uint8_t* o,
2007                                      heap_segment* seg,
2008                                      BOOL consider_bgc_mark_p,
2009                                      BOOL check_current_sweep_p,
2010                                      BOOL check_saved_sweep_p);
2011     PER_HEAP
2012     void should_check_bgc_mark (heap_segment* seg,
2013                                 BOOL* consider_bgc_mark_p,
2014                                 BOOL* check_current_sweep_p,
2015                                 BOOL* check_saved_sweep_p);
2016     PER_HEAP
2017     void background_ephemeral_sweep();
2018     PER_HEAP
2019     void background_sweep ();
2020     PER_HEAP
2021     void background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL);
2022     PER_HEAP
2023     uint8_t* background_seg_end (heap_segment* seg, BOOL concurrent_p);
2024     PER_HEAP
2025     uint8_t* background_first_overflow (uint8_t* min_add,
2026                                      heap_segment* seg,
2027                                      BOOL concurrent_p,
2028                                      BOOL small_object_p);
2029     PER_HEAP
2030     void background_process_mark_overflow_internal (int condemned_gen_number,
2031                                                     uint8_t* min_add, uint8_t* max_add,
2032                                                     BOOL concurrent_p);
2033     PER_HEAP
2034     BOOL background_process_mark_overflow (BOOL concurrent_p);
2035 
2036     // for foreground GC to get hold of background structures containing refs
2037     PER_HEAP
2038     void
2039     scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
2040 
2041     PER_HEAP
2042     BOOL bgc_mark_array_range (heap_segment* seg,
2043                                BOOL whole_seg_p,
2044                                uint8_t** range_beg,
2045                                uint8_t** range_end);
2046     PER_HEAP
2047     void bgc_verify_mark_array_cleared (heap_segment* seg);
2048     PER_HEAP
2049     void verify_mark_bits_cleared (uint8_t* obj, size_t s);
2050     PER_HEAP
2051     void clear_all_mark_array();
2052 #endif //BACKGROUND_GC
2053 
2054     PER_HEAP
2055     uint8_t* next_end (heap_segment* seg, uint8_t* f);
2056     PER_HEAP
2057     void fix_card_table ();
2058     PER_HEAP
2059     void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
2060     PER_HEAP
2061     BOOL process_mark_overflow (int condemned_gen_number);
2062     PER_HEAP
2063     void process_mark_overflow_internal (int condemned_gen_number,
2064                                          uint8_t* min_address, uint8_t* max_address);
2065 
2066 #ifdef SNOOP_STATS
2067     PER_HEAP
2068     void print_snoop_stat();
2069 #endif //SNOOP_STATS
2070 
2071 #ifdef MH_SC_MARK
2072 
2073     PER_HEAP
2074     BOOL check_next_mark_stack (gc_heap* next_heap);
2075 
2076 #endif //MH_SC_MARK
2077 
2078     PER_HEAP
2079     void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
2080 
2081     PER_HEAP
2082     void mark_phase (int condemned_gen_number, BOOL mark_only_p);
2083 
2084     PER_HEAP
2085     void pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high);
2086 
2087 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
2088     PER_HEAP_ISOLATED
2089     size_t get_total_pinned_objects();
2090 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
2091 
2092     PER_HEAP
2093     void reset_mark_stack ();
2094     PER_HEAP
2095     uint8_t* insert_node (uint8_t* new_node, size_t sequence_number,
2096                        uint8_t* tree, uint8_t* last_node);
2097     PER_HEAP
2098     size_t update_brick_table (uint8_t* tree, size_t current_brick,
2099                                uint8_t* x, uint8_t* plug_end);
2100 
2101     PER_HEAP
2102     void plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate);
2103 
2104     PER_HEAP
2105     void realloc_plan_generation_start (generation* gen, generation* consing_gen);
2106 
2107     PER_HEAP
2108     void plan_generation_starts (generation*& consing_gen);
2109 
2110     PER_HEAP
2111     void advance_pins_for_demotion (generation* gen);
2112 
2113     PER_HEAP
2114     void process_ephemeral_boundaries(uint8_t* x, int& active_new_gen_number,
2115                                       int& active_old_gen_number,
2116                                       generation*& consing_gen,
2117                                       BOOL& allocate_in_condemned);
2118     PER_HEAP
2119     void seg_clear_mark_bits (heap_segment* seg);
2120     PER_HEAP
2121     void sweep_ro_segments (heap_segment* start_seg);
2122     PER_HEAP
2123     void convert_to_pinned_plug (BOOL& last_npinned_plug_p,
2124                                  BOOL& last_pinned_plug_p,
2125                                  BOOL& pinned_plug_p,
2126                                  size_t ps,
2127                                  size_t& artificial_pinned_size);
2128     PER_HEAP
2129     void store_plug_gap_info (uint8_t* plug_start,
2130                               uint8_t* plug_end,
2131                               BOOL& last_npinned_plug_p,
2132                               BOOL& last_pinned_plug_p,
2133                               uint8_t*& last_pinned_plug,
2134                               BOOL& pinned_plug_p,
2135                               uint8_t* last_object_in_last_plug,
2136                               BOOL& merge_with_last_pin_p,
2137                               // this is only for verification purposes
2138                               size_t last_plug_len);
2139     PER_HEAP
2140     void plan_phase (int condemned_gen_number);
2141 
2142     PER_HEAP
2143     void record_interesting_data_point (interesting_data_point idp);
2144 
2145 #ifdef GC_CONFIG_DRIVEN
2146     PER_HEAP
2147     void record_interesting_info_per_heap();
2148     PER_HEAP_ISOLATED
2149     void record_global_mechanisms();
2150     PER_HEAP_ISOLATED
2151     BOOL should_do_sweeping_gc (BOOL compact_p);
2152 #endif //GC_CONFIG_DRIVEN
2153 
2154 #ifdef FEATURE_LOH_COMPACTION
2155     // plan_loh can allocate memory so it can fail. If it fails, we will
2156     // fall back to sweeping.
2157     PER_HEAP
2158     BOOL plan_loh();
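    // Illustrative sketch, not the actual implementation: the fall-back
    // described above - if plan_loh() cannot get the memory it needs, LOH is
    // swept instead of compacted (should_compact_loh_p is a hypothetical local).
#if 0
    BOOL should_compact_loh_p = TRUE;
    if (should_compact_loh_p)
    {
        loh_compacted_p = plan_loh();
    }
    if (!loh_compacted_p)
    {
        sweep_large_objects();
    }
#endif // illustrative sketch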
2159 
2160     PER_HEAP
2161     void compact_loh();
2162 
2163     PER_HEAP
2164     void relocate_in_loh_compact();
2165 
2166     PER_HEAP
2167     void walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn);
2168 
2169     PER_HEAP
2170     BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
2171 
2172     PER_HEAP
2173     void loh_set_allocator_next_pin();
2174 
2175     PER_HEAP
2176     BOOL loh_pinned_plug_que_empty_p();
2177 
2178     PER_HEAP
2179     size_t loh_deque_pinned_plug();
2180 
2181     PER_HEAP
2182     mark* loh_pinned_plug_of (size_t bos);
2183 
2184     PER_HEAP
2185     mark* loh_oldest_pin();
2186 
2187     PER_HEAP
2188     BOOL loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit);
2189 
2190     PER_HEAP
2191     uint8_t* loh_allocate_in_condemned (uint8_t* old_loc, size_t size);
2192 
2193     PER_HEAP_ISOLATED
2194     BOOL loh_object_p (uint8_t* o);
2195 
2196     PER_HEAP_ISOLATED
2197     BOOL should_compact_loh();
2198 
2199     // If the LOH compaction mode is just to compact once,
2200     // we need to see if we should reset it back to not compact.
2201     // We would only reset if every heap's LOH was compacted.
2202     PER_HEAP_ISOLATED
2203     void check_loh_compact_mode  (BOOL all_heaps_compacted_p);
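    // Illustrative sketch under the assumption that gc_loh_compaction_mode has
    // loh_compaction_once / loh_compaction_default values; not the actual body.
#if 0
    void gc_heap::check_loh_compact_mode (BOOL all_heaps_compacted_p)
    {
        if (all_heaps_compacted_p && (loh_compaction_mode == loh_compaction_once))
        {
            // every heap compacted its LOH this GC, so stop compacting again.
            loh_compaction_mode = loh_compaction_default;
        }
    }
#endif // illustrative sketch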
2204 #endif //FEATURE_LOH_COMPACTION
2205 
2206     PER_HEAP
2207     void decommit_ephemeral_segment_pages (int condemned_gen_number);
2208     PER_HEAP
2209     void fix_generation_bounds (int condemned_gen_number,
2210                                 generation* consing_gen);
2211     PER_HEAP
2212     uint8_t* generation_limit (int gen_number);
2213 
2214     struct make_free_args
2215     {
2216         int free_list_gen_number;
2217         uint8_t* current_gen_limit;
2218         generation* free_list_gen;
2219         uint8_t* highest_plug;
2220     };
2221     PER_HEAP
2222     uint8_t* allocate_at_end (size_t size);
2223     PER_HEAP
2224     BOOL ensure_gap_allocation (int condemned_gen_number);
2225     // make_free_lists is only called by blocking GCs.
2226     PER_HEAP
2227     void make_free_lists (int condemned_gen_number);
2228     PER_HEAP
2229     void make_free_list_in_brick (uint8_t* tree, make_free_args* args);
2230     PER_HEAP
2231     void thread_gap (uint8_t* gap_start, size_t size, generation*  gen);
2232     PER_HEAP
2233     void loh_thread_gap_front (uint8_t* gap_start, size_t size, generation*  gen);
2234     PER_HEAP
2235     void make_unused_array (uint8_t* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE);
2236     PER_HEAP
2237     void clear_unused_array (uint8_t* x, size_t size);
2238     PER_HEAP
2239     void relocate_address (uint8_t** old_address THREAD_NUMBER_DCL);
2240     struct relocate_args
2241     {
2242         uint8_t* last_plug;
2243         uint8_t* low;
2244         uint8_t* high;
2245         BOOL is_shortened;
2246         mark* pinned_plug_entry;
2247     };
2248 
2249     PER_HEAP
2250     void reloc_survivor_helper (uint8_t** pval);
2251     PER_HEAP
2252     void check_class_object_demotion (uint8_t* obj);
2253     PER_HEAP
2254     void check_class_object_demotion_internal (uint8_t* obj);
2255 
2256     PER_HEAP
2257     void check_demotion_helper (uint8_t** pval, uint8_t* parent_obj);
2258 
2259     PER_HEAP
2260     void relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end);
2261 
2262     PER_HEAP
2263     void verify_pins_with_post_plug_info (const char* msg);
2264 
2265 #ifdef COLLECTIBLE_CLASS
2266     PER_HEAP
2267     void unconditional_set_card_collectible (uint8_t* obj);
2268 #endif //COLLECTIBLE_CLASS
2269 
2270     PER_HEAP
2271     void relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry);
2272 
2273     PER_HEAP
2274     void relocate_obj_helper (uint8_t* x, size_t s);
2275 
2276     PER_HEAP
2277     void reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc);
2278 
2279     PER_HEAP
2280     void relocate_pre_plug_info (mark* pinned_plug_entry);
2281 
2282     PER_HEAP
2283     void relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned);
2284 
2285     PER_HEAP
2286     void relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
2287                                      BOOL check_last_object_p,
2288                                      mark* pinned_plug_entry);
2289     PER_HEAP
2290     void relocate_survivors_in_brick (uint8_t* tree, relocate_args* args);
2291 
2292     PER_HEAP
2293     void update_oldest_pinned_plug();
2294 
2295     PER_HEAP
2296     void relocate_survivors (int condemned_gen_number,
2297                              uint8_t* first_condemned_address );
2298     PER_HEAP
2299     void relocate_phase (int condemned_gen_number,
2300                          uint8_t* first_condemned_address);
2301 
2302     struct compact_args
2303     {
2304         BOOL copy_cards_p;
2305         uint8_t* last_plug;
2306         ptrdiff_t last_plug_relocation;
2307         uint8_t* before_last_plug;
2308         size_t current_compacted_brick;
2309         BOOL is_shortened;
2310         mark* pinned_plug_entry;
2311         BOOL check_gennum_p;
2312         int src_gennum;
2313 
2314         void print()
2315         {
2316             dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix",
2317                 last_plug, last_plug_relocation, before_last_plug, current_compacted_brick));
2318         }
2319     };
2320 
2321     PER_HEAP
2322     void copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2323     PER_HEAP
2324     void  gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p);
2325     PER_HEAP
2326     void compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args);
2327     PER_HEAP
2328     void compact_in_brick (uint8_t* tree, compact_args* args);
2329 
2330     PER_HEAP
2331     mark* get_next_pinned_entry (uint8_t* tree,
2332                                  BOOL* has_pre_plug_info_p,
2333                                  BOOL* has_post_plug_info_p,
2334                                  BOOL deque_p=TRUE);
2335 
2336     PER_HEAP
2337     mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p);
2338 
2339     PER_HEAP
2340     void recover_saved_pinned_info();
2341 
2342     PER_HEAP
2343     void compact_phase (int condemned_gen_number, uint8_t*
2344                         first_condemned_address, BOOL clear_cards);
2345     PER_HEAP
2346     void clear_cards (size_t start_card, size_t end_card);
2347     PER_HEAP
2348     void clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address);
2349     PER_HEAP
2350     void copy_cards (size_t dst_card, size_t src_card,
2351                      size_t end_card, BOOL nextp);
2352     PER_HEAP
2353     void copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2354 
2355 #ifdef BACKGROUND_GC
2356     PER_HEAP
2357     void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit);
2358     PER_HEAP
2359     void copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
2360 #endif //BACKGROUND_GC
2361 
2362 
2363     PER_HEAP
2364     BOOL ephemeral_pointer_p (uint8_t* o);
2365     PER_HEAP
2366     void fix_brick_to_highest (uint8_t* o, uint8_t* next_o);
2367     PER_HEAP
2368     uint8_t* find_first_object (uint8_t* start_address, uint8_t* first_object);
2369     PER_HEAP
2370     uint8_t* compute_next_boundary (uint8_t* low, int gen_number, BOOL relocating);
2371     PER_HEAP
2372     void keep_card_live (uint8_t* o, size_t& n_gen,
2373                          size_t& cg_pointers_found);
2374     PER_HEAP
2375     void mark_through_cards_helper (uint8_t** poo, size_t& ngen,
2376                                     size_t& cg_pointers_found,
2377                                     card_fn fn, uint8_t* nhigh,
2378                                     uint8_t* next_boundary);
2379 
2380     PER_HEAP
2381     BOOL card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
2382                                size_t& cg_pointers_found,
2383                                size_t& n_eph, size_t& n_card_set,
2384                                size_t& card, size_t& end_card,
2385                                BOOL& foundp, uint8_t*& start_address,
2386                                uint8_t*& limit, size_t& n_cards_cleared);
2387     PER_HEAP
2388     void mark_through_cards_for_segments (card_fn fn, BOOL relocating);
2389 
2390     PER_HEAP
2391     void repair_allocation_in_expanded_heap (generation* gen);
2392     PER_HEAP
2393     BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index);
2394     PER_HEAP
2395     BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index);
2396     PER_HEAP
2397     BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count);
2398 #ifdef SEG_REUSE_STATS
2399     PER_HEAP
2400     size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size);
2401 #endif //SEG_REUSE_STATS
2402     PER_HEAP
2403     void build_ordered_free_spaces (heap_segment* seg);
2404     PER_HEAP
2405     void count_plug (size_t last_plug_size, uint8_t*& last_plug);
2406     PER_HEAP
2407     void count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug);
2408     PER_HEAP
2409     void build_ordered_plug_indices ();
2410     PER_HEAP
2411     void init_ordered_free_space_indices ();
2412     PER_HEAP
2413     void trim_free_spaces_indices ();
2414     PER_HEAP
2415     BOOL try_best_fit (BOOL end_of_segment_p);
2416     PER_HEAP
2417     BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space);
2418     PER_HEAP
2419     BOOL process_free_space (heap_segment* seg,
2420                              size_t free_space,
2421                              size_t min_free_size,
2422                              size_t min_cont_size,
2423                              size_t* total_free_space,
2424                              size_t* largest_free_space);
2425     PER_HEAP
2426     size_t compute_eph_gen_starts_size();
2427     PER_HEAP
2428     void compute_new_ephemeral_size();
2429     PER_HEAP
2430     BOOL expand_reused_seg_p();
2431     PER_HEAP
2432     BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
2433                             size_t min_cont_size, allocator* al);
2434     PER_HEAP
2435     uint8_t* allocate_in_expanded_heap (generation* gen, size_t size,
2436                                      BOOL& adjacentp, uint8_t* old_loc,
2437 #ifdef SHORT_PLUGS
2438                                      BOOL set_padding_on_saved_p,
2439                                      mark* pinned_plug_entry,
2440 #endif //SHORT_PLUGS
2441                                      BOOL consider_bestfit, int active_new_gen_number
2442                                      REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
2443     PER_HEAP
2444     void realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
2445                        generation* gen, uint8_t* start_address,
2446                        unsigned int& active_new_gen_number,
2447                        uint8_t*& last_pinned_gap, BOOL& leftp,
2448                        BOOL shortened_p
2449 #ifdef SHORT_PLUGS
2450                        , mark* pinned_plug_entry
2451 #endif //SHORT_PLUGS
2452                        );
2453     PER_HEAP
2454     void realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address,
2455                            generation* gen,
2456                            unsigned int& active_new_gen_number,
2457                            uint8_t*& last_pinned_gap, BOOL& leftp);
2458     PER_HEAP
2459     void realloc_plugs (generation* consing_gen, heap_segment* seg,
2460                         uint8_t* start_address, uint8_t* end_address,
2461                         unsigned active_new_gen_number);
2462 
2463     PER_HEAP
2464     void set_expand_in_full_gc (int condemned_gen_number);
2465 
2466     PER_HEAP
2467     void verify_no_pins (uint8_t* start, uint8_t* end);
2468 
2469     PER_HEAP
2470     generation* expand_heap (int condemned_generation,
2471                              generation* consing_gen,
2472                              heap_segment* new_heap_segment);
2473 
2474     PER_HEAP
2475     void save_ephemeral_generation_starts();
2476 
2477     PER_HEAP
2478     bool init_dynamic_data ();
2479     PER_HEAP
2480     float surv_to_growth (float cst, float limit, float max_limit);
2481     PER_HEAP
2482     size_t desired_new_allocation (dynamic_data* dd, size_t out,
2483                                    int gen_number, int pass);
2484 
2485     PER_HEAP
2486     void trim_youngest_desired_low_memory();
2487 
2488     PER_HEAP
2489     void decommit_ephemeral_segment_pages();
2490 
2491 #ifdef BIT64
2492     PER_HEAP_ISOLATED
2493     size_t trim_youngest_desired (uint32_t memory_load,
2494                                   size_t total_new_allocation,
2495                                   size_t total_min_allocation);
2496     PER_HEAP_ISOLATED
2497     size_t joined_youngest_desired (size_t new_allocation);
2498 #endif // BIT64
2499     PER_HEAP_ISOLATED
2500     size_t get_total_heap_size ();
2501     PER_HEAP_ISOLATED
2502     size_t get_total_committed_size();
2503 
2504     PER_HEAP_ISOLATED
2505     void get_memory_info (uint32_t* memory_load,
2506                           uint64_t* available_physical=NULL,
2507                           uint64_t* available_page_file=NULL);
2508     PER_HEAP
2509     size_t generation_size (int gen_number);
2510     PER_HEAP_ISOLATED
2511     size_t get_total_survived_size();
2512     PER_HEAP
2513     size_t get_current_allocated();
2514     PER_HEAP_ISOLATED
2515     size_t get_total_allocated();
2516     PER_HEAP
2517     size_t current_generation_size (int gen_number);
2518     PER_HEAP
2519     size_t generation_plan_size (int gen_number);
2520     PER_HEAP
2521     void  compute_promoted_allocation (int gen_number);
2522     PER_HEAP
2523     size_t  compute_in (int gen_number);
2524     PER_HEAP
2525     void compute_new_dynamic_data (int gen_number);
2526     PER_HEAP
2527     gc_history_per_heap* get_gc_data_per_heap();
2528     PER_HEAP
2529     size_t new_allocation_limit (size_t size, size_t free_size, int gen_number);
2530     PER_HEAP
2531     size_t generation_fragmentation (generation* gen,
2532                                      generation* consing_gen,
2533                                      uint8_t* end);
2534     PER_HEAP
2535     size_t generation_sizes (generation* gen);
2536     PER_HEAP
2537     size_t committed_size();
2538     PER_HEAP
2539     size_t approximate_new_allocation();
2540     PER_HEAP
2541     size_t end_space_after_gc();
2542     PER_HEAP
2543     BOOL decide_on_compacting (int condemned_gen_number,
2544                                size_t fragmentation,
2545                                BOOL& should_expand);
2546     PER_HEAP
2547     BOOL ephemeral_gen_fit_p (gc_tuning_point tp);
2548     PER_HEAP
2549     void reset_large_object (uint8_t* o);
2550     PER_HEAP
2551     void sweep_large_objects ();
2552     PER_HEAP
2553     void relocate_in_large_objects ();
2554     PER_HEAP
2555     void mark_through_cards_for_large_objects (card_fn fn, BOOL relocating);
2556     PER_HEAP
2557     void descr_segment (heap_segment* seg);
2558     PER_HEAP
2559     void descr_card_table ();
2560     PER_HEAP
2561     void descr_generations (BOOL begin_gc_p);
2562 
2563     PER_HEAP_ISOLATED
2564     void descr_generations_to_profiler (gen_walk_fn fn, void *context);
2565 
2566     /*------------ Multiple non isolated heaps ----------------*/
2567 #ifdef MULTIPLE_HEAPS
2568     PER_HEAP_ISOLATED
2569     BOOL   create_thread_support (unsigned number_of_heaps);
2570     PER_HEAP_ISOLATED
2571     void destroy_thread_support ();
2572     PER_HEAP
2573     bool create_gc_thread();
2574     PER_HEAP
2575     void gc_thread_function();
2576 #ifdef MARK_LIST
2577 #ifdef PARALLEL_MARK_LIST_SORT
2578     PER_HEAP
2579     void sort_mark_list();
2580     PER_HEAP
2581     void merge_mark_lists();
2582     PER_HEAP
2583     void append_to_mark_list(uint8_t **start, uint8_t **end);
2584 #else //PARALLEL_MARK_LIST_SORT
2585     PER_HEAP_ISOLATED
2586     void combine_mark_lists();
2587 #endif //PARALLEL_MARK_LIST_SORT
2588 #endif
2589 #endif //MULTIPLE_HEAPS
2590 
2591     /*------------ End of Multiple non isolated heaps ---------*/
2592 
2593 #ifndef SEG_MAPPING_TABLE
2594     PER_HEAP_ISOLATED
2595     heap_segment* segment_of (uint8_t* add,  ptrdiff_t & delta,
2596                               BOOL verify_p = FALSE);
2597 #endif //SEG_MAPPING_TABLE
2598 
2599 #ifdef BACKGROUND_GC
2600 
2601     //this is called by revisit....
2602     PER_HEAP
2603     uint8_t* high_page (heap_segment* seg, BOOL concurrent_p);
2604 
2605     PER_HEAP
2606     void revisit_written_page (uint8_t* page, uint8_t* end, BOOL concurrent_p,
2607                                heap_segment* seg,  uint8_t*& last_page,
2608                                uint8_t*& last_object, BOOL large_objects_p,
2609                                size_t& num_marked_objects);
2610     PER_HEAP
2611     void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE);
2612 
2613     PER_HEAP
2614     void concurrent_scan_dependent_handles (ScanContext *sc);
2615 
2616     PER_HEAP_ISOLATED
2617     void suspend_EE ();
2618 
2619     PER_HEAP_ISOLATED
2620     void bgc_suspend_EE ();
2621 
2622     PER_HEAP_ISOLATED
2623     void restart_EE ();
2624 
2625     PER_HEAP
2626     void background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags);
2627 
2628     PER_HEAP
2629     void background_scan_dependent_handles (ScanContext *sc);
2630 
2631     PER_HEAP
2632     void allow_fgc();
2633 
2634     // Restores BGC settings if necessary.
2635     PER_HEAP_ISOLATED
2636     void recover_bgc_settings();
2637 
2638     PER_HEAP
2639     void save_bgc_data_per_heap();
2640 
2641     PER_HEAP
2642     BOOL should_commit_mark_array();
2643 
2644     PER_HEAP
2645     void clear_commit_flag();
2646 
2647     PER_HEAP_ISOLATED
2648     void clear_commit_flag_global();
2649 
2650     PER_HEAP_ISOLATED
2651     void verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr);
2652 
2653     PER_HEAP_ISOLATED
2654     void verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr);
2655 
2656     PER_HEAP_ISOLATED
2657     BOOL commit_mark_array_by_range (uint8_t* begin,
2658                                      uint8_t* end,
2659                                      uint32_t* mark_array_addr);
2660 
2661     PER_HEAP_ISOLATED
2662     BOOL commit_mark_array_new_seg (gc_heap* hp,
2663                                     heap_segment* seg,
2664                                     uint32_t* new_card_table = 0,
2665                                     uint8_t* new_lowest_address = 0);
2666 
2667     PER_HEAP_ISOLATED
2668     BOOL commit_mark_array_with_check (heap_segment* seg, uint32_t* mark_array_addr);
2669 
2670     // commit the portion of the mark array that corresponds to
2671     // this segment (from beginning to reserved).
2672     // seg and heap_segment_reserved (seg) are guaranteed to be
2673     // page aligned.
2674     PER_HEAP_ISOLATED
2675     BOOL commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr);
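    // Illustrative sketch, not the actual body: committing the mark array
    // slice that covers a whole segment, relying on the page alignment noted
    // above; mark_array_addr is the mark array being committed into.
#if 0
    uint8_t* range_beg = (uint8_t*)seg;
    uint8_t* range_end = heap_segment_reserved (seg);
    BOOL committed_p = commit_mark_array_by_range (range_beg, range_end, mark_array_addr);
#endif // illustrative sketch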
2676 
2677     // During BGC init, we commit the mark array for all in range
2678     // segments whose mark array hasn't been committed or fully
2679     // committed. All rw segments are in range, only ro segments
2680     // can be partial in range.
2681     PER_HEAP
2682     BOOL commit_mark_array_bgc_init (uint32_t* mark_array_addr);
2683 
2684     PER_HEAP
2685     BOOL commit_new_mark_array (uint32_t* new_mark_array);
2686 
2687     // We need to commit all segments that intersect with the bgc
2688     // range. If a segment is only partially in range, we still
2689     // should commit the mark array for the whole segment as
2690     // we will set the mark array commit flag for this segment.
2691     PER_HEAP_ISOLATED
2692     BOOL commit_new_mark_array_global (uint32_t* new_mark_array);
2693 
2694     // We can't decommit the first and the last page in the mark array
2695     // if the beginning and ending don't happen to be page aligned.
2696     PER_HEAP
2697     void decommit_mark_array_by_seg (heap_segment* seg);
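    // Illustrative sketch of the constraint described above (align_on_page /
    // align_lower_page are assumed alignment helpers, and the range locals are
    // hypothetical): only pages fully covered by the range can be decommitted.
#if 0
    uint8_t* decommit_start = align_on_page (mark_array_range_beg);
    uint8_t* decommit_end   = align_lower_page (mark_array_range_end);
    if (decommit_start < decommit_end)
    {
        GCToOSInterface::VirtualDecommit (decommit_start, (size_t)(decommit_end - decommit_start));
    }
#endif // illustrative sketch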
2698 
2699     PER_HEAP
2700     void background_mark_phase();
2701 
2702     PER_HEAP
2703     void background_drain_mark_list (int thread);
2704 
2705     PER_HEAP
2706     void background_grow_c_mark_list();
2707 
2708     PER_HEAP_ISOLATED
2709     void background_promote_callback(Object** object, ScanContext* sc, uint32_t flags);
2710 
2711     PER_HEAP
2712     void mark_absorb_new_alloc();
2713 
2714     PER_HEAP
2715     void restart_vm();
2716 
2717     PER_HEAP
2718     BOOL prepare_bgc_thread(gc_heap* gh);
2719     PER_HEAP
2720     BOOL create_bgc_thread(gc_heap* gh);
2721     PER_HEAP_ISOLATED
2722     BOOL create_bgc_threads_support (int number_of_heaps);
2723     PER_HEAP
2724     BOOL create_bgc_thread_support();
2725     PER_HEAP_ISOLATED
2726     int check_for_ephemeral_alloc();
2727     PER_HEAP_ISOLATED
2728     void wait_to_proceed();
2729     PER_HEAP_ISOLATED
2730     void fire_alloc_wait_event_begin (alloc_wait_reason awr);
2731     PER_HEAP_ISOLATED
2732     void fire_alloc_wait_event_end (alloc_wait_reason awr);
2733     PER_HEAP
2734     void background_gc_wait_lh (alloc_wait_reason awr = awr_ignored);
2735     PER_HEAP
2736     uint32_t background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
2737     PER_HEAP_ISOLATED
2738     void start_c_gc();
2739     PER_HEAP
2740     void kill_gc_thread();
2741     PER_HEAP
2742     uint32_t bgc_thread_function();
2743     PER_HEAP_ISOLATED
2744     void do_background_gc();
2745     static
2746     uint32_t __stdcall bgc_thread_stub (void* arg);
2747 
2748 #endif //BACKGROUND_GC
2749 
2750 public:
2751 
2752     PER_HEAP_ISOLATED
2753     VOLATILE(bool) internal_gc_done;
2754 
2755 #ifdef BACKGROUND_GC
2756     PER_HEAP_ISOLATED
2757     uint32_t cm_in_progress;
2758 
2759     PER_HEAP
2760     BOOL expanded_in_fgc;
2761 
2762     // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
2763     // we do right before the bgc starts.
2764     PER_HEAP_ISOLATED
2765     BOOL     dont_restart_ee_p;
2766 
2767     PER_HEAP_ISOLATED
2768     CLREvent bgc_start_event;
2769 #endif //BACKGROUND_GC
2770 
2771     PER_HEAP_ISOLATED
2772     uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
2773 
2774     // Returns TRUE if the thread used to be in cooperative mode
2775     // before calling this function.
2776     PER_HEAP_ISOLATED
2777     BOOL enable_preemptive (Thread* current_thread);
2778     PER_HEAP_ISOLATED
2779     void disable_preemptive (Thread* current_thread, BOOL restore_cooperative);
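    // Illustrative usage sketch, not from the original source: the typical
    // save/restore pairing implied by the comment above.
#if 0
    BOOL cooperative_mode = gc_heap::enable_preemptive (current_thread);
    // ... block or wait while in preemptive mode ...
    gc_heap::disable_preemptive (current_thread, cooperative_mode);
#endif // illustrative sketch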
2780 
2781     /* ------------------- per heap members --------------------------*/
2782 
2783     PER_HEAP
2784 #ifndef MULTIPLE_HEAPS
2785     CLREvent gc_done_event;
2786 #else // MULTIPLE_HEAPS
2787     CLREvent gc_done_event;
2788 #endif // MULTIPLE_HEAPS
2789 
2790     PER_HEAP
2791     VOLATILE(int32_t) gc_done_event_lock;
2792 
2793     PER_HEAP
2794     VOLATILE(bool) gc_done_event_set;
2795 
2796     PER_HEAP
2797     void set_gc_done();
2798 
2799     PER_HEAP
2800     void reset_gc_done();
2801 
2802     PER_HEAP
2803     void enter_gc_done_event_lock();
2804 
2805     PER_HEAP
2806     void exit_gc_done_event_lock();
2807 
2808     PER_HEAP
2809     uint8_t*  ephemeral_low;      //lowest ephemeral address
2810 
2811     PER_HEAP
2812     uint8_t*  ephemeral_high;     //highest ephemeral address
2813 
2814     PER_HEAP
2815     uint32_t* card_table;
2816 
2817     PER_HEAP
2818     short* brick_table;
2819 
2820 #ifdef MARK_ARRAY
2821 #ifdef MULTIPLE_HEAPS
2822     PER_HEAP
2823     uint32_t* mark_array;
2824 #else
2825     SPTR_DECL(uint32_t, mark_array);
2826 #endif //MULTIPLE_HEAPS
2827 #endif //MARK_ARRAY
2828 
2829 #ifdef CARD_BUNDLE
2830     PER_HEAP
2831     uint32_t* card_bundle_table;
2832 #endif //CARD_BUNDLE
2833 
2834 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2835     PER_HEAP_ISOLATED
2836     sorted_table* seg_table;
2837 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2838 
2839     PER_HEAP_ISOLATED
2840     VOLATILE(BOOL) gc_started;
2841 
2842     // The following 2 events are there to support the gen2
2843     // notification feature which is only enabled if concurrent
2844     // GC is disabled.
2845     PER_HEAP_ISOLATED
2846     CLREvent full_gc_approach_event;
2847 
2848     PER_HEAP_ISOLATED
2849     CLREvent full_gc_end_event;
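    // Illustrative sketch, assuming a caller-supplied time_out_ms and the
    // wait_full_gc_success status value: how the two events above back a
    // "full GC approaching / full GC finished" notification wait.
#if 0
    wait_full_gc_status status = full_gc_wait (&full_gc_approach_event, time_out_ms);
    if (status == wait_full_gc_success)
    {
        // ... reduce allocations, release caches, etc., then wait for the end ...
        full_gc_wait (&full_gc_end_event, time_out_ms);
    }
#endif // illustrative sketch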
2850 
2851     // Full GC Notification percentages.
2852     PER_HEAP_ISOLATED
2853     uint32_t fgn_maxgen_percent;
2854 
2855     PER_HEAP_ISOLATED
2856     uint32_t fgn_loh_percent;
2857 
2858     PER_HEAP_ISOLATED
2859     VOLATILE(bool) full_gc_approach_event_set;
2860 
2861 #ifdef BACKGROUND_GC
2862     PER_HEAP_ISOLATED
2863     BOOL fgn_last_gc_was_concurrent;
2864 #endif //BACKGROUND_GC
2865 
2866     PER_HEAP
2867     size_t fgn_last_alloc;
2868 
2869     static uint32_t user_thread_wait (CLREvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
2870 
2871     static wait_full_gc_status full_gc_wait (CLREvent *event, int time_out_ms);
2872 
2873     PER_HEAP
2874     uint8_t* demotion_low;
2875 
2876     PER_HEAP
2877     uint8_t* demotion_high;
2878 
2879     PER_HEAP
2880     BOOL demote_gen1_p;
2881 
2882     PER_HEAP
2883     uint8_t* last_gen1_pin_end;
2884 
2885     PER_HEAP
2886     gen_to_condemn_tuning gen_to_condemn_reasons;
2887 
2888     PER_HEAP
2889     size_t etw_allocation_running_amount[2];
2890 
2891     PER_HEAP
2892     int gc_policy;  //sweep, compact, expand
2893 
2894 #ifdef MULTIPLE_HEAPS
2895     PER_HEAP_ISOLATED
2896     bool gc_thread_no_affinitize_p;
2897 
2898     PER_HEAP_ISOLATED
2899     CLREvent gc_start_event;
2900 
2901     PER_HEAP_ISOLATED
2902     CLREvent ee_suspend_event;
2903 
2904     PER_HEAP
2905     heap_segment* new_heap_segment;
2906 
2907 #define alloc_quantum_balance_units (16)
2908 
2909     PER_HEAP_ISOLATED
2910     size_t min_balance_threshold;
2911 #else //MULTIPLE_HEAPS
2912 
2913     PER_HEAP
2914     size_t allocation_running_time;
2915 
2916     PER_HEAP
2917     size_t allocation_running_amount;
2918 
2919 #endif //MULTIPLE_HEAPS
2920 
2921     PER_HEAP_ISOLATED
2922     gc_mechanisms settings;
2923 
2924     PER_HEAP_ISOLATED
2925     gc_history_global gc_data_global;
2926 
2927     PER_HEAP_ISOLATED
2928     size_t gc_last_ephemeral_decommit_time;
2929 
2930     PER_HEAP_ISOLATED
2931     size_t gc_gen0_desired_high;
2932 
2933     PER_HEAP
2934     size_t gen0_big_free_spaces;
2935 
2936 #ifdef SHORT_PLUGS
2937     PER_HEAP_ISOLATED
2938     double short_plugs_pad_ratio;
2939 #endif //SHORT_PLUGS
2940 
2941 #ifdef BIT64
2942     PER_HEAP_ISOLATED
2943     size_t youngest_gen_desired_th;
2944 #endif //BIT64
2945 
2946     PER_HEAP_ISOLATED
2947     uint32_t high_memory_load_th;
2948 
2949     PER_HEAP_ISOLATED
2950     uint64_t mem_one_percent;
2951 
2952     PER_HEAP_ISOLATED
2953     uint64_t total_physical_mem;
2954 
2955     PER_HEAP_ISOLATED
2956     uint64_t entry_available_physical_mem;
2957 
2958     PER_HEAP_ISOLATED
2959     size_t last_gc_index;
2960 
2961     PER_HEAP_ISOLATED
2962     size_t min_segment_size;
2963 
2964     PER_HEAP
2965     uint8_t* lowest_address;
2966 
2967     PER_HEAP
2968     uint8_t* highest_address;
2969 
2970     PER_HEAP
2971     BOOL ephemeral_promotion;
2972     PER_HEAP
2973     uint8_t* saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
2974     PER_HEAP
2975     size_t saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
2976 
2977 protected:
2978 #ifdef MULTIPLE_HEAPS
2979     PER_HEAP
2980     GCHeap* vm_heap;
2981     PER_HEAP
2982     int heap_number;
2983     PER_HEAP
2984     VOLATILE(int) alloc_context_count;
2985 #else //MULTIPLE_HEAPS
2986 #define vm_heap ((GCHeap*) g_theGCHeap)
2987 #define heap_number (0)
2988 #endif //MULTIPLE_HEAPS
2989 
2990 #ifndef MULTIPLE_HEAPS
2991     SPTR_DECL(heap_segment,ephemeral_heap_segment);
2992 #else
2993     PER_HEAP
2994     heap_segment* ephemeral_heap_segment;
2995 #endif // !MULTIPLE_HEAPS
2996 
2997     PER_HEAP
2998     size_t time_bgc_last;
2999 
3000     PER_HEAP
3001     uint8_t*       gc_low; // lowest address being condemned
3002 
3003     PER_HEAP
3004     uint8_t*       gc_high; //highest address being condemned
3005 
3006     PER_HEAP
3007     size_t      mark_stack_tos;
3008 
3009     PER_HEAP
3010     size_t      mark_stack_bos;
3011 
3012     PER_HEAP
3013     size_t      mark_stack_array_length;
3014 
3015     PER_HEAP
3016     mark*       mark_stack_array;
3017 
3018     PER_HEAP
3019     BOOL        verify_pinned_queue_p;
3020 
3021     PER_HEAP
3022     uint8_t*    oldest_pinned_plug;
3023 
3024 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
3025     PER_HEAP
3026     size_t      num_pinned_objects;
3027 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
3028 
3029 #ifdef FEATURE_LOH_COMPACTION
3030     PER_HEAP
3031     size_t      loh_pinned_queue_tos;
3032 
3033     PER_HEAP
3034     size_t      loh_pinned_queue_bos;
3035 
3036     PER_HEAP
3037     size_t      loh_pinned_queue_length;
3038 
3039     PER_HEAP_ISOLATED
3040     int         loh_pinned_queue_decay;
3041 
3042     PER_HEAP
3043     mark*       loh_pinned_queue;
3044 
3045     // This is for forced LOH compaction via the complus env var
3046     PER_HEAP_ISOLATED
3047     BOOL        loh_compaction_always_p;
3048 
3049     // This is set by the user.
3050     PER_HEAP_ISOLATED
3051     gc_loh_compaction_mode loh_compaction_mode;
3052 
3053     // We may not compact LOH on every heap if we can't
3054     // grow the pinned queue. This is to indicate whether
3055     // this heap's LOH is compacted or not. So even if
3056     // settings.loh_compaction is TRUE this may not be TRUE.
3057     PER_HEAP
3058     BOOL        loh_compacted_p;
3059 #endif //FEATURE_LOH_COMPACTION
3060 
3061 #ifdef BACKGROUND_GC
3062 
3063     PER_HEAP
3064     EEThreadId bgc_thread_id;
3065 
3066 #ifdef WRITE_WATCH
3067     PER_HEAP
3068     uint8_t* background_written_addresses [array_size+2];
3069 #endif //WRITE_WATCH
3070 
3071 #if defined (DACCESS_COMPILE) && !defined (MULTIPLE_HEAPS)
3072     // doesn't need to be volatile for DAC.
3073     SVAL_DECL(c_gc_state, current_c_gc_state);
3074 #else
3075     PER_HEAP_ISOLATED
3076     VOLATILE(c_gc_state) current_c_gc_state;     //tells the large object allocator to
3077     //mark the object as new since the start of gc.
3078 #endif //DACCESS_COMPILE && !MULTIPLE_HEAPS
3079 
3080     PER_HEAP_ISOLATED
3081     gc_mechanisms saved_bgc_settings;
3082 
3083     PER_HEAP
3084     gc_history_per_heap bgc_data_per_heap;
3085 
3086     PER_HEAP
3087     BOOL bgc_thread_running; // gc thread is in its main loop
3088 
3089     PER_HEAP_ISOLATED
3090     BOOL keep_bgc_threads_p;
3091 
3092     // This event is used by BGC threads to do something on
3093     // one specific thread while other BGC threads have to
3094     // wait. This is different from a join 'cause you can't
3095     // specify which thread should be doing some task
3096     // while other threads have to wait.
3097     // For example, to make the BGC threads managed threads
3098     // we need to create them on the thread that called
3099     // SuspendEE which is heap 0.
3100     PER_HEAP_ISOLATED
3101     CLREvent bgc_threads_sync_event;
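    // Illustrative sketch of the pattern described above, not the actual code:
    // one BGC thread (heap 0 here) performs a task while the others wait on
    // the event; do_single_threaded_task() is a hypothetical placeholder.
#if 0
    if (heap_number == 0)
    {
        do_single_threaded_task();
        bgc_threads_sync_event.Set();
    }
    else
    {
        bgc_threads_sync_event.Wait (INFINITE, FALSE);
    }
#endif // illustrative sketch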
3102 
3103     PER_HEAP
3104     Thread* bgc_thread;
3105 
3106     PER_HEAP
3107     CLRCriticalSection bgc_threads_timeout_cs;
3108 
3109     PER_HEAP_ISOLATED
3110     CLREvent background_gc_done_event;
3111 
3112     PER_HEAP_ISOLATED
3113     CLREvent ee_proceed_event;
3114 
3115     PER_HEAP
3116     CLREvent gc_lh_block_event;
3117 
3118     PER_HEAP_ISOLATED
3119     bool gc_can_use_concurrent;
3120 
3121     PER_HEAP_ISOLATED
3122     bool temp_disable_concurrent_p;
3123 
3124     PER_HEAP_ISOLATED
3125     BOOL do_ephemeral_gc_p;
3126 
3127     PER_HEAP_ISOLATED
3128     BOOL do_concurrent_p;
3129 
3130     PER_HEAP
3131     VOLATILE(bgc_state) current_bgc_state;
3132 
3133     struct gc_history
3134     {
3135         size_t gc_index;
3136         bgc_state current_bgc_state;
3137         uint32_t gc_time_ms;
3138         // This is in bytes per ms; consider breaking it
3139         // into the efficiency per phase.
3140         size_t gc_efficiency;
3141         uint8_t* eph_low;
3142         uint8_t* gen0_start;
3143         uint8_t* eph_high;
3144         uint8_t* bgc_highest;
3145         uint8_t* bgc_lowest;
3146         uint8_t* fgc_highest;
3147         uint8_t* fgc_lowest;
3148         uint8_t* g_highest;
3149         uint8_t* g_lowest;
3150     };
3151 
3152 #define max_history_count 64
3153 
3154     PER_HEAP
3155     int gchist_index_per_heap;
3156 
3157     PER_HEAP
3158     gc_history gchist_per_heap[max_history_count];
3159 
3160     PER_HEAP_ISOLATED
3161     int gchist_index;
3162 
3163     PER_HEAP_ISOLATED
3164     gc_mechanisms_store gchist[max_history_count];
3165 
3166     PER_HEAP
3167     void add_to_history_per_heap();
3168 
3169     PER_HEAP_ISOLATED
3170     void add_to_history();
3171 
3172     PER_HEAP
3173     size_t total_promoted_bytes;
3174 
3175     PER_HEAP
3176     size_t     bgc_overflow_count;
3177 
3178     PER_HEAP
3179     size_t     bgc_begin_loh_size;
3180     PER_HEAP
3181     size_t     end_loh_size;
3182 
3183     // We need to throttle the LOH allocations during BGC since we can't
3184     // collect LOH when BGC is in progress.
3185     // We allow the LOH heap size to double during a BGC. So for every
3186     // 10% increase we will have the LOH allocating thread sleep for one more
3187     // ms. So if we are already 30% over the original heap size, the thread will
3188     // sleep for 3 ms.
3189     PER_HEAP
3190     uint32_t   bgc_alloc_spin_loh;
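    // Illustrative sketch of the 10%-per-ms throttle described above; this is
    // just the arithmetic, not the code that actually updates the field.
#if 0
    // one extra ms of sleep per 10% growth over the LOH size at BGC start,
    // e.g. 30% over the original size => the allocating thread sleeps 3 ms.
    uint32_t sleep_ms = (uint32_t)((bgc_loh_size_increased * 10) / bgc_begin_loh_size);
    bgc_alloc_spin_loh = sleep_ms;
#endif // illustrative sketch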
3191 
3192     // This includes what we allocate at the end of the segment - allocating
3193     // from the free list doesn't increase the heap size.
3194     PER_HEAP
3195     size_t     bgc_loh_size_increased;
3196 
3197     PER_HEAP
3198     size_t     bgc_loh_allocated_in_free;
3199 
3200     PER_HEAP
3201     size_t     background_soh_alloc_count;
3202 
3203     PER_HEAP
3204     size_t     background_loh_alloc_count;
3205 
3206     PER_HEAP
3207     uint8_t**  background_mark_stack_tos;
3208 
3209     PER_HEAP
3210     uint8_t**  background_mark_stack_array;
3211 
3212     PER_HEAP
3213     size_t    background_mark_stack_array_length;
3214 
3215     PER_HEAP
3216     uint8_t*  background_min_overflow_address;
3217 
3218     PER_HEAP
3219     uint8_t*  background_max_overflow_address;
3220 
3221     // We can't process the soh range concurrently so we
3222     // wait till final mark to process it.
3223     PER_HEAP
3224     BOOL      processed_soh_overflow_p;
3225 
3226     PER_HEAP
3227     uint8_t*  background_min_soh_overflow_address;
3228 
3229     PER_HEAP
3230     uint8_t*  background_max_soh_overflow_address;
3231 
3232     PER_HEAP
3233     heap_segment* saved_overflow_ephemeral_seg;
3234 
3235 #ifndef MULTIPLE_HEAPS
3236     SPTR_DECL(heap_segment, saved_sweep_ephemeral_seg);
3237 
3238     SPTR_DECL(uint8_t, saved_sweep_ephemeral_start);
3239 
3240     SPTR_DECL(uint8_t, background_saved_lowest_address);
3241 
3242     SPTR_DECL(uint8_t, background_saved_highest_address);
3243 #else
3244 
3245     PER_HEAP
3246     heap_segment* saved_sweep_ephemeral_seg;
3247 
3248     PER_HEAP
3249     uint8_t* saved_sweep_ephemeral_start;
3250 
3251     PER_HEAP
3252     uint8_t* background_saved_lowest_address;
3253 
3254     PER_HEAP
3255     uint8_t* background_saved_highest_address;
3256 #endif //!MULTIPLE_HEAPS
3257 
3258     // This is used for synchronization between the bgc thread
3259     // for this heap and the user threads allocating on this
3260     // heap.
3261     PER_HEAP
3262     exclusive_sync* bgc_alloc_lock;
3263 
3264 #ifdef SNOOP_STATS
3265     PER_HEAP
3266     snoop_stats_data snoop_stat;
3267 #endif //SNOOP_STATS
3268 
3269 
3270     PER_HEAP
3271     uint8_t**          c_mark_list;
3272 
3273     PER_HEAP
3274     size_t          c_mark_list_length;
3275 
3276     PER_HEAP
3277     size_t          c_mark_list_index;
3278 #endif //BACKGROUND_GC
3279 
3280 #ifdef MARK_LIST
3281     PER_HEAP
3282     uint8_t** mark_list;
3283 
3284     PER_HEAP_ISOLATED
3285     size_t mark_list_size;
3286 
3287     PER_HEAP
3288     uint8_t** mark_list_end;
3289 
3290     PER_HEAP
3291     uint8_t** mark_list_index;
3292 
3293     PER_HEAP_ISOLATED
3294     uint8_t** g_mark_list;
3295 #ifdef PARALLEL_MARK_LIST_SORT
3296     PER_HEAP_ISOLATED
3297     uint8_t** g_mark_list_copy;
3298     PER_HEAP
3299     uint8_t*** mark_list_piece_start;
3300     uint8_t*** mark_list_piece_end;
3301 #endif //PARALLEL_MARK_LIST_SORT
3302 #endif //MARK_LIST
3303 
3304     PER_HEAP
3305     uint8_t*  min_overflow_address;
3306 
3307     PER_HEAP
3308     uint8_t*  max_overflow_address;
3309 
3310     PER_HEAP
3311     uint8_t*  shigh; //keeps track of the highest marked object
3312 
3313     PER_HEAP
3314     uint8_t*  slow; //keeps track of the lowest marked object
3315 
3316     PER_HEAP
3317     size_t allocation_quantum;
3318 
3319     PER_HEAP
3320     size_t alloc_contexts_used;
3321 
3322     PER_HEAP_ISOLATED
3323     no_gc_region_info current_no_gc_region_info;
3324 
3325     PER_HEAP
3326     size_t soh_allocation_no_gc;
3327 
3328     PER_HEAP
3329     size_t loh_allocation_no_gc;
3330 
3331     PER_HEAP
3332     bool no_gc_oom_p;
3333 
3334     PER_HEAP
3335     heap_segment* saved_loh_segment_no_gc;
3336 
3337     PER_HEAP_ISOLATED
3338     BOOL proceed_with_gc_p;
3339 
3340 #define youngest_generation (generation_of (0))
3341 #define large_object_generation (generation_of (max_generation+1))
3342 
3343 #ifndef MULTIPLE_HEAPS
3344     SPTR_DECL(uint8_t,alloc_allocated);
3345 #else
3346     PER_HEAP
3347     uint8_t* alloc_allocated; //keeps track of the highest
3348     //address allocated by alloc
3349 #endif // !MULTIPLE_HEAPS
3350 
3351     // The more_space_lock and gc_lock are used for 3 purposes:
3352     //
3353     // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock)
3354     // 2) to synchronize allocations of large objects (more_space_lock)
3355     // 3) to synchronize the GC itself (gc_lock)
3356     //
3357     PER_HEAP_ISOLATED
3358     GCSpinLock gc_lock; //lock while doing GC
3359 
3360     PER_HEAP
3361     GCSpinLock more_space_lock; //lock while allocating more space
3362 
3363 #ifdef SYNCHRONIZATION_STATS
3364 
3365     PER_HEAP
3366     unsigned int good_suspension;
3367 
3368     PER_HEAP
3369     unsigned int bad_suspension;
3370 
3371     // Number of times when msl_acquire is > 200 cycles.
3372     PER_HEAP
3373     unsigned int num_high_msl_acquire;
3374 
3375     // Number of times when msl_acquire is < 200 cycles.
3376     PER_HEAP
3377     unsigned int num_low_msl_acquire;
3378 
3379     // Number of times the more_space_lock is acquired.
3380     PER_HEAP
3381     unsigned int num_msl_acquired;
3382 
3383     // Total cycles it takes to acquire the more_space_lock.
3384     PER_HEAP
3385     uint64_t total_msl_acquire;
3386 
3387     PER_HEAP
3388     void init_heap_sync_stats()
3389     {
3390         good_suspension = 0;
3391         bad_suspension = 0;
3392         num_msl_acquired = 0;
3393         total_msl_acquire = 0;
3394         num_high_msl_acquire = 0;
3395         num_low_msl_acquire = 0;
3396         more_space_lock.init();
3397         gc_lock.init();
3398     }
3399 
3400     PER_HEAP
3401     void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
3402     {
3403         printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
3404             heap_num,
3405             alloc_contexts_used,
3406             good_suspension,
3407             bad_suspension,
3408             (unsigned int)(total_msl_acquire / gc_count_during_log),
3409             num_high_msl_acquire / gc_count_during_log,
3410             num_low_msl_acquire / gc_count_during_log,
3411             num_msl_acquired / gc_count_during_log,
3412             more_space_lock.num_switch_thread / gc_count_during_log,
3413             more_space_lock.num_wait_longer / gc_count_during_log,
3414             more_space_lock.num_switch_thread_w / gc_count_during_log,
3415             more_space_lock.num_disable_preemptive_w / gc_count_during_log);
3416     }
3417 
3418 #endif //SYNCHRONIZATION_STATS
3419 
3420 #ifdef MULTIPLE_HEAPS
3421     PER_HEAP
3422     generation generation_table [NUMBERGENERATIONS+1];
3423 #endif
3424 
3425 
3426 #define NUM_LOH_ALIST (7)
3427 #define BASE_LOH_ALIST (64*1024)
3428     PER_HEAP
3429     alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
3430 
3431 #define NUM_GEN2_ALIST (12)
3432 #ifdef BIT64
3433 #define BASE_GEN2_ALIST (1*256)
3434 #else
3435 #define BASE_GEN2_ALIST (1*128)
3436 #endif // BIT64
3437     PER_HEAP
3438     alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
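    // The two arrays above are bucketed free lists. As a sketch only - assuming
    // bucket limits that double starting at the BASE_*_ALIST size, which is an
    // illustration of the general idea rather than a statement of the allocator's
    // exact policy - a free item's size would map to a bucket like this:
    //
    //     static unsigned alist_bucket_example (size_t size, size_t base, unsigned num_buckets)
    //     {
    //         unsigned index = 0;
    //         size_t limit = base;
    //         // walk the doubling limits until the size fits or we reach the last bucket
    //         while ((size >= limit) && (index < (num_buckets - 1)))
    //         {
    //             limit *= 2;
    //             index++;
    //         }
    //         return index;
    //     }
    //
    // For example, alist_bucket_example (100*1024, BASE_LOH_ALIST, NUM_LOH_ALIST) is 1.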
3439 
3440 //------------------------------------------
3441 
3442     PER_HEAP
3443     dynamic_data dynamic_data_table [NUMBERGENERATIONS+1];
3444 
3445     PER_HEAP
3446     gc_history_per_heap gc_data_per_heap;
3447 
3448     PER_HEAP
3449     size_t maxgen_pinned_compact_before_advance;
3450 
3451     // dynamic tuning.
3452     PER_HEAP
3453     BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
3454     // if elevate_p is FALSE, it means we are determining fragmentation for a generation
3455     // to see if we should condemn this gen; otherwise it means we are determining if
3456     // we should elevate to a max_gen GC from an ephemeral gen.
3457     PER_HEAP
3458     BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
3459     PER_HEAP
3460     BOOL
3461     dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
3462     PER_HEAP
3463     BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
3464     PER_HEAP
3465     BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
3466 
3467     PER_HEAP
3468     int generation_skip_ratio;//in %
3469 
3470     PER_HEAP
3471     BOOL gen0_bricks_cleared;
3472 #ifdef FFIND_OBJECT
3473     PER_HEAP
3474     int gen0_must_clear_bricks;
3475 #endif //FFIND_OBJECT
3476 
3477     PER_HEAP_ISOLATED
3478     size_t full_gc_counts[gc_type_max];
3479 
3480     // the # of LOH bytes allocated since the last full compacting GC.
3481     PER_HEAP
3482     uint64_t loh_alloc_since_cg;
3483 
3484     PER_HEAP
3485     BOOL elevation_requested;
3486 
3487     // if this is TRUE, we should always guarantee that we do a
3488     // full compacting GC before we OOM.
3489     PER_HEAP
3490     BOOL last_gc_before_oom;
3491 
3492     PER_HEAP_ISOLATED
3493     BOOL should_expand_in_full_gc;
3494 
3495 #ifdef BACKGROUND_GC
3496     PER_HEAP_ISOLATED
3497     size_t ephemeral_fgc_counts[max_generation];
3498 
3499     PER_HEAP_ISOLATED
3500     BOOL alloc_wait_event_p;
3501 
3502 #ifndef MULTIPLE_HEAPS
3503     SPTR_DECL(uint8_t, next_sweep_obj);
3504 #else
3505     PER_HEAP
3506     uint8_t* next_sweep_obj;
3507 #endif //MULTIPLE_HEAPS
3508 
3509     PER_HEAP
3510     uint8_t* current_sweep_pos;
3511 
3512 #endif //BACKGROUND_GC
3513 
3514 #ifndef MULTIPLE_HEAPS
3515     SVAL_DECL(oom_history, oom_info);
3516 #ifdef FEATURE_PREMORTEM_FINALIZATION
3517     SPTR_DECL(CFinalize,finalize_queue);
3518 #endif //FEATURE_PREMORTEM_FINALIZATION
3519 #else
3520 
3521     PER_HEAP
3522     oom_history oom_info;
3523 
3524 #ifdef FEATURE_PREMORTEM_FINALIZATION
3525     PER_HEAP
3526     PTR_CFinalize finalize_queue;
3527 #endif //FEATURE_PREMORTEM_FINALIZATION
3528 #endif // !MULTIPLE_HEAPS
3529 
3530     PER_HEAP
3531     fgm_history fgm_result;
3532 
3533     PER_HEAP_ISOLATED
3534     size_t eph_gen_starts_size;
3535 
3536 #ifdef GC_CONFIG_DRIVEN
3537     PER_HEAP_ISOLATED
3538     size_t time_init;
3539 
3540     PER_HEAP_ISOLATED
3541     size_t time_since_init;
3542 
3543     // 0 stores compacting GCs;
3544     // 1 stores sweeping GCs;
3545     PER_HEAP_ISOLATED
3546     size_t compact_or_sweep_gcs[2];
3547 
3548     PER_HEAP
3549     size_t interesting_data_per_gc[max_idp_count];
3550 
3551 #ifdef MULTIPLE_HEAPS
3552     PER_HEAP
3553     size_t interesting_data_per_heap[max_idp_count];
3554 
3555     PER_HEAP
3556     size_t compact_reasons_per_heap[max_compact_reasons_count];
3557 
3558     PER_HEAP
3559     size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
3560 
3561     PER_HEAP
3562     size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
3563 #endif //MULTIPLE_HEAPS
3564 #endif //GC_CONFIG_DRIVEN
3565 
3566     PER_HEAP
3567     BOOL        ro_segments_in_range;
3568 
3569 #ifdef BACKGROUND_GC
3570     PER_HEAP
3571     heap_segment* freeable_small_heap_segment;
3572 #endif //BACKGROUND_GC
3573 
3574     PER_HEAP
3575     heap_segment* freeable_large_heap_segment;
3576 
3577     PER_HEAP_ISOLATED
3578     heap_segment* segment_standby_list;
3579 
3580     PER_HEAP
3581     size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
3582 
3583     PER_HEAP
3584     size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
3585 
3586     PER_HEAP
3587     size_t ordered_plug_indices[MAX_NUM_BUCKETS];
3588 
3589     PER_HEAP
3590     size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
3591 
3592     PER_HEAP
3593     BOOL ordered_plug_indices_init;
3594 
3595     PER_HEAP
3596     BOOL use_bestfit;
3597 
3598     PER_HEAP
3599     uint8_t* bestfit_first_pin;
3600 
3601     PER_HEAP
3602     BOOL commit_end_of_seg;
3603 
3604     PER_HEAP
3605     size_t max_free_space_items; // dynamically adjusted.
3606 
3607     PER_HEAP
3608     size_t free_space_buckets;
3609 
3610     PER_HEAP
3611     size_t free_space_items;
3612 
3613     // -1 means we are using all the free
3614     // spaces we have (not including
3615     // end of seg space).
3616     PER_HEAP
3617     int trimmed_free_space_index;
3618 
3619     PER_HEAP
3620     size_t total_ephemeral_plugs;
3621 
3622     PER_HEAP
3623     seg_free_spaces* bestfit_seg;
3624 
3625     // Note: we know this from the plan phase.
3626     // total_ephemeral_plugs actually has the same value,
3627     // but while we are calculating its value we also store
3628     // info on how big the plugs are for best fit, which we
3629     // don't do in the plan phase.
3630     // TODO: get rid of total_ephemeral_plugs.
3631     PER_HEAP
3632     size_t total_ephemeral_size;
3633 
3634 public:
3635 
3636 #ifdef HEAP_ANALYZE
3637 
3638     PER_HEAP_ISOLATED
3639     BOOL heap_analyze_enabled;
3640 
3641     PER_HEAP
3642     size_t internal_root_array_length;
3643 
3644 #ifndef MULTIPLE_HEAPS
3645     SPTR_DECL(PTR_uint8_t, internal_root_array);
3646     SVAL_DECL(size_t, internal_root_array_index);
3647     SVAL_DECL(BOOL,   heap_analyze_success);
3648 #else
3649     PER_HEAP
3650     uint8_t** internal_root_array;
3651 
3652     PER_HEAP
3653     size_t internal_root_array_index;
3654 
3655     PER_HEAP
3656     BOOL   heap_analyze_success;
3657 #endif // !MULTIPLE_HEAPS
3658 
3659     // next two fields are used to optimize the search for the object
3660     // enclosing the current reference handled by ha_mark_object_simple.
3661     PER_HEAP
3662     uint8_t*  current_obj;
3663 
3664     PER_HEAP
3665     size_t current_obj_size;
3666 
3667 #endif //HEAP_ANALYZE
3668 
3669     /* ----------------------- global members ----------------------- */
3670 public:
3671 
3672     PER_HEAP
3673     int         condemned_generation_num;
3674 
3675     PER_HEAP
3676     BOOL        blocking_collection;
3677 
3678 #ifdef MULTIPLE_HEAPS
3679     SVAL_DECL(int, n_heaps);
3680     SPTR_DECL(PTR_gc_heap, g_heaps);
3681 
3682     static
3683     size_t*   g_promoted;
3684 #ifdef BACKGROUND_GC
3685     static
3686     size_t*   g_bpromoted;
3687 #endif //BACKGROUND_GC
3688 #ifdef MH_SC_MARK
3689     PER_HEAP_ISOLATED
3690     int*  g_mark_stack_busy;
3691 #endif //MH_SC_MARK
3692 #else
3693     static
3694     size_t    g_promoted;
3695 #ifdef BACKGROUND_GC
3696     static
3697     size_t    g_bpromoted;
3698 #endif //BACKGROUND_GC
3699 #endif //MULTIPLE_HEAPS
3700 
3701     static
3702     size_t reserved_memory;
3703     static
3704     size_t reserved_memory_limit;
3705     static
3706     BOOL      g_low_memory_status;
3707 
3708 protected:
3709     PER_HEAP
3710     void update_collection_counts ();
3711 
3712 }; // class gc_heap
3713 
3714 
3715 #ifdef FEATURE_PREMORTEM_FINALIZATION
3716 class CFinalize
3717 {
3718 #ifdef DACCESS_COMPILE
3719     friend class ::ClrDataAccess;
3720 #endif // DACCESS_COMPILE
3721 private:
3722 
3723     //adjust the count and add a constant to add a segment
3724     static const int ExtraSegCount = 2;
3725     static const int FinalizerListSeg = NUMBERGENERATIONS+1;
3726     static const int CriticalFinalizerListSeg = NUMBERGENERATIONS;
3727     //Does not correspond to a segment
3728     static const int FreeList = NUMBERGENERATIONS+ExtraSegCount;
3729 
3730     PTR_PTR_Object m_Array;
3731     PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount];
3732     PTR_PTR_Object m_EndArray;
3733     size_t   m_PromotedCount;
3734 
3735     VOLATILE(int32_t) lock;
3736 #ifdef _DEBUG
3737     EEThreadId lockowner_threadid;
3738 #endif // _DEBUG
3739 
3740     BOOL GrowArray();
3741     void MoveItem (Object** fromIndex,
3742                    unsigned int fromSeg,
3743                    unsigned int toSeg);
3744 
3745     inline PTR_PTR_Object& SegQueue (unsigned int Seg)
3746     {
3747         return (Seg ? m_FillPointers [Seg-1] : m_Array);
3748     }
3749     inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg)
3750     {
3751         return m_FillPointers [Seg];
3752     }
3753 
3754     BOOL IsSegEmpty ( unsigned int i)
3755     {
3756         ASSERT ( (int)i < FreeList);
3757         return (SegQueueLimit(i) == SegQueue (i));
3758 
3759     }
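    // The queue above is one flat array (m_Array) partitioned into consecutive
    // segments by m_FillPointers: segment Seg occupies [SegQueue (Seg),
    // SegQueueLimit (Seg)). A minimal model of that layout, with hypothetical
    // names and a fixed segment count, for illustration only:
    //
    //     struct fill_pointer_queue_example
    //     {
    //         void** items;        // the single backing array
    //         void** fill[3];      // end pointer of each of the 3 segments
    //         void** seg_start (unsigned i) { return (i ? fill[i - 1] : items); }
    //         size_t seg_size (unsigned i)  { return (size_t)(fill[i] - seg_start (i)); }
    //     };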
3760 
3761     BOOL FinalizeSegForAppDomain (AppDomain *pDomain,
3762                                   BOOL fRunFinalizers,
3763                                   unsigned int Seg);
3764 
3765 public:
3766     ~CFinalize();
3767     bool Initialize();
3768     void EnterFinalizeLock();
3769     void LeaveFinalizeLock();
3770     bool RegisterForFinalization (int gen, Object* obj, size_t size=0);
3771     Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
3772     BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
3773     void RelocateFinalizationData (int gen, gc_heap* hp);
3774     void WalkFReachableObjects (fq_walk_fn fn);
3775     void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
3776     void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
3777     size_t GetPromotedCount();
3778 
3779     //Methods used by the shutdown code to call every finalizer
3780     void SetSegForShutDown(BOOL fHasLock);
3781     size_t GetNumberFinalizableObjects();
3782     void DiscardNonCriticalObjects();
3783 
3784     //Methods used by the app domain unloading call to finalize objects in an app domain
3785     BOOL FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers);
3786 
3787     void CheckFinalizerObjects();
3788 };
3789 #endif // FEATURE_PREMORTEM_FINALIZATION
3790 
3791 inline
3792  size_t& dd_begin_data_size (dynamic_data* inst)
3793 {
3794   return inst->begin_data_size;
3795 }
3796 inline
3797  size_t& dd_survived_size (dynamic_data* inst)
3798 {
3799   return inst->survived_size;
3800 }
3801 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
3802 inline
3803  size_t& dd_num_npinned_plugs(dynamic_data* inst)
3804 {
3805   return inst->num_npinned_plugs;
3806 }
3807 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
3808 inline
3809 size_t& dd_pinned_survived_size (dynamic_data* inst)
3810 {
3811   return inst->pinned_survived_size;
3812 }
3813 inline
3814 size_t& dd_added_pinned_size (dynamic_data* inst)
3815 {
3816   return inst->added_pinned_size;
3817 }
3818 inline
3819 size_t& dd_artificial_pinned_survived_size (dynamic_data* inst)
3820 {
3821   return inst->artificial_pinned_survived_size;
3822 }
3823 #ifdef SHORT_PLUGS
3824 inline
3825 size_t& dd_padding_size (dynamic_data* inst)
3826 {
3827   return inst->padding_size;
3828 }
3829 #endif //SHORT_PLUGS
3830 inline
3831  size_t& dd_current_size (dynamic_data* inst)
3832 {
3833   return inst->current_size;
3834 }
3835 inline
3836 float& dd_surv (dynamic_data* inst)
3837 {
3838   return inst->surv;
3839 }
3840 inline
3841 size_t& dd_freach_previous_promotion (dynamic_data* inst)
3842 {
3843   return inst->freach_previous_promotion;
3844 }
3845 inline
3846 size_t& dd_desired_allocation (dynamic_data* inst)
3847 {
3848   return inst->desired_allocation;
3849 }
3850 inline
3851 size_t& dd_collection_count (dynamic_data* inst)
3852 {
3853     return inst->collection_count;
3854 }
3855 inline
3856 size_t& dd_promoted_size (dynamic_data* inst)
3857 {
3858     return inst->promoted_size;
3859 }
3860 inline
3861 float& dd_limit (dynamic_data* inst)
3862 {
3863   return inst->limit;
3864 }
3865 inline
3866 float& dd_max_limit (dynamic_data* inst)
3867 {
3868   return inst->max_limit;
3869 }
3870 inline
3871 size_t& dd_min_gc_size (dynamic_data* inst)
3872 {
3873   return inst->min_gc_size;
3874 }
3875 inline
3876 size_t& dd_max_size (dynamic_data* inst)
3877 {
3878   return inst->max_size;
3879 }
3880 inline
3881 size_t& dd_min_size (dynamic_data* inst)
3882 {
3883   return inst->min_size;
3884 }
3885 inline
3886 ptrdiff_t& dd_new_allocation (dynamic_data* inst)
3887 {
3888   return inst->new_allocation;
3889 }
3890 inline
3891 ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst)
3892 {
3893   return inst->gc_new_allocation;
3894 }
3895 inline
3896 size_t& dd_default_new_allocation (dynamic_data* inst)
3897 {
3898   return inst->default_new_allocation;
3899 }
3900 inline
3901 size_t& dd_fragmentation_limit (dynamic_data* inst)
3902 {
3903   return inst->fragmentation_limit;
3904 }
3905 inline
3906 float& dd_fragmentation_burden_limit (dynamic_data* inst)
3907 {
3908   return inst->fragmentation_burden_limit;
3909 }
3910 inline
3911 float dd_v_fragmentation_burden_limit (dynamic_data* inst)
3912 {
3913   return (min (2*dd_fragmentation_burden_limit (inst), 0.75f));
3914 }
3915 inline
3916 size_t& dd_fragmentation (dynamic_data* inst)
3917 {
3918   return inst->fragmentation;
3919 }
3920 
3921 inline
3922 size_t& dd_gc_clock (dynamic_data* inst)
3923 {
3924   return inst->gc_clock;
3925 }
3926 inline
3927 size_t& dd_time_clock (dynamic_data* inst)
3928 {
3929   return inst->time_clock;
3930 }
3931 
3932 inline
3933 size_t& dd_gc_elapsed_time (dynamic_data* inst)
3934 {
3935     return inst->gc_elapsed_time;
3936 }
3937 
3938 inline
3939 float& dd_gc_speed (dynamic_data* inst)
3940 {
3941     return inst->gc_speed;
3942 }
3943 
3944 inline
3945 alloc_context* generation_alloc_context (generation* inst)
3946 {
3947     return &(inst->allocation_context);
3948 }
3949 
3950 inline
3951 uint8_t*& generation_allocation_start (generation* inst)
3952 {
3953   return inst->allocation_start;
3954 }
3955 inline
3956 uint8_t*& generation_allocation_pointer (generation* inst)
3957 {
3958   return inst->allocation_context.alloc_ptr;
3959 }
3960 inline
3961 uint8_t*& generation_allocation_limit (generation* inst)
3962 {
3963   return inst->allocation_context.alloc_limit;
3964 }
3965 inline
3966 allocator* generation_allocator (generation* inst)
3967 {
3968     return &inst->free_list_allocator;
3969 }
3970 
3971 inline
3972 PTR_heap_segment& generation_start_segment (generation* inst)
3973 {
3974   return inst->start_segment;
3975 }
3976 inline
3977 heap_segment*& generation_allocation_segment (generation* inst)
3978 {
3979   return inst->allocation_segment;
3980 }
3981 inline
3982 uint8_t*& generation_plan_allocation_start (generation* inst)
3983 {
3984   return inst->plan_allocation_start;
3985 }
3986 inline
3987 size_t& generation_plan_allocation_start_size (generation* inst)
3988 {
3989   return inst->plan_allocation_start_size;
3990 }
3991 inline
3992 uint8_t*& generation_allocation_context_start_region (generation* inst)
3993 {
3994   return inst->allocation_context_start_region;
3995 }
3996 inline
3997 size_t& generation_free_list_space (generation* inst)
3998 {
3999   return inst->free_list_space;
4000 }
4001 inline
4002 size_t& generation_free_obj_space (generation* inst)
4003 {
4004   return inst->free_obj_space;
4005 }
4006 inline
4007 size_t& generation_allocation_size (generation* inst)
4008 {
4009   return inst->allocation_size;
4010 }
4011 
4012 inline
4013 size_t& generation_pinned_allocated (generation* inst)
4014 {
4015     return inst->pinned_allocated;
4016 }
4017 inline
4018 size_t& generation_pinned_allocation_sweep_size (generation* inst)
4019 {
4020     return inst->pinned_allocation_sweep_size;
4021 }
4022 inline
4023 size_t& generation_pinned_allocation_compact_size (generation* inst)
4024 {
4025     return inst->pinned_allocation_compact_size;
4026 }
4027 inline
4028 size_t&  generation_free_list_allocated (generation* inst)
4029 {
4030     return inst->free_list_allocated;
4031 }
4032 inline
4033 size_t&  generation_end_seg_allocated (generation* inst)
4034 {
4035     return inst->end_seg_allocated;
4036 }
4037 inline
4038 BOOL&  generation_allocate_end_seg_p (generation* inst)
4039 {
4040     return inst->allocate_end_seg_p;
4041 }
4042 inline
4043 size_t& generation_condemned_allocated (generation* inst)
4044 {
4045     return inst->condemned_allocated;
4046 }
4047 #ifdef FREE_USAGE_STATS
4048 inline
4049 size_t& generation_pinned_free_obj_space (generation* inst)
4050 {
4051     return inst->pinned_free_obj_space;
4052 }
4053 inline
4054 size_t& generation_allocated_in_pinned_free (generation* inst)
4055 {
4056     return inst->allocated_in_pinned_free;
4057 }
4058 inline
4059 size_t& generation_allocated_since_last_pin (generation* inst)
4060 {
4061     return inst->allocated_since_last_pin;
4062 }
4063 #endif //FREE_USAGE_STATS
4064 inline
4065 float generation_allocator_efficiency (generation* inst)
4066 {
4067     if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0)
4068     {
4069         return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst)));
4070     }
4071     else
4072         return 0;
4073 }
4074 inline
4075 size_t generation_unusable_fragmentation (generation* inst)
4076 {
4077     return (size_t)(generation_free_obj_space (inst) +
4078                     (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst));
4079 }
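// Worked example with illustrative numbers (not measurements): if a generation
// has free_list_allocated = 300k, free_obj_space = 100k and free_list_space =
// 200k, then generation_allocator_efficiency is 300k / (300k + 100k) = 0.75 and
// generation_unusable_fragmentation is 100k + (1 - 0.75) * 200k = 150k.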
4080 
4081 #define plug_skew           sizeof(ObjHeader)
4082 // We always use USE_PADDING_TAIL when fitting, so items on the free list should be
4083 // at least twice the min_obj_size.
4084 #define min_free_list       (2*min_obj_size)
4085 struct plug
4086 {
4087     uint8_t *  skew[plug_skew / sizeof(uint8_t *)];
4088 };
4089 
4090 class pair
4091 {
4092 public:
4093     short left;
4094     short right;
4095 };
4096 
4097 //Note that these encode the fact that plug_skew is a multiple of uint8_t*.
4098 // Each new field is prepended to the prior struct; see the sketch after plug_and_gap below.
4099 
4100 struct plug_and_pair
4101 {
4102     pair        m_pair;
4103     plug        m_plug;
4104 };
4105 
4106 struct plug_and_reloc
4107 {
4108     ptrdiff_t   reloc;
4109     pair        m_pair;
4110     plug        m_plug;
4111 };
4112 
4113 struct plug_and_gap
4114 {
4115     ptrdiff_t   gap;
4116     ptrdiff_t   reloc;
4117     union
4118     {
4119         pair    m_pair;
4120         int     lr;  //for clearing the entire pair in one instruction
4121     };
4122     plug        m_plug;
4123 };
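// In the plug_and_* structs above the plug is always the last member and every
// new field is prepended, so the extra data lives in the bytes immediately
// before the plug. As a sketch only (hypothetical helper, for illustration),
// the enclosing record can be recovered from a plug pointer by stepping back
// by the plug's offset:
//
//     inline plug_and_gap* enclosing_plug_and_gap_example (plug* p)
//     {
//         return (plug_and_gap*)((uint8_t*)p - offsetof (plug_and_gap, m_plug));
//     }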
4124 
4125 struct gap_reloc_pair
4126 {
4127     size_t gap;
4128     size_t   reloc;
4129     pair        m_pair;
4130 };
4131 
4132 #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size)
4133 
4134 struct DECLSPEC_ALIGN(8) aligned_plug_and_gap
4135 {
4136     plug_and_gap plugandgap;
4137 };
4138 
4139 struct loh_obj_and_pad
4140 {
4141     ptrdiff_t   reloc;
4142     plug        m_plug;
4143 };
4144 
4145 struct loh_padding_obj
4146 {
4147     uint8_t*    mt;
4148     size_t      len;
4149     ptrdiff_t   reloc;
4150     plug        m_plug;
4151 };
4152 #define loh_padding_obj_size (sizeof(loh_padding_obj))
4153 
4154 //flags description
4155 #define heap_segment_flags_readonly     1
4156 #define heap_segment_flags_inrange      2
4157 #define heap_segment_flags_unmappable   4
4158 #define heap_segment_flags_loh          8
4159 #ifdef BACKGROUND_GC
4160 #define heap_segment_flags_swept        16
4161 #define heap_segment_flags_decommitted  32
4162 #define heap_segment_flags_ma_committed 64
4163 // for segments whose mark array is only partially committed.
4164 #define heap_segment_flags_ma_pcommitted 128
4165 #endif //BACKGROUND_GC
4166 
4167 //need to be careful to keep enough pad items to fit a relocation node
4168 //padded to QuadWord before the plug_skew
4169 
4170 class heap_segment
4171 {
4172 public:
4173     uint8_t*        allocated;
4174     uint8_t*        committed;
4175     uint8_t*        reserved;
4176     uint8_t*        used;
4177     uint8_t*        mem;
4178     size_t          flags;
4179     PTR_heap_segment next;
4180     uint8_t*        plan_allocated;
4181 #ifdef BACKGROUND_GC
4182     uint8_t*        background_allocated;
4183     uint8_t*        saved_bg_allocated;
4184 #endif //BACKGROUND_GC
4185 
4186 #ifdef MULTIPLE_HEAPS
4187     gc_heap*        heap;
4188 #endif //MULTIPLE_HEAPS
4189 
4190 #ifdef _MSC_VER
4191 // Disable this warning - we intentionally want __declspec(align()) to insert padding for us
4192 #pragma warning(disable:4324)  // structure was padded due to __declspec(align())
4193 #endif
4194     aligned_plug_and_gap padandplug;
4195 #ifdef _MSC_VER
4196 #pragma warning(default:4324)  // structure was padded due to __declspec(align())
4197 #endif
4198 };
4199 
4200 inline
4201 uint8_t*& heap_segment_reserved (heap_segment* inst)
4202 {
4203   return inst->reserved;
4204 }
4205 inline
4206 uint8_t*& heap_segment_committed (heap_segment* inst)
4207 {
4208   return inst->committed;
4209 }
4210 inline
4211 uint8_t*& heap_segment_used (heap_segment* inst)
4212 {
4213   return inst->used;
4214 }
4215 inline
4216 uint8_t*& heap_segment_allocated (heap_segment* inst)
4217 {
4218   return inst->allocated;
4219 }
4220 
4221 inline
4222 BOOL heap_segment_read_only_p (heap_segment* inst)
4223 {
4224     return ((inst->flags & heap_segment_flags_readonly) != 0);
4225 }
4226 
4227 inline
4228 BOOL heap_segment_in_range_p (heap_segment* inst)
4229 {
4230     return (!(inst->flags & heap_segment_flags_readonly) ||
4231             ((inst->flags & heap_segment_flags_inrange) != 0));
4232 }
4233 
4234 inline
4235 BOOL heap_segment_unmappable_p (heap_segment* inst)
4236 {
4237     return (!(inst->flags & heap_segment_flags_readonly) ||
4238             ((inst->flags & heap_segment_flags_unmappable) != 0));
4239 }
4240 
4241 inline
4242 BOOL heap_segment_loh_p (heap_segment * inst)
4243 {
4244     return !!(inst->flags & heap_segment_flags_loh);
4245 }
4246 
4247 #ifdef BACKGROUND_GC
4248 inline
4249 BOOL heap_segment_decommitted_p (heap_segment * inst)
4250 {
4251     return !!(inst->flags & heap_segment_flags_decommitted);
4252 }
4253 #endif //BACKGROUND_GC
4254 
4255 inline
4256 PTR_heap_segment & heap_segment_next (heap_segment* inst)
4257 {
4258   return inst->next;
4259 }
4260 inline
4261 uint8_t*& heap_segment_mem (heap_segment* inst)
4262 {
4263   return inst->mem;
4264 }
4265 inline
4266 uint8_t*& heap_segment_plan_allocated (heap_segment* inst)
4267 {
4268   return inst->plan_allocated;
4269 }
4270 
4271 #ifdef BACKGROUND_GC
4272 inline
4273 uint8_t*& heap_segment_background_allocated (heap_segment* inst)
4274 {
4275   return inst->background_allocated;
4276 }
4277 inline
4278 uint8_t*& heap_segment_saved_bg_allocated (heap_segment* inst)
4279 {
4280   return inst->saved_bg_allocated;
4281 }
4282 #endif //BACKGROUND_GC
4283 
4284 #ifdef MULTIPLE_HEAPS
4285 inline
4286 gc_heap*& heap_segment_heap (heap_segment* inst)
4287 {
4288     return inst->heap;
4289 }
4290 #endif //MULTIPLE_HEAPS
4291 
4292 #ifndef MULTIPLE_HEAPS
4293 
4294 #ifndef DACCESS_COMPILE
4295 extern "C" {
4296 #endif //!DACCESS_COMPILE
4297 
4298 GARY_DECL(generation,generation_table,NUMBERGENERATIONS+1);
4299 
4300 #ifdef GC_CONFIG_DRIVEN
4301 GARY_DECL(size_t, interesting_data_per_heap, max_idp_count);
4302 GARY_DECL(size_t, compact_reasons_per_heap, max_compact_reasons_count);
4303 GARY_DECL(size_t, expand_mechanisms_per_heap, max_expand_mechanisms_count);
4304 GARY_DECL(size_t, interesting_mechanism_bits_per_heap, max_gc_mechanism_bits_count);
4305 #endif //GC_CONFIG_DRIVEN
4306 
4307 #ifndef DACCESS_COMPILE
4308 }
4309 #endif //!DACCESS_COMPILE
4310 
4311 #endif //MULTIPLE_HEAPS
4312 
4313 inline
4314 generation* gc_heap::generation_of (int  n)
4315 {
4316     assert (((n <= max_generation+1) && (n >= 0)));
4317     return &generation_table [ n ];
4318 }
4319 
4320 inline
4321 dynamic_data* gc_heap::dynamic_data_of (int gen_number)
4322 {
4323     return &dynamic_data_table [ gen_number ];
4324 }
4325 
4326 #define card_word_width ((size_t)32)
4327 
4328 //
4329 // The value of card_size is determined empirically according to the average size of an object.
4330 // In the code we also rely on the assumption that one card_table entry (uint32_t) covers an entire os page
4331 //
4332 #if defined (BIT64)
4333 #define card_size ((size_t)(2*OS_PAGE_SIZE/card_word_width))
4334 #else
4335 #define card_size ((size_t)(OS_PAGE_SIZE/card_word_width))
4336 #endif // BIT64
4337 
4338 inline
4339 size_t card_word (size_t card)
4340 {
4341     return card / card_word_width;
4342 }
4343 
4344 inline
4345 unsigned card_bit (size_t card)
4346 {
4347     return (unsigned)(card % card_word_width);
4348 }
4349 
4350 inline
4351 size_t gcard_of (uint8_t* object)
4352 {
4353     return (size_t)(object) / card_size;
4354 }
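// Worked example (assuming a 4096-byte OS page on a 64-bit build): card_size is
// 2*4096/32 = 256 bytes, so there is one card per 256 bytes of heap and one
// 32-bit card word covers 32*256 = 8k. For an object at address 0x2100, for
// instance, gcard_of gives 0x2100/256 = 33, which is bit card_bit (33) = 1 of
// word card_word (33) = 1 in the card table.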
4355 
4356