// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.


/*++

Module Name:

    gc.h

--*/

#ifndef __GC_H
#define __GC_H

#ifdef Sleep
// This is a funny workaround for the fact that "common.h" defines Sleep to be
// Dont_Use_Sleep, with the hope of causing linker errors whenever someone tries to use Sleep.
//
// However, GCToOSInterface defines a function called Sleep, which (due to this define) becomes
// "Dont_Use_Sleep", which the GC in turn happily uses. The symbol that GCToOSInterface actually
// exports therefore becomes "GCToOSInterface::Dont_Use_Sleep". As we progress in making the GC
// standalone, we'll break the dependency on common.h (the VM header) and this problem will become moot.
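//
// To illustrate the rename described above (a sketch of the preprocessor behavior only;
// the exact Sleep signature lives in env/gcenv.os.h): with "#define Sleep Dont_Use_Sleep"
// in effect, a declaration along the lines of
//
//     void GCToOSInterface::Sleep(uint32_t sleepMSec);
//
// is compiled, and therefore exported, as GCToOSInterface::Dont_Use_Sleep.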
#undef Sleep
#endif // Sleep

#include "gcinterface.h"
#include "env/gcenv.os.h"
#include "env/gcenv.ee.h"

#ifdef FEATURE_STANDALONE_GC
#include "gcenv.ee.standalone.inl"
#endif // FEATURE_STANDALONE_GC

/*
 * Promotion Function Prototypes
 */
typedef void enum_func (Object*);

// callback functions for heap walkers
typedef void object_callback_func(void * pvContext, void * pvDataLoc);
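//
// A minimal sketch of a callback matching object_callback_func (illustrative only;
// the walker entry point that would invoke it is not declared in this header).
// pvContext is caller-supplied state; what pvDataLoc identifies is up to the walker:
//
//     void CountObjects(void* pvContext, void* pvDataLoc)
//     {
//         ++*static_cast<size_t*>(pvContext);
//     }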

/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
/* If you modify failure_get_memory and         */
/* oom_reason be sure to make the corresponding */
/* changes in toolbox\sos\strike\strike.cpp.    */
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
enum failure_get_memory
{
    fgm_no_failure = 0,
    fgm_reserve_segment = 1,
    fgm_commit_segment_beg = 2,
    fgm_commit_eph_segment = 3,
    fgm_grow_table = 4,
    fgm_commit_table = 5
};

struct fgm_history
{
    failure_get_memory fgm;
    size_t size;
    size_t available_pagefile_mb;
    BOOL loh_p;

    void set_fgm (failure_get_memory f, size_t s, BOOL l)
    {
        fgm = f;
        size = s;
        loh_p = l;
    }
};
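
// Illustrative sketch of recording a reservation failure in an fgm_history entry
// (the surrounding allocation code and "requested_size" are hypothetical; note that
// set_fgm does not fill in available_pagefile_mb):
//
//     fgm_history entry;
//     entry.set_fgm (fgm_reserve_segment, requested_size, FALSE /* not LOH */);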

enum oom_reason
{
    oom_no_failure = 0,
    oom_budget = 1,
    oom_cant_commit = 2,
    oom_cant_reserve = 3,
    oom_loh = 4,
    oom_low_mem = 5,
    oom_unproductive_full_gc = 6
};

// TODO: it would be easier to make this an ORed value
enum gc_reason
{
    reason_alloc_soh = 0,
    reason_induced = 1,
    reason_lowmemory = 2,
    reason_empty = 3,
    reason_alloc_loh = 4,
    reason_oos_soh = 5,
    reason_oos_loh = 6,
    reason_induced_noforce = 7, // it's an induced GC and doesn't have to be blocking.
    reason_gcstress = 8,        // this turns into reason_induced & gc_mechanisms.stress_induced = true
    reason_lowmemory_blocking = 9,
    reason_induced_compacting = 10,
    reason_lowmemory_host = 11,
    reason_max
};

struct oom_history
{
    oom_reason reason;
    size_t alloc_size;
    uint8_t* reserved;
    uint8_t* allocated;
    size_t gc_index;
    failure_get_memory fgm;
    size_t size;
    size_t available_pagefile_mb;
    BOOL loh_p;
};

/* forward declarations */
class CObjectHeader;
class Object;

class IGCHeapInternal;

/* misc defines */
#define LARGE_OBJECT_SIZE ((size_t)(85000))

#ifdef GC_CONFIG_DRIVEN
#define MAX_GLOBAL_GC_MECHANISMS_COUNT 6
GARY_DECL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
#endif //GC_CONFIG_DRIVEN

#ifdef DACCESS_COMPILE
class DacHeapWalker;
#endif

#ifdef _DEBUG
#define  _LOGALLOC
#endif

#define MP_LOCKS

extern "C" uint32_t* g_gc_card_table;
extern "C" uint8_t* g_gc_lowest_address;
extern "C" uint8_t* g_gc_highest_address;
extern "C" bool g_fFinalizerRunOnShutDown;

namespace WKS {
    ::IGCHeapInternal* CreateGCHeap();
    class GCHeap;
    class gc_heap;
}

#if defined(FEATURE_SVR_GC)
namespace SVR {
    ::IGCHeapInternal* CreateGCHeap();
    class GCHeap;
    class gc_heap;
}
#endif // defined(FEATURE_SVR_GC)

#ifdef STRESS_HEAP
#define IN_STRESS_HEAP(x) x
#define STRESS_HEAP_ARG(x) ,x
#else // STRESS_HEAP
#define IN_STRESS_HEAP(x)
#define STRESS_HEAP_ARG(x)
#endif // STRESS_HEAP
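
// Usage sketch (illustrative; the identifiers below are hypothetical): these macros
// let stress-only code and trailing arguments compile away when STRESS_HEAP is off.
//
//     IN_STRESS_HEAP(size_t stress_count = 0;)
//     DoAlloc (size, flags STRESS_HEAP_ARG(stress_count));
//
// With STRESS_HEAP defined this becomes "DoAlloc (size, flags, stress_count);";
// otherwise both the declaration and the extra argument disappear.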

// dynamic data interface
struct gc_counters
{
    size_t current_size;
    size_t promoted_size;
    size_t collection_count;
};

enum bgc_state
{
    bgc_not_in_process = 0,
    bgc_initialized,
    bgc_reset_ww,
    bgc_mark_handles,
    bgc_mark_stack,
    bgc_revisit_soh,
    bgc_revisit_loh,
    bgc_overflow_soh,
    bgc_overflow_loh,
    bgc_final_marking,
    bgc_sweep_soh,
    bgc_sweep_loh,
    bgc_plan_phase
};

enum changed_seg_state
{
    seg_deleted,
    seg_added
};

void record_changed_seg (uint8_t* start, uint8_t* end,
                         size_t current_gc_index,
                         bgc_state current_bgc_state,
                         changed_seg_state changed_state);

#ifdef GC_CONFIG_DRIVEN
void record_global_mechanism (int mech_index);
#endif //GC_CONFIG_DRIVEN

struct alloc_context : gc_alloc_context
{
#ifdef FEATURE_SVR_GC
    inline SVR::GCHeap* get_alloc_heap()
    {
        return static_cast<SVR::GCHeap*>(gc_reserved_1);
    }

    inline void set_alloc_heap(SVR::GCHeap* heap)
    {
        gc_reserved_1 = heap;
    }

    inline SVR::GCHeap* get_home_heap()
    {
        return static_cast<SVR::GCHeap*>(gc_reserved_2);
    }

    inline void set_home_heap(SVR::GCHeap* heap)
    {
        gc_reserved_2 = heap;
    }
#endif // FEATURE_SVR_GC
};
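
// Sketch of the intended round-trip through the reserved slots (illustrative only;
// "acontext" and "pHeap" below are hypothetical). The accessors simply store and
// retrieve server-GC heap pointers via the gc_reserved_1/gc_reserved_2 fields
// inherited from gc_alloc_context:
//
//     alloc_context* acontext = ...;
//     acontext->set_alloc_heap (pHeap);                    // stash the allocation heap
//     SVR::GCHeap* pAllocHeap = acontext->get_alloc_heap(); // read it back later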

class IGCHeapInternal : public IGCHeap {
    friend struct ::_DacGlobals;
#ifdef DACCESS_COMPILE
    friend class ClrDataAccess;
#endif

public:

    virtual ~IGCHeapInternal() {}

private:
    virtual Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags) = 0;
public:
    virtual int GetNumberOfHeaps () = 0;
    virtual int GetHomeHeapNumber () = 0;
    virtual size_t GetPromotedBytes(int heap_index) = 0;

    unsigned GetMaxGeneration()
    {
        return IGCHeap::maxGeneration;
    }

    BOOL IsValidSegmentSize(size_t cbSize)
    {
        // Must be aligned on a MB boundary and be at least 4 MB.
        return (((cbSize & (1024*1024-1)) == 0) && (cbSize >> 22));
    }
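
    // Worked example (values chosen for illustration): 16 MB = 0x1000000 passes both
    // checks (its low 20 bits are zero and 0x1000000 >> 22 == 4 is nonzero), while
    // 3 MB = 0x300000 is MB-aligned but fails the minimum-size check because
    // 0x300000 >> 22 == 0.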

    BOOL IsValidGen0MaxSize(size_t cbSize)
    {
        return (cbSize >= 64*1024);
    }

    BOOL IsLargeObject(MethodTable *mt)
    {
        WRAPPER_NO_CONTRACT;

        return mt->GetBaseSize() >= LARGE_OBJECT_SIZE;
    }

    void SetFinalizeRunOnShutdown(bool value)
    {
        g_fFinalizerRunOnShutDown = value;
    }

protected:
public:
#if defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
    // Return TRUE if object lives in frozen segment
    virtual BOOL IsInFrozenSegment (Object * object) = 0;
#endif // defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
};

// Go through and touch (read) each page straddled by a memory block.
void TouchPages(void * pStart, size_t cb);

#ifdef WRITE_BARRIER_CHECK
void updateGCShadow(Object** ptr, Object* val);
#endif

// the method table for the WeakReference class
extern MethodTable  *pWeakReferenceMT;
// The canonical method table for WeakReference<T>
extern MethodTable  *pWeakReferenceOfTCanonMT;
extern void FinalizeWeakReference(Object * obj);

// The single GC heap instance, shared with the VM.
extern IGCHeapInternal* g_theGCHeap;

#ifndef DACCESS_COMPILE
inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE)
{
    WRAPPER_NO_CONTRACT;

    return g_theGCHeap != nullptr ? g_theGCHeap->IsGCInProgressHelper(bConsiderGCStart) : false;
}
#endif // DACCESS_COMPILE

inline BOOL IsServerHeap()
{
    LIMITED_METHOD_CONTRACT;
#ifdef FEATURE_SVR_GC
    _ASSERTE(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
    return (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR);
#else // FEATURE_SVR_GC
    return false;
#endif // FEATURE_SVR_GC
}

#endif // __GC_H