// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

// Sets up vars for GC.

#include "gcpriv.h"

#ifndef DACCESS_COMPILE

COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeInGC = 0);
COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeSinceLastGCEnd = 0);

#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
size_t g_GenerationSizes[NUMBERGENERATIONS];
size_t g_GenerationPromotedSizes[NUMBERGENERATIONS];
#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE

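// Called when a GC begins: snapshots the cycle counter for the time-in-GC
// calculation, accumulates the per-heap allocation since the last GC into
// the allocation perf counters, and fires the ETW GCStart event describing
// the collection's index, depth (condemned generation), reason, and type.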
void GCHeap::UpdatePreGCCounters()
{
#if defined(ENABLE_PERF_COUNTERS)
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = 0;
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    size_t allocation_0 = 0;
    size_t allocation_3 = 0;

    // Publish perf stats
    g_TotalTimeInGC = GET_CYCLE_COUNT();

#ifdef MULTIPLE_HEAPS
    int hn = 0;
    for (hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        hp = gc_heap::g_heaps [hn];

        allocation_0 +=
            dd_desired_allocation (hp->dynamic_data_of (0)) -
            dd_new_allocation (hp->dynamic_data_of (0));
        allocation_3 +=
            dd_desired_allocation (hp->dynamic_data_of (max_generation+1)) -
            dd_new_allocation (hp->dynamic_data_of (max_generation+1));
    }
#else
    allocation_0 =
        dd_desired_allocation (hp->dynamic_data_of (0)) -
        dd_new_allocation (hp->dynamic_data_of (0));
    allocation_3 =
        dd_desired_allocation (hp->dynamic_data_of (max_generation+1)) -
        dd_new_allocation (hp->dynamic_data_of (max_generation+1));

#endif //MULTIPLE_HEAPS

    GetPerfCounters().m_GC.cbAlloc += allocation_0;
    GetPerfCounters().m_GC.cbAlloc += allocation_3;
    GetPerfCounters().m_GC.cbLargeAlloc += allocation_3;

#ifdef _PREFAST_
    // Prefix complains about dereferencing hp in the WKS build even though we
    // only access static members this way. The only way we know to quiet it is
    // this ugly workaround:
    PREFIX_ASSUME( hp != NULL);
#endif //_PREFAST_
    if (hp->settings.reason == reason_induced IN_STRESS_HEAP( && !hp->settings.stress_induced))
    {
        COUNTER_ONLY(GetPerfCounters().m_GC.cInducedGCs++);
    }

    GetPerfCounters().m_Security.timeRTchecks = 0;
    GetPerfCounters().m_Security.timeRTchecksBase = 1; // To avoid divide by zero

#endif //ENABLE_PERF_COUNTERS

#ifdef FEATURE_EVENT_TRACE
#ifdef MULTIPLE_HEAPS
    // Take the settings from the first heap.
    gc_mechanisms *pSettings = &gc_heap::g_heaps[0]->settings;
#else
    gc_mechanisms *pSettings = &gc_heap::settings;
#endif //MULTIPLE_HEAPS

    ETW::GCLog::ETW_GC_INFO Info;

    Info.GCStart.Count = (uint32_t)pSettings->gc_index;
    Info.GCStart.Depth = (uint32_t)pSettings->condemned_generation;
    Info.GCStart.Reason = (ETW::GCLog::ETW_GC_INFO::GC_REASON)((int)(pSettings->reason));

    Info.GCStart.Type = ETW::GCLog::ETW_GC_INFO::GC_NGC;
    if (pSettings->concurrent)
    {
        Info.GCStart.Type = ETW::GCLog::ETW_GC_INFO::GC_BGC;
    }
#ifdef BACKGROUND_GC
    else if (Info.GCStart.Depth < max_generation)
    {
        if (pSettings->background_p)
            Info.GCStart.Type = ETW::GCLog::ETW_GC_INFO::GC_FGC;
    }
#endif //BACKGROUND_GC

    ETW::GCLog::FireGcStartAndGenerationRanges(&Info);
#endif // FEATURE_EVENT_TRACE
}

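// Called when a GC completes: computes per-generation sizes and promoted
// bytes, handle/sync block/pinned object counts, committed and reserved
// bytes, and the time-in-GC ratio, then publishes them via the ETW
// GCHeapStats event and (when enabled) the GC perf counters.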
void GCHeap::UpdatePostGCCounters()
{
    totalSurvivedSize = gc_heap::get_total_survived_size();

    //
    // The following is for instrumentation.
    //
    // Calculate the values common to ETW and the perf counters.
#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
#ifdef MULTIPLE_HEAPS
    // Take the settings from the first heap.
    gc_heap* hp1 = gc_heap::g_heaps[0];
    gc_mechanisms *pSettings = &hp1->settings;
#else
    gc_heap* hp1 = pGenGCHeap;
    gc_mechanisms *pSettings = &gc_heap::settings;
#endif //MULTIPLE_HEAPS

    int condemned_gen = pSettings->condemned_generation;

    memset (g_GenerationSizes, 0, sizeof (g_GenerationSizes));
    memset (g_GenerationPromotedSizes, 0, sizeof (g_GenerationPromotedSizes));

    size_t total_num_gc_handles = g_dwHandles;
    uint32_t total_num_sync_blocks = SyncBlockCache::GetSyncBlockCache()->GetActiveCount();
    // Note this is for the perf counters only, for legacy reasons. What we
    // showed in perf counters for "gen0 size" was really the gen0 budget,
    // which made sense (somewhat) at the time. For backward compatibility we
    // keep calculating it the same way. For ETW we use the true gen0 size
    // (the gen0 budget is also reported in a separate event).
    size_t youngest_budget = 0;

    size_t promoted_finalization_mem = 0;
    size_t total_num_pinned_objects = gc_heap::get_total_pinned_objects();

#ifndef FEATURE_REDHAWK
    // If a max gen garbage collection was performed, resync the GC Handle counter;
    // if threads are currently suspended, we do not need to obtain a lock on each handle table.
    if (condemned_gen == max_generation)
        total_num_gc_handles = HndCountAllHandles(!IsGCInProgress());
#endif //FEATURE_REDHAWK

    // Per-generation calculation, accumulated across heaps.
    for (int gen_index = 0; gen_index <= (max_generation+1); gen_index++)
    {
#ifdef MULTIPLE_HEAPS
        int hn = 0;
        for (hn = 0; hn < gc_heap::n_heaps; hn++)
        {
            gc_heap* hp = gc_heap::g_heaps[hn];
#else
        gc_heap* hp = pGenGCHeap;
        {
#endif //MULTIPLE_HEAPS
            dynamic_data* dd = hp->dynamic_data_of (gen_index);

            if (gen_index == 0)
            {
                youngest_budget += dd_desired_allocation (hp->dynamic_data_of (gen_index));
            }

            g_GenerationSizes[gen_index] += hp->generation_size (gen_index);

            if (gen_index <= condemned_gen)
            {
                g_GenerationPromotedSizes[gen_index] += dd_promoted_size (dd);
            }

            if ((gen_index == (max_generation+1)) && (condemned_gen == max_generation))
            {
                g_GenerationPromotedSizes[gen_index] += dd_promoted_size (dd);
            }

            if (gen_index == 0)
            {
                promoted_finalization_mem += dd_freach_previous_promotion (dd);
            }
#ifdef MULTIPLE_HEAPS
        }
#else
        }
#endif //MULTIPLE_HEAPS
    }
#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE

#ifdef FEATURE_EVENT_TRACE
    ETW::GCLog::ETW_GC_INFO Info;

    Info.GCEnd.Depth = condemned_gen;
    Info.GCEnd.Count = (uint32_t)pSettings->gc_index;
    ETW::GCLog::FireGcEndAndGenerationRanges(Info.GCEnd.Count, Info.GCEnd.Depth);

    ETW::GCLog::ETW_GC_INFO HeapInfo;
    ZeroMemory(&HeapInfo, sizeof(HeapInfo));

    for (int gen_index = 0; gen_index <= (max_generation+1); gen_index++)
    {
        HeapInfo.HeapStats.GenInfo[gen_index].GenerationSize = g_GenerationSizes[gen_index];
        HeapInfo.HeapStats.GenInfo[gen_index].TotalPromotedSize = g_GenerationPromotedSizes[gen_index];
    }

#ifdef SIMPLE_DPRINTF
    dprintf (2, ("GC#%d: 0: %Id(%Id); 1: %Id(%Id); 2: %Id(%Id); 3: %Id(%Id)",
        Info.GCEnd.Count,
        HeapInfo.HeapStats.GenInfo[0].GenerationSize,
        HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize,
        HeapInfo.HeapStats.GenInfo[1].GenerationSize,
        HeapInfo.HeapStats.GenInfo[1].TotalPromotedSize,
        HeapInfo.HeapStats.GenInfo[2].GenerationSize,
        HeapInfo.HeapStats.GenInfo[2].TotalPromotedSize,
        HeapInfo.HeapStats.GenInfo[3].GenerationSize,
        HeapInfo.HeapStats.GenInfo[3].TotalPromotedSize));
#endif //SIMPLE_DPRINTF

    HeapInfo.HeapStats.FinalizationPromotedSize = promoted_finalization_mem;
    HeapInfo.HeapStats.FinalizationPromotedCount = GetFinalizablePromotedCount();
    HeapInfo.HeapStats.PinnedObjectCount = (uint32_t)total_num_pinned_objects;
    HeapInfo.HeapStats.SinkBlockCount = total_num_sync_blocks;
    HeapInfo.HeapStats.GCHandleCount = (uint32_t)total_num_gc_handles;

    FireEtwGCHeapStats_V1(HeapInfo.HeapStats.GenInfo[0].GenerationSize, HeapInfo.HeapStats.GenInfo[0].TotalPromotedSize,
                          HeapInfo.HeapStats.GenInfo[1].GenerationSize, HeapInfo.HeapStats.GenInfo[1].TotalPromotedSize,
                          HeapInfo.HeapStats.GenInfo[2].GenerationSize, HeapInfo.HeapStats.GenInfo[2].TotalPromotedSize,
                          HeapInfo.HeapStats.GenInfo[3].GenerationSize, HeapInfo.HeapStats.GenInfo[3].TotalPromotedSize,
                          HeapInfo.HeapStats.FinalizationPromotedSize,
                          HeapInfo.HeapStats.FinalizationPromotedCount,
                          HeapInfo.HeapStats.PinnedObjectCount,
                          HeapInfo.HeapStats.SinkBlockCount,
                          HeapInfo.HeapStats.GCHandleCount,
                          GetClrInstanceId());
#endif // FEATURE_EVENT_TRACE

#if defined(ENABLE_PERF_COUNTERS)
    for (int gen_index = 0; gen_index <= (max_generation+1); gen_index++)
    {
        _ASSERTE(FitsIn<size_t>(g_GenerationSizes[gen_index]));
        _ASSERTE(FitsIn<size_t>(g_GenerationPromotedSizes[gen_index]));

        if (gen_index == (max_generation+1))
        {
            GetPerfCounters().m_GC.cLrgObjSize = static_cast<size_t>(g_GenerationSizes[gen_index]);
        }
        else
        {
            GetPerfCounters().m_GC.cGenHeapSize[gen_index] = ((gen_index == 0) ?
                                                                youngest_budget :
                                                                static_cast<size_t>(g_GenerationSizes[gen_index]));
        }

        // The perf counters only count the promoted size for gen0 and gen1.
        if (gen_index < max_generation)
        {
            GetPerfCounters().m_GC.cbPromotedMem[gen_index] = static_cast<size_t>(g_GenerationPromotedSizes[gen_index]);
        }

        if (gen_index <= max_generation)
        {
            GetPerfCounters().m_GC.cGenCollections[gen_index] =
                dd_collection_count (hp1->dynamic_data_of (gen_index));
        }
    }

    // Committed and reserved memory
    {
        size_t committed_mem = 0;
        size_t reserved_mem = 0;
#ifdef MULTIPLE_HEAPS
        int hn = 0;
        for (hn = 0; hn < gc_heap::n_heaps; hn++)
        {
            gc_heap* hp = gc_heap::g_heaps [hn];
#else
        gc_heap* hp = pGenGCHeap;
        {
#endif //MULTIPLE_HEAPS
            heap_segment* seg = generation_start_segment (hp->generation_of (max_generation));
            while (seg)
            {
                committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
                reserved_mem += heap_segment_reserved (seg) - heap_segment_mem (seg);
                seg = heap_segment_next (seg);
            }
            // Same for the large object segments.
            seg = generation_start_segment (hp->generation_of (max_generation + 1));
            while (seg)
            {
                committed_mem += heap_segment_committed (seg) -
                                 heap_segment_mem (seg);
                reserved_mem += heap_segment_reserved (seg) -
                                heap_segment_mem (seg);
                seg = heap_segment_next (seg);
            }
#ifdef MULTIPLE_HEAPS
        }
#else
        }
#endif //MULTIPLE_HEAPS

        GetPerfCounters().m_GC.cTotalCommittedBytes = committed_mem;
        GetPerfCounters().m_GC.cTotalReservedBytes = reserved_mem;
    }

    _ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.FinalizationPromotedSize));
    _ASSERTE(FitsIn<size_t>(HeapInfo.HeapStats.FinalizationPromotedCount));
    GetPerfCounters().m_GC.cbPromotedFinalizationMem = static_cast<size_t>(HeapInfo.HeapStats.FinalizationPromotedSize);
    GetPerfCounters().m_GC.cSurviveFinalize = static_cast<size_t>(HeapInfo.HeapStats.FinalizationPromotedCount);

    // Compute Time in GC
    PERF_COUNTER_TIMER_PRECISION _currentPerfCounterTimer = GET_CYCLE_COUNT();

    g_TotalTimeInGC = _currentPerfCounterTimer - g_TotalTimeInGC;
    PERF_COUNTER_TIMER_PRECISION _timeInGCBase = (_currentPerfCounterTimer - g_TotalTimeSinceLastGCEnd);

    if (_timeInGCBase < g_TotalTimeInGC)
        g_TotalTimeInGC = 0; // Unlikely, except on some SMP machines; perhaps we should
                             // guarantee _timeInGCBase >= g_TotalTimeInGC by setting affinity in GET_CYCLE_COUNT.

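    // Scale both values down together until the base fits in 32 bits; the
    // perf counter consumes timeInGC/timeInGCBase as a ratio, so shifting
    // both by the same amount preserves the reported percentage (e.g. if 1/5
    // of the elapsed cycles were spent in GC, it stays ~20% after scaling).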
    while (_timeInGCBase > UINT_MAX)
    {
        _timeInGCBase = _timeInGCBase >> 8;
        g_TotalTimeInGC = g_TotalTimeInGC >> 8;
    }

    // Update Total Time
    GetPerfCounters().m_GC.timeInGC = (uint32_t)g_TotalTimeInGC;
    GetPerfCounters().m_GC.timeInGCBase = (uint32_t)_timeInGCBase;

    if (!GetPerfCounters().m_GC.cProcessID)
        GetPerfCounters().m_GC.cProcessID = (size_t)GetCurrentProcessId();

    g_TotalTimeSinceLastGCEnd = _currentPerfCounterTimer;

    GetPerfCounters().m_GC.cPinnedObj = total_num_pinned_objects;
    GetPerfCounters().m_GC.cHandles = total_num_gc_handles;
    GetPerfCounters().m_GC.cSinkBlocks = total_num_sync_blocks;
#endif //ENABLE_PERF_COUNTERS
}

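// Returns an estimate of the total live object size: the bytes that survived
// the last GC plus the bytes allocated since then.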
size_t GCHeap::GetCurrentObjSize()
{
    return (totalSurvivedSize + gc_heap::get_total_allocated());
}

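// Returns the start time (dd_time_clock) of the last GC that collected the
// given generation.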
size_t GCHeap::GetLastGCStartTime(int generation)
{
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps[0];
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    return dd_time_clock (hp->dynamic_data_of (generation));
}

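// Returns the elapsed time of the last GC that collected the given generation.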
size_t GCHeap::GetLastGCDuration(int generation)
{
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps[0];
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    return dd_gc_elapsed_time (hp->dynamic_data_of (generation));
}

size_t GetHighPrecisionTimeStamp();

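// Returns the current high-precision timestamp, intended to be comparable
// with the values returned by GetLastGCStartTime.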
size_t GCHeap::GetNow()
{
    return GetHighPrecisionTimeStamp();
}

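// Returns TRUE if a GC is in progress; when bConsiderGCStart is TRUE, a GC
// that has started (gc_heap::gc_started) but has not yet set GcInProgress
// also counts.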
BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart)
{
    return GcInProgress || (bConsiderGCStart ? VolatileLoad(&gc_heap::gc_started) : FALSE);
}

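// Blocks the calling thread until the in-progress GC (if any) completes.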
uint32_t GCHeap::WaitUntilGCComplete(BOOL bConsiderGCStart)
{
    if (bConsiderGCStart)
    {
        if (gc_heap::gc_started)
        {
            gc_heap::wait_for_gc_done();
        }
    }

    uint32_t dwWaitResult = NOERROR;

    if (GcInProgress)
    {
        ASSERT( WaitForGCEvent->IsValid() );

#ifdef DETECT_DEADLOCK
        // Wait for the GC to complete.
BlockAgain:
        dwWaitResult = WaitForGCEvent->Wait(DETECT_DEADLOCK_TIMEOUT, FALSE );

        if (dwWaitResult == WAIT_TIMEOUT) {
            // Even in retail, stop in the debugger if available. Ideally, the
            // following would use DebugBreak, but debspew.h makes this a null
            // macro in retail. Note that in debug, we don't use the debspew.h
            // macros because these take a critical section that may have been
            // taken by a suspended thread.
            FreeBuildDebugBreak();
            goto BlockAgain;
        }

#else //DETECT_DEADLOCK

        dwWaitResult = WaitForGCEvent->Wait(INFINITE, FALSE );

#endif //DETECT_DEADLOCK
    }

    return dwWaitResult;
}

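// Records whether a GC is currently in progress; read back by
// IsGCInProgressHelper above.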
void GCHeap::SetGCInProgress(BOOL fInProgress)
{
    GcInProgress = fInProgress;
}

CLREvent * GCHeap::GetWaitForGCEvent()
{
    return WaitForGCEvent;
}

void GCHeap::WaitUntilConcurrentGCComplete()
{
#ifdef BACKGROUND_GC
    if (pGenGCHeap->settings.concurrent)
        pGenGCHeap->background_gc_wait();
#endif //BACKGROUND_GC
}

BOOL GCHeap::IsConcurrentGCInProgress()
{
#ifdef BACKGROUND_GC
    return pGenGCHeap->settings.concurrent;
#else
    return FALSE;
#endif //BACKGROUND_GC
}

#ifdef FEATURE_EVENT_TRACE
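// Fires the GCAllocationTick ETW event for an allocation: the amount
// allocated, whether it was a small or large object allocation, the
// allocated type's id and name (when they can be obtained), the heap
// number, and the object address.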
void gc_heap::fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address)
{
    void * typeId = nullptr;
    const WCHAR * name = nullptr;
#ifdef FEATURE_REDHAWK
    typeId = RedhawkGCInterface::GetLastAllocEEType();
#else
    InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;

    EX_TRY
    {
        TypeHandle th = GetThread()->GetTHAllocContextObj();

        if (th != 0)
        {
            th.GetName(strTypeName);
            name = strTypeName.GetUnicode();
            typeId = th.GetMethodTable();
        }
    }
    EX_CATCH {}
    EX_END_CATCH(SwallowAllExceptions)
#endif

    if (typeId != nullptr)
    {
        FireEtwGCAllocationTick_V3((uint32_t)allocation_amount,
                                   ((gen_number == 0) ? ETW::GCLog::ETW_GC_INFO::AllocationSmall : ETW::GCLog::ETW_GC_INFO::AllocationLarge),
                                   GetClrInstanceId(),
                                   allocation_amount,
                                   typeId,
                                   name,
                                   heap_number,
                                   object_address
                                   );
    }
}
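// Fires the PinObjectAtGCTime ETW event for a pinned object: its address,
// size, and type name (best effort; failures are swallowed).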
void gc_heap::fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject)
{
#ifdef FEATURE_REDHAWK
    UNREFERENCED_PARAMETER(object);
    UNREFERENCED_PARAMETER(ppObject);
#else
    Object* obj = (Object*)object;

    InlineSString<MAX_CLASSNAME_LENGTH> strTypeName;

    EX_TRY
    {
        FAULT_NOT_FATAL();

        TypeHandle th = obj->GetGCSafeTypeHandleIfPossible();
        if(th != NULL)
        {
            th.GetName(strTypeName);
        }

        FireEtwPinObjectAtGCTime(ppObject,
                                 object,
                                 obj->GetSize(),
                                 strTypeName.GetUnicode(),
                                 GetClrInstanceId());
    }
    EX_CATCH {}
    EX_END_CATCH(SwallowAllExceptions)
#endif // FEATURE_REDHAWK
}
#endif // FEATURE_EVENT_TRACE

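// Waits on the given event. Unless no_mode_change is set, a thread running in
// cooperative mode is switched to preemptive mode for the duration of the
// wait (so a GC can proceed while we block) and switched back afterwards.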
uint32_t gc_heap::user_thread_wait (CLREvent *event, BOOL no_mode_change, int time_out_ms)
{
    Thread* pCurThread = NULL;
    bool mode = false;
    uint32_t dwWaitResult = NOERROR;

    if (!no_mode_change)
    {
        pCurThread = GetThread();
        mode = pCurThread ? GCToEEInterface::IsPreemptiveGCDisabled(pCurThread) : false;
        if (mode)
        {
            GCToEEInterface::EnablePreemptiveGC(pCurThread);
        }
    }

    dwWaitResult = event->Wait(time_out_ms, FALSE);

    if (!no_mode_change && mode)
    {
        GCToEEInterface::DisablePreemptiveGC(pCurThread);
    }

    return dwWaitResult;
}

#ifdef BACKGROUND_GC
// Waits for the background GC to finish.
uint32_t gc_heap::background_gc_wait (alloc_wait_reason awr, int time_out_ms)
{
    dprintf(2, ("Waiting for background gc to finish"));
    assert (background_gc_done_event.IsValid());
    fire_alloc_wait_event_begin (awr);
    uint32_t dwRet = user_thread_wait (&background_gc_done_event, FALSE, time_out_ms);
    fire_alloc_wait_event_end (awr);
    dprintf(2, ("Done waiting for background gc"));

    return dwRet;
}

// Waits for the background GC to finish sweeping large objects.
void gc_heap::background_gc_wait_lh (alloc_wait_reason awr)
{
    dprintf(2, ("Waiting for background gc to finish the large object sweep"));
    assert (gc_lh_block_event.IsValid());
    fire_alloc_wait_event_begin (awr);
    user_thread_wait (&gc_lh_block_event, FALSE);
    fire_alloc_wait_event_end (awr);
    dprintf(2, ("Done waiting for the background large object sweep"));
}

#endif //BACKGROUND_GC

/******************************************************************************/
IGCHeapInternal* CreateGCHeap() {
    return new(nothrow) GCHeap();   // we return wks or svr
}

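// Fires a GCCreateSegment ETW event for every small and large object heap
// segment currently owned by the heap(s), for diagnostic tracing.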
void GCHeap::DiagTraceGCSegments()
{
#ifdef FEATURE_EVENT_TRACE
    heap_segment* seg = 0;
#ifdef MULTIPLE_HEAPS
    // Walk the segments in each heap.
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* h = gc_heap::g_heaps [i];
#else
    {
        gc_heap* h = pGenGCHeap;
#endif //MULTIPLE_HEAPS

        for (seg = generation_start_segment (h->generation_of (max_generation)); seg != 0; seg = heap_segment_next(seg))
        {
            ETW::GCLog::ETW_GC_INFO Info;
            Info.GCCreateSegment.Address = (size_t)heap_segment_mem(seg);
            Info.GCCreateSegment.Size = (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg));
            Info.GCCreateSegment.Type = (heap_segment_read_only_p (seg) ?
                                         ETW::GCLog::ETW_GC_INFO::READ_ONLY_HEAP :
                                         ETW::GCLog::ETW_GC_INFO::SMALL_OBJECT_HEAP);
            FireEtwGCCreateSegment_V1(Info.GCCreateSegment.Address, Info.GCCreateSegment.Size, Info.GCCreateSegment.Type, GetClrInstanceId());
        }

        // Large object segments.
        for (seg = generation_start_segment (h->generation_of (max_generation+1)); seg != 0; seg = heap_segment_next(seg))
        {
            FireEtwGCCreateSegment_V1((size_t)heap_segment_mem(seg),
                                      (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)),
                                      ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP,
                                      GetClrInstanceId());
        }
    }
#endif // FEATURE_EVENT_TRACE
}

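// Describes the generations to the profiler/ETW by invoking the supplied
// callback for each generation range.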
void GCHeap::DiagDescrGenerations (gen_walk_fn fn, void *context)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    pGenGCHeap->descr_generations_to_profiler(fn, context);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}

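// Wraps a caller-supplied block of read-only ("frozen") memory in a
// heap_segment and inserts it into the heap as a read-only segment.
// Returns NULL if the segment cannot be allocated or inserted.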
segment_handle GCHeap::RegisterFrozenSegment(segment_info *pseginfo)
{
#ifdef FEATURE_BASICFREEZE
    heap_segment * seg = new (nothrow) heap_segment;
    if (!seg)
    {
        return NULL;
    }

    uint8_t* base_mem = (uint8_t*)pseginfo->pvMem;
    heap_segment_mem(seg) = base_mem + pseginfo->ibFirstObject;
    heap_segment_allocated(seg) = base_mem + pseginfo->ibAllocated;
    heap_segment_committed(seg) = base_mem + pseginfo->ibCommit;
    heap_segment_reserved(seg) = base_mem + pseginfo->ibReserved;
    heap_segment_next(seg) = 0;
    heap_segment_used(seg) = heap_segment_allocated(seg);
    heap_segment_plan_allocated(seg) = 0;
    seg->flags = heap_segment_flags_readonly;

#if defined (MULTIPLE_HEAPS) && !defined (ISOLATED_HEAPS)
    gc_heap* heap = gc_heap::g_heaps[0];
    heap_segment_heap(seg) = heap;
#else
    gc_heap* heap = pGenGCHeap;
#endif //MULTIPLE_HEAPS && !ISOLATED_HEAPS

    if (heap->insert_ro_segment(seg) == FALSE)
    {
        delete seg;
        return NULL;
    }

    return reinterpret_cast< segment_handle >(seg);
#else
    assert(!"Should not call GCHeap::RegisterFrozenSegment without FEATURE_BASICFREEZE defined!");
    return NULL;
#endif // FEATURE_BASICFREEZE
}

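// Removes a segment previously registered with RegisterFrozenSegment.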
void GCHeap::UnregisterFrozenSegment(segment_handle seg)
{
#ifdef FEATURE_BASICFREEZE
#if defined (MULTIPLE_HEAPS) && !defined (ISOLATED_HEAPS)
    gc_heap* heap = gc_heap::g_heaps[0];
#else
    gc_heap* heap = pGenGCHeap;
#endif //MULTIPLE_HEAPS && !ISOLATED_HEAPS

    heap->remove_ro_segment(reinterpret_cast<heap_segment*>(seg));
#else
    assert(!"Should not call GCHeap::UnregisterFrozenSegment without FEATURE_BASICFREEZE defined!");
#endif // FEATURE_BASICFREEZE
}

#endif // !DACCESS_COMPILE