/**
 * \file
 * When to schedule collections based on memory usage.
 *
 * Author:
 * 	Rodrigo Kumpera (rkumpera@novell.com)
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright 2011 Xamarin Inc (http://www.xamarin.com)
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "config.h"
#ifdef HAVE_SGEN_GC

#include <stdlib.h>

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-memory-governor.h"
#include "mono/sgen/sgen-workers.h"
#include "mono/sgen/sgen-client.h"

/*
 * The allowance we are targeting is a third of the current heap size. Still, we
 * allow the heap to grow by at least 4 times the nursery size before triggering a
 * major collection, to reduce unnecessary collections. We make sure we don't set
 * the minimum allowance too high when using a soft heap limit.
 */
#define MIN_MINOR_COLLECTION_ALLOWANCE	(MIN(((mword)(sgen_nursery_size * default_allowance_nursery_size_ratio)), (soft_heap_limit * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO)))
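
/*
 * Illustrative numbers (an assumption for the sake of the example, not values
 * read from the code): with a 4MB nursery and the ratios described above
 * (4x nursery, 1/3 heap), the minimum allowance is 16MB, unless a soft heap
 * limit is set low enough that a third of it is smaller, in which case that
 * smaller value wins.
 */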

static SgenPointerQueue log_entries = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_TEMPORARY);
static mono_mutex_t log_entries_mutex;

mword total_promoted_size = 0;
mword total_allocated_major = 0;
static mword total_promoted_size_start;
static mword total_allocated_major_end;

/* Heap limits and allocation knobs */
static mword max_heap_size = ((mword)0) - ((mword)1);
static mword soft_heap_limit = ((mword)0) - ((mword)1);

static double default_allowance_nursery_size_ratio = SGEN_DEFAULT_ALLOWANCE_NURSERY_SIZE_RATIO;
static double save_target_ratio = SGEN_DEFAULT_SAVE_TARGET_RATIO;

/**/
static mword allocated_heap;
static mword total_alloc = 0;
static mword total_alloc_max = 0;

static SGEN_TV_DECLARE(last_minor_start);
static SGEN_TV_DECLARE(last_major_start);

/* GC triggers. */

static gboolean debug_print_allowance = FALSE;


/* use this to tune when to do a major/minor collection */
static mword major_collection_trigger_size;

static mword major_pre_sweep_heap_size;
static mword major_start_heap_size;

static gboolean need_calculate_minor_collection_allowance;

/* The size of the LOS after the last major collection, after sweeping. */
static mword last_collection_los_memory_usage = 0;
static mword last_used_slots_size = 0;

static mword sgen_memgov_available_free_space (void);


/* GC trigger heuristics. */

static void
sgen_memgov_calculate_minor_collection_allowance (void)
{
	size_t new_major, new_heap_size, allowance_target, allowance;
	size_t decrease;

	if (!need_calculate_minor_collection_allowance)
		return;

	SGEN_ASSERT (0, major_collector.have_swept (), "Can only calculate allowance if heap is swept");

	new_major = major_collector.get_bytes_survived_last_sweep ();
	new_heap_size = new_major + last_collection_los_memory_usage;

	/*
	 * We allow the heap to grow by one third of its current size before we start
	 * the next major collection.
	 */
	allowance_target = new_heap_size * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO;

	allowance = MAX (allowance_target, MIN_MINOR_COLLECTION_ALLOWANCE);

	/*
	 * For the concurrent collector, we decrease the allowance relative to the
	 * memory growth during the M&S phase, the survival rate of the collection
	 * and the allowance ratio.
	 */
	decrease = (major_pre_sweep_heap_size - major_start_heap_size) * ((float)new_heap_size / major_pre_sweep_heap_size) * (SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO + 1);
	if (decrease > allowance)
		decrease = allowance;
	allowance -= decrease;
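
	/*
	 * Worked example with made-up numbers (purely illustrative, assuming the
	 * 1/3 heap-size ratio mentioned above): if the heap was 100MB at the start
	 * of the concurrent collection, 120MB before sweeping, and 90MB survived,
	 * then decrease = 20MB * (90/120) * (1/3 + 1) = 20MB, so a 30MB allowance
	 * (one third of 90MB) shrinks to 10MB.
	 */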

	if (new_heap_size + allowance > soft_heap_limit) {
		if (new_heap_size > soft_heap_limit)
			allowance = MIN_MINOR_COLLECTION_ALLOWANCE;
		else
			allowance = MAX (soft_heap_limit - new_heap_size, MIN_MINOR_COLLECTION_ALLOWANCE);
	}

	/* FIXME: Why is this here? */
	if (major_collector.free_swept_blocks)
		major_collector.free_swept_blocks (major_collector.get_num_major_sections () * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO);

	major_collection_trigger_size = new_heap_size + allowance;

	need_calculate_minor_collection_allowance = FALSE;

	if (debug_print_allowance) {
		SGEN_LOG (0, "Surviving sweep: %ld bytes (%ld major, %ld LOS)", (long)new_heap_size, (long)new_major, (long)last_collection_los_memory_usage);
		SGEN_LOG (0, "Allowance: %ld bytes", (long)allowance);
		SGEN_LOG (0, "Trigger size: %ld bytes", (long)major_collection_trigger_size);
	}
}

static inline size_t
get_heap_size (void)
{
	return major_collector.get_num_major_sections () * major_collector.section_size + los_memory_usage;
}

gboolean
sgen_need_major_collection (mword space_needed)
{
	size_t heap_size;

	if (sgen_concurrent_collection_in_progress ()) {
		heap_size = get_heap_size ();

		if (heap_size <= major_collection_trigger_size)
			return FALSE;

		/*
		 * The more the heap grows, the more we need to decrease the allowance above,
		 * in order to have trigger sizes similar to those of the synchronous collector.
		 * If the heap grows so much that we would need a negative allowance, we force
		 * the collection to finish, to avoid increased memory usage.
		 */
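		/*
		 * Illustrative example (made-up sizes, assuming the default 1/3 heap-size
		 * ratio): if the concurrent collection started with a 100MB heap, the check
		 * below forces it to finish once the heap exceeds roughly 133MB.
		 */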
		if ((heap_size - major_start_heap_size) > major_start_heap_size * SGEN_DEFAULT_ALLOWANCE_HEAP_SIZE_RATIO)
			return TRUE;
		return FALSE;
	}

	/* FIXME: This is a cop-out.  We should have some way of figuring this out. */
	if (!major_collector.have_swept ())
		return FALSE;

	if (space_needed > sgen_memgov_available_free_space ())
		return TRUE;

	sgen_memgov_calculate_minor_collection_allowance ();

	heap_size = get_heap_size ();

	return heap_size > major_collection_trigger_size;
}

void
sgen_memgov_minor_collection_start (void)
{
	total_promoted_size_start = total_promoted_size;
	SGEN_TV_GETTIME (last_minor_start);
}

static void
sgen_add_log_entry (SgenLogEntry *log_entry)
{
	mono_os_mutex_lock (&log_entries_mutex);
	sgen_pointer_queue_add (&log_entries, log_entry);
	mono_os_mutex_unlock (&log_entries_mutex);
}

void
sgen_memgov_minor_collection_end (const char *reason, gboolean is_overflow)
{
	if (mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_GC)) {
		SgenLogEntry *log_entry = (SgenLogEntry*)sgen_alloc_internal (INTERNAL_MEM_LOG_ENTRY);
		SGEN_TV_DECLARE (current_time);
		SGEN_TV_GETTIME (current_time);

		log_entry->type = SGEN_LOG_NURSERY;
		log_entry->reason = reason;
		log_entry->is_overflow = is_overflow;
		log_entry->time = SGEN_TV_ELAPSED (last_minor_start, current_time);
		log_entry->promoted_size = total_promoted_size - total_promoted_size_start;
		log_entry->major_size = major_collector.get_num_major_sections () * major_collector.section_size;
		log_entry->major_size_in_use = last_used_slots_size + total_allocated_major - total_allocated_major_end;
		log_entry->los_size = los_memory_usage_total;
		log_entry->los_size_in_use = los_memory_usage;

		sgen_add_log_entry (log_entry);
	}
}

void
sgen_memgov_major_pre_sweep (void)
{
	if (sgen_concurrent_collection_in_progress ()) {
		major_pre_sweep_heap_size = get_heap_size ();
	} else {
		/* We decrease the allowance only in the concurrent case */
		major_pre_sweep_heap_size = major_start_heap_size;
	}
}

void
sgen_memgov_major_post_sweep (mword used_slots_size)
{
	if (mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_GC)) {
		SgenLogEntry *log_entry = (SgenLogEntry*)sgen_alloc_internal (INTERNAL_MEM_LOG_ENTRY);

		log_entry->type = SGEN_LOG_MAJOR_SWEEP_FINISH;
		log_entry->major_size = major_collector.get_num_major_sections () * major_collector.section_size;
		log_entry->major_size_in_use = used_slots_size + total_allocated_major - total_allocated_major_end;

		sgen_add_log_entry (log_entry);
	}
	last_used_slots_size = used_slots_size;
}

void
sgen_memgov_major_collection_start (gboolean concurrent, const char *reason)
{
	need_calculate_minor_collection_allowance = TRUE;
	major_start_heap_size = get_heap_size ();

	if (debug_print_allowance) {
		SGEN_LOG (0, "Starting collection with heap size %ld bytes", (long)major_start_heap_size);
	}
	if (concurrent && mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_GC)) {
		SgenLogEntry *log_entry = (SgenLogEntry*)sgen_alloc_internal (INTERNAL_MEM_LOG_ENTRY);

		log_entry->type = SGEN_LOG_MAJOR_CONC_START;
		log_entry->reason = reason;

		sgen_add_log_entry (log_entry);
	}
	SGEN_TV_GETTIME (last_major_start);
}

void
sgen_memgov_major_collection_end (gboolean forced, gboolean concurrent, const char *reason, gboolean is_overflow)
{
	if (mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_GC)) {
		SgenLogEntry *log_entry = (SgenLogEntry*)sgen_alloc_internal (INTERNAL_MEM_LOG_ENTRY);
		SGEN_TV_DECLARE (current_time);
		SGEN_TV_GETTIME (current_time);

		if (concurrent) {
			log_entry->type = SGEN_LOG_MAJOR_CONC_FINISH;
		} else {
			log_entry->type = SGEN_LOG_MAJOR_SERIAL;
		}
		log_entry->time = SGEN_TV_ELAPSED (last_major_start, current_time);
		log_entry->reason = reason;
		log_entry->is_overflow = is_overflow;
		log_entry->los_size = los_memory_usage_total;
		log_entry->los_size_in_use = los_memory_usage;

		sgen_add_log_entry (log_entry);
	}

	last_collection_los_memory_usage = los_memory_usage;
	total_allocated_major_end = total_allocated_major;
	if (forced) {
		sgen_get_major_collector ()->finish_sweeping ();
		sgen_memgov_calculate_minor_collection_allowance ();
	}
}

void
sgen_memgov_collection_start (int generation)
{
}

static void
sgen_output_log_entry (SgenLogEntry *entry, gint64 stw_time, int generation)
{
	char full_timing_buff [1024];
	full_timing_buff [0] = '\0';

	if (!entry->is_overflow)
		sprintf (full_timing_buff, "stw %.2fms", stw_time / 10000.0f);

	switch (entry->type) {
		case SGEN_LOG_NURSERY:
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MINOR%s: (%s) time %.2fms, %s promoted %zdK major size: %zdK in use: %zdK los size: %zdK in use: %zdK",
				entry->is_overflow ? "_OVERFLOW" : "",
				entry->reason ? entry->reason : "",
				entry->time / 10000.0f,
				(generation == GENERATION_NURSERY) ? full_timing_buff : "",
				entry->promoted_size / 1024,
				entry->major_size / 1024,
				entry->major_size_in_use / 1024,
				entry->los_size / 1024,
				entry->los_size_in_use / 1024);
			break;
		case SGEN_LOG_MAJOR_SERIAL:
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MAJOR%s: (%s) time %.2fms, %s los size: %zdK in use: %zdK",
				entry->is_overflow ? "_OVERFLOW" : "",
				entry->reason ? entry->reason : "",
				(int)entry->time / 10000.0f,
				full_timing_buff,
				entry->los_size / 1024,
				entry->los_size_in_use / 1024);
			break;
		case SGEN_LOG_MAJOR_CONC_START:
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MAJOR_CONCURRENT_START: (%s)", entry->reason ? entry->reason : "");
			break;
		case SGEN_LOG_MAJOR_CONC_FINISH:
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MAJOR_CONCURRENT_FINISH: (%s) time %.2fms, %s los size: %zdK in use: %zdK",
				entry->reason ? entry->reason : "",
				entry->time / 10000.0f,
				full_timing_buff,
				entry->los_size / 1024,
				entry->los_size_in_use / 1024);
			break;
		case SGEN_LOG_MAJOR_SWEEP_FINISH:
			mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_GC, "GC_MAJOR_SWEEP: major size: %zdK in use: %zdK",
				entry->major_size / 1024,
				entry->major_size_in_use / 1024);
			break;
		default:
			SGEN_ASSERT (0, FALSE, "Invalid log entry type");
			break;
	}
}

void
sgen_memgov_collection_end (int generation, gint64 stw_time)
{
	/*
	 * At this point the world has been restarted, which means we can log all pending
	 * entries without risking deadlocks.
	 */
	if (mono_trace_is_traced (G_LOG_LEVEL_INFO, MONO_TRACE_GC)) {
		size_t i;
		SGEN_ASSERT (0, !sgen_is_world_stopped (), "We can't log if the world is stopped");
		mono_os_mutex_lock (&log_entries_mutex);
		for (i = 0; i < log_entries.next_slot; i++) {
			sgen_output_log_entry (log_entries.data [i], stw_time, generation);
			sgen_free_internal (log_entries.data [i], INTERNAL_MEM_LOG_ENTRY);
		}
		sgen_pointer_queue_clear (&log_entries);
		mono_os_mutex_unlock (&log_entries_mutex);
	}
}

/*
 * Global GC memory tracking.
 * This tracks the total memory usage of the GC, including both managed and
 * unmanaged memory.
 */

static unsigned long
prot_flags_for_activate (int activate)
{
	unsigned long prot_flags = activate ? MONO_MMAP_READ|MONO_MMAP_WRITE : MONO_MMAP_NONE;
	return prot_flags | MONO_MMAP_PRIVATE | MONO_MMAP_ANON;
}

void
sgen_assert_memory_alloc (void *ptr, size_t requested_size, const char *assert_description)
{
	if (ptr || !assert_description)
		return;
	fprintf (stderr, "Error: Garbage collector could not allocate %zu bytes of memory for %s.\n", requested_size, assert_description);
	exit (1);
}

/*
 * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
 * This must not require any lock.
 */
void*
sgen_alloc_os_memory (size_t size, SgenAllocFlags flags, const char *assert_description, MonoMemAccountType type)
{
	void *ptr;

	g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));

	ptr = mono_valloc (0, size, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE), type);
	sgen_assert_memory_alloc (ptr, size, assert_description);
	if (ptr) {
		SGEN_ATOMIC_ADD_P (total_alloc, size);
		total_alloc_max = MAX (total_alloc_max, total_alloc);
	}
	return ptr;
}
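
/*
 * Illustrative usage sketch (not an actual call site; the size and the
 * memory-account type below are assumptions made only for the example):
 *
 *   void *block = sgen_alloc_os_memory (16 * 1024 * 1024,
 *           (SgenAllocFlags)(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE),
 *           "illustrative block", MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
 *   ...
 *   sgen_free_os_memory (block, 16 * 1024 * 1024, SGEN_ALLOC_HEAP,
 *           MONO_MEM_ACCOUNT_SGEN_MARKSWEEP);
 */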

/* size must be a power of 2 */
// FIXME: remove assert_description
void*
sgen_alloc_os_memory_aligned (size_t size, mword alignment, SgenAllocFlags flags, const char *assert_description, MonoMemAccountType type)
{
	void *ptr;

	g_assert (!(flags & ~(SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE)));

	ptr = mono_valloc_aligned (size, alignment, prot_flags_for_activate (flags & SGEN_ALLOC_ACTIVATE), type);
	sgen_assert_memory_alloc (ptr, size, assert_description);
	if (ptr) {
		SGEN_ATOMIC_ADD_P (total_alloc, size);
		total_alloc_max = MAX (total_alloc_max, total_alloc);
	}
	return ptr;
}

/*
 * Free the memory returned by sgen_alloc_os_memory (), returning it to the OS.
 */
void
sgen_free_os_memory (void *addr, size_t size, SgenAllocFlags flags, MonoMemAccountType type)
{
	g_assert (!(flags & ~SGEN_ALLOC_HEAP));

	mono_vfree (addr, size, type);
	SGEN_ATOMIC_ADD_P (total_alloc, -(gssize)size);
	total_alloc_max = MAX (total_alloc_max, total_alloc);
}

size_t
sgen_gc_get_total_heap_allocation (void)
{
	return total_alloc;
}


/*
 * Heap sizing limits.
 * This limits the maximum size of the heap. It takes into account only memory
 * actively in use to hold heap objects, not memory used by other parts of the GC.
 */
static mword
sgen_memgov_available_free_space (void)
{
	return max_heap_size - MIN (allocated_heap, max_heap_size);
}

void
sgen_memgov_release_space (mword size, int space)
{
	SGEN_ATOMIC_ADD_P (allocated_heap, -(gssize)size);
}

gboolean
sgen_memgov_try_alloc_space (mword size, int space)
{
	if (sgen_memgov_available_free_space () < size) {
		SGEN_ASSERT (4, !sgen_workers_is_worker_thread (mono_native_thread_id_get ()), "Memory shouldn't run out in worker thread");
		return FALSE;
	}

	SGEN_ATOMIC_ADD_P (allocated_heap, size);
	sgen_client_total_allocated_heap_changed (allocated_heap);
	return TRUE;
}
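
/*
 * Illustrative caller pattern (a sketch only; the real call sites live in the
 * LOS and major-collector allocation paths, and the SPACE_LOS constant is an
 * assumption made for the example):
 *
 *   if (!sgen_memgov_try_alloc_space (alloc_size, SPACE_LOS)) {
 *           // over the heap budget: trigger a collection or report OOM
 *   }
 *   ...
 *   sgen_memgov_release_space (alloc_size, SPACE_LOS);  // when the memory is returned
 */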

void
sgen_memgov_init (size_t max_heap, size_t soft_limit, gboolean debug_allowance, double allowance_ratio, double save_target)
{
	if (soft_limit)
		soft_heap_limit = soft_limit;

	debug_print_allowance = debug_allowance;
	major_collection_trigger_size = MIN_MINOR_COLLECTION_ALLOWANCE;

	mono_counters_register ("Memgov alloc", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES | MONO_COUNTER_VARIABLE, (void*)&total_alloc);
	mono_counters_register ("Memgov max alloc", MONO_COUNTER_GC | MONO_COUNTER_WORD | MONO_COUNTER_BYTES | MONO_COUNTER_MONOTONIC, (void*)&total_alloc_max);

	mono_os_mutex_init (&log_entries_mutex);

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_LOG_ENTRY, sizeof (SgenLogEntry));

	if (max_heap == 0)
		return;

	if (max_heap < soft_limit) {
		sgen_env_var_error (MONO_GC_PARAMS_NAME, "Setting to minimum.", "`max-heap-size` must be at least as large as `soft-heap-limit`.");
		max_heap = soft_limit;
	}

	if (max_heap < SGEN_DEFAULT_NURSERY_SIZE * 4) {
		sgen_env_var_error (MONO_GC_PARAMS_NAME, "Setting to minimum.", "`max-heap-size` must be at least 4 times as large as `nursery size`.");
		max_heap = SGEN_DEFAULT_NURSERY_SIZE * 4;
	}
	max_heap_size = max_heap - SGEN_DEFAULT_NURSERY_SIZE;

	if (allowance_ratio)
		default_allowance_nursery_size_ratio = allowance_ratio;

	if (save_target)
		save_target_ratio = save_target;
}
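
/*
 * Example (illustrative values only, assuming the caller has parsed them from
 * MONO_GC_PARAMS): a 512MB max heap with a 256MB soft limit and the default
 * allowance and save-target ratios would be set up as
 *
 *   sgen_memgov_init ((size_t)512 * 1024 * 1024, (size_t)256 * 1024 * 1024,
 *           FALSE, 0.0, 0.0);
 *
 * Passing 0 for allowance_ratio and save_target keeps the defaults.
 */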

#endif