/**
 * \file
 * Internal lock-free memory allocator.
 *
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include "config.h"

#ifdef HAVE_SGEN_GC

#include <string.h>

#include "mono/sgen/sgen-gc.h"
#include "mono/utils/lock-free-alloc.h"
#include "mono/sgen/sgen-memory-governor.h"
#include "mono/sgen/sgen-client.h"

/*
 * When allocating sgen internal memory we choose the allocator with the
 * smallest slot size that can fit the requested size. These slots are
 * allocated within a block that can hold at least 2 slots of that size.
 *
 * Currently, slot sizes from 8 up to 2044/2040 (32-bit/64-bit) are allocated
 * inside 4096-byte blocks, 2728 up to 4092/4088 inside 8192-byte blocks, and
 * anything larger inside 16384-byte blocks. We also make sure the slots are
 * pointer-size aligned so we never hand out unaligned memory.
 *
 * The choice of these sizes stems from two basic rules:
 *	- if we use slots of size s1 that fit n times in a block, it makes no
 * sense to also use a slot size s2 that fits the same n times in a block;
 *	- for slots of size s1 that fit n times in a block, there must be no
 * s2 > s1 that also fits n times in the block, since that would waste memory
 * whenever we allocate a size S with s1 < S <= s2.
 */
#if SIZEOF_VOID_P == 4
static const int allocator_sizes [] = {
	   8,   16,   24,   32,   40,   48,   64,   80,
	  96,  124,  160,  192,  224,  252,  292,  340,
	 408,  452,  508,  584,  680,  816, 1020,
	1364, 2044, 2728, 4092, 5460, 8188 };
#else
static const int allocator_sizes [] = {
	   8,   16,   24,   32,   40,   48,   64,   80,
	  96,  128,  160,  192,  224,  248,  288,  336,
	 368,  448,  504,  584,  680,  816, 1016,
	1360, 2040, 2728, 4088, 5456, 8184 };
#endif
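
/*
 * A worked example of the two rules above, assuming 64-bit pointers and a
 * block header of one pointer (so a 4096-byte block has 4088 usable bytes,
 * which is what the maximum sizes in the table suggest): slots of 2040
 * bytes fit twice (2 * 2040 = 4080 <= 4088), and 2040 is the largest
 * pointer-aligned size that does, so no s2 > 2040 also fits twice. The
 * next size class, 2728, fits exactly three times in the 8184 usable bytes
 * of an 8192-byte block.
 */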

#define NUM_ALLOCATORS	(sizeof (allocator_sizes) / sizeof (int))

static int allocator_block_sizes [NUM_ALLOCATORS];

static MonoLockFreeAllocSizeClass size_classes [NUM_ALLOCATORS];
static MonoLockFreeAllocator allocators [NUM_ALLOCATORS];

#ifdef HEAVY_STATISTICS
static int allocator_sizes_stats [NUM_ALLOCATORS];
#endif

static size_t
block_size (size_t slot_size)
{
	static int pagesize = -1;

	int size;
	size_t aligned_slot_size = SGEN_ALIGN_UP_TO (slot_size, SIZEOF_VOID_P);

	if (pagesize == -1)
		pagesize = mono_pagesize ();

	for (size = pagesize; size < LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
		if (aligned_slot_size * 2 <= LOCK_FREE_ALLOC_SB_USABLE_SIZE (size))
			return size;
	}
	return LOCK_FREE_ALLOC_SB_MAX_SIZE;
}
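
/*
 * For example, with a 4 KiB page, 64-bit pointers and the one-pointer block
 * header assumed above: block_size (40) == 4096, block_size (2040) == 4096
 * (the largest slot that still fits twice), and block_size (2041) == 8192,
 * because 2041 is first aligned up to 2048 and 2 * 2048 exceeds the 4088
 * usable bytes of a 4 KiB block.
 */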

/*
 * Find the allocator index for memory chunks that can contain @size
 * objects.
 */
static int
index_for_size (size_t size)
{
	int slot;
	/* do a binary search or use a lookup table later. */
	for (slot = 0; slot < NUM_ALLOCATORS; ++slot) {
		if (allocator_sizes [slot] >= size)
			return slot;
	}
	g_assert_not_reached ();
	return -1;
}
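
/*
 * For example, with the 64-bit table above, index_for_size (100) returns
 * the slot whose size is 128, the smallest class that can hold 100 bytes.
 */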

/*
 * Allocator indexes for the fixed INTERNAL_MEM_XXX types.  -1 if that
 * type is dynamic.
 */
static int fixed_type_allocator_indexes [INTERNAL_MEM_MAX];

void
sgen_register_fixed_internal_mem_type (int type, size_t size)
{
	int slot;

	g_assert (type >= 0 && type < INTERNAL_MEM_MAX);
	g_assert (size <= allocator_sizes [NUM_ALLOCATORS - 1]);

	slot = index_for_size (size);
	g_assert (slot >= 0);

	if (fixed_type_allocator_indexes [type] == -1)
		fixed_type_allocator_indexes [type] = slot;
	else {
		if (fixed_type_allocator_indexes [type] != slot)
			g_error ("Invalid double registration of type %d old slot %d new slot %d", type, fixed_type_allocator_indexes [type], slot);
	}
}
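
/*
 * Callers typically register the fixed types once at startup, e.g. (a
 * hypothetical registration, assuming a SgenFragment struct):
 *
 *	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_FRAGMENT,
 *		sizeof (SgenFragment));
 *
 * after which sgen_alloc_internal (INTERNAL_MEM_FRAGMENT) draws from the
 * matching fixed-size allocator.
 */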

static const char*
description_for_type (int type)
{
	switch (type) {
	case INTERNAL_MEM_PIN_QUEUE: return "pin-queue";
	case INTERNAL_MEM_FRAGMENT: return "fragment";
	case INTERNAL_MEM_SECTION: return "section";
	case INTERNAL_MEM_SCAN_STARTS: return "scan-starts";
	case INTERNAL_MEM_FIN_TABLE: return "fin-table";
	case INTERNAL_MEM_FINALIZE_ENTRY: return "finalize-entry";
	case INTERNAL_MEM_FINALIZE_READY: return "finalize-ready";
	case INTERNAL_MEM_DISLINK_TABLE: return "dislink-table";
	case INTERNAL_MEM_DISLINK: return "dislink";
	case INTERNAL_MEM_ROOTS_TABLE: return "roots-table";
	case INTERNAL_MEM_ROOT_RECORD: return "root-record";
	case INTERNAL_MEM_STATISTICS: return "statistics";
	case INTERNAL_MEM_STAT_PINNED_CLASS: return "pinned-class";
	case INTERNAL_MEM_STAT_REMSET_CLASS: return "remset-class";
	case INTERNAL_MEM_GRAY_QUEUE: return "gray-queue";
	case INTERNAL_MEM_MS_TABLES: return "marksweep-tables";
	case INTERNAL_MEM_MS_BLOCK_INFO: return "marksweep-block-info";
	case INTERNAL_MEM_MS_BLOCK_INFO_SORT: return "marksweep-block-info-sort";
	case INTERNAL_MEM_WORKER_DATA: return "worker-data";
	case INTERNAL_MEM_THREAD_POOL_JOB: return "thread-pool-job";
	case INTERNAL_MEM_BRIDGE_DATA: return "bridge-data";
	case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE: return "old-bridge-hash-table";
	case INTERNAL_MEM_OLD_BRIDGE_HASH_TABLE_ENTRY: return "old-bridge-hash-table-entry";
	case INTERNAL_MEM_BRIDGE_HASH_TABLE: return "bridge-hash-table";
	case INTERNAL_MEM_BRIDGE_HASH_TABLE_ENTRY: return "bridge-hash-table-entry";
	case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE: return "tarjan-bridge-hash-table";
	case INTERNAL_MEM_TARJAN_BRIDGE_HASH_TABLE_ENTRY: return "tarjan-bridge-hash-table-entry";
	case INTERNAL_MEM_TARJAN_OBJ_BUCKET: return "tarjan-bridge-object-buckets";
	case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE: return "bridge-alive-hash-table";
	case INTERNAL_MEM_BRIDGE_ALIVE_HASH_TABLE_ENTRY: return "bridge-alive-hash-table-entry";
	case INTERNAL_MEM_BRIDGE_DEBUG: return "bridge-debug";
	case INTERNAL_MEM_TOGGLEREF_DATA: return "toggleref-data";
	case INTERNAL_MEM_CARDTABLE_MOD_UNION: return "cardtable-mod-union";
	case INTERNAL_MEM_BINARY_PROTOCOL: return "binary-protocol";
	case INTERNAL_MEM_TEMPORARY: return "temporary";
	case INTERNAL_MEM_LOG_ENTRY: return "log-entry";
	case INTERNAL_MEM_COMPLEX_DESCRIPTORS: return "complex-descriptors";
	default: {
		const char *description = sgen_client_description_for_internal_mem_type (type);
		SGEN_ASSERT (0, description, "Unknown internal mem type");
		return description;
	}
	}
}

void*
sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure)
{
	int index;
	void *p;

	if (size > allocator_sizes [NUM_ALLOCATORS - 1]) {
		p = sgen_alloc_os_memory (size, (SgenAllocFlags)(SGEN_ALLOC_INTERNAL | SGEN_ALLOC_ACTIVATE), NULL, MONO_MEM_ACCOUNT_SGEN_INTERNAL);
		if (!p)
			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
	} else {
		index = index_for_size (size);

#ifdef HEAVY_STATISTICS
		++ allocator_sizes_stats [index];
#endif

		p = mono_lock_free_alloc (&allocators [index]);
		if (!p)
			sgen_assert_memory_alloc (NULL, size, description_for_type (type));
		memset (p, 0, size);
	}

	SGEN_ASSERT (0, !(((mword)p) & (sizeof (gpointer) - 1)), "Why do we allocate unaligned addresses?");
	return p;
}
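
/*
 * Note that sgen_free_internal_dynamic below must be called with the same
 * size that was passed here, since the size decides whether the chunk came
 * from the OS or from one of the lock-free allocators.
 */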

void
sgen_free_internal_dynamic (void *addr, size_t size, int type)
{
	if (!addr)
		return;

	if (size > allocator_sizes [NUM_ALLOCATORS - 1])
		sgen_free_os_memory (addr, size, SGEN_ALLOC_INTERNAL, MONO_MEM_ACCOUNT_SGEN_INTERNAL);
	else
		mono_lock_free_free (addr, block_size (size));
}

void*
sgen_alloc_internal (int type)
{
	int index, size;
	void *p;

	index = fixed_type_allocator_indexes [type];
	g_assert (index >= 0 && index < NUM_ALLOCATORS);

#ifdef HEAVY_STATISTICS
	++ allocator_sizes_stats [index];
#endif

	size = allocator_sizes [index];

	p = mono_lock_free_alloc (&allocators [index]);
	memset (p, 0, size);

	SGEN_ASSERT (0, !(((mword)p) & (sizeof (gpointer) - 1)), "Why do we allocate unaligned addresses?");

	return p;
}
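
/*
 * A minimal usage sketch (hypothetical, assuming the type was registered
 * during initialization as shown above):
 *
 *	SgenFragment *frag = (SgenFragment *)sgen_alloc_internal (INTERNAL_MEM_FRAGMENT);
 *	...
 *	sgen_free_internal (frag, INTERNAL_MEM_FRAGMENT);
 *
 * The returned memory is always zeroed and pointer-aligned.
 */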

void
sgen_free_internal (void *addr, int type)
{
	int index;

	if (!addr)
		return;

	index = fixed_type_allocator_indexes [type];
	g_assert (index >= 0 && index < NUM_ALLOCATORS);

	mono_lock_free_free (addr, allocator_block_sizes [index]);
}

void
sgen_dump_internal_mem_usage (FILE *heap_dump_file)
{
	/*
	int i;

	fprintf (heap_dump_file, "<other-mem-usage type=\"large-internal\" size=\"%lld\"/>\n", large_internal_bytes_alloced);
	fprintf (heap_dump_file, "<other-mem-usage type=\"pinned-chunks\" size=\"%lld\"/>\n", pinned_chunk_bytes_alloced);
	for (i = 0; i < INTERNAL_MEM_MAX; ++i) {
		fprintf (heap_dump_file, "<other-mem-usage type=\"%s\" size=\"%ld\"/>\n",
				description_for_type (i), unmanaged_allocator.small_internal_mem_bytes [i]);
	}
	*/
}

void
sgen_report_internal_mem_usage (void)
{
	int i G_GNUC_UNUSED;
#ifdef HEAVY_STATISTICS
	printf ("size -> # allocations\n");
	for (i = 0; i < NUM_ALLOCATORS; ++i)
		printf ("%d -> %d\n", allocator_sizes [i], allocator_sizes_stats [i]);
#endif
}

void
sgen_init_internal_allocator (void)
{
	int i, size;

	for (i = 0; i < INTERNAL_MEM_MAX; ++i)
		fixed_type_allocator_indexes [i] = -1;

	for (i = 0; i < NUM_ALLOCATORS; ++i) {
		allocator_block_sizes [i] = block_size (allocator_sizes [i]);
		mono_lock_free_allocator_init_size_class (&size_classes [i], allocator_sizes [i], allocator_block_sizes [i]);
		mono_lock_free_allocator_init_allocator (&allocators [i], &size_classes [i], MONO_MEM_ACCOUNT_SGEN_INTERNAL);
	}

	for (size = mono_pagesize (); size <= LOCK_FREE_ALLOC_SB_MAX_SIZE; size <<= 1) {
		int max_size = (LOCK_FREE_ALLOC_SB_USABLE_SIZE (size) / 2) & ~(SIZEOF_VOID_P - 1);
		/*
		 * We assert that allocator_sizes contains the biggest possible
		 * object size per block, which must be a pointer-aligned value
		 * (4k => 2040, 8k => 4088, 16k => 8184 on 64-bit), so that we
		 * do not end up with different block sizes for sizes that
		 * should map to the same block.
		 */
		g_assert (allocator_sizes [index_for_size (max_size)] == max_size);
		g_assert (block_size (max_size) == size);
		if (size < LOCK_FREE_ALLOC_SB_MAX_SIZE)
			g_assert (block_size (max_size + 1) == size << 1);
	}
}
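
/*
 * To make the invariant concrete with 64-bit numbers, assuming one pointer
 * of block header (i.e. LOCK_FREE_ALLOC_SB_USABLE_SIZE (4096) == 4088):
 * for size == 4096, max_size == (4088 / 2) & ~7 == 2040, which is indeed
 * an entry in allocator_sizes; block_size (2040) == 4096 and
 * block_size (2041) == 8192, so every request of at most 2040 bytes shares
 * the 4 KiB block class.
 */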

#endif /* HAVE_SGEN_GC */