/**
 * \file
 * Gray queue management.
 *
 * Copyright 2001-2003 Ximian, Inc
 * Copyright 2003-2010 Novell, Inc.
 * Copyright (C) 2012 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include "config.h"
#ifdef HAVE_SGEN_GC

#include "mono/sgen/sgen-gc.h"
#include "mono/sgen/sgen-protocol.h"

#ifdef HEAVY_STATISTICS
guint64 stat_gray_queue_section_alloc;
guint64 stat_gray_queue_section_free;
guint64 stat_gray_queue_enqueue_fast_path;
guint64 stat_gray_queue_dequeue_fast_path;
guint64 stat_gray_queue_enqueue_slow_path;
guint64 stat_gray_queue_dequeue_slow_path;
#endif

#define GRAY_QUEUE_LENGTH_LIMIT	64

#ifdef SGEN_CHECK_GRAY_OBJECT_SECTIONS
#define STATE_TRANSITION(s,o,n)	do {					\
		int __old = (o);					\
		if (mono_atomic_cas_i32 ((volatile int*)&(s)->state, (n), __old) != __old) \
			g_assert_not_reached ();			\
	} while (0)
#define STATE_SET(s,v)		(s)->state = (v)
#define STATE_ASSERT(s,v)	g_assert ((s)->state == (v))
#else
#define STATE_TRANSITION(s,o,n)
#define STATE_SET(s,v)
#define STATE_ASSERT(s,v)
#endif

/*
 * Whenever we dispose a gray queue, we save its free list.  Then, in the next collection,
 * we reuse that free list for the new gray queue.
 */
static GrayQueueSection *last_gray_queue_free_list;

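/*
 * Make a fresh, empty section the new head of the queue: reuse a section from
 * the queue's free list when one is available, otherwise allocate one from the
 * internal allocator.
 */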
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueSection *section;

	if (queue->free_list) {
		/* Use the previously allocated queue sections if possible */
		section = queue->free_list;
		queue->free_list = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		HEAVY_STAT (stat_gray_queue_section_alloc ++);

		/* Allocate a new section */
		section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	/* Section is empty */
	section->size = 0;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link it with the others */
	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = section->entries - 1;

	if (is_parallel) {
		mono_memory_write_barrier ();
		/*
		 * FIXME
		 * We could probably optimize the code to rely only on the write barrier
		 * for synchronization with the stealer thread.  Additionally, we could
		 * issue a write barrier only every other gray queue change and require a
		 * minimum number of sections before stealing, to keep consistency.
		 */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}

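/*
 * Return a floating section to the internal allocator.
 */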
void
sgen_gray_object_free_queue_section (GrayQueueSection *section)
{
	HEAVY_STAT (stat_gray_queue_section_free ++);

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
	sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
}

/*
 * The following two functions are called in the inner loops of the
 * collector, so they need to be as fast as possible.  We have macros
 * for them in sgen-gc.h.
 */

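/*
 * Rough sketch of the fast path (the actual inline macros live in sgen-gc.h):
 * as long as the cursor stays inside the current head section, pushing and
 * popping is just a cursor bump, and only section boundaries fall through to
 * the slow-path functions below.  Roughly:
 *
 *	if (queue->first && queue->cursor < GRAY_LAST_CURSOR_POSITION (queue->first))
 *		*++queue->cursor = SGEN_GRAY_QUEUE_ENTRY (obj, desc);		// fast path
 *	else
 *		sgen_gray_object_enqueue (queue, obj, desc, FALSE);		// slow path
 */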
void
sgen_gray_object_enqueue (SgenGrayQueue *queue, GCObject *obj, SgenDescriptor desc, gboolean is_parallel)
{
	GrayQueueEntry entry = SGEN_GRAY_QUEUE_ENTRY (obj, desc);

	HEAVY_STAT (stat_gray_queue_enqueue_slow_path ++);

	SGEN_ASSERT (9, obj, "enqueueing a null object");
	//sgen_check_objref (obj);

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func)
		queue->enqueue_check_func (obj);
#endif

	if (G_UNLIKELY (!queue->first || queue->cursor == GRAY_LAST_CURSOR_POSITION (queue->first))) {
		if (queue->first) {
			/*
			 * We don't actively update the section size with each push/pop. For the first
			 * section we determine the size from the cursor position. For the rest of the
			 * sections we need to have the size set.
			 */
			queue->first->size = SGEN_GRAY_QUEUE_SECTION_SIZE;
		}

		sgen_gray_object_alloc_queue_section (queue, is_parallel);
	}
	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor <= GRAY_LAST_CURSOR_POSITION (queue->first), "gray queue %p overflow, first %p, cursor %p", queue, queue->first, queue->cursor);
	*++queue->cursor = entry;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_enqueue (queue, queue->cursor, obj);
#endif
}

/*
 * We attempt to spread the objects in the gray queue across a number of
 * sections.  If the queue already has at least that many sections, it is
 * already spread; if there aren't enough entries for that many sections, we
 * allocate as many as we can.
 */
void
sgen_gray_object_spread (SgenGrayQueue *queue, int num_sections)
{
	GrayQueueSection *section_start, *section_end;
	int total_entries = 0, num_entries_per_section;
	int num_sections_final;

	if (queue->num_sections >= num_sections)
		return;

	if (!queue->first)
		return;

	/* Compute number of elements in the gray queue */
	queue->first->size = queue->cursor - queue->first->entries + 1;
	total_entries = queue->first->size;
	for (section_start = queue->first->next; section_start != NULL; section_start = section_start->next) {
		SGEN_ASSERT (0, section_start->size == SGEN_GRAY_QUEUE_SECTION_SIZE, "We expect all sections aside from the first one to be full");
		total_entries += section_start->size;
	}

	/* Compute how many sections we should have and elements per section */
	num_sections_final = (total_entries > num_sections) ? num_sections : total_entries;
	num_entries_per_section = total_entries / num_sections_final;

	/* Allocate all needed sections */
	while (queue->num_sections < num_sections_final)
		sgen_gray_object_alloc_queue_section (queue, TRUE);

	/* Spread out the elements in the sections. By design, sections at the end are fuller. */
	section_start = queue->first;
	section_end = queue->last;
	while (section_start != section_end) {
		/* We move entries from end to start, until they meet */
		while (section_start->size < num_entries_per_section) {
			GrayQueueEntry entry;
			if (section_end->size <= num_entries_per_section) {
				section_end = section_end->prev;
				if (section_end == section_start)
					break;
			}
			if (section_end->size <= num_entries_per_section)
				break;

			section_end->size--;
			entry = section_end->entries [section_end->size];
			section_start->entries [section_start->size] = entry;
			section_start->size++;
		}
		section_start = section_start->next;
	}

	queue->cursor = queue->first->entries + queue->first->size - 1;
	queue->num_sections = num_sections_final;
}

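/*
 * Pop a single entry off the head section.  When the head section becomes
 * empty it is moved to the queue's free list; in parallel mode the last
 * remaining section is retired under the steal lock so we cannot race with a
 * stealer working on that same section.
 */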
GrayQueueEntry
sgen_gray_object_dequeue (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueEntry entry;

	HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

	if (sgen_gray_object_queue_is_empty (queue)) {
		entry.obj = NULL;
		return entry;
	}

	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

	entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif

	if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
		GrayQueueSection *section;
		gint32 old_num_sections = 0;

		if (is_parallel)
			old_num_sections = mono_atomic_dec_i32 (&queue->num_sections);
		else
			queue->num_sections--;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_lock (&queue->steal_mutex);
		}

		section = queue->first;
		queue->first = section->next;
		if (queue->first) {
			queue->first->prev = NULL;
		} else {
			queue->last = NULL;
			SGEN_ASSERT (0, !old_num_sections, "Why do we have an inconsistent number of sections?");
		}
		section->next = queue->free_list;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);

		queue->free_list = section;
		queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_unlock (&queue->steal_mutex);
		}
	}

	return entry;
}

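/*
 * Unlink and return the head section of the queue, fixing up the cursor to
 * point into the new head.  Only used for queues that are never stolen from,
 * so num_sections can be updated without atomics.
 */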
GrayQueueSection*
sgen_gray_object_dequeue_section (SgenGrayQueue *queue)
{
	GrayQueueSection *section;

	if (!queue->first)
		return NULL;

	/* We never steal from this queue */
	queue->num_sections--;

	section = queue->first;
	queue->first = section->next;
	if (queue->first)
		queue->first->prev = NULL;
	else
		queue->last = NULL;

	section->next = NULL;
	section->size = queue->cursor - section->entries + 1;

	queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

	return section;
}

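/*
 * Called by worker threads to take the tail section of another worker's queue.
 * Returns NULL if the queue has at most one section left or if the steal lock
 * is contended.
 */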
GrayQueueSection*
sgen_gray_object_steal_section (SgenGrayQueue *queue)
{
	gint32 sections_remaining;
	GrayQueueSection *section = NULL;

	/*
	 * Whenever a section is pushed onto or popped from the queue, num_sections
	 * is updated accordingly.  There is only one thread accessing the top (the
	 * owner) and potentially multiple workers trying to steal sections from the
	 * bottom, so we need to lock.  A num_sections decrement by the owner means
	 * that the first section is reserved, while a decrement by a stealer means
	 * that the last section is reserved.  If, after we decrement num_sections,
	 * at least one more section is present, we cannot be racing with the other
	 * thread.  If that is not the case, the stealing end abandons the pop and
	 * restores num_sections, while the owner end takes the lock to make sure it
	 * is not racing with the stealer (since the stealer might have popped an
	 * entry and be in the process of updating the entry that the owner is
	 * trying to pop).
	 */

	if (queue->num_sections <= 1)
		return NULL;

	/* Give up if there is contention on the last section */
	if (mono_os_mutex_trylock (&queue->steal_mutex) != 0)
		return NULL;

	sections_remaining = mono_atomic_dec_i32 (&queue->num_sections);
	if (sections_remaining <= 0) {
		/* The section that we tried to steal might be the head of the queue. */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		/* We have reserved for ourselves the tail section of the queue */
		section = queue->last;
		SGEN_ASSERT (0, section, "Why don't we have any sections to steal?");
		SGEN_ASSERT (0, !section->next, "Why aren't we stealing the tail?");
		queue->last = section->prev;
		section->prev = NULL;
		SGEN_ASSERT (0, queue->last, "Why are we stealing the last section?");
		queue->last->next = NULL;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	mono_os_mutex_unlock (&queue->steal_mutex);
	return section;
}

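/*
 * Link a floating section in as the new head of the queue, recording the size
 * of the previous head (derived from the cursor) before it is pushed down.
 */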
void
sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section, gboolean is_parallel)
{
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	if (queue->first)
		queue->first->size = queue->cursor - queue->first->entries + 1;

	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = queue->first->entries + queue->first->size - 1;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif
	if (is_parallel) {
		mono_memory_write_barrier ();
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}

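/*
 * Cap the free list at GRAY_QUEUE_LENGTH_LIMIT sections and release the
 * remainder back to the internal allocator.
 */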
void
sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue)
{
	GrayQueueSection *section, *next;
	int i = 0;
	for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next) {
		STATE_ASSERT (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
		i ++;
	}
	if (!section)
		return;
	while (section->next) {
		next = section->next;
		section->next = next->next;
		STATE_TRANSITION (next, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (next);
	}
}

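/*
 * Initialize an empty gray queue, optionally seeding its free list with the
 * sections saved from the previously disposed queue.
 */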
void
sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func, gboolean reuse_free_list)
{
	memset (queue, 0, sizeof (SgenGrayQueue));

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	queue->enqueue_check_func = enqueue_check_func;
#endif

	mono_os_mutex_init (&queue->steal_mutex);

	if (reuse_free_list) {
		queue->free_list = last_gray_queue_free_list;
		last_gray_queue_free_list = NULL;
	}
}

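/*
 * Dispose of an empty gray queue, trimming its free list and stashing what is
 * left in last_gray_queue_free_list so the next collection can reuse it.
 */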
void
sgen_gray_object_queue_dispose (SgenGrayQueue *queue)
{
	SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (queue), "Why are we disposing a gray queue that's not empty?");

	/* Free the extra sections allocated during the last collection */
	sgen_gray_object_queue_trim_free_list (queue);

	SGEN_ASSERT (0, !last_gray_queue_free_list, "Are we disposing two gray queues one after the other?");
	last_gray_queue_free_list = queue->free_list;

	/* Just to make sure */
	memset (queue, 0, sizeof (SgenGrayQueue));
}

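/*
 * Free every section remaining on the queue's free list.  The queue itself
 * must already be empty.
 */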
void
sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
{
	g_assert (!queue->first);
	while (queue->free_list) {
		GrayQueueSection *next = queue->free_list->next;
		STATE_TRANSITION (queue->free_list, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (queue->free_list);
		queue->free_list = next;
	}
}

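/*
 * Locking helpers for SgenSectionGrayQueue.  They are no-ops for queues that
 * were initialized without locking.
 */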
static void
lock_section_queue (SgenSectionGrayQueue *queue)
{
	if (!queue->locked)
		return;

	mono_os_mutex_lock (&queue->lock);
}

static void
unlock_section_queue (SgenSectionGrayQueue *queue)
{
	if (!queue->locked)
		return;

	mono_os_mutex_unlock (&queue->lock);
}

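/*
 * Initialize a section gray queue, which holds whole sections rather than
 * individual entries, optionally protecting it with a recursive mutex.
 */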
void
sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked, GrayQueueEnqueueCheckFunc enqueue_check_func)
{
	g_assert (sgen_section_gray_queue_is_empty (queue));

	queue->locked = locked;
	if (locked) {
		mono_os_mutex_init_recursive (&queue->lock);
	}

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	queue->enqueue_check_func = enqueue_check_func;
#endif
}

gboolean
sgen_section_gray_queue_is_empty (SgenSectionGrayQueue *queue)
{
	return !queue->first;
}

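/*
 * Pop the first section off the section gray queue, or return NULL if the
 * queue is empty.
 */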
GrayQueueSection*
sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue)
{
	GrayQueueSection *section;

	lock_section_queue (queue);

	if (queue->first) {
		section = queue->first;
		queue->first = section->next;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

		section->next = NULL;
	} else {
		section = NULL;
	}

	unlock_section_queue (queue);

	return section;
}

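/*
 * Push a floating section onto the front of the section gray queue.
 */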
void
sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section)
{
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	lock_section_queue (queue);

	section->next = queue->first;
	queue->first = section;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif

	unlock_section_queue (queue);
}

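/*
 * Register the heavy-statistics counters for the gray queues.  Only has an
 * effect when HEAVY_STATISTICS is defined.
 */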
void
sgen_init_gray_queues (void)
{
#ifdef HEAVY_STATISTICS
	mono_counters_register ("Gray Queue alloc section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_alloc);
	mono_counters_register ("Gray Queue free section", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_section_free);
	mono_counters_register ("Gray Queue enqueue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_fast_path);
	mono_counters_register ("Gray Queue dequeue fast path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_fast_path);
	mono_counters_register ("Gray Queue enqueue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_enqueue_slow_path);
	mono_counters_register ("Gray Queue dequeue slow path", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_gray_queue_dequeue_slow_path);
#endif
}
#endif