/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#define INCLUDE_MALLOC_H
#include "system.h"
#include "coretypes.h"
#include "timevar.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
#include "hosthooks.h"
#include "plugin.h"
#include "options.h"

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);
/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}

/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

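  /* Zero out the deletable roots first; anything that was only reachable
     through them becomes garbage unless it is marked again below.  */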
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
			    MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid a handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

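/* Allocation callback with the two-argument (count, size) signature expected
   by hash table creation; the arguments are only checked to describe a single
   struct htab, which is then allocated cleared in GC-owned memory.  */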
void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

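/* Gather statistics common to all collectors: point ggc_stats at STATS,
   force a collection so the collector can fill it in, then stop gathering.  */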
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

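/* Hash a pointer by discarding its low-order bits, which are normally zero
   because of object alignment and would otherwise weaken the hash.  */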
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)

/* Helper for hashing saving_htab.  */

struct saving_hasher : free_ptr_hash <ptr_data>
{
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}

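/* Every object that will be written to the PCH, hashed by its address in the
   running compiler, together with the relocated addresses of any callback
   pointers noted while saving.  */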
static hash_table<saving_hasher> *saving_htab;
static vec<void *> callback_vec;

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}

/* Register the address of a callback pointer.  */
void
gt_pch_note_callback (void *obj, void *base)
{
  void *ptr;
  memcpy (&ptr, obj, sizeof (void *));
  if (ptr != NULL)
    {
      struct ptr_data *data
	= (struct ptr_data *)
	  saving_htab->find_with_hash (base, POINTER_HASH (base));
      gcc_assert (data);
      callback_vec.safe_push ((char *) data->new_addr
			      + ((char *) obj - (char *) base));
    }
}

/* Register a reorder function REORDER_FN for an object already noted in
   the hash table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	}
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
  /* If the host cannot supply any suitable address for this, we are stuck.  */
  if (mmi.preferred_base == NULL)
    fatal_error (input_location,
		 "cannot write PCH file: required memory segment unavailable");

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "cannot write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "cannot get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "cannot write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size, true);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that the first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

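  /* Record the current address of gt_pch_save itself; gt_pch_restore
     compares it with its own address to compute the executable's load bias
     and relocate the callback pointers saved below.  */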
  unsigned num_callbacks = callback_vec.length ();
  void (*pch_save) (FILE *) = &gt_pch_save;
  if (fwrite (&pch_save, sizeof (pch_save), 1, f) != 1
      || fwrite (&num_callbacks, sizeof (num_callbacks), 1, f) != 1
      || (num_callbacks
	  && fwrite (callback_vec.address (), sizeof (void *), num_callbacks,
		     f) != num_callbacks))
    fatal_error (input_location, "cannot write PCH file: %m");

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
  callback_vec.release ();
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;

  /* We are about to reload the line maps along with the rest of the PCH
     data, which means that the (loaded) ones cannot be guaranteed to be
     in any valid state for reporting diagnostics that happen during the
     load.  Save the current table (and use it during the loading process
     below).  */
  class line_maps *save_line_table = line_table;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "cannot read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  bool error_reading_pointers = false;
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  error_reading_pointers = true;

  /* Stash the newly read-in line table pointer - it does not point to
     anything meaningful yet, so swap the old one back in.  */
  class line_maps *new_line_table = line_table;
  line_table = save_line_table;
  if (error_reading_pointers)
    fatal_error (input_location, "cannot read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error (input_location, "cannot read PCH file: %m");

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);

  /* We could not mmap or otherwise allocate the required memory at the
     address needed.  */
  if (result < 0)
    {
      sorry_at (input_location, "PCH relocation is not yet supported");
      /* There is no point in continuing from here; we will only end up
	 with a crashed (most likely hanging) compiler.  */
      exit (-1);
    }

  /* (0) We allocated memory, but did not mmap the file, so we need to read
     the data in manually.  (>0) Otherwise the mmap succeeded at the address
     we wanted.  */
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	fatal_error (input_location, "cannot read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error (input_location, "cannot read PCH file: %m");

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();

  void (*pch_save) (FILE *);
  unsigned num_callbacks;
  if (fread (&pch_save, sizeof (pch_save), 1, f) != 1
      || fread (&num_callbacks, sizeof (num_callbacks), 1, f) != 1)
    fatal_error (input_location, "cannot read PCH file: %m");
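  /* If the executable was loaded at a different address than when the PCH
     was written (e.g. a PIE under address-space randomization), adjust each
     recorded callback pointer by the load bias computed from the old and new
     addresses of gt_pch_save.  */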
  if (pch_save != &gt_pch_save)
    {
      uintptr_t bias = (uintptr_t) &gt_pch_save - (uintptr_t) pch_save;
      void **ptrs = XNEWVEC (void *, num_callbacks);
      unsigned i;

      if (fread (ptrs, sizeof (void *), num_callbacks, f) != num_callbacks)
	fatal_error (input_location, "cannot read PCH file: %m");
      for (i = 0; i < num_callbacks; ++i)
	{
	  memcpy (&pch_save, ptrs[i], sizeof (pch_save));
	  pch_save = (void (*) (FILE *)) ((uintptr_t) pch_save + bias);
	  memcpy (ptrs[i], &pch_save, sizeof (pch_save));
	}
      XDELETE (ptrs);
    }
  else if (fseek (f, num_callbacks * sizeof (void *), SEEK_CUR) != 0)
    fatal_error (input_location, "cannot read PCH file: %m");

  /* Barring corruption of the PCH file, the restored line table should be
     complete and usable.  */
  line_table = new_line_table;
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * ONE_M)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= ONE_G;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= ONE_K; /* Convert to Kbytes.  */
  limit_kbytes /= ONE_K;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / ONE_K);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * ONE_K));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * ONE_K);
  phys_kbytes = MIN (phys_kbytes, 128 * ONE_K);

  return phys_kbytes;
}
#endif

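/* Install defaults for the GC growth parameters from the heuristics above;
   when GC checking or always-collect mode is enabled, the built-in parameter
   defaults are kept instead.  */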
void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  param_ggc_min_expand = ggc_min_expand_heuristic ();
  param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
#endif
}

/* GGC memory usage.  */
class ggc_usage: public mem_usage
{
public:
  /* Default constructor.  */
  ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
  /* Constructor.  */
  ggc_usage (size_t allocated, size_t times, size_t peak,
	     size_t freed, size_t collected, size_t overhead)
    : mem_usage (allocated, times, peak),
      m_freed (freed), m_collected (collected), m_overhead (overhead) {}

  /* Equality operator.  */
  inline bool
  operator== (const ggc_usage &second) const
  {
    return (get_balance () == second.get_balance ()
	    && m_peak == second.m_peak
	    && m_times == second.m_times);
  }

  /* Comparison operator.  */
  inline bool
  operator< (const ggc_usage &second) const
  {
    if (*this == second)
      return false;

    return (get_balance () == second.get_balance () ?
	    (m_peak == second.m_peak ? m_times < second.m_times
	     : m_peak < second.m_peak)
	    : get_balance () < second.get_balance ());
  }

  /* Register overhead of ALLOCATED and OVERHEAD bytes.  */
  inline void
  register_overhead (size_t allocated, size_t overhead)
  {
    m_allocated += allocated;
    m_overhead += overhead;
    m_times++;
  }

  /* Release overhead of SIZE bytes.  */
  inline void
  release_overhead (size_t size)
  {
    m_freed += size;
  }

  /* Sum the usage with SECOND usage.  */
  ggc_usage
  operator+ (const ggc_usage &second)
  {
    return ggc_usage (m_allocated + second.m_allocated,
		      m_times + second.m_times,
		      m_peak + second.m_peak,
		      m_freed + second.m_freed,
		      m_collected + second.m_collected,
		      m_overhead + second.m_overhead);
  }

  /* Dump usage with PREFIX, where TOTAL is sum of all rows.  */
  inline void
  dump (const char *prefix, ggc_usage &total) const
  {
    size_t balance = get_balance ();
    fprintf (stderr,
	     "%-48s " PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%"
	     PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%" PRsa (9) "\n",
	     prefix,
	     SIZE_AMOUNT (balance), get_percent (balance, total.get_balance ()),
	     SIZE_AMOUNT (m_collected),
	     get_percent (m_collected, total.m_collected),
	     SIZE_AMOUNT (m_freed), get_percent (m_freed, total.m_freed),
	     SIZE_AMOUNT (m_overhead),
	     get_percent (m_overhead, total.m_overhead),
	     SIZE_AMOUNT (m_times));
  }

  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void
  dump (mem_location *loc, ggc_usage &total) const
  {
    char *location_string = loc->to_string ();

    dump (location_string, total);

    free (location_string);
  }

  /* Dump footer.  */
  inline void
  dump_footer ()
  {
    dump ("Total", *this);
  }

  /* Get the balance, i.e. the GGC memory that has been allocated but not
     yet freed or collected.  */
  inline size_t
  get_balance () const
  {
    return m_allocated + m_overhead - m_collected - m_freed;
  }

  typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

  /* Compare wrapper used by qsort method.  */
  static int
  compare (const void *first, const void *second)
  {
    const mem_pair_t mem1 = *(const mem_pair_t *) first;
    const mem_pair_t mem2 = *(const mem_pair_t *) second;

    size_t balance1 = mem1.second->get_balance ();
    size_t balance2 = mem2.second->get_balance ();

    return balance1 == balance2 ? 0 : (balance1 < balance2 ? 1 : -1);
  }

  /* Dump header with NAME.  */
  static inline void
  dump_header (const char *name)
  {
    fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Leak", "Garbage",
	     "Freed", "Overhead", "Times");
  }

  /* Freed memory in bytes.  */
  size_t m_freed;
  /* Collected memory in bytes.  */
  size_t m_collected;
  /* Overhead memory in bytes.  */
  size_t m_overhead;
};

/* GCC memory description.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;

/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics ()
{
  if (! GATHER_STATISTICS)
    return;

  ggc_collect (GGC_COLLECT_FORCE);

  ggc_mem_desc.dump (GGC_ORIGIN);
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
{
  ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC_ORIGIN, false
						       FINAL_PASS_MEM_STAT);

  ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
  usage->register_overhead (allocated, overhead);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_mem_desc.release_object_overhead (ptr);
}

/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      {
	(*it).second.first->m_collected += (*it).second.second;
	ggc_mem_desc.m_reverse_object_map->remove ((*it).first);
      }
}

/* Print memory used by heap if this info is available.  */

void
report_heap_memory_use ()
{
#if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2)
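/* Prefer mallinfo2 where available: its fields are wide enough for heaps
   that overflow the int-sized counters of the older mallinfo.  */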
#ifdef HAVE_MALLINFO2
#define MALLINFO_FN mallinfo2
#else
#define MALLINFO_FN mallinfo
#endif
  if (!quiet_flag)
    fprintf (stderr, " {heap " PRsa (0) "}",
	     SIZE_AMOUNT (MALLINFO_FN ().arena));
#endif
}