/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
   (dberlin@dberlin.org).  Rewritten by Daniel Jacobowitz
   <dan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdefs when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error Zone collector requires mmap
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif

/* FUTURE NOTES:

   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */
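
/* For illustration, here is a sketch of one small page under this
   scheme (unit sizes assume MAX_ALIGNMENT is 8; the real values are
   host-dependent):

       data:       | obj A (24 bytes) | free (8) | obj B (16) | ...
       alloc bits:   1  0  0            1          1  0         ...

   A set bit marks the first BYTES_PER_ALLOC_BIT unit of every chunk,
   allocated or free; a chunk's size is the distance from its bit to
   the next set bit.  */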

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE	4096
#define GGC_PAGE_MASK	(GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT	12

#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE	G.pagesize
#define GGC_PAGE_MASK	G.page_mask
#define GGC_PAGE_SHIFT	G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS		64
#define FREE_BIN_DELTA		MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
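
/* A worked example, assuming MAX_ALIGNMENT (and therefore
   FREE_BIN_DELTA) is 8 on this host:

     SIZE_BIN_DOWN (8)   == 1   -- bin 1 holds 8-byte chunks
     SIZE_BIN_DOWN (24)  == 3   -- bin 3 holds 24-byte chunks
     SIZE_BIN_DOWN (520) == 65  -- greater than NUM_FREE_BINS, so such
                                   chunks live in the "other" bin 0.  */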

/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT	MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT	BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD	(8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
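
/* For example, with 8-byte mark units (a typical MAX_ALIGNMENT) and a
   4-byte mark_type, one mark word covers 8 * 8 * 4 == 256 bytes of
   page data, so a 4096-byte page needs 16 mark words.  */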

/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
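
/* A minimal standalone sketch of the offsetof trick above, not built
   as part of GCC; the value in the comment assumes a host where the
   widest union member needs 8-byte alignment.  */
#if 0
#include <stddef.h>
#include <stdio.h>

struct max_alignment_example {
  char c;				/* Offset 0.  */
  union { long long i; double d; } u;	/* Padded to the union's alignment.  */
};

int
main (void)
{
  /* The compiler pads after C up to the union's alignment, so the
     union's offset *is* the maximum alignment: typically 8 here.  */
  printf ("MAX_ALIGNMENT = %lu\n",
	  (unsigned long) offsetof (struct max_alignment_example, u));

  /* ROUND_UP behaves as CEIL (x, f) * f: ROUND_UP (10, 8) == 16.  */
  return 0;
}
#endif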

/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)
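
/* Note that ffs is 1-based and returns 0 for a zero argument:
   alloc_ffs (0x01) == 1, alloc_ffs (0x08) == 4, alloc_ffs (0) == 0.
   The bitmap scans below rely on the zero case to detect words with
   no bits set.  */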

/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
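
/* A worked example with the settings above (GGC_PAGE_SHIFT == 12,
   PAGE_L1_BITS == 8, hence PAGE_L2_BITS == 12).  For the 32-bit
   address 0xdeadb000:

     LOOKUP_L1 (0xdeadb000) == 0xde   (bits 31-24)
     LOOKUP_L2 (0xdeadb000) == 0xadb  (bits 23-12)

   and the low 12 bits are the offset within the page.  */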

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The stream used for debugging output.  */
  FILE *debug_file;
} G;

/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slot N, for 1 <= N <= NUM_FREE_BINS,
     holds chunks of size N * FREE_BIN_DELTA.  All other chunks are in
     slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;

#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
	  % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
	  % (8 * sizeof (mark_type)));
}
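
/* For example, with 8-byte alloc/mark units and 32-bit bitmap words,
   an object at page offset 0x918 (byte 2328) is unit 2328 / 8 == 291,
   which lives in word 291 / 32 == 9, bit 291 % 32 == 3 of the page's
   bitmaps.  */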

/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
			     const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
		    size_t start_word, size_t start_bit,
		    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
	/* indx is 1-based.  We started at the bit after the object's
	   start, but we also ended at the bit after the object's end.
	   It cancels out.  */
	return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
	return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
	return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}
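
/* As an example of the scan above: if an object starts the page and
   the allocation bitmap reads 1 0 0 1 ... in unit order, the caller
   starts the scan one unit past the object's start, the first set bit
   is found at unit 3, and the object's size is therefore
   3 * BYTES_PER_ALLOC_BIT.  */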

/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
		       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
		     - (char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
			     max_size);
}

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
	= (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
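      /* E.g. with a 4096-byte GGC page and 256-byte mark words this
	 rounds up to 16 mark words per page.  */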
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
					       mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
	{
	  page->mark_bits = cur_marks;
	  cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
	  n++;
#endif
	}
#ifdef ENABLE_CHECKING
      gcc_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
			       CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
	free (zone->mark_bits);
	zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
	{
	  e = xcalloc (1, G.small_page_overhead);
	  e->common.page = page + (i << GGC_PAGE_SHIFT);
	  e->common.zone = zone;
	  e->next = f;
	  f = e;
	  set_page_table_entry (e->common.page, &e->common);
	}

      zone->free_pages = f;

      entry = xcalloc (1, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = xmalloc (needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + size - 1);

  return entry;
}


/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->common.page,
					    SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}

/* Release a large page that is no longer needed.  */

static inline void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + entry->bytes - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
	{
	  next = p->next;
	  len += SMALL_PAGE_SIZE;
	  set_page_table_entry (p->common.page, NULL);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk),
						size - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk *),
						size - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *) chunk);
}

/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
		     MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
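  /* The expression above rounds SIZE up to the next multiple of
     MAX_ALIGNMENT; e.g. with MAX_ALIGNMENT == 8, a 13-byte request
     becomes (13 + 7) & -8 == 16.  */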

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
	{
	  zone->cached_free += size;
	  zone_set_object_alloc_bit (zone->cached_free);
	}

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
	 and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
	 == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
	 may now be wrong, if this was the last chunk in the high bin.
	 Rather than fixing it up now, wait until we need to search
	 the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
	 the watermark.  */
      while (zone->high_free_bin > bin
	     && zone->free_chunks[zone->high_free_bin] == NULL)
	zone->high_free_bin--;

      if (zone->high_free_bin > bin)
	{
	  size_t tbin = zone->high_free_bin;
	  chunk = zone->free_chunks[tbin];

	  /* Remove the chunk from its previous bin.  */
	  zone->free_chunks[tbin] = chunk->next_free;

	  result = (char *) chunk;

	  /* Save the rest of the chunk for future allocation.  */
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

	  /* Mark the new free chunk as an object, so that we can
	     find the size of the newly allocated object.  */
	  zone_set_object_alloc_bit (chunk);

	  /* HIGH_FREE_BIN may now be wrong, if this was the last
	     chunk in the high bin.  Rather than fixing it up now,
	     wait until we need to search the free bins.  */

	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
	 left over.  */
      csize = chunk->size;
      if (csize > size)
	{
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = csize - size;

	  /* Mark the new free chunk as an object.  */
	  zone_set_object_alloc_bit (chunk);
	}

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
	zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
	free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
					    size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
	zone->stats.total_overhead_under32 += overhead;
	zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
	zone->stats.total_overhead_under64 += overhead;
	zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
	zone->stats.total_overhead_under128 += overhead;
	zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
	     (unsigned long) size, result);

  return result;
}

/* Allocate SIZE bytes of memory whose type is GTE, in the zone
   appropriate for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
		      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE) \
  memset ((PTR), 0xa5, (SIZE))
#else
#define poison_region(PTR, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
	large_page->prev->next = large_page->next;
      else
	{
	  gcc_assert (large_page->common.zone->large_pages == large_page);
	  large_page->common.zone->large_pages = large_page->next;
	}
      if (large_page->next)
	large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
	 since we are likely to want a chunk of this size again.  */
      free_chunk (p, size, page->zone);
    }
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
	return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      if (large_page->mark_p)
	return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
	  & (1 << zone_get_object_mark_bit (p)))
	return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
	|= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
		   & (1 << zone_get_object_mark_bit (p)));
    }
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
				 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *) page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}

/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
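  /* For example, with 4096-byte system pages this maps 16 GGC pages
     per call to alloc_anon; with 64K system pages it maps 256.  */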

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	gcc_assert (!((size_t) p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
	/* We have a good page, might as well hold onto it...  */
	e = xcalloc (1, G.small_page_overhead);
	e->common.page = p;
	e->common.zone = &main_zone;
	e->next = main_zone.free_pages;
	set_page_table_entry (e->common.page, &e->common);
	main_zone.free_pages = e;
      }
    else
      {
	munmap (p, G.pagesize);
      }
  }
#endif
}

/* Start a new GGC zone.  */

static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char *name)
{
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}

struct alloc_zone *
new_ggc_zone (const char *name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_ggc_zone_1 (new_zone, name);
  return new_zone;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */
    continue;

  /* We should have found the zone in the list.  Anything else is fatal.  */
  gcc_assert (z);

  /* z is dead, baby. z is dead.  */
  z->dead = true;
}

/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all-or-nothing affairs.  Either they are completely
     empty, or they are completely full.  */
1614   lpp = &zone->large_pages;
1615   for (lp = zone->large_pages; lp != NULL; lp = lnext)
1616     {
1617       gcc_assert (lp->common.large_p);
1618 
1619       lnext = lp->next;
1620 
1621 #ifdef GATHER_STATISTICS
1622       /* This page has now survived another collection.  */
1623       lp->common.survived++;
1624 #endif
1625 
1626       if (lp->mark_p)
1627 	{
1628 	  lp->mark_p = false;
1629 	  allocated += lp->bytes;
1630 	  lpp = &lp->next;
1631 	}
1632       else
1633 	{
1634 	  *lpp = lnext;
1635 #ifdef ENABLE_GC_CHECKING
1636 	  /* Poison the page.  */
1637 	  memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
1638 #endif
1639 	  if (lp->prev)
1640 	    lp->prev->next = lp->next;
1641 	  if (lp->next)
1642 	    lp->next->prev = lp->prev;
1643 	  free_large_page (lp);
1644 	}
1645     }
1646 
1647   spp = &zone->pages;
1648   for (sp = zone->pages; sp != NULL; sp = snext)
1649     {
1650       char *object, *last_object;
1651       char *end;
1652       alloc_type *alloc_word_p;
1653       mark_type *mark_word_p;
1654 
1655       gcc_assert (!sp->common.large_p);
1656 
1657       snext = sp->next;
1658 
1659 #ifdef GATHER_STATISTICS
1660       /* This page has now survived another collection.  */
1661       sp->common.survived++;
1662 #endif
1663 
1664       /* Step through all chunks, consolidate those that are free and
1665 	 insert them into the free lists.  Note that consolidation
1666 	 slows down collection slightly.  */
1667 
1668       last_object = object = sp->common.page;
1669       end = sp->common.page + SMALL_PAGE_SIZE;
1670       last_free = NULL;
1671       nomarksinpage = true;
1672       mark_word_p = sp->mark_bits;
1673       alloc_word_p = sp->alloc_bits;
1674 
1675       gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
1676 
1677       object = sp->common.page;
1678       do
1679 	{
1680 	  unsigned int i, n;
1681 	  alloc_type alloc_word;
1682 	  mark_type mark_word;
1683 
1684 	  alloc_word = *alloc_word_p++;
1685 	  mark_word = *mark_word_p++;
1686 
1687 	  if (mark_word)
1688 	    nomarksinpage = false;
1689 
1690 	  /* There ought to be some way to do this without looping...  */
1691 	  i = 0;
1692 	  while ((n = alloc_ffs (alloc_word)) != 0)
1693 	    {
1694 	      /* Extend the current state for n - 1 bits.  We can't
1695 		 shift alloc_word by n, even though it isn't used in the
1696 		 loop, in case only the highest bit was set.  */
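	      /* For example, if alloc_word ends in binary 100100,
		 alloc_ffs returns 3: the shift by n - 1 = 2 brings
		 the set bit to position 0, its mark bit is tested,
		 and the final shift by 1 below steps past it, leaving
		 ...100 for the next iteration (with i advanced by 3).  */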
1697 	      alloc_word >>= n - 1;
1698 	      mark_word >>= n - 1;
1699 	      object += BYTES_PER_MARK_BIT * (n - 1);
1700 
1701 	      if (mark_word & 1)
1702 		{
1703 		  if (last_free)
1704 		    {
1705 		      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
1706 								object
1707 								- last_free));
1708 		      poison_region (last_free, object - last_free);
1709 		      free_chunk (last_free, object - last_free, zone);
1710 		      last_free = NULL;
1711 		    }
1712 		  else
1713 		    allocated += object - last_object;
1714 		  last_object = object;
1715 		}
1716 	      else
1717 		{
1718 		  if (last_free == NULL)
1719 		    {
1720 		      last_free = object;
1721 		      allocated += object - last_object;
1722 		    }
1723 		  else
1724 		    zone_clear_object_alloc_bit (sp, object);
1725 		}
1726 
1727 	      /* Shift to just after the alloc bit we handled.  */
1728 	      alloc_word >>= 1;
1729 	      mark_word >>= 1;
1730 	      object += BYTES_PER_MARK_BIT;
1731 
1732 	      i += n;
1733 	    }
1734 
1735 	  object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
1736 	}
1737       while (object < end);
1738 
1739       if (nomarksinpage)
1740 	{
1741 	  *spp = snext;
1742 #ifdef ENABLE_GC_CHECKING
1743 	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (sp->common.page, SMALL_PAGE_SIZE));
1744 	  /* Poison the page.  */
1745 	  memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
1746 #endif
1747 	  free_small_page (sp);
1748 	  continue;
1749 	}
1750       else if (last_free)
1751 	{
1752 	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
1753 						    object - last_free));
1754 	  poison_region (last_free, object - last_free);
1755 	  free_chunk (last_free, object - last_free, zone);
1756 	}
1757       else
1758 	allocated += object - last_object;
1759 
1760       spp = &sp->next;
1761     }
1762 
1763   zone->allocated = allocated;
1764 }
1765 
1766 /* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
1767    is true if we need to mark before sweeping, false if some other
1768    zone collection has already performed marking for us.  Returns true
1769    if we collected, false otherwise.  */
1770 
1771 static bool
1772 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1773 {
1774 #if 0
1775   /* Dump the population and total size of each free-chunk bin,
     for debugging.  */
1776   {
1777     int i;
1778     for (i = 0; i < NUM_FREE_BINS + 1; i++)
1779       {
1780 	struct alloc_chunk *chunk;
1781 	int n, tot;
1782 
1783 	n = 0;
1784 	tot = 0;
1785 	chunk = zone->free_chunks[i];
1786 	while (chunk)
1787 	  {
1788 	    n++;
1789 	    tot += chunk->size;
1790 	    chunk = chunk->next_free;
1791 	  }
1792 	fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1793 		 i, n, tot);
1794       }
1795   }
1796   /* */
1797 #endif
1798 
1799   if (!quiet_flag)
1800     fprintf (stderr, " {%s GC %luk -> ",
1801 	     zone->name, (unsigned long) zone->allocated / 1024);
1802 
1803   /* Zero the total allocated bytes.  This will be recalculated in the
1804      sweep phase.  */
1805   zone->allocated = 0;
1806 
1807   /* Release the pages we freed the last time we collected, but didn't
1808      reuse in the interim.  */
1809   release_pages (zone);
1810 
1811   if (need_marking)
1812     {
1813       zone_allocate_marks ();
1814       ggc_mark_roots ();
1815 #ifdef GATHER_STATISTICS
1816       ggc_prune_overhead_list ();
1817 #endif
1818     }
1819 
1820   sweep_pages (zone);
1821   zone->was_collected = true;
1822   zone->allocated_last_gc = zone->allocated;
1823 
1824   if (!quiet_flag)
1825     fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1826   return true;
1827 }
1828 
1829 #ifdef GATHER_STATISTICS
1830 /* Calculate the average page survival rate in terms of number of
1831    collections.  */
1832 
1833 static float
1834 calculate_average_page_survival (struct alloc_zone *zone)
1835 {
1836   float count = 0.0;
1837   float survival = 0.0;
1838   struct small_page_entry *p;
1839   struct large_page_entry *lp;
1840   for (p = zone->pages; p; p = p->next)
1841     {
1842       count += 1.0;
1843       survival += p->common.survived;
1844     }
1845   for (lp = zone->large_pages; lp; lp = lp->next)
1846     {
1847       count += 1.0;
1848       survival += lp->common.survived;
1849     }
1850   return survival / count;
1851 }
1852 #endif
1853 
1854 /* Top level collection routine.  */
1855 
1856 void
1857 ggc_collect (void)
1858 {
1859   struct alloc_zone *zone;
1860   bool marked = false;
1861 
1862   timevar_push (TV_GC);
1863 
1864   if (!ggc_force_collect)
1865     {
1866       float allocated_last_gc = 0, allocated = 0, min_expand;
1867 
1868       for (zone = G.zones; zone; zone = zone->next_zone)
1869 	{
1870 	  allocated_last_gc += zone->allocated_last_gc;
1871 	  allocated += zone->allocated;
1872 	}
1873 
1874       allocated_last_gc =
1875 	MAX (allocated_last_gc,
1876 	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1877       min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
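      /* For instance, if the zones held 8 MB when the last collection
	 finished and the parameters are GGC_MIN_EXPAND = 30 and
	 GGC_MIN_HEAPSIZE = 4096 (example values only), min_expand is
	 2.4 MB and no collection happens until allocation exceeds
	 10.4 MB.  */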
1878 
1879       if (allocated < allocated_last_gc + min_expand)
1880 	{
1881 	  timevar_pop (TV_GC);
1882 	  return;
1883 	}
1884     }
1885 
1886   /* Start by possibly collecting the main zone.  */
1887   main_zone.was_collected = false;
1888   marked |= ggc_collect_1 (&main_zone, true);
1889 
1890   /* In order to keep the number of collections down, we don't
1891      collect other zones unless we are collecting the main zone.  This
1892      gives us roughly the same number of collections as we used to
1893      have with the old gc.  The number of collections is important
1894      because our main slowdown (according to profiling) is now in
1895      marking.  So if we mark twice as often as we used to, we'll be
1896      twice as slow.  Hopefully we'll avoid this cost when we mark
1897      zone-at-a-time.  */
1898   /* NOTE drow/2004-07-28: We now always collect the main zone, but
1899      keep this code in case the heuristics are further refined.  */
1900 
1901   if (main_zone.was_collected)
1902     {
1903       struct alloc_zone *zone;
1904 
1905       for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1906 	{
1907 	  zone->was_collected = false;
1908 	  marked |= ggc_collect_1 (zone, !marked);
1909 	}
1910     }
1911 
1912 #ifdef GATHER_STATISTICS
1913   /* Print page survival stats, if someone wants them.  */
1914   if (GGC_DEBUG_LEVEL >= 2)
1915     {
1916       for (zone = G.zones; zone; zone = zone->next_zone)
1917 	{
1918 	  if (zone->was_collected)
1919 	    {
1920 	      float f = calculate_average_page_survival (zone);
1921 	      printf ("Average page survival in zone `%s' is %f\n",
1922 		      zone->name, f);
1923 	    }
1924 	}
1925     }
1926 #endif
1927 
1928   if (marked)
1929     zone_free_marks ();
1930 
1931   /* Free dead zones.  */
1932   for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
1933     {
1934       if (zone->next_zone->dead)
1935 	{
1936 	  struct alloc_zone *dead_zone = zone->next_zone;
1937 
1938 	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
1939 
1940 	  /* The zone must be empty.  */
1941 	  gcc_assert (!dead_zone->allocated);
1942 
1943 	  /* Unchain the dead zone, release all its pages and free it.  */
1944 	  zone->next_zone = zone->next_zone->next_zone;
1945 	  release_pages (dead_zone);
1946 	  free (dead_zone);
1947 	}
1948     }
1949 
1950   timevar_pop (TV_GC);
1951 }
1952 
1953 /* Print allocation statistics.  */
1954 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1955 		  ? (x) \
1956 		  : ((x) < 1024*1024*10 \
1957 		     ? (x) / 1024 \
1958 		     : (x) / (1024*1024))))
1959 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
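/* The two macros are used together: a quantity below 10k is printed
   in bytes with a blank suffix, below 10M in kilobytes with 'k', and
   anything larger in megabytes with 'M'.  For example, for x of
   123456, SCALE (x) is 120 and LABEL (x) is 'k'.  */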
1960 
1961 void
1962 ggc_print_statistics (void)
1963 {
1964   struct alloc_zone *zone;
1965   struct ggc_statistics stats;
1966   size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
1967   size_t pte_overhead, i;
1968 
1969   /* Clear the statistics.  */
1970   memset (&stats, 0, sizeof (stats));
1971 
1972   /* Make sure collection will really occur.  */
1973   ggc_force_collect = true;
1974 
1975   /* Collect and print the statistics common across collectors.  */
1976   ggc_print_common_statistics (stderr, &stats);
1977 
1978   ggc_force_collect = false;
1979 
1980   /* Release free pages so that we will not count the bytes allocated
1981      there as part of the total allocated memory.  */
1982   for (zone = G.zones; zone; zone = zone->next_zone)
1983     release_pages (zone);
1984 
1985   /* Collect some information about the various sizes of
1986      allocation.  */
1987   fprintf (stderr,
1988            "Memory still allocated at the end of the compilation process\n");
1989 
1990   fprintf (stderr, "%20s %10s  %10s  %10s\n",
1991 	   "Zone", "Allocated", "Used", "Overhead");
1992   for (zone = G.zones; zone; zone = zone->next_zone)
1993     {
1994       struct large_page_entry *large_page;
1995       size_t overhead, allocated, in_use;
1996 
1997       /* Skip empty zones.  */
1998       if (!zone->pages && !zone->large_pages)
1999 	continue;
2000 
2001       allocated = in_use = 0;
2002 
2003       overhead = sizeof (struct alloc_zone);
2004 
2005       for (large_page = zone->large_pages; large_page != NULL;
2006 	   large_page = large_page->next)
2007 	{
2008 	  allocated += large_page->bytes;
2009 	  in_use += large_page->bytes;
2010 	  overhead += sizeof (struct large_page_entry);
2011 	}
2012 
2013       /* There's no easy way to walk through the small pages finding
2014 	 used and unused objects.  Instead, add all the pages, and
2015 	 subtract out the free list.  */
2016 
2017       allocated += GGC_PAGE_SIZE * zone->n_small_pages;
2018       in_use += GGC_PAGE_SIZE * zone->n_small_pages;
2019       overhead += G.small_page_overhead * zone->n_small_pages;
2020 
2021       for (i = 0; i <= NUM_FREE_BINS; i++)
2022 	{
2023 	  struct alloc_chunk *chunk = zone->free_chunks[i];
2024 	  while (chunk)
2025 	    {
2026 	      in_use -= ggc_get_size (chunk);
2027 	      chunk = chunk->next_free;
2028 	    }
2029 	}
2030 
2031       fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
2032 	       zone->name,
2033 	       SCALE (allocated), LABEL (allocated),
2034 	       SCALE (in_use), LABEL (in_use),
2035 	       SCALE (overhead), LABEL (overhead));
2036 
2037       gcc_assert (in_use == zone->allocated);
2038 
2039       total_overhead += overhead;
2040       total_allocated += zone->allocated;
2041       total_bytes_mapped += zone->bytes_mapped;
2042     }
2043 
2044   /* Count the size of the page table as best we can.  */
2045 #if HOST_BITS_PER_PTR <= 32
2046   pte_overhead = sizeof (G.lookup);
2047   for (i = 0; i < PAGE_L1_SIZE; i++)
2048     if (G.lookup[i])
2049       pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2050 #else
2051   {
2052     page_table table = G.lookup;
2053     pte_overhead = 0;
2054     while (table)
2055       {
2056 	pte_overhead += sizeof (*table);
2057 	for (i = 0; i < PAGE_L1_SIZE; i++)
2058 	  if (table->table[i])
2059 	    pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2060 	table = table->next;
2061       }
2062   }
2063 #endif
2064   fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
2065 	   "", "", SCALE (pte_overhead), LABEL (pte_overhead));
2066   total_overhead += pte_overhead;
2067 
2068   fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
2069 	   SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
2070 	   SCALE (total_allocated), LABEL(total_allocated),
2071 	   SCALE (total_overhead), LABEL (total_overhead));
2072 
2073 #ifdef GATHER_STATISTICS
2074   {
2075     unsigned long long all_overhead = 0, all_allocated = 0;
2076     unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
2077     unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
2078     unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
2079 
2080     fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2081 
2082     for (zone = G.zones; zone; zone = zone->next_zone)
2083       {
2084 	all_overhead += zone->stats.total_overhead;
2085 	all_allocated += zone->stats.total_allocated;
2086 
2087 	all_allocated_under32 += zone->stats.total_allocated_under32;
2088 	all_overhead_under32 += zone->stats.total_overhead_under32;
2089 
2090 	all_allocated_under64 += zone->stats.total_allocated_under64;
2091 	all_overhead_under64 += zone->stats.total_overhead_under64;
2092 
2093 	all_allocated_under128 += zone->stats.total_allocated_under128;
2094 	all_overhead_under128 += zone->stats.total_overhead_under128;
2095 
2096 	fprintf (stderr, "%20s:                  %10lld\n",
2097 		 zone->name, zone->stats.total_allocated);
2098       }
2099 
2100     fprintf (stderr, "\n");
2101 
2102     fprintf (stderr, "Total Overhead:                        %10lld\n",
2103              all_overhead);
2104     fprintf (stderr, "Total Allocated:                       %10lld\n",
2105              all_allocated);
2106 
2107     fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
2108              all_overhead_under32);
2109     fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
2110              all_allocated_under32);
2111     fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
2112              all_overhead_under64);
2113     fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
2114              all_allocated_under64);
2115     fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
2116              all_overhead_under128);
2117     fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
2118              all_allocated_under128);
2119   }
2120 #endif
2121 }
2122 
2123 /* Precompiled header support.  */
2124 
2125 /* For precompiled headers, we sort objects based on their type.  We
2126    also sort various objects into their own buckets; currently this
2127    covers strings and IDENTIFIER_NODE trees.  The choices of how
2128    to sort buckets have not yet been tuned.  */
2129 
2130 #define NUM_PCH_BUCKETS		(gt_types_enum_last + 3)
2131 
2132 #define OTHER_BUCKET		(gt_types_enum_last + 0)
2133 #define IDENTIFIER_BUCKET	(gt_types_enum_last + 1)
2134 #define STRING_BUCKET		(gt_types_enum_last + 2)
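
/* The buckets are thus laid out as one bucket per known GTY type
   (indices 0 through gt_types_enum_last - 1) followed by the three
   special buckets above, for NUM_PCH_BUCKETS in all.  */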
2135 
2136 struct ggc_pch_ondisk
2137 {
2138   size_t total;
2139   size_t type_totals[NUM_PCH_BUCKETS];
2140 };
2141 
2142 struct ggc_pch_data
2143 {
2144   struct ggc_pch_ondisk d;
2145   size_t base;
2146   size_t orig_base;
2147   size_t alloc_size;
2148   alloc_type *alloc_bits;
2149   size_t type_bases[NUM_PCH_BUCKETS];
2150   size_t start_offset;
2151 };
2152 
2153 /* Initialize the PCH data structure.  */
2154 
2155 struct ggc_pch_data *
2156 init_ggc_pch (void)
2157 {
2158   return xcalloc (sizeof (struct ggc_pch_data), 1);
2159 }
2160 
2161 /* Return which of the page-aligned buckets the object at X, with type
2162    TYPE, should be sorted into in the PCH.  Strings will have
2163    IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
2164    of unknown type will also have TYPE equal to gt_types_enum_last.  */
2165 
2166 static int
2167 pch_bucket (void *x, enum gt_types_enum type,
2168 	    bool is_string)
2169 {
2170   /* Sort identifiers into their own bucket, to improve locality
2171      when searching the identifier hash table.  */
2172   if (type == gt_ggc_e_14lang_tree_node
2173       && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2174     return IDENTIFIER_BUCKET;
2175   else if (type == gt_types_enum_last)
2176     {
2177       if (is_string)
2178 	return STRING_BUCKET;
2179       return OTHER_BUCKET;
2180     }
2181   return type;
2182 }
2183 
2184 /* Add the size of object X to the size of the PCH data.  */
2185 
2186 void
2187 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2188 		      size_t size, bool is_string, enum gt_types_enum type)
2189 {
2190   /* NOTE: Right now we don't need to align up the size of any objects.
2191      Strings can be unaligned, and everything else is allocated to a
2192      MAX_ALIGNMENT boundary already.  */
2193 
2194   d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2195 }
2196 
2197 /* Return the total size of the PCH data.  */
2198 
2199 size_t
2200 ggc_pch_total_size (struct ggc_pch_data *d)
2201 {
2202   enum gt_types_enum i;
2203   size_t alloc_size, total_size;
2204 
2205   total_size = 0;
2206   for (i = 0; i < NUM_PCH_BUCKETS; i++)
2207     {
2208       d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2209       total_size += d->d.type_totals[i];
2210     }
2211   d->d.total = total_size;
2212 
2213   /* Include the size of the allocation bitmap.  */
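  /* One allocation bit covers BYTES_PER_ALLOC_BIT bytes of object
     data, so the bitmap needs CEIL (total, BYTES_PER_ALLOC_BIT * 8)
     bytes, padded out to MAX_ALIGNMENT.  */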
2214   alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2215   alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2216   d->alloc_size = alloc_size;
2217 
2218   return d->d.total + alloc_size;
2219 }
2220 
2221 /* Set the base address for the objects in the PCH file.  */
2222 
2223 void
2224 ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2225 {
2226   int i;
2227   size_t base = (size_t) base_;
2228 
2229   d->base = d->orig_base = base;
2230   for (i = 0; i < NUM_PCH_BUCKETS; i++)
2231     {
2232       d->type_bases[i] = base;
2233       base += d->d.type_totals[i];
2234     }
2235 
2236   if (d->alloc_bits == NULL)
2237     d->alloc_bits = xcalloc (1, d->alloc_size);
2238 }
2239 
2240 /* Allocate a place for object X of size SIZE in the PCH file.  */
2241 
2242 char *
2243 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2244 		      size_t size, bool is_string,
2245 		      enum gt_types_enum type)
2246 {
2247   size_t alloc_word, alloc_bit;
2248   char *result;
2249   int bucket = pch_bucket (x, type, is_string);
2250 
2251   /* Record the start of the object in the allocation bitmap.  We
2252      can't assert that the allocation bit is previously clear, because
2253      strings may violate the invariant that they are at least
2254      BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
2255      should not be called for strings.  */
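  /* As an illustration, assuming 32-bit alloc_type words and a
     BYTES_PER_ALLOC_BIT of 4 (example values only): an object placed
     520 bytes past orig_base maps to bit 520 / 4 = 130, i.e. bit 2
     of word 4.  */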
2256   alloc_word = ((d->type_bases[bucket] - d->orig_base)
2257 		/ (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2258   alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2259 	       / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
2260   d->alloc_bits[alloc_word] |= 1L << alloc_bit;
2261 
2262   /* Place the object at the current pointer for this bucket.  */
2263   result = (char *) d->type_bases[bucket];
2264   d->type_bases[bucket] += size;
2265   return result;
2266 }
2267 
2268 /* Prepare to write out the PCH data to file F.  */
2269 
2270 void
2271 ggc_pch_prepare_write (struct ggc_pch_data *d,
2272 		       FILE *f)
2273 {
2274   /* We seek around a lot while writing.  Record where the end
2275      of the padding in the PCH file is, so that we can
2276      locate each object's offset.  */
2277   d->start_offset = ftell (f);
2278 }
2279 
2280 /* Write out object X of SIZE to file F.  */
2281 
2282 void
2283 ggc_pch_write_object (struct ggc_pch_data *d,
2284 		      FILE *f, void *x, void *newx,
2285 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2286 {
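  /* NEWX is the address the object will have once the PCH is mapped
     back in, as handed out by ggc_pch_alloc_object, so its position
     in the file is its displacement from the eventual base plus the
     offset recorded by ggc_pch_prepare_write.  */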
2287   if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2288     fatal_error ("can't seek PCH file: %m");
2289 
2290   if (fwrite (x, size, 1, f) != 1)
2291     fatal_error ("can't write PCH file: %m");
2292 }
2293 
2294 void
2295 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2296 {
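  /* The region written here is laid out as: the objects themselves
     (d->d.total bytes starting at start_offset), then the allocation
     bitmap, then this footer.  */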
2297   /* Write out the allocation bitmap.  */
2298   if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
2299     fatal_error ("can't seek PCH file: %m");
2300 
2301   if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
2302     fatal_error ("can't write PCH fle: %m");
2303 
2304   /* Done with the PCH, so write out our footer.  */
2305   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2306     fatal_error ("can't write PCH file: %m");
2307 
2308   free (d->alloc_bits);
2309   free (d);
2310 }
2311 
2312 /* The PCH file from F has been mapped at ADDR.  Read in any
2313    additional data from the file and set up the GC state.  */
2314 
2315 void
2316 ggc_pch_read (FILE *f, void *addr)
2317 {
2318   struct ggc_pch_ondisk d;
2319   size_t alloc_size;
2320   struct alloc_zone *zone;
2321   struct page_entry *pch_page;
2322   char *p;
2323 
2324   if (fread (&d, sizeof (d), 1, f) != 1)
2325     fatal_error ("can't read PCH file: %m");
2326 
2327   alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2328   alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2329 
2330   pch_zone.bytes = d.total;
2331   pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2332   pch_zone.page = (char *) addr;
2333   pch_zone.end = (char *) pch_zone.alloc_bits;
2334 
2335   /* We've just read in a PCH file.  So, every object that used to be
2336      allocated is now free.  */
2337   for (zone = G.zones; zone; zone = zone->next_zone)
2338     {
2339       struct small_page_entry *page, *next_page;
2340       struct large_page_entry *large_page, *next_large_page;
2341 
2342       zone->allocated = 0;
2343 
2344       /* Clear the zone's free chunk list.  */
2345       memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2346       zone->high_free_bin = 0;
2347       zone->cached_free = NULL;
2348       zone->cached_free_size = 0;
2349 
2350       /* Move all the small pages onto the free list.  */
2351       for (page = zone->pages; page != NULL; page = next_page)
2352 	{
2353 	  next_page = page->next;
2354 	  memset (page->alloc_bits, 0,
2355 		  G.small_page_overhead - PAGE_OVERHEAD);
2356 	  free_small_page (page);
2357 	}
2358 
2359       /* Discard all the large pages.  */
2360       for (large_page = zone->large_pages; large_page != NULL;
2361 	   large_page = next_large_page)
2362 	{
2363 	  next_large_page = large_page->next;
2364 	  free_large_page (large_page);
2365 	}
2366 
2367       zone->pages = NULL;
2368       zone->large_pages = NULL;
2369     }
2370 
2371   /* Allocate the dummy page entry for the PCH, and set all pages
2372      mapped into the PCH to reference it.  */
2373   pch_page = xcalloc (1, sizeof (struct page_entry));
2374   pch_page->page = pch_zone.page;
2375   pch_page->pch_p = true;
2376 
2377   for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
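  /* Point every page-table slot covering the mapped region at this
     single entry; its pch_p flag is how object lookups distinguish
     PCH storage from ordinary zone pages.  */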
2378     set_page_table_entry (p, pch_page);
2379 }
2380